Change log in v2:
- Boris: Add a preparatory patch to carve out the per-RDT-domain
initialization code into a separate function.
- Minor changes in the comments of rdtgroup_init_xxx().
- Boris: s/cpu/CPU/g in the comments of update_domains().
- Boris: Format commit message by indenting the examples.
Xiaochen Shen (2):
x86/resctrl: Move per RDT domain initialization to a separate function
x86/resctrl: Initialize a new resource group with default MBA values
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +-
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 171 +++++++++++++++++-------------
2 files changed, 100 insertions(+), 75 deletions(-)
--
1.8.3.1
Currently, when a new resource group is created, the allocation values
of the MBA resource are not initialized and contain meaningless data.
For example:
mkdir /sys/fs/resctrl/p1
cat /sys/fs/resctrl/p1/schemata
MB:0=100;1=100
echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
cat /sys/fs/resctrl/p1/schemata
MB:0= 10;1= 20
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0= 10;1= 20
Therefore, when the new group is created, it is reasonable to initialize
the MBA resource with default values.
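With this change, a newly created group is expected to show the MBA
defaults again. A sketch of the expected behavior, assuming the default
MBA bandwidth value of 100:
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0=100;1=100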
Initialize the MBA resource and cache resources in separate functions.
Signed-off-by: Xiaochen Shen <[email protected]>
Reviewed-by: Fenghua Yu <[email protected]>
Reviewed-by: Reinette Chatre <[email protected]>
---
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +--
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 52 +++++++++++++++++++------------
2 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990..89320c0 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this CPU if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other CPUs. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 9cb6a1d..44b6dbf 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2581,8 +2581,8 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
return 0;
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+/*
+ * Initialize cache resources with default values.
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
@@ -2591,33 +2591,45 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
* If there are no more shareable bits available on any domain then
* the entire allocation will fail.
*/
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+{
+ struct rdt_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &r->domains, list) {
+ ret = __init_one_rdt_domain(d, r, closid);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
- struct rdt_domain *d;
int ret;
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
if (ret < 0)
return ret;
}
- }
-
- for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
--
1.8.3.1
Carve out the per-rdt_domain initialization code from rdtgroup_init_alloc()
into a separate function.
No functional change; this preparatory patch makes the code more readable
and saves at least two indentation levels.
Signed-off-by: Xiaochen Shen <[email protected]>
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 131 ++++++++++++++++++---------------
1 file changed, 72 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 08e0333..9cb6a1d 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2516,28 +2516,86 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
+/*
+ * Initialize cache resources per RDT domain
+ *
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
+ */
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+ u32 closid)
+{
+ struct rdt_resource *r_cdp = NULL;
+ struct rdt_domain *d_cdp = NULL;
+ u32 used_b = 0, unused_b = 0;
+ unsigned long tmp_cbm;
+ enum rdtgrp_mode mode;
+ u32 peer_ctl, *ctrl;
+ int i;
+
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ /*
+ * If CDP is active include peer domain's
+ * usage to ensure there is no overlap
+ * with an exclusive group.
+ */
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure that
+ * bitmap_weight() does not access out-of-bound memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
/**
* rdtgroup_init_alloc - Initialize the new RDT group's allocations
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
+ * allocations.
*
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
*/
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
- struct rdt_resource *r_cdp = NULL;
- struct rdt_domain *d_cdp = NULL;
- u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
struct rdt_resource *r;
- unsigned long tmp_cbm;
- enum rdtgrp_mode mode;
struct rdt_domain *d;
- u32 peer_ctl, *ctrl;
- int i, ret;
+ int ret;
for_each_alloc_enabled_rdt_resource(r) {
/*
@@ -2547,54 +2605,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
if (r->rid == RDT_RESOURCE_MBA)
continue;
list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
- /*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
- */
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
}
}
--
1.8.3.1
Commit-ID: 8cea8808525eb3a36f3d54412843948f169d6a6e
Gitweb: https://git.kernel.org/tip/8cea8808525eb3a36f3d54412843948f169d6a6e
Author: Xiaochen Shen <[email protected]>
AuthorDate: Wed, 17 Apr 2019 19:08:48 +0800
Committer: Borislav Petkov <[email protected]>
CommitDate: Wed, 17 Apr 2019 21:03:38 +0200
x86/resctrl: Move per RDT domain initialization to a separate function
Carve out the per-rdt_domain initialization code from rdtgroup_init_alloc()
into a separate function.
No functional change; make the code more readable and save at least
two indentation levels.
Signed-off-by: Xiaochen Shen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: [email protected]
Cc: Reinette Chatre <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 131 ++++++++++++++++++---------------
1 file changed, 72 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 399601eda8e4..2f7e35849527 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2516,28 +2516,86 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
+/*
+ * Initialize cache resources per RDT domain
+ *
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
+ */
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+ u32 closid)
+{
+ struct rdt_resource *r_cdp = NULL;
+ struct rdt_domain *d_cdp = NULL;
+ u32 used_b = 0, unused_b = 0;
+ unsigned long tmp_cbm;
+ enum rdtgrp_mode mode;
+ u32 peer_ctl, *ctrl;
+ int i;
+
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ /*
+ * If CDP is active include peer domain's
+ * usage to ensure there is no overlap
+ * with an exclusive group.
+ */
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure that
+ * bitmap_weight() does not access out-of-bound memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
/**
* rdtgroup_init_alloc - Initialize the new RDT group's allocations
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
+ * allocations.
*
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
*/
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
- struct rdt_resource *r_cdp = NULL;
- struct rdt_domain *d_cdp = NULL;
- u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
struct rdt_resource *r;
- unsigned long tmp_cbm;
- enum rdtgrp_mode mode;
struct rdt_domain *d;
- u32 peer_ctl, *ctrl;
- int i, ret;
+ int ret;
for_each_alloc_enabled_rdt_resource(r) {
/*
@@ -2547,54 +2605,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
if (r->rid == RDT_RESOURCE_MBA)
continue;
list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
- /*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
- */
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
}
}
Commit-ID: 7b05c9c1fd39cb08017353a47e7ff14ad1a3b875
Gitweb: https://git.kernel.org/tip/7b05c9c1fd39cb08017353a47e7ff14ad1a3b875
Author: Xiaochen Shen <[email protected]>
AuthorDate: Wed, 17 Apr 2019 19:08:49 +0800
Committer: Borislav Petkov <[email protected]>
CommitDate: Wed, 17 Apr 2019 21:18:05 +0200
x86/resctrl: Initialize a new resource group with default MBA values
Currently, when a new resource group is created, the allocation values
of the MBA resource are not initialized and contain meaningless data.
For example:
mkdir /sys/fs/resctrl/p1
cat /sys/fs/resctrl/p1/schemata
MB:0=100;1=100
echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
cat /sys/fs/resctrl/p1/schemata
MB:0= 10;1= 20
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0= 10;1= 20
Therefore, when the new group is created, it is reasonable to initialize
the MBA resource with default values.
Initialize the MBA resource and cache resources in separate functions.
[ bp: Add newlines between code blocks for better readability. ]
Signed-off-by: Xiaochen Shen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Fenghua Yu <[email protected]>
Reviewed-by: Reinette Chatre <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: [email protected]
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +--
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 52 ++++++++++++++++++++-----------
2 files changed, 35 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990a2eb7..89320c0396b1 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this CPU if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other CPUs. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 2f7e35849527..b8d88a15007a 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2581,8 +2581,8 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
return 0;
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+/*
+ * Initialize cache resources with default values.
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
@@ -2591,38 +2591,52 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
* If there are no more shareable bits available on any domain then
* the entire allocation will fail.
*/
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+{
+ struct rdt_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &r->domains, list) {
+ ret = __init_one_rdt_domain(d, r, closid);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
- struct rdt_domain *d;
int ret;
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
if (ret < 0)
return ret;
}
- }
- for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
+
rdtgrp->mode = RDT_MODE_SHAREABLE;
}
Commit-ID: 7390619ab9ea9fd0ba9f4c3e4749ee20262cba7d
Gitweb: https://git.kernel.org/tip/7390619ab9ea9fd0ba9f4c3e4749ee20262cba7d
Author: Xiaochen Shen <[email protected]>
AuthorDate: Wed, 17 Apr 2019 19:08:48 +0800
Committer: Borislav Petkov <[email protected]>
CommitDate: Wed, 17 Apr 2019 23:59:56 +0200
x86/resctrl: Move per RDT domain initialization to a separate function
Carve out the per-rdt_domain initialization code from rdtgroup_init_alloc()
into a separate function.
No functional change; make the code more readable and save at least
two indentation levels.
Signed-off-by: Xiaochen Shen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: [email protected]
Cc: Reinette Chatre <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 131 ++++++++++++++++++---------------
1 file changed, 72 insertions(+), 59 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 85212a32b54d..36ace51ee705 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2516,28 +2516,86 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
+/*
+ * Initialize cache resources per RDT domain
+ *
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
+ */
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+ u32 closid)
+{
+ struct rdt_resource *r_cdp = NULL;
+ struct rdt_domain *d_cdp = NULL;
+ u32 used_b = 0, unused_b = 0;
+ unsigned long tmp_cbm;
+ enum rdtgrp_mode mode;
+ u32 peer_ctl, *ctrl;
+ int i;
+
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ /*
+ * If CDP is active include peer domain's
+ * usage to ensure there is no overlap
+ * with an exclusive group.
+ */
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure that
+ * bitmap_weight() does not access out-of-bound memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
/**
* rdtgroup_init_alloc - Initialize the new RDT group's allocations
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
+ * allocations.
*
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
*/
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
- struct rdt_resource *r_cdp = NULL;
- struct rdt_domain *d_cdp = NULL;
- u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
struct rdt_resource *r;
- unsigned long tmp_cbm;
- enum rdtgrp_mode mode;
struct rdt_domain *d;
- u32 peer_ctl, *ctrl;
- int i, ret;
+ int ret;
for_each_alloc_enabled_rdt_resource(r) {
/*
@@ -2547,54 +2605,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
if (r->rid == RDT_RESOURCE_MBA)
continue;
list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
- /*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
- */
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
}
}
Commit-ID: 47820e73f5b3a1fdb8ebd1219191edc96e0c85c1
Gitweb: https://git.kernel.org/tip/47820e73f5b3a1fdb8ebd1219191edc96e0c85c1
Author: Xiaochen Shen <[email protected]>
AuthorDate: Wed, 17 Apr 2019 19:08:49 +0800
Committer: Borislav Petkov <[email protected]>
CommitDate: Thu, 18 Apr 2019 00:06:31 +0200
x86/resctrl: Initialize a new resource group with default MBA values
Currently, when a new resource group is created, the allocation values
of the MBA resource are not initialized and contain meaningless data.
For example:
mkdir /sys/fs/resctrl/p1
cat /sys/fs/resctrl/p1/schemata
MB:0=100;1=100
echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
cat /sys/fs/resctrl/p1/schemata
MB:0= 10;1= 20
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0= 10;1= 20
Therefore, when the new group is created, it is reasonable to initialize
the MBA resource with default values.
Initialize the MBA resource and cache resources in separate functions.
[ bp: Add newlines between code blocks for better readability. ]
Signed-off-by: Xiaochen Shen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Fenghua Yu <[email protected]>
Reviewed-by: Reinette Chatre <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: [email protected]
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +--
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 52 ++++++++++++++++++++-----------
2 files changed, 35 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990a2eb7..89320c0396b1 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this CPU if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other CPUs. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 36ace51ee705..333c177a2471 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2581,8 +2581,8 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
return 0;
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+/*
+ * Initialize cache resources with default values.
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
@@ -2591,38 +2591,52 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
* If there are no more shareable bits available on any domain then
* the entire allocation will fail.
*/
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+{
+ struct rdt_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &r->domains, list) {
+ ret = __init_one_rdt_domain(d, r, closid);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct rdt_resource *r;
- struct rdt_domain *d;
int ret;
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
if (ret < 0)
return ret;
}
- }
- for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
+
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
Hi Boris,
I found a nitpick: an unnecessary newline at the end of the patch.
Please help double-check. Thank you.
On 4/18/2019 6:14, tip-bot for Xiaochen Shen wrote:
> Commit-ID: 47820e73f5b3a1fdb8ebd1219191edc96e0c85c1
> Gitweb: https://git.kernel.org/tip/47820e73f5b3a1fdb8ebd1219191edc96e0c85c1
> Author: Xiaochen Shen <[email protected]>
> AuthorDate: Wed, 17 Apr 2019 19:08:49 +0800
> Committer: Borislav Petkov <[email protected]>
> CommitDate: Thu, 18 Apr 2019 00:06:31 +0200
>
> x86/resctrl: Initialize a new resource group with default MBA values
>
> Currently, when a new resource group is created, the allocation values
> of the MBA resource are not initialized and contain meaningless data.
>
> For example:
>
> mkdir /sys/fs/resctrl/p1
> cat /sys/fs/resctrl/p1/schemata
> MB:0=100;1=100
>
> echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
> cat /sys/fs/resctrl/p1/schemata
> MB:0= 10;1= 20
>
> rmdir /sys/fs/resctrl/p1
> mkdir /sys/fs/resctrl/p2
> cat /sys/fs/resctrl/p2/schemata
> MB:0= 10;1= 20
>
> Therefore, when the new group is created, it is reasonable to initialize
> the MBA resource with default values.
>
> Initialize the MBA resource and cache resources in separate functions.
>
> [ bp: Add newlines between code blocks for better readability. ]
>
> Signed-off-by: Xiaochen Shen <[email protected]>
> Signed-off-by: Borislav Petkov <[email protected]>
> Reviewed-by: Fenghua Yu <[email protected]>
> Reviewed-by: Reinette Chatre <[email protected]>
> Cc: "H. Peter Anvin" <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: [email protected]
> Cc: Thomas Gleixner <[email protected]>
> Cc: Tony Luck <[email protected]>
> Cc: x86-ml <[email protected]>
> Link: https://lkml.kernel.org/r/[email protected]
> ---
> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +--
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 52 ++++++++++++++++++++-----------
> 2 files changed, 35 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index 2dbd990a2eb7..89320c0396b1 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
> if (cpumask_empty(cpu_mask) || mba_sc)
> goto done;
> cpu = get_cpu();
> - /* Update CBM on this cpu if it's in cpu_mask. */
> + /* Update resource control msr on this CPU if it's in cpu_mask. */
> if (cpumask_test_cpu(cpu, cpu_mask))
> rdt_ctrl_update(&msr_param);
> - /* Update CBM on other cpus. */
> + /* Update resource control msr on other CPUs. */
> smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
> put_cpu();
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 36ace51ee705..333c177a2471 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -2581,8 +2581,8 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
> return 0;
> }
>
> -/**
> - * rdtgroup_init_alloc - Initialize the new RDT group's allocations
> +/*
> + * Initialize cache resources with default values.
> *
> * A new RDT group is being created on an allocation capable (CAT)
> * supporting system. Set this group up to start off with all usable
> @@ -2591,38 +2591,52 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
> * If there are no more shareable bits available on any domain then
> * the entire allocation will fail.
> */
> +static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
> +{
> + struct rdt_domain *d;
> + int ret;
> +
> + list_for_each_entry(d, &r->domains, list) {
> + ret = __init_one_rdt_domain(d, r, closid);
> + if (ret < 0)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +/* Initialize MBA resource with default values. */
> +static void rdtgroup_init_mba(struct rdt_resource *r)
> +{
> + struct rdt_domain *d;
> +
> + list_for_each_entry(d, &r->domains, list) {
> + d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
> + d->have_new_ctrl = true;
> + }
> +}
> +
> +/* Initialize the RDT group's allocations. */
> static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
> {
> struct rdt_resource *r;
> - struct rdt_domain *d;
> int ret;
>
> for_each_alloc_enabled_rdt_resource(r) {
> - /*
> - * Only initialize default allocations for CBM cache
> - * resources
> - */
> - if (r->rid == RDT_RESOURCE_MBA)
> - continue;
> - list_for_each_entry(d, &r->domains, list) {
> - ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
> + if (r->rid == RDT_RESOURCE_MBA) {
> + rdtgroup_init_mba(r);
> + } else {
> + ret = rdtgroup_init_cat(r, rdtgrp->closid);
> if (ret < 0)
> return ret;
> }
> - }
>
> - for_each_alloc_enabled_rdt_resource(r) {
> - /*
> - * Only initialize default allocations for CBM cache
> - * resources
> - */
> - if (r->rid == RDT_RESOURCE_MBA)
> - continue;
> ret = update_domains(r, rdtgrp->closid);
> if (ret < 0) {
> rdt_last_cmd_puts("Failed to initialize allocations\n");
> return ret;
> }
> +
In my opinion, this newline is unnecessary. Thank you.
> }
>
> rdtgrp->mode = RDT_MODE_SHAREABLE;
>
--
Best regards,
Xiaochen
On Thu, Apr 18, 2019 at 03:03:35PM +0800, Xiaochen Shen wrote:
> In my opinion, this newline is unnecessary. Thank you.
See commit message:
> [ bp: Add newlines between code blocks for better readability. ]
And I didn't add enough. That code is too crammed.
For example, the new __init_one_rdt_domain() could use some newlines
too, to separate code blocks for better readability. At least before
each comment so that one can visually distinguish code groups
better/faster.
Here is the whole function, pasted with newlines added where I think
they make sense. This way you have visual separation between the code
blocks instead of one big fat clump of code which one has to
painstakingly pick apart.
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
u32 closid)
{
struct rdt_resource *r_cdp = NULL;
struct rdt_domain *d_cdp = NULL;
u32 used_b = 0, unused_b = 0;
unsigned long tmp_cbm;
enum rdtgrp_mode mode;
u32 peer_ctl, *ctrl;
int i;
rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
d->have_new_ctrl = false;
d->new_ctrl = r->cache.shareable_bits;
used_b = r->cache.shareable_bits;
ctrl = d->ctrl_val;
for (i = 0; i < closids_supported(); i++, ctrl++) {
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
break;
/*
* If CDP is active include peer domain's
* usage to ensure there is no overlap
* with an exclusive group.
*/
if (d_cdp)
peer_ctl = d_cdp->ctrl_val[i];
else
peer_ctl = 0;
used_b |= *ctrl | peer_ctl;
if (mode == RDT_MODE_SHAREABLE)
d->new_ctrl |= *ctrl | peer_ctl;
}
}
if (d->plr && d->plr->cbm > 0)
used_b |= d->plr->cbm;
unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
d->new_ctrl |= unused_b;
/*
* Force the initial CBM to be valid, user can
* modify the CBM based on system availability.
*/
cbm_ensure_valid(&d->new_ctrl, r);
/*
* Assign the u32 CBM to an unsigned long to ensure that
* bitmap_weight() does not access out-of-bound memory.
*/
tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
return -ENOSPC;
}
d->have_new_ctrl = true;
return 0;
}
--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.
Hi Boris,
On 4/18/2019 15:28, Borislav Petkov wrote:
> On Thu, Apr 18, 2019 at 03:03:35PM +0800, Xiaochen Shen wrote:
>> In my opinion, this newline is unnecessary. Thank you.
>
> See commit message:
>
>> [ bp: Add newlines between code blocks for better readability. ]
>
I saw this commit message and the code changes.
I really appreciate that you added several newlines between the code
blocks; they do make the code more readable.
> for_each_alloc_enabled_rdt_resource(r) {
> ...;
> ret = update_domains(r, rdtgrp->closid);
> if (ret < 0) {
> rdt_last_cmd_puts("Failed to initialize allocations\n");
> return ret;
> }
> +
> }
But is this newline an exception?
This newline sits between two "}"s - the first "}" closes the if
block, and the second "}" closes the
for_each_alloc_enabled_rdt_resource() loop. I don't think the newline
is necessary there.
> And I didn't add enough. That code is too crammed.
>
> For example, the new __init_one_rdt_domain() could use some newlines
> too, to separate code blocks for better readability. At least before
> each comment so that one can visually distinguish code groups
> better/faster.
>
> Here the whole function pasted with newlines added where I think they
> make sense. This way you have visual separation between the code blocks
> and not one big fat clump of code which one has to painstakingly pick
> apart.
Thank you very much for pointing this out and helping with the changes.
Should I submit a separate fix-up patch for __init_one_rdt_domain()?
>
> static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
> u32 closid)
> {
> struct rdt_resource *r_cdp = NULL;
> struct rdt_domain *d_cdp = NULL;
> u32 used_b = 0, unused_b = 0;
> unsigned long tmp_cbm;
> enum rdtgrp_mode mode;
> u32 peer_ctl, *ctrl;
> int i;
>
> rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
> d->have_new_ctrl = false;
> d->new_ctrl = r->cache.shareable_bits;
> used_b = r->cache.shareable_bits;
> ctrl = d->ctrl_val;
>
> for (i = 0; i < closids_supported(); i++, ctrl++) {
> if (closid_allocated(i) && i != closid) {
> mode = rdtgroup_mode_by_closid(i);
> if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
> break;
>
> /*
> * If CDP is active include peer domain's
> * usage to ensure there is no overlap
> * with an exclusive group.
> */
> if (d_cdp)
> peer_ctl = d_cdp->ctrl_val[i];
> else
> peer_ctl = 0;
>
> used_b |= *ctrl | peer_ctl;
> if (mode == RDT_MODE_SHAREABLE)
> d->new_ctrl |= *ctrl | peer_ctl;
> }
> }
>
> if (d->plr && d->plr->cbm > 0)
> used_b |= d->plr->cbm;
>
> unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
> unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
> d->new_ctrl |= unused_b;
>
> /*
> * Force the initial CBM to be valid, user can
> * modify the CBM based on system availability.
> */
> cbm_ensure_valid(&d->new_ctrl, r);
>
> /*
> * Assign the u32 CBM to an unsigned long to ensure that
> * bitmap_weight() does not access out-of-bound memory.
> */
> tmp_cbm = d->new_ctrl;
>
> if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
> rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
> return -ENOSPC;
> }
> d->have_new_ctrl = true;
>
> return 0;
> }
>
--
Best regards,
Xiaochen
On Thu, Apr 18, 2019 at 04:20:57PM +0800, Xiaochen Shen wrote:
> Should I submit a separate fixing patch for __init_one_rdt_domain()?
You don't have to send a separate patch just to remove an excessive
newline. The next time someone touches the code around there, that
newline can be removed too.
Thx.
--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.