Currently, when a new resource group is created, the allocation values
of the MBA resource are not initialized and contain meaningless data.
For example:
mkdir /sys/fs/resctrl/p1
cat /sys/fs/resctrl/p1/schemata
MB:0=100;1=100
echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
cat /sys/fs/resctrl/p1/schemata
MB:0= 10;1= 20
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0= 10;1= 20
When the new group is created, it is reasonable to initialize the MBA
resource with default values.
Initialize the MBA resource and cache resources in separate functions.
Signed-off-by: Xiaochen Shen <[email protected]>
Reviewed-by: Fenghua Yu <[email protected]>
Reviewed-by: Reinette Chatre <[email protected]>
---
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +-
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 139 ++++++++++++++++--------------
2 files changed, 75 insertions(+), 68 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990..576bb6a 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this cpu if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other cpus. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 08e0333..9f12a02 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2516,8 +2516,8 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+/*
+ * Initialize cache resources with default values.
*
* A new RDT group is being created on an allocation capable (CAT)
* supporting system. Set this group up to start off with all usable
@@ -2526,85 +2526,92 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
* All-zero CBM is invalid. If there are no more shareable bits available
* on any domain then the entire allocation will fail.
*/
-static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
struct rdt_resource *r_cdp = NULL;
struct rdt_domain *d_cdp = NULL;
u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
- struct rdt_resource *r;
unsigned long tmp_cbm;
enum rdtgrp_mode mode;
struct rdt_domain *d;
u32 peer_ctl, *ctrl;
- int i, ret;
+ int i;
- for_each_alloc_enabled_rdt_resource(r) {
+ list_for_each_entry(d, &r->domains, list) {
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
+ /*
+ * If CDP is active include peer
+ * domain's usage to ensure there
+ * is no overlap with an exclusive
+ * group.
+ */
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
+ }
+ }
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ cbm_ensure_valid(&d->new_ctrl, r);
/*
- * Only initialize default allocations for CBM cache
- * resources
+ * Assign the u32 CBM to an unsigned long to ensure
+ * that bitmap_weight() does not access out-of-bound
+ * memory.
*/
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
- /*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
- */
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n",
+ r->name, d->id);
+ return -ENOSPC;
}
+ d->have_new_ctrl = true;
}
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r;
+ int ret;
+
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
+ }
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
--
1.8.3.1
On Wed, Apr 10, 2019 at 03:55:28AM +0800, Xiaochen Shen wrote:
> Currently when a new resource group is created, the allocation values
> of MBA resource are not initialized and remain meaningless data.
>
> For example:
> mkdir /sys/fs/resctrl/p1
> cat /sys/fs/resctrl/p1/schemata
> MB:0=100;1=100
>
> echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
> cat /sys/fs/resctrl/p1/schemata
> MB:0= 10;1= 20
>
> rmdir /sys/fs/resctrl/p1
> mkdir /sys/fs/resctrl/p2
> cat /sys/fs/resctrl/p2/schemata
> MB:0= 10;1= 20
>
> When the new group is created, it is reasonable to initialize MBA
> resource with default values.
>
> Initialize MBA resource and cache resources in separate functions.
Please format your commit message by indenting the examples:
x86/resctrl: Initialize a new resource group with default MBA values
Currently, when a new resource group is created, the allocation values
of the MBA resource are not initialized and remain meaningless data.
For example:
mkdir /sys/fs/resctrl/p1
cat /sys/fs/resctrl/p1/schemata
MB:0=100;1=100
echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
cat /sys/fs/resctrl/p1/schemata
MB:0= 10;1= 20
rmdir /sys/fs/resctrl/p1
mkdir /sys/fs/resctrl/p2
cat /sys/fs/resctrl/p2/schemata
MB:0= 10;1= 20
Therefore, when the new group is created, it is reasonable to initialize
MBA resource with default values.
Initialize the MBA resource and cache resources in separate functions.
Thx.
>
> Signed-off-by: Xiaochen Shen <[email protected]>
> Reviewed-by: Fenghua Yu <[email protected]>
> Reviewed-by: Reinette Chatre <[email protected]>
> ---
> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +-
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 139 ++++++++++++++++--------------
> 2 files changed, 75 insertions(+), 68 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index 2dbd990..576bb6a 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
> if (cpumask_empty(cpu_mask) || mba_sc)
> goto done;
> cpu = get_cpu();
> - /* Update CBM on this cpu if it's in cpu_mask. */
> + /* Update resource control msr on this cpu if it's in cpu_mask. */
s/cpu/CPU/g
> if (cpumask_test_cpu(cpu, cpu_mask))
> rdt_ctrl_update(&msr_param);
> - /* Update CBM on other cpus. */
> + /* Update resource control msr on other cpus. */
Ditto.
> smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
> put_cpu();
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 08e0333..9f12a02 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -2516,8 +2516,8 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
> bitmap_clear(val, zero_bit, cbm_len - zero_bit);
> }
>
> -/**
> - * rdtgroup_init_alloc - Initialize the new RDT group's allocations
> +/*
> + * Initialize cache resources with default values.
> *
> * A new RDT group is being created on an allocation capable (CAT)
> * supporting system. Set this group up to start off with all usable
> @@ -2526,85 +2526,92 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
> * All-zero CBM is invalid. If there are no more shareable bits available
> * on any domain then the entire allocation will fail.
> */
> -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
> +static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
> {
> struct rdt_resource *r_cdp = NULL;
> struct rdt_domain *d_cdp = NULL;
> u32 used_b = 0, unused_b = 0;
> - u32 closid = rdtgrp->closid;
> - struct rdt_resource *r;
> unsigned long tmp_cbm;
> enum rdtgrp_mode mode;
> struct rdt_domain *d;
> u32 peer_ctl, *ctrl;
> - int i, ret;
> + int i;
>
> - for_each_alloc_enabled_rdt_resource(r) {
> + list_for_each_entry(d, &r->domains, list) {
> + rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
> + d->have_new_ctrl = false;
> + d->new_ctrl = r->cache.shareable_bits;
> + used_b = r->cache.shareable_bits;
> + ctrl = d->ctrl_val;
> + for (i = 0; i < closids_supported(); i++, ctrl++) {
> + if (closid_allocated(i) && i != closid) {
> + mode = rdtgroup_mode_by_closid(i);
> + if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
> + break;
> + /*
> + * If CDP is active include peer
> + * domain's usage to ensure there
> + * is no overlap with an exclusive
> + * group.
> + */
> + if (d_cdp)
> + peer_ctl = d_cdp->ctrl_val[i];
> + else
> + peer_ctl = 0;
> + used_b |= *ctrl | peer_ctl;
> + if (mode == RDT_MODE_SHAREABLE)
> + d->new_ctrl |= *ctrl | peer_ctl;
> + }
> + }
> + if (d->plr && d->plr->cbm > 0)
> + used_b |= d->plr->cbm;
> + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
> + unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
> + d->new_ctrl |= unused_b;
> + cbm_ensure_valid(&d->new_ctrl, r);
> /*
> - * Only initialize default allocations for CBM cache
> - * resources
> + * Assign the u32 CBM to an unsigned long to ensure
> + * that bitmap_weight() does not access out-of-bound
> + * memory.
> */
So all this code working on a rdt_domain *d pointer could be carved out
into a separate function called something like:
__init_one_rdt_domain(d, ...)
and this will make the code more readable and save us at least 2
indentation levels.
Please do that in a preparatory patch.
> - if (r->rid == RDT_RESOURCE_MBA)
> - continue;
Then, after having done that, it would be very obvious when you do this
above because you won't be calling that __init_one_rdt_domain() function
for an MBA anyway.
Thx.
--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.
Hi Boris,
Thank you very much for code review.
I will fix these issues in v2 patch.
Please find more comments inline.
Thank you.
On 4/15/2019 19:34, Borislav Petkov wrote:
> On Wed, Apr 10, 2019 at 03:55:28AM +0800, Xiaochen Shen wrote:
>> Currently when a new resource group is created, the allocation values
>> of MBA resource are not initialized and remain meaningless data.
>>
>> For example:
>> mkdir /sys/fs/resctrl/p1
>> cat /sys/fs/resctrl/p1/schemata
>> MB:0=100;1=100
>>
>> echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
>> cat /sys/fs/resctrl/p1/schemata
>> MB:0= 10;1= 20
>>
>> rmdir /sys/fs/resctrl/p1
>> mkdir /sys/fs/resctrl/p2
>> cat /sys/fs/resctrl/p2/schemata
>> MB:0= 10;1= 20
>>
>> When the new group is created, it is reasonable to initialize MBA
>> resource with default values.
>>
>> Initialize MBA resource and cache resources in separate functions.
>
> Please format your commit message by indenting the examples:
OK. Thank you.
>
> x86/resctrl: Initialize a new resource group with default MBA values
>
> Currently, when a new resource group is created, the allocation values
> of the MBA resource are not initialized and remain meaningless data.
>
> For example:
>
> mkdir /sys/fs/resctrl/p1
> cat /sys/fs/resctrl/p1/schemata
> MB:0=100;1=100
>
> echo "MB:0=10;1=20" > /sys/fs/resctrl/p1/schemata
> cat /sys/fs/resctrl/p1/schemata
> MB:0= 10;1= 20
>
> rmdir /sys/fs/resctrl/p1
> mkdir /sys/fs/resctrl/p2
> cat /sys/fs/resctrl/p2/schemata
> MB:0= 10;1= 20
>
> Therefore, when the new group is created, it is reasonable to initialize
> MBA resource with default values.
>
> Initialize the MBA resource and cache resources in separate functions.
>
> Thx.
>
>>
>> Signed-off-by: Xiaochen Shen <[email protected]>
>> Reviewed-by: Fenghua Yu <[email protected]>
>> Reviewed-by: Reinette Chatre <[email protected]>
>> ---
>> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 +-
>> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 139 ++++++++++++++++--------------
>> 2 files changed, 75 insertions(+), 68 deletions(-)
>>
>> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> index 2dbd990..576bb6a 100644
>> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> @@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
>> if (cpumask_empty(cpu_mask) || mba_sc)
>> goto done;
>> cpu = get_cpu();
>> - /* Update CBM on this cpu if it's in cpu_mask. */
>> + /* Update resource control msr on this cpu if it's in cpu_mask. */
>
> s/cpu/CPU/g
>
OK.
>> if (cpumask_test_cpu(cpu, cpu_mask))
>> rdt_ctrl_update(&msr_param);
>> - /* Update CBM on other cpus. */
>> + /* Update resource control msr on other cpus. */
>
> Ditto.
OK.
>
>> smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
>> put_cpu();
>>
>> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> index 08e0333..9f12a02 100644
>> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> @@ -2516,8 +2516,8 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
>> bitmap_clear(val, zero_bit, cbm_len - zero_bit);
>> }
>>
>> -/**
>> - * rdtgroup_init_alloc - Initialize the new RDT group's allocations
>> +/*
>> + * Initialize cache resources with default values.
>> *
>> * A new RDT group is being created on an allocation capable (CAT)
>> * supporting system. Set this group up to start off with all usable
>> @@ -2526,85 +2526,92 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
>> * All-zero CBM is invalid. If there are no more shareable bits available
>> * on any domain then the entire allocation will fail.
>> */
>> -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
>> +static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
>> {
>> struct rdt_resource *r_cdp = NULL;
>> struct rdt_domain *d_cdp = NULL;
>> u32 used_b = 0, unused_b = 0;
>> - u32 closid = rdtgrp->closid;
>> - struct rdt_resource *r;
>> unsigned long tmp_cbm;
>> enum rdtgrp_mode mode;
>> struct rdt_domain *d;
>> u32 peer_ctl, *ctrl;
>> - int i, ret;
>> + int i;
>>
>> - for_each_alloc_enabled_rdt_resource(r) {
>> + list_for_each_entry(d, &r->domains, list) {
>> + rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
>> + d->have_new_ctrl = false;
>> + d->new_ctrl = r->cache.shareable_bits;
>> + used_b = r->cache.shareable_bits;
>> + ctrl = d->ctrl_val;
>> + for (i = 0; i < closids_supported(); i++, ctrl++) {
>> + if (closid_allocated(i) && i != closid) {
>> + mode = rdtgroup_mode_by_closid(i);
>> + if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
>> + break;
>> + /*
>> + * If CDP is active include peer
>> + * domain's usage to ensure there
>> + * is no overlap with an exclusive
>> + * group.
>> + */
>> + if (d_cdp)
>> + peer_ctl = d_cdp->ctrl_val[i];
>> + else
>> + peer_ctl = 0;
>> + used_b |= *ctrl | peer_ctl;
>> + if (mode == RDT_MODE_SHAREABLE)
>> + d->new_ctrl |= *ctrl | peer_ctl;
>> + }
>> + }
>> + if (d->plr && d->plr->cbm > 0)
>> + used_b |= d->plr->cbm;
>> + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
>> + unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
>> + d->new_ctrl |= unused_b;
>> + cbm_ensure_valid(&d->new_ctrl, r);
>> /*
>> - * Only initialize default allocations for CBM cache
>> - * resources
>> + * Assign the u32 CBM to an unsigned long to ensure
>> + * that bitmap_weight() does not access out-of-bound
>> + * memory.
>> */
>
> So all this code working on a rdt_domain *d pointer could be carved out
> into a separate function called something like:
>
> __init_one_rdt_domain(d, ...)
>
> and this will make the code more readable and save us at least 2
> indentation levels.
>
> Please do that in a preparatory patch.
Good suggestion. I will do it in v2 patch.
>
>> - if (r->rid == RDT_RESOURCE_MBA)
>> - continue;
>
> Then, after having done that, it would be very obvious when you do this
> above because you won't be calling that __init_one_rdt_domain() function
> for an MBA anyway.
>
> Thx.
>
In this patch we initialize MBA resource and cache resources in separate
functions rdtgroup_init_cat() and rdtgroup_init_mba(). If
__init_one_rdt_domain() is only called by rdtgroup_init_cat(), how about
using function name "__init_one_rdt_domain_cat()"?
Thank you.
Best regards,
Xiaochen
On Wed, Apr 17, 2019 at 04:51:18AM +0800, Xiaochen Shen wrote:
> In this patch we initialize MBA resource and cache resources in separate
> functions rdtgroup_init_cat() and rdtgroup_init_mba(). If
> __init_one_rdt_domain() is only called by rdtgroup_init_cat(), how about
> using function name "__init_one_rdt_domain_cat()"?
I guess but be conservative when adding too many words to a function's
name - that might get heavy when reading the code later.
For example, there's an argument to not suffix it with "_cat": it is
only called by rdtgroup_init_cat() - rdtgroup_init_mba() will not call
it - so the path will remain unique anyway...
Anyway, something to think about - I personally don't have a strong
preference here.
Thx.
--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.
Hi Boris,
On 4/17/2019 4:59, Borislav Petkov wrote:
> I guess but be conservative when adding too many words to a function's
> name - that might get heavy when reading the code later.
>
> For example, there's an argument to not suffix it with "_cat": it is
> only called by rdtgroup_init_cat() - rdtgroup_init_mba() will not call
> it - so the path will remain unique anyway...
>
I got it. I will use the function name "__init_one_rdt_domain()" you
suggested.
Thank you.
> Anyway, something to think about - I personally don't have a strong
> preference here.
Best regards,
Xiaochen