2022-03-09 10:41:13

by Miaohe Lin

Subject: [PATCH] mm/slub: remove forced_order parameter in calculate_sizes

Since commit 32a6f409b693 ("mm, slub: remove runtime allocation order
changes"), forced_order is always -1. Remove this unneeded parameter
to simplify the code.

Signed-off-by: Miaohe Lin <[email protected]>
---
mm/slub.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 180354d7e741..7f09901ae6b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4038,7 +4038,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -4142,10 +4142,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	size = ALIGN(size, s->align);
 	s->size = size;
 	s->reciprocal_size = reciprocal_value(size);
-	if (forced_order >= 0)
-		order = forced_order;
-	else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if ((int)order < 0)
 		return 0;
@@ -4181,7 +4178,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (!calculate_sizes(s, -1))
+	if (!calculate_sizes(s))
 		goto error;
 	if (disable_higher_order_debug) {
 		/*
@@ -4191,7 +4188,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
-			if (!calculate_sizes(s, -1))
+			if (!calculate_sizes(s))
 				goto error;
 		}
 	}
--
2.23.0
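
[For readers skimming the thread: a minimal, self-contained sketch of the
pattern this patch removes. The names below are hypothetical user-space
stand-ins, not the kernel functions; the real calculate_order() also weighs
fragmentation and slab limits. The point is only that a parameter every
remaining caller passes as -1 leaves its `forced_order >= 0` branch dead.]

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's calculate_order(): pick the
 * smallest page order (4 KiB << order) that holds one object of `size`.
 */
static int calculate_order(unsigned int size)
{
	int order = 0;

	while ((4096u << order) < size)
		order++;
	return order;
}

/* Before the patch: forced_order could override the computed order. */
static int order_old(unsigned int size, int forced_order)
{
	int order;

	if (forced_order >= 0)
		order = forced_order;	/* dead: every caller passes -1 */
	else
		order = calculate_order(size);
	return order;
}

/* After the patch: with -1 hardwired at every call site, the override
 * collapses away and the parameter can be dropped. */
static int order_new(unsigned int size)
{
	return calculate_order(size);
}

int main(void)
{
	/* Identical results for any size once forced_order is always -1. */
	printf("old=%d new=%d\n", order_old(10000, -1), order_new(10000));
	return 0;
}

[Both variants print the same order for every input, which is exactly why
the parameter is removable with no behavior change.]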


2022-03-09 10:54:35

by Hyeonggon Yoo

Subject: Re: [PATCH] mm/slub: remove forced_order parameter in calculate_sizes

On Wed, Mar 09, 2022 at 05:20:36PM +0800, Miaohe Lin wrote:
> Since commit 32a6f409b693 ("mm, slub: remove runtime allocation order
> changes"), forced_order is always -1. Remove this unneeded parameter
> to simplify the code.
>
> Signed-off-by: Miaohe Lin <[email protected]>
> ---
> mm/slub.c | 11 ++++-------
> 1 file changed, 4 insertions(+), 7 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 180354d7e741..7f09901ae6b2 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4038,7 +4038,7 @@ static void set_cpu_partial(struct kmem_cache *s)
>   * calculate_sizes() determines the order and the distribution of data within
>   * a slab object.
>   */
> -static int calculate_sizes(struct kmem_cache *s, int forced_order)
> +static int calculate_sizes(struct kmem_cache *s)
>  {
>  	slab_flags_t flags = s->flags;
>  	unsigned int size = s->object_size;
> @@ -4142,10 +4142,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>  	size = ALIGN(size, s->align);
>  	s->size = size;
>  	s->reciprocal_size = reciprocal_value(size);
> -	if (forced_order >= 0)
> -		order = forced_order;
> -	else
> -		order = calculate_order(size);
> +	order = calculate_order(size);
>  
>  	if ((int)order < 0)
>  		return 0;
> @@ -4181,7 +4178,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
>  	s->random = get_random_long();
>  #endif
>  
> -	if (!calculate_sizes(s, -1))
> +	if (!calculate_sizes(s))
>  		goto error;
>  	if (disable_higher_order_debug) {
>  		/*
> @@ -4191,7 +4188,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
>  		if (get_order(s->size) > get_order(s->object_size)) {
>  			s->flags &= ~DEBUG_METADATA_FLAGS;
>  			s->offset = 0;
> -			if (!calculate_sizes(s, -1))
> +			if (!calculate_sizes(s))
>  				goto error;
>  		}
>  	}

Looks good to me.

Reviewed-by: Hyeonggon Yoo <[email protected]>

Thanks!

> --
> 2.23.0

--
Thank you, You are awesome!
Hyeonggon :-)

2022-03-09 12:13:18

by Vlastimil Babka

Subject: Re: [PATCH] mm/slub: remove forced_order parameter in calculate_sizes

On 3/9/22 10:55, Hyeonggon Yoo wrote:
> On Wed, Mar 09, 2022 at 05:20:36PM +0800, Miaohe Lin wrote:
>> Since commit 32a6f409b693 ("mm, slub: remove runtime allocation order
>> changes"), forced_order is always -1. Remove this unneeded parameter
>> to simplify the code.
>>
>> Signed-off-by: Miaohe Lin <[email protected]>
>> ---
>> mm/slub.c | 11 ++++-------
>> 1 file changed, 4 insertions(+), 7 deletions(-)
>>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 180354d7e741..7f09901ae6b2 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -4038,7 +4038,7 @@ static void set_cpu_partial(struct kmem_cache *s)
>>   * calculate_sizes() determines the order and the distribution of data within
>>   * a slab object.
>>   */
>> -static int calculate_sizes(struct kmem_cache *s, int forced_order)
>> +static int calculate_sizes(struct kmem_cache *s)
>>  {
>>  	slab_flags_t flags = s->flags;
>>  	unsigned int size = s->object_size;
>> @@ -4142,10 +4142,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>>  	size = ALIGN(size, s->align);
>>  	s->size = size;
>>  	s->reciprocal_size = reciprocal_value(size);
>> -	if (forced_order >= 0)
>> -		order = forced_order;
>> -	else
>> -		order = calculate_order(size);
>> +	order = calculate_order(size);
>>  
>>  	if ((int)order < 0)
>>  		return 0;
>> @@ -4181,7 +4178,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
>>  	s->random = get_random_long();
>>  #endif
>>  
>> -	if (!calculate_sizes(s, -1))
>> +	if (!calculate_sizes(s))
>>  		goto error;
>>  	if (disable_higher_order_debug) {
>>  		/*
>> @@ -4191,7 +4188,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
>>  		if (get_order(s->size) > get_order(s->object_size)) {
>>  			s->flags &= ~DEBUG_METADATA_FLAGS;
>>  			s->offset = 0;
>> -			if (!calculate_sizes(s, -1))
>> +			if (!calculate_sizes(s))
>>  				goto error;
>>  		}
>>  	}
>
> Looks good to me.
>
> Reviewed-by: Hyeonggon Yoo <[email protected]>

Thanks both, added to slab-next

> Thanks!
>
>> --
>> 2.23.0
>
> --
> Thank you, You are awesome!
> Hyeonggon :-)