2011-03-16 02:28:12

by George Spelvin

[permalink] [raw]
Subject: [PATCH 5/8] mm/slub: Factor out some common code.

For sysfs files that map a boolean to a flags bit.
---
mm/slub.c | 93 ++++++++++++++++++++++++++++--------------------------------
1 files changed, 43 insertions(+), 50 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f..856246f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3982,38 +3982,61 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(objects_partial);

+static ssize_t flag_show(struct kmem_cache *s, char *buf, unsigned flag)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & flag));
+}
+
+static ssize_t flag_store(struct kmem_cache *s,
+ const char *buf, size_t length, unsigned flag)
+{
+ s->flags &= ~flag;
+ if (buf[0] == '1')
+ s->flags |= flag;
+ return length;
+}
+
+/* Like above, but changes allocation size; so only allowed on empty slab */
+static ssize_t flag_store_sizechange(struct kmem_cache *s,
+ const char *buf, size_t length, unsigned flag)
+{
+ if (any_slab_objects(s))
+ return -EBUSY;
+
+ flag_store(s, buf, length, flag);
+ calculate_sizes(s, -1);
+ return length;
+}
+
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
+ return flag_show(s, buf, SLAB_RECLAIM_ACCOUNT);
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- s->flags &= ~SLAB_RECLAIM_ACCOUNT;
- if (buf[0] == '1')
- s->flags |= SLAB_RECLAIM_ACCOUNT;
- return length;
+ return flag_store(s, buf, length, SLAB_RECLAIM_ACCOUNT);
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
+ return flag_show(s, buf, SLAB_HWCACHE_ALIGN);
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
+ return flag_show(s, buf, SLAB_CACHE_DMA);
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+ return flag_show(s, buf, SLAB_DESTROY_BY_RCU);
}
SLAB_ATTR_RO(destroy_by_rcu);

@@ -4032,88 +4055,61 @@ SLAB_ATTR_RO(total_objects);

static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
+ return flag_show(s, buf, SLAB_DEBUG_FREE);
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- s->flags &= ~SLAB_DEBUG_FREE;
- if (buf[0] == '1')
- s->flags |= SLAB_DEBUG_FREE;
- return length;
+ return flag_store(s, buf, length, SLAB_DEBUG_FREE);
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
+ return flag_show(s, buf, SLAB_TRACE);
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- s->flags &= ~SLAB_TRACE;
- if (buf[0] == '1')
- s->flags |= SLAB_TRACE;
- return length;
+ return flag_store(s, buf, length, SLAB_TRACE);
}
SLAB_ATTR(trace);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
+ return flag_show(s, buf, SLAB_RED_ZONE);
}

static ssize_t red_zone_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- if (any_slab_objects(s))
- return -EBUSY;
-
- s->flags &= ~SLAB_RED_ZONE;
- if (buf[0] == '1')
- s->flags |= SLAB_RED_ZONE;
- calculate_sizes(s, -1);
- return length;
+ return flag_store_sizechange(s, buf, length, SLAB_RED_ZONE);
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
+ return flag_show(s, buf, SLAB_POISON);
}

static ssize_t poison_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- if (any_slab_objects(s))
- return -EBUSY;
-
- s->flags &= ~SLAB_POISON;
- if (buf[0] == '1')
- s->flags |= SLAB_POISON;
- calculate_sizes(s, -1);
- return length;
+ return flag_store_sizechange(s, buf, length, SLAB_POISON);
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
+ return flag_show(s, buf, SLAB_STORE_USER);
}

static ssize_t store_user_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- if (any_slab_objects(s))
- return -EBUSY;
-
- s->flags &= ~SLAB_STORE_USER;
- if (buf[0] == '1')
- s->flags |= SLAB_STORE_USER;
- calculate_sizes(s, -1);
- return length;
+ return flag_store_sizechange(s, buf, length, SLAB_STORE_USER);
}
SLAB_ATTR(store_user);

@@ -4156,16 +4152,13 @@ SLAB_ATTR_RO(free_calls);
#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+ return flag_show(s, buf, SLAB_FAILSLAB);
}

static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- s->flags &= ~SLAB_FAILSLAB;
- if (buf[0] == '1')
- s->flags |= SLAB_FAILSLAB;
- return length;
+ return flag_store(s, buf, length, SLAB_FAILSLAB);
}
SLAB_ATTR(failslab);
#endif
--
1.7.4.1


2011-03-16 03:12:16

by Matt Mackall

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

On Mon, 2011-03-14 at 21:58 -0400, George Spelvin wrote:
> For sysfs files that map a boolean to a flags bit.

This one's actually pretty nice. You should really try to put all the
uncontroversial bits of a series first.

> ---
> mm/slub.c | 93 ++++++++++++++++++++++++++++--------------------------------
> 1 files changed, 43 insertions(+), 50 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index e15aa7f..856246f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3982,38 +3982,61 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
> }
> SLAB_ATTR_RO(objects_partial);
>
> +static ssize_t flag_show(struct kmem_cache *s, char *buf, unsigned flag)
> +{
> + return sprintf(buf, "%d\n", !!(s->flags & flag));
> +}
> +
> +static ssize_t flag_store(struct kmem_cache *s,
> + const char *buf, size_t length, unsigned flag)
> +{
> + s->flags &= ~flag;
> + if (buf[0] == '1')
> + s->flags |= flag;
> + return length;
> +}
> +
> +/* Like above, but changes allocation size; so only allowed on empty slab */
> +static ssize_t flag_store_sizechange(struct kmem_cache *s,
> + const char *buf, size_t length, unsigned flag)
> +{
> + if (any_slab_objects(s))
> + return -EBUSY;
> +
> + flag_store(s, buf, length, flag);
> + calculate_sizes(s, -1);
> + return length;
> +}
> +
> static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
> + return flag_show(s, buf, SLAB_RECLAIM_ACCOUNT);
> }
>
> static ssize_t reclaim_account_store(struct kmem_cache *s,
> const char *buf, size_t length)
> {
> - s->flags &= ~SLAB_RECLAIM_ACCOUNT;
> - if (buf[0] == '1')
> - s->flags |= SLAB_RECLAIM_ACCOUNT;
> - return length;
> + return flag_store(s, buf, length, SLAB_RECLAIM_ACCOUNT);
> }
> SLAB_ATTR(reclaim_account);
>
> static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
> + return flag_show(s, buf, SLAB_HWCACHE_ALIGN);
> }
> SLAB_ATTR_RO(hwcache_align);
>
> #ifdef CONFIG_ZONE_DMA
> static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
> + return flag_show(s, buf, SLAB_CACHE_DMA);
> }
> SLAB_ATTR_RO(cache_dma);
> #endif
>
> static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
> + return flag_show(s, buf, SLAB_DESTROY_BY_RCU);
> }
> SLAB_ATTR_RO(destroy_by_rcu);
>
> @@ -4032,88 +4055,61 @@ SLAB_ATTR_RO(total_objects);
>
> static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
> + return flag_show(s, buf, SLAB_DEBUG_FREE);
> }
>
> static ssize_t sanity_checks_store(struct kmem_cache *s,
> const char *buf, size_t length)
> {
> - s->flags &= ~SLAB_DEBUG_FREE;
> - if (buf[0] == '1')
> - s->flags |= SLAB_DEBUG_FREE;
> - return length;
> + return flag_store(s, buf, length, SLAB_DEBUG_FREE);
> }
> SLAB_ATTR(sanity_checks);
>
> static ssize_t trace_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
> + return flag_show(s, buf, SLAB_TRACE);
> }
>
> static ssize_t trace_store(struct kmem_cache *s, const char *buf,
> size_t length)
> {
> - s->flags &= ~SLAB_TRACE;
> - if (buf[0] == '1')
> - s->flags |= SLAB_TRACE;
> - return length;
> + return flag_store(s, buf, length, SLAB_TRACE);
> }
> SLAB_ATTR(trace);
>
> static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
> + return flag_show(s, buf, SLAB_RED_ZONE);
> }
>
> static ssize_t red_zone_store(struct kmem_cache *s,
> const char *buf, size_t length)
> {
> - if (any_slab_objects(s))
> - return -EBUSY;
> -
> - s->flags &= ~SLAB_RED_ZONE;
> - if (buf[0] == '1')
> - s->flags |= SLAB_RED_ZONE;
> - calculate_sizes(s, -1);
> - return length;
> + return flag_store_sizechange(s, buf, length, SLAB_RED_ZONE);
> }
> SLAB_ATTR(red_zone);
>
> static ssize_t poison_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
> + return flag_show(s, buf, SLAB_POISON);
> }
>
> static ssize_t poison_store(struct kmem_cache *s,
> const char *buf, size_t length)
> {
> - if (any_slab_objects(s))
> - return -EBUSY;
> -
> - s->flags &= ~SLAB_POISON;
> - if (buf[0] == '1')
> - s->flags |= SLAB_POISON;
> - calculate_sizes(s, -1);
> - return length;
> + return flag_store_sizechange(s, buf, length, SLAB_POISON);
> }
> SLAB_ATTR(poison);
>
> static ssize_t store_user_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
> + return flag_show(s, buf, SLAB_STORE_USER);
> }
>
> static ssize_t store_user_store(struct kmem_cache *s,
> const char *buf, size_t length)
> {
> - if (any_slab_objects(s))
> - return -EBUSY;
> -
> - s->flags &= ~SLAB_STORE_USER;
> - if (buf[0] == '1')
> - s->flags |= SLAB_STORE_USER;
> - calculate_sizes(s, -1);
> - return length;
> + return flag_store_sizechange(s, buf, length, SLAB_STORE_USER);
> }
> SLAB_ATTR(store_user);
>
> @@ -4156,16 +4152,13 @@ SLAB_ATTR_RO(free_calls);
> #ifdef CONFIG_FAILSLAB
> static ssize_t failslab_show(struct kmem_cache *s, char *buf)
> {
> - return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
> + return flag_show(s, buf, SLAB_FAILSLAB);
> }
>
> static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
> size_t length)
> {
> - s->flags &= ~SLAB_FAILSLAB;
> - if (buf[0] == '1')
> - s->flags |= SLAB_FAILSLAB;
> - return length;
> + return flag_store(s, buf, length, SLAB_FAILSLAB);
> }
> SLAB_ATTR(failslab);
> #endif


--
Mathematics is the supreme nostalgia of our time.

2011-03-16 06:32:35

by Pekka Enberg

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

On Tue, Mar 15, 2011 at 3:58 AM, George Spelvin <[email protected]> wrote:
> For sysfs files that map a boolean to a flags bit.

Looks good to me. I'll cc Christoph and David too.

> ---
> ?mm/slub.c | ? 93 ++++++++++++++++++++++++++++--------------------------------
> ?1 files changed, 43 insertions(+), 50 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index e15aa7f..856246f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3982,38 +3982,61 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
> ?}
> ?SLAB_ATTR_RO(objects_partial);
>
> +static ssize_t flag_show(struct kmem_cache *s, char *buf, unsigned flag)
> +{
> + ? ? ? return sprintf(buf, "%d\n", !!(s->flags & flag));
> +}
> +
> +static ssize_t flag_store(struct kmem_cache *s,
> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? const char *buf, size_t length, unsigned flag)
> +{
> + ? ? ? s->flags &= ~flag;
> + ? ? ? if (buf[0] == '1')
> + ? ? ? ? ? ? ? s->flags |= flag;
> + ? ? ? return length;
> +}
> +
> +/* Like above, but changes allocation size; so only allowed on empty slab */
> +static ssize_t flag_store_sizechange(struct kmem_cache *s,
> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? const char *buf, size_t length, unsigned flag)
> +{
> + ? ? ? if (any_slab_objects(s))
> + ? ? ? ? ? ? ? return -EBUSY;
> +
> + ? ? ? flag_store(s, buf, length, flag);
> + ? ? ? calculate_sizes(s, -1);
> + ? ? ? return length;
> +}
> +
> ?static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
> + ? ? ? return flag_show(s, buf, SLAB_RECLAIM_ACCOUNT);
> ?}
>
> ?static ssize_t reclaim_account_store(struct kmem_cache *s,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t length)
> ?{
> - ? ? ? s->flags &= ~SLAB_RECLAIM_ACCOUNT;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_RECLAIM_ACCOUNT;
> - ? ? ? return length;
> + ? ? ? return flag_store(s, buf, length, SLAB_RECLAIM_ACCOUNT);
> ?}
> ?SLAB_ATTR(reclaim_account);
>
> ?static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
> + ? ? ? return flag_show(s, buf, SLAB_HWCACHE_ALIGN);
> ?}
> ?SLAB_ATTR_RO(hwcache_align);
>
> ?#ifdef CONFIG_ZONE_DMA
> ?static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
> + ? ? ? return flag_show(s, buf, SLAB_CACHE_DMA);
> ?}
> ?SLAB_ATTR_RO(cache_dma);
> ?#endif
>
> ?static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
> + ? ? ? return flag_show(s, buf, SLAB_DESTROY_BY_RCU);
> ?}
> ?SLAB_ATTR_RO(destroy_by_rcu);
>
> @@ -4032,88 +4055,61 @@ SLAB_ATTR_RO(total_objects);
>
> ?static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
> + ? ? ? return flag_show(s, buf, SLAB_DEBUG_FREE);
> ?}
>
> ?static ssize_t sanity_checks_store(struct kmem_cache *s,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t length)
> ?{
> - ? ? ? s->flags &= ~SLAB_DEBUG_FREE;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_DEBUG_FREE;
> - ? ? ? return length;
> + ? ? ? return flag_store(s, buf, length, SLAB_DEBUG_FREE);
> ?}
> ?SLAB_ATTR(sanity_checks);
>
> ?static ssize_t trace_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
> + ? ? ? return flag_show(s, buf, SLAB_TRACE);
> ?}
>
> ?static ssize_t trace_store(struct kmem_cache *s, const char *buf,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?size_t length)
> ?{
> - ? ? ? s->flags &= ~SLAB_TRACE;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_TRACE;
> - ? ? ? return length;
> + ? ? ? return flag_store(s, buf, length, SLAB_TRACE);
> ?}
> ?SLAB_ATTR(trace);
>
> ?static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
> + ? ? ? return flag_show(s, buf, SLAB_RED_ZONE);
> ?}
>
> ?static ssize_t red_zone_store(struct kmem_cache *s,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t length)
> ?{
> - ? ? ? if (any_slab_objects(s))
> - ? ? ? ? ? ? ? return -EBUSY;
> -
> - ? ? ? s->flags &= ~SLAB_RED_ZONE;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_RED_ZONE;
> - ? ? ? calculate_sizes(s, -1);
> - ? ? ? return length;
> + ? ? ? return flag_store_sizechange(s, buf, length, SLAB_RED_ZONE);
> ?}
> ?SLAB_ATTR(red_zone);
>
> ?static ssize_t poison_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
> + ? ? ? return flag_show(s, buf, SLAB_POISON);
> ?}
>
> ?static ssize_t poison_store(struct kmem_cache *s,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t length)
> ?{
> - ? ? ? if (any_slab_objects(s))
> - ? ? ? ? ? ? ? return -EBUSY;
> -
> - ? ? ? s->flags &= ~SLAB_POISON;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_POISON;
> - ? ? ? calculate_sizes(s, -1);
> - ? ? ? return length;
> + ? ? ? return flag_store_sizechange(s, buf, length, SLAB_POISON);
> ?}
> ?SLAB_ATTR(poison);
>
> ?static ssize_t store_user_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
> + ? ? ? return flag_show(s, buf, SLAB_STORE_USER);
> ?}
>
> ?static ssize_t store_user_store(struct kmem_cache *s,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t length)
> ?{
> - ? ? ? if (any_slab_objects(s))
> - ? ? ? ? ? ? ? return -EBUSY;
> -
> - ? ? ? s->flags &= ~SLAB_STORE_USER;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_STORE_USER;
> - ? ? ? calculate_sizes(s, -1);
> - ? ? ? return length;
> + ? ? ? return flag_store_sizechange(s, buf, length, SLAB_STORE_USER);
> ?}
> ?SLAB_ATTR(store_user);
>
> @@ -4156,16 +4152,13 @@ SLAB_ATTR_RO(free_calls);
> ?#ifdef CONFIG_FAILSLAB
> ?static ssize_t failslab_show(struct kmem_cache *s, char *buf)
> ?{
> - ? ? ? return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
> + ? ? ? return flag_show(s, buf, SLAB_FAILSLAB);
> ?}
>
> ?static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
> ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?size_t length)
> ?{
> - ? ? ? s->flags &= ~SLAB_FAILSLAB;
> - ? ? ? if (buf[0] == '1')
> - ? ? ? ? ? ? ? s->flags |= SLAB_FAILSLAB;
> - ? ? ? return length;
> + ? ? ? return flag_store(s, buf, length, SLAB_FAILSLAB);
> ?}
> ?SLAB_ATTR(failslab);
> ?#endif
> --
> 1.7.4.1
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to [email protected]. For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
> Don't email: <a href=mailto:"[email protected]"> [email protected] </a>
>

2011-03-16 20:28:47

by David Rientjes

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

On Mon, 14 Mar 2011, George Spelvin wrote:

> For sysfs files that map a boolean to a flags bit.

Where's your signed-off-by?

> ---
> mm/slub.c | 93 ++++++++++++++++++++++++++++--------------------------------
> 1 files changed, 43 insertions(+), 50 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index e15aa7f..856246f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3982,38 +3982,61 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
> }
> SLAB_ATTR_RO(objects_partial);
>
> +static ssize_t flag_show(struct kmem_cache *s, char *buf, unsigned flag)
> +{
> + return sprintf(buf, "%d\n", !!(s->flags & flag));
> +}
> +
> +static ssize_t flag_store(struct kmem_cache *s,
> + const char *buf, size_t length, unsigned flag)
> +{
> + s->flags &= ~flag;
> + if (buf[0] == '1')
> + s->flags |= flag;
> + return length;
> +}
> +
> +/* Like above, but changes allocation size; so only allowed on empty slab */
> +static ssize_t flag_store_sizechange(struct kmem_cache *s,
> + const char *buf, size_t length, unsigned flag)
> +{
> + if (any_slab_objects(s))
> + return -EBUSY;
> +
> + flag_store(s, buf, length, flag);
> + calculate_sizes(s, -1);
> + return length;
> +}
> +

Nice cleanup.

"flag" should be unsigned long in all of these functions: the constants
are declared with UL suffixes in slab.h.

After that's fixed,

Acked-by: David Rientjes <[email protected]>

2011-03-16 20:51:47

by George Spelvin

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

> Where's your signed-off-by?

Somewhere under the pile of crap on my desk. :-)
(More to the point, waiting for me to think it's good enough to submit
For Real.)

> Nice cleanup.
>
> "flag" should be unsigned long in all of these functions: the constants
> are declared with UL suffixes in slab.h.

Actually, I did that deliberately. Because there's a problem I keep
wondering about, which repeats many many times in the kernel:

*Why* are they unsigned long? That's an awkward type: 32 bits on many
architectures, so we can't portably assign more than 32 bits, and on
platforms where it's 64 bits, the upper 32 are just wasting space.
(And REX prefixes on x86-64.)

Wouldn't it be a better cleanup to convert the whole lot to unsigned
or u32?

2011-03-16 21:51:26

by David Rientjes

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

On Wed, 16 Mar 2011, George Spelvin wrote:

> > Where's your signed-off-by?
>
> Somewhere under the pile of crap on my desk. :-)
> (More to the point, waiting for me to think it's good enough to submit
> For Real.)
>

Patches that you would like to propose but don't think are ready for merge
should have s/PATCH/RFC/ done on the subject line.

> > Nice cleanup.
> >
> > "flag" should be unsigned long in all of these functions: the constants
> > are declared with UL suffixes in slab.h.
>
> Actually, I did that deliberately. Because there's a problem I keep
> wondering about, which repeats many many times in the kernel:
>

You deliberately created a helper function to take an unsigned int when
the actuals being passed in are all unsigned long to trigger a discussion
on why they are unsigned long?

> *Why* are they unsigned long? That's an awkward type: 32 bits on many
> architectures, so we can't portably assign more than 32 bits, and on
> platforms where it's 64 bits, the upper 32 are just wasting space.
> (And REX prefixes on x86-64.)
>

unsigned long uses the native word size of the architecture which can
generate more efficient code; we typically imply that flags have a limited
size by including leading zeros in their definition for 32-bit
compatibility:

#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
...

2011-03-16 22:36:38

by George Spelvin

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

> Patches that you would like to propose but don't think are ready for merge
> should have s/PATCH/RFC/ done on the subject line.

You're right; I should have. I blame git-format-patch's defaults, but mea culpa.
(Now I know about the --subject-prefix=RFC option!)

> You deliberately created a helper function to take an unsigned int when
> the actuals being passed in are all unsigned long to trigger a discussion
> on why they are unsigned long?

Er, no, I'm not that Machiavellian.
I deliberately did it because it was obvious that the flags would always
fit into an "unsigned", so I didn't need "unsigned long".

(Actually, I owe you an apology; when writing that e-mail, I remember
thinking "I should go back and clarify that statement", but forgot before
hitting send.)

> unsigned long uses the native word size of the architecture which can
> generate more efficient code; we typically imply that flags have a limited
> size by including leading zeros in their definition for 32-bit
> compatibility:

Um, can you name a (64-bit) architecture on which 32-bit is more
expensive than 64-bit? On x86-64, it's potentially cheaper, and even
the infamous Alpha 21064 has no penalty for 32-bit accesses. SPARC,
MIPS, PPC, Itanium, what else? I don't know about z/Architecture,
but given the emphasis on backward compatibility in IBM's mainframes,
it seems hard to imagine.

2011-03-17 06:23:42

by Pekka Enberg

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

Hi George!

On Thu, Mar 17, 2011 at 12:36 AM, George Spelvin <[email protected]> wrote:
> Um, can you name a (64-bit) architecture on which 32-bit is more
> expensive than 64-bit?

I certainly don't but I'd still like to ask you to change it to
'unsigned long'. That's a Linux kernel idiom and we're not going to
change the whole kernel.

Pekka

2011-03-17 07:07:10

by George Spelvin

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

> I certainly don't but I'd still like to ask you to change it to
> 'unsigned long'. That's a Linux kernel idiom and we're not going to
> change the whole kernel.

Damn, and I just prepared the following patch. Should I, instead, do

--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -62,5 +62,5 @@ struct kmem_cache {
/* 3) touched by every alloc & free from the backend */

- unsigned int flags; /* constant flags */
+ unsigned long flags; /* constant flags */
unsigned int num; /* # of objs per slab */

... because the original slab code uses an unsigned int. To fix it
the other way (for SLAB_ flags only) is a patch like this:

>From 217645dfbb1fd42a451ae27aa1ea2c503ff31ef1 Mon Sep 17 00:00:00 2001
From: George Spelvin <[email protected]>
Date: Wed, 16 Mar 2011 22:36:50 -0400
Subject: [PATCH 01/11] mm: Make SLAB_* flags consistently 32-bit unsigned int.

The type of "unsigned long" is silly for a portable flags field, because
you can't portably use more than 32 bits, but it takes 64 bits on 64-bit
processors. And 32-bit manipulations are never slower than 64-bit ones.

Also, include/slab_def.h has always used an "unsigned int" flags field.
This just updates the other implementations to do the same.
(EXCEPTION: SLOB structure must match "struct page".)

Signed-off-by: George Spelvin <[email protected]>
---
include/linux/slab.h | 38 +++++++++++++++++++-------------------
include/linux/slub_def.h | 2 +-
mm/slab.c | 2 +-
mm/slob.c | 5 ++---
mm/slub.c | 22 +++++++++++-----------
5 files changed, 34 insertions(+), 35 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index fa90866..0ff4221 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,13 +16,13 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
*/
-#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
-#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
-#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+#define SLAB_DEBUG_FREE 0x00000100U /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_RED_ZONE 0x00000400U /* DEBUG: Red zone objs in a cache */
+#define SLAB_POISON 0x00000800U /* DEBUG: Poison objects */
+#define SLAB_HWCACHE_ALIGN 0x00002000U /* Align objs on cache lines */
+#define SLAB_CACHE_DMA 0x00004000U /* Use GFP_DMA memory */
+#define SLAB_STORE_USER 0x00010000U /* DEBUG: Store the last owner for bug hunting */
+#define SLAB_PANIC 0x00040000U /* Panic if kmem_cache_create() fails */
/*
* SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
*
@@ -51,33 +51,33 @@
*
* See also the comment on struct slab_rcu in mm/slab.c.
*/
-#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
-#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+#define SLAB_DESTROY_BY_RCU 0x00080000U /* Defer freeing slabs to RCU */
+#define SLAB_MEM_SPREAD 0x00100000U /* Spread some memory over cpuset */
+#define SLAB_TRACE 0x00200000U /* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS 0x00400000UL
+# define SLAB_DEBUG_OBJECTS 0x00400000U
#else
-# define SLAB_DEBUG_OBJECTS 0x00000000UL
+# define SLAB_DEBUG_OBJECTS 0x00000000U
#endif

-#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE 0x00800000U /* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK 0x01000000UL
+# define SLAB_NOTRACK 0x01000000U
#else
-# define SLAB_NOTRACK 0x00000000UL
+# define SLAB_NOTRACK 0x00000000U
#endif
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+# define SLAB_FAILSLAB 0x02000000U /* Fault injection mark */
#else
-# define SLAB_FAILSLAB 0x00000000UL
+# define SLAB_FAILSLAB 0x00000000U
#endif

/* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT 0x00020000U /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -99,7 +99,7 @@ void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- unsigned long,
+ unsigned int,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 8b6e8ae..5ed8408 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -69,7 +69,7 @@ struct kmem_cache_order_objects {
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
/* Used for retriving partial slabs etc */
- unsigned long flags;
+ unsigned int flags;
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
diff --git a/mm/slab.c b/mm/slab.c
index 37961d1..40e71b2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2164,7 +2164,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
*/
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ unsigned int flags, void (*ctor)(void *))
{
size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL, *pc;
diff --git a/mm/slob.c b/mm/slob.c
index 3588eaa..7745256 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -558,14 +558,13 @@ size_t ksize(const void *block)
EXPORT_SYMBOL(ksize);

struct kmem_cache {
- unsigned int size, align;
- unsigned long flags;
+ unsigned int size, align, flags;
const char *name;
void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+ size_t align, unsigned int flags, void (*ctor)(void *))
{
struct kmem_cache *c;

diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f..ab36cbe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1046,8 +1046,8 @@ out:

__setup("slub_debug", setup_slub_debug);

-static unsigned long kmem_cache_flags(unsigned long objsize,
- unsigned long flags, const char *name,
+static unsigned int kmem_cache_flags(unsigned long objsize,
+ unsigned int flags, const char *name,
void (*ctor)(void *))
{
/*
@@ -1074,8 +1074,8 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
- unsigned long flags, const char *name,
+static inline unsigned int kmem_cache_flags(unsigned long objsize,
+ unsigned int flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -2069,7 +2069,7 @@ static inline int calculate_order(int size)
/*
* Figure out what the alignment of the objects will be.
*/
-static unsigned long calculate_alignment(unsigned long flags,
+static unsigned long calculate_alignment(unsigned int flags,
unsigned long align, unsigned long size)
{
/*
@@ -2220,7 +2220,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
- unsigned long flags = s->flags;
+ unsigned int flags = s->flags;
unsigned long size = s->objsize;
unsigned long align = s->align;
int order;
@@ -2340,7 +2340,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)

static int kmem_cache_open(struct kmem_cache *s,
const char *name, size_t size,
- size_t align, unsigned long flags,
+ size_t align, unsigned int flags,
void (*ctor)(void *))
{
memset(s, 0, kmem_size);
@@ -2384,7 +2384,7 @@ static int kmem_cache_open(struct kmem_cache *s,
error:
if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u "
- "order=%u offset=%u flags=%lx\n",
+ "order=%u offset=%u flags=%x\n",
s->name, (unsigned long)size, s->size, oo_order(s->oo),
s->offset, flags);
return 0;
@@ -3160,7 +3160,7 @@ static int slab_unmergeable(struct kmem_cache *s)
}

static struct kmem_cache *find_mergeable(size_t size,
- size_t align, unsigned long flags, const char *name,
+ size_t align, unsigned int flags, const char *name,
void (*ctor)(void *))
{
struct kmem_cache *s;
@@ -3201,7 +3201,7 @@ static struct kmem_cache *find_mergeable(size_t size,
}

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+ size_t align, unsigned int flags, void (*ctor)(void *))
{
struct kmem_cache *s;
char *n;
@@ -3760,7 +3760,7 @@ enum slab_stat_type {
#define SO_TOTAL (1 << SL_TOTAL)

static ssize_t show_slab_objects(struct kmem_cache *s,
- char *buf, unsigned long flags)
+ char *buf, unsigned int flags)
{
unsigned long total = 0;
int node;
--
1.7.4.1

2011-03-17 08:01:47

by Pekka Enberg

[permalink] [raw]
Subject: Re: [PATCH 5/8] mm/slub: Factor out some common code.

On Thu, Mar 17, 2011 at 9:07 AM, George Spelvin <[email protected]> wrote:
>> I certainly don't but I'd still like to ask you to change it to
>> 'unsigned long'. That's a Linux kernel idiom and we're not going to
>> change the whole kernel.
>
> Damn, and I just prepared the following patch. Should I, instead, do
>
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -62,5 +62,5 @@ struct kmem_cache {
> ?/* 3) touched by every alloc & free from the backend */
>
> - ? ? ? unsigned int flags; ? ? ? ? ? ? /* constant flags */
> + ? ? ? unsigned long flags; ? ? ? ? ? ?/* constant flags */
> ? ? ? ?unsigned int num; ? ? ? ? ? ? ? /* # of objs per slab */
>
> ... because the original slab code uses an unsigned int.

Looks good to me.