The page->private field was being used to store the allocation size so
it could be retrieved by ksize(). However, it is bad practice to leave
a page unmarked as a slab page and then use its fields for special
purposes.
There is no need to store the allocated size: ksize() can simply
return PAGE_SIZE << compound_order(page).
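As an illustration (not part of the patch; assumes 4 KiB pages, so a
kmalloc of 5000 bytes takes the alloc_pages() path with order 1):

	void *p = kmalloc(5000, GFP_KERNEL);	/* order-1 compound page */
	size_t sz = ksize(p);	/* 8192 == PAGE_SIZE << compound_order(page) */
	kfree(p);

The usable size is recovered entirely from the compound page order, so
page->private no longer needs to carry it.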
Cc: Pekka Penberg <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Ezequiel Garcia <[email protected]>
---
mm/slob.c | 24 ++++++++++--------------
1 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index a08e468..06a5ec7 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
* from kmalloc are prepended with a 4-byte header with the kmalloc size.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
* is false for them.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
@@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
if (likely(order))
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
- if (ret) {
- struct page *page;
- page = virt_to_page(ret);
- page->private = size;
- }

trace_kmalloc_node(caller, ret,
size, PAGE_SIZE << order, gfp, node);
@@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree);
size_t ksize(const void *block)
{
struct page *sp;
+ int align;
+ unsigned int *m;

BUG_ON(!block);
if (unlikely(block == ZERO_SIZE_PTR))
return 0;

sp = virt_to_page(block);
- if (PageSlab(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
- unsigned int *m = (unsigned int *)(block - align);
- return SLOB_UNITS(*m) * SLOB_UNIT;
- } else
- return sp->private;
+ if (unlikely(!PageSlab(sp)))
+ return PAGE_SIZE << compound_order(sp);
+
+ align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ m = (unsigned int *)(block - align);
+ return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

--
1.7.8.6
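For context, the kfree() dispatch that the updated comment refers to
works roughly like this (simplified sketch of the existing slob logic,
not something this patch changes):

	struct page *sp = virt_to_page(block);

	if (PageSlab(sp)) {
		/* small object: return it to the slob free lists */
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else {
		/* whole compound page obtained from alloc_pages() */
		put_page(sp);
	}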
The object_size and size fields are not the same: the latter may
include slab metadata. Return the object_size field from
kmem_cache_size().
Also, improve trace accuracy by tracing the reported size correctly.
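For example (illustrative only, not part of this patch):

	/*
	 * A cache of 100-byte objects: the allocator may pad each object
	 * (alignment, debug metadata), growing ->size beyond 100, while
	 * ->object_size keeps the requested 100.
	 */
	struct kmem_cache *c = kmem_cache_create("demo", 100, 0, 0, NULL);

	pr_info("%u\n", kmem_cache_size(c));	/* reports 100 after this patch */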
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Matt Mackall <[email protected]>
Signed-off-by: Ezequiel Garcia <[email protected]>
---
mm/slob.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 06a5ec7..287a88a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -554,12 +554,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)

if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
SLOB_UNITS(c->size) * SLOB_UNIT,
flags, node);
} else {
b = slob_new_pages(flags, get_order(c->size), node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
PAGE_SIZE << get_order(c->size),
flags, node);
}
@@ -606,7 +606,7 @@ EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
- return c->size;
+ return c->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

--
1.7.8.6
This function is defined identically in all three allocators,
and it is trivial to move it to slab.h.
Since it is now a static inline function defined in a header,
this patch also drops the EXPORT_SYMBOL tag.
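The static inline marking is what makes the move legal and the export
unnecessary (a sketch of the distinction, not a quote of the patch):

	/* OK in a header: every includer gets a private inline copy */
	static inline unsigned int kmem_cache_size(struct kmem_cache *s)
	{
		return s->object_size;
	}

A plain external definition in slab.h would instead be emitted by every
.c file that includes it and fail to link with "multiple definition"
errors; and since no real symbol is emitted at all, there is nothing
left for EXPORT_SYMBOL to export.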
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Matt Mackall <[email protected]>
Signed-off-by: Ezequiel Garcia <[email protected]>
---
include/linux/slab.h | 9 ++++++++-
mm/slab.c | 6 ------
mm/slob.c | 6 ------
mm/slub.c | 9 ---------
4 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 83d1a14..d513147 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);

/*
* Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}

+/*
+ * Determine the size of a slab object
+ */
+static inline unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+ return s->object_size;
+}
+
void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLAB_H */
diff --git a/mm/slab.c b/mm/slab.c
index 87c55b0..92a3fec 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3969,12 +3969,6 @@ void kfree(const void *objp)
}
EXPORT_SYMBOL(kfree);

-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
- return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
diff --git a/mm/slob.c b/mm/slob.c
index 287a88a..fffbc82 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -604,12 +604,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
}
EXPORT_SYMBOL(kmem_cache_free);

-unsigned int kmem_cache_size(struct kmem_cache *c)
-{
- return c->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
int __kmem_cache_shutdown(struct kmem_cache *c)
{
/* No way to check for remaining objects */
diff --git a/mm/slub.c b/mm/slub.c
index a0d6984..1f826b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3127,15 +3127,6 @@ error:
return -EINVAL;
}

-/*
- * Determine the size of a slab object
- */
-unsigned int kmem_cache_size(struct kmem_cache *s)
-{
- return s->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
--
1.7.8.6
On Thu, 18 Oct 2012, Ezequiel Garcia wrote:
> The page->private field was being used to store the allocation size so
> it could be retrieved by ksize(). However, it is bad practice to leave
> a page unmarked as a slab page and then use its fields for special
> purposes.
> There is no need to store the allocated size: ksize() can simply
> return PAGE_SIZE << compound_order(page).
>
> Cc: Pekka Penberg <[email protected]>
Is Pekka Penberg the long distant cousin of Pekka Enberg? :) You should
probably cc the author of slob, Matt Mackall <[email protected]>, on slob
patches.
> Acked-by: Christoph Lameter <[email protected]>
> Signed-off-by: Ezequiel Garcia <[email protected]>
> ---
> mm/slob.c | 24 ++++++++++--------------
> 1 files changed, 10 insertions(+), 14 deletions(-)
>
> diff --git a/mm/slob.c b/mm/slob.c
> index a08e468..06a5ec7 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -28,9 +28,8 @@
> * from kmalloc are prepended with a 4-byte header with the kmalloc size.
> * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
> * alloc_pages() directly, allocating compound pages so the page order
> - * does not have to be separately tracked, and also stores the exact
> - * allocation size in page->private so that it can be used to accurately
> - * provide ksize(). These objects are detected in kfree() because slob_page()
> + * does not have to be separately tracked.
> + * These objects are detected in kfree() because PageSlab()
> * is false for them.
> *
> * SLAB is emulated on top of SLOB by simply calling constructors and
> @@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
> if (likely(order))
> gfp |= __GFP_COMP;
> ret = slob_new_pages(gfp, order, node);
> - if (ret) {
> - struct page *page;
> - page = virt_to_page(ret);
> - page->private = size;
> - }
>
> trace_kmalloc_node(caller, ret,
> size, PAGE_SIZE << order, gfp, node);
> @@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree);
> size_t ksize(const void *block)
> {
> struct page *sp;
> + int align;
> + unsigned int *m;
>
> BUG_ON(!block);
> if (unlikely(block == ZERO_SIZE_PTR))
> return 0;
>
> sp = virt_to_page(block);
> - if (PageSlab(sp)) {
> - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
> - unsigned int *m = (unsigned int *)(block - align);
> - return SLOB_UNITS(*m) * SLOB_UNIT;
> - } else
> - return sp->private;
> + if (unlikely(!PageSlab(sp)))
> + return PAGE_SIZE << compound_order(sp);
> +
> + align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
> + m = (unsigned int *)(block - align);
> + return SLOB_UNITS(*m) * SLOB_UNIT;
> }
> EXPORT_SYMBOL(ksize);
>
On Thu, 18 Oct 2012, Ezequiel Garcia wrote:
> The object_size and size fields are not the same: the latter may
> include slab metadata. Return the object_size field from
> kmem_cache_size().
> Also, improve trace accuracy by tracing the reported size correctly.
>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: Matt Mackall <[email protected]>
> Signed-off-by: Ezequiel Garcia <[email protected]>
Acked-by: David Rientjes <[email protected]>
On Thu, Oct 18, 2012 at 7:46 PM, David Rientjes <[email protected]> wrote:
> On Thu, 18 Oct 2012, Ezequiel Garcia wrote:
>
>> The page->private field was being used to store the allocation size so
>> it could be retrieved by ksize(). However, it is bad practice to leave
>> a page unmarked as a slab page and then use its fields for special
>> purposes.
>> There is no need to store the allocated size: ksize() can simply
>> return PAGE_SIZE << compound_order(page).
>>
>> Cc: Pekka Penberg <[email protected]>
>
> Is Pekka Penberg the long distant cousin of Pekka Enberg? :) You should
> probably cc the author of slob, Matt Mackall <[email protected]>, on slob
> patches.
>
Ouch! ;-)
I found another typo so I'll just re-send the whole patchset.
Thanks for the review!
Ezequiel
On Fri, Oct 19, 2012 at 1:41 AM, Ezequiel Garcia <[email protected]> wrote:
> The page->private field was being used to store the allocation size so
> it could be retrieved by ksize(). However, it is bad practice to leave
> a page unmarked as a slab page and then use its fields for special
> purposes.
> There is no need to store the allocated size: ksize() can simply
> return PAGE_SIZE << compound_order(page).
>
> Cc: Pekka Penberg <[email protected]>
> Acked-by: Christoph Lameter <[email protected]>
> Signed-off-by: Ezequiel Garcia <[email protected]>
Applied all three patches. Thanks, Ezequiel!