__ksize() returns the size of objects allocated from the slab allocator.
When an invalid object is passed to __ksize(), returning zero prevents
further memory corruption and lets the caller check whether an error
occurred.

If the address of a large object is not the beginning of its folio, or
the folio is too small, the object must be invalid. Return zero in such
cases.
Suggested-by: Vlastimil Babka <[email protected]>
Signed-off-by: Hyeonggon Yoo <[email protected]>
---
mm/slab_common.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07ed382ed5a9..acb1d27fc9e3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -986,8 +986,12 @@ size_t __ksize(const void *object)
 	folio = virt_to_folio(object);
 
-	if (unlikely(!folio_test_slab(folio)))
+	if (unlikely(!folio_test_slab(folio))) {
+		if (object != folio_address(folio) ||
+		    folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE)
+			return 0;
 		return folio_size(folio);
+	}
 
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
--
2.33.1
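For illustration only (not part of the patch), a caller could treat the new
zero return as "not a valid kmalloc object" instead of trusting a bogus size.
The helper name below is made up; ksize() is the public entry point that ends
up in __ksize():

#include <linux/slab.h>
#include <linux/printk.h>

/* Illustrative sketch: report the usable size, warn on an invalid object. */
static size_t report_usable_size(const void *p)
{
	size_t sz = ksize(p);

	if (!sz)
		pr_warn("%px is not a valid kmalloc object\n", p);
	return sz;
}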
Vlastimil wrote:
> On 3/8/22 12:41, Hyeonggon Yoo wrote:
> > +
> >  static __always_inline void *
> > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > +		unsigned long caller)
> >  {
> >  	unsigned long save_flags;
> > -	void *objp;
> > +	void *ptr;
> > +	int slab_node = numa_mem_id();
> >  	struct obj_cgroup *objcg = NULL;
> >  	bool init = false;
> >
> > @@ -3299,21 +3255,49 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
> >  	if (unlikely(!cachep))
> >  		return NULL;
> >
> > -	objp = kfence_alloc(cachep, orig_size, flags);
> > -	if (unlikely(objp))
> > -		goto out;
> > +	ptr = kfence_alloc(cachep, orig_size, flags);
> > +	if (unlikely(ptr))
> > +		goto out_hooks;
> >
> >  	cache_alloc_debugcheck_before(cachep, flags);
> >  	local_irq_save(save_flags);
> > -	objp = __do_cache_alloc(cachep, flags);
>
> Looks like after this patch, slab_alloc() (without a node specified)
> will not end up in __do_cache_alloc() anymore, so there's no more
> possibility of alternate_node_alloc(), which looks like a functional
> regression?
>
Ah, that was not intended. Thank you for catching this!
Will fix in v2.
Thank you so much.
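Maybe something along these lines in slab_alloc_node(), right after
local_irq_save() (untested sketch only, not the final v2), so that the
NUMA_NO_NODE case keeps going through __do_cache_alloc() and thus
alternate_node_alloc():

	if (nodeid == NUMA_NO_NODE) {
		/*
		 * No node specified: keep using __do_cache_alloc() so
		 * plain slab_alloc() callers do not lose the
		 * alternate_node_alloc() path (mempolicy / cpuset
		 * memory spreading).
		 */
		ptr = __do_cache_alloc(cachep, flags);
		goto out;
	}

	/* otherwise fall through to the node-specific paths */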
> > +
> > +	if (node_match(nodeid, slab_node)) {
> > +		/*
> > +		 * Use the locally cached objects if possible.
> > +		 * However ____cache_alloc does not allow fallback
> > +		 * to other nodes. It may fail while we still have
> > +		 * objects on other nodes available.
> > +		 */
> > +		ptr = ____cache_alloc(cachep, flags);
> > +		if (ptr)
> > +			goto out;
> > +	}
> > +#ifdef CONFIG_NUMA
> > +	else if (unlikely(!get_node(cachep, nodeid))) {
> > +		/* Node not bootstrapped yet */
> > +		ptr = fallback_alloc(cachep, flags);
> > +		goto out;
> > +	}
> > +
> > +	/* ___cache_alloc_node can fall back to other nodes */
> > +	ptr = ____cache_alloc_node(cachep, flags, nodeid);
> > +#endif
> > +out:
> >  	local_irq_restore(save_flags);
> > -	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
> > -	prefetchw(objp);
> > +	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
> > +	prefetchw(ptr);
> >  	init = slab_want_init_on_alloc(flags, cachep);
> >
> > -out:
> > -	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
> > -	return objp;
> > +out_hooks:
> > +	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
> > +	return ptr;
> > +}
> > +
> > +static __always_inline void *
> > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > +{
> > +	return slab_alloc_node(cachep, flags, NUMA_NO_NODE, orig_size, caller);
> > +}
> >
> >  /*
--
Thank you, you are awesome!
Hyeonggon :-)