When kmem_cache_alloc_bulk() fails part-way through, free the objects
that were already allocated, but leave their now-stale pointers in the
caller's array. This more accurately simulates the kernel's behavior,
where a failed bulk allocation frees the partial allocation and returns
0, and makes it possible to test potential double-free scenarios.
 
Signed-off-by: Peng Zhang <[email protected]>
---
tools/testing/radix-tree/linux.c | 45 +++++++++++++++++++++++---------
1 file changed, 33 insertions(+), 12 deletions(-)
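
A rough sketch of the caller-visible contract this change simulates
(illustrative only; "cachep", the array size, and the gfp expression
below are stand-ins for whatever the test suite sets up, not part of
this patch):

	/* Illustrative only: "cachep" stands in for any cache created
	 * with kmem_cache_create() in the test suite. */
	static void bulk_alloc_contract(struct kmem_cache *cachep)
	{
		void *objs[4] = { NULL };

		/*
		 * With __GFP_DIRECT_RECLAIM clear, the stub consumes
		 * the cache's non_kernel budget one object at a time
		 * and stops early once it is exhausted.
		 */
		if (!kmem_cache_alloc_bulk(cachep,
					   GFP_KERNEL & ~__GFP_DIRECT_RECLAIM,
					   4, objs)) {
			/*
			 * The bulk allocation failed part-way: objects
			 * allocated before the failure were already
			 * returned to the cache, but objs[] may still
			 * hold their stale pointers.  Freeing one of
			 * them again is exactly the double-free
			 * scenario the tests can now exercise.
			 */
		}
	}

The success path is untouched, so existing callers see no change.
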
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 61fe2601cb3a..4eb442206d01 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -93,13 +93,9 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
return p;
}
 
-void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
assert(objp);
- uatomic_dec(&nr_allocated);
- uatomic_dec(&cachep->nr_allocated);
- if (kmalloc_verbose)
- printf("Freeing %p to slab\n", objp);
if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
@@ -111,6 +107,15 @@ void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
}
}
 
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+{
+ uatomic_dec(&nr_allocated);
+ uatomic_dec(&cachep->nr_allocated);
+ if (kmalloc_verbose)
+ printf("Freeing %p to slab\n", objp);
+ __kmem_cache_free_locked(cachep, objp);
+}
+
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
pthread_mutex_lock(&cachep->lock);
@@ -141,18 +146,17 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
if (kmalloc_verbose)
pr_debug("Bulk alloc %lu\n", size);
 
- if (!(gfp & __GFP_DIRECT_RECLAIM)) {
- if (cachep->non_kernel < size)
- return 0;
-
- cachep->non_kernel -= size;
- }
-
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs >= size) {
struct radix_tree_node *node;
 
for (i = 0; i < size; i++) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (!cachep->non_kernel)
+ break;
+ cachep->non_kernel--;
+ }
+
node = cachep->objs;
cachep->nr_objs--;
cachep->objs = node->parent;
@@ -163,11 +167,19 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
} else {
pthread_mutex_unlock(&cachep->lock);
for (i = 0; i < size; i++) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (!cachep->non_kernel)
+ break;
+ cachep->non_kernel--;
+ }
+
if (cachep->align) {
posix_memalign(&p[i], cachep->align,
cachep->size);
} else {
p[i] = malloc(cachep->size);
+ if (!p[i])
+ break;
}
if (cachep->ctor)
cachep->ctor(p[i]);
@@ -176,6 +188,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
}
}
 
+ if (i < size) {
+ size = i;
+ pthread_mutex_lock(&cachep->lock);
+ for (i = 0; i < size; i++)
+ __kmem_cache_free_locked(cachep, p[i]);
+ pthread_mutex_unlock(&cachep->lock);
+ return 0;
+ }
+
for (i = 0; i < size; i++) {
uatomic_inc(&nr_allocated);
uatomic_inc(&cachep->nr_allocated);
--
2.20.1