Mempools keep allocated objects in reserve for situations
when an ordinary allocation may not succeed.
These objects shouldn't be accessed before they leave
the pool.
This patch poisons elements when they enter the pool
and unpoisons them when they leave it. This will let KASan
detect use-after-free of mempool's elements.
Signed-off-by: Andrey Ryabinin <[email protected]>
---
 include/linux/kasan.h |  2 ++
 mm/kasan/kasan.c      | 13 +++++++++++++
 mm/mempool.c          | 23 +++++++++++++++++++++++
 3 files changed, 38 insertions(+)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5bb0744..5486d77 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 936d816..6c513a6 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -389,6 +389,19 @@ void kasan_krealloc(const void *object, size_t size)
kasan_kmalloc(page->slab_cache, object, size);
}
+void kasan_kfree(void *ptr)
+{
+ struct page *page;
+
+ page = virt_to_head_page(ptr);
+
+ if (unlikely(!PageSlab(page)))
+ kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+ KASAN_FREE_PAGE);
+ else
+ kasan_slab_free(page->slab_cache, ptr);
+}
+
void kasan_kfree_large(const void *ptr)
{
struct page *page = virt_to_page(ptr);
diff --git a/mm/mempool.c b/mm/mempool.c
index acd597f..f884e24 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
@@ -100,10 +101,31 @@ static inline void poison_element(mempool_t *pool, void *element)
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_free(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_kfree(element);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_alloc(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_krealloc(element, (size_t)pool->pool_data);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
+
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
+ kasan_poison_element(pool, element);
pool->elements[pool->curr_nr++] = element;
}
@@ -113,6 +135,7 @@ static void *remove_element(mempool_t *pool)
void *element = pool->elements[--pool->curr_nr];
BUG_ON(pool->curr_nr < 0);
check_element(pool, element);
+ kasan_unpoison_element(pool, element);
return element;
}
--
2.3.5
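For concreteness, a minimal sketch of the scenario the new poisoning targets. It assumes a kmalloc-backed mempool whose reserve is currently below min_nr, so mempool_free() returns the element to the pool via add_element() instead of handing it to kfree(); the function name is made up:

#include <linux/gfp.h>
#include <linux/mempool.h>

/*
 * Illustrative only.  'pool' is assumed to be a kmalloc-backed mempool
 * whose reserve is below min_nr at the time of the free.
 */
static void mempool_uaf_example(mempool_t *pool)
{
        void *obj = mempool_alloc(pool, GFP_NOIO);

        mempool_free(obj, pool);  /* element is poisoned as it enters the pool */
        ((char *)obj)[0] = 0;     /* KASan reports a use-after-free here */
}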
On Fri, 03 Apr 2015 17:47:47 +0300 Andrey Ryabinin <[email protected]> wrote:
> Mempools keep allocated objects in reserve for situations
> when an ordinary allocation may not succeed.
> These objects shouldn't be accessed before they leave
> the pool.
> This patch poisons elements when they enter the pool
> and unpoisons them when they leave it. This will let KASan
> detect use-after-free of mempool's elements.
>
> ...
>
> +static void kasan_poison_element(mempool_t *pool, void *element)
> +{
> + if (pool->alloc == mempool_alloc_slab)
> + kasan_slab_free(pool->pool_data, element);
> + if (pool->alloc == mempool_kmalloc)
> + kasan_kfree(element);
> + if (pool->alloc == mempool_alloc_pages)
> + kasan_free_pages(element, (unsigned long)pool->pool_data);
> +}
We recently discovered that mempool pages (from alloc_pages, not slab)
can be in highmem. But kasan appears to handle highmem pages (by
bailing out), so we should be OK with that.
Can kasan be taught to use kmap_atomic() or is it more complicated than
that? It probably isn't worthwhile - highmem pages don't get used by the
kernel much and most bugs will be found using 64-bit testing anyway.
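For reference, the bail-out in question is a PageHighMem() check in kasan's page hooks - highmem pages have no shadow, so they are simply skipped. Roughly (paraphrasing mm/kasan/kasan.c of this vintage, so treat the exact code as illustrative):

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page),
                                      PAGE_SIZE << order);
}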
On Fri, 3 Apr 2015, Andrey Ryabinin wrote:
> Mempools keep allocated objects in reserve for situations
> when an ordinary allocation may not succeed.
> These objects shouldn't be accessed before they leave
> the pool.
> This patch poisons elements when they enter the pool
> and unpoisons them when they leave it. This will let KASan
> detect use-after-free of mempool's elements.
>
> Signed-off-by: Andrey Ryabinin <[email protected]>
Tested-by: David Rientjes <[email protected]>
On 04/04/2015 01:07 AM, Andrew Morton wrote:
> On Fri, 03 Apr 2015 17:47:47 +0300 Andrey Ryabinin <[email protected]> wrote:
>
>> Mempools keep allocated objects in reserve for situations
>> when an ordinary allocation may not succeed.
>> These objects shouldn't be accessed before they leave
>> the pool.
>> This patch poisons elements when they enter the pool
>> and unpoisons them when they leave it. This will let KASan
>> detect use-after-free of mempool's elements.
>>
>> ...
>>
>> +static void kasan_poison_element(mempool_t *pool, void *element)
>> +{
>> + if (pool->alloc == mempool_alloc_slab)
>> + kasan_slab_free(pool->pool_data, element);
>> + if (pool->alloc == mempool_kmalloc)
>> + kasan_kfree(element);
>> + if (pool->alloc == mempool_alloc_pages)
>> + kasan_free_pages(element, (unsigned long)pool->pool_data);
>> +}
>
> We recently discovered that mempool pages (from alloc_pages, not slab)
> can be in highmem. But kasan appears to handle highmem pages (by
> bailing out), so we should be OK with that.
>
> Can kasan be taught to use kmap_atomic() or is it more complicated than
> that? It probably isn't worthwhile - highmem pages don't get used by the
> kernel much and most bugs will be found using 64-bit testing anyway.
>
kasan can only tell whether it's ok to use some virtual address or not,
so it can't be used to catch a use-after-free of a highmem page:
if a highmem page was kmapped at some address, then it's ok to dereference that address.
However, kasan could be used to unpoison/poison addresses on kmap/kunmap to find use-after-kunmap bugs.
AFAIK kunmap has some sort of lazy unmap logic, so a kunmapped page might still be accessible for some time.
Another idea - poison lowmem pages if they were allocated with __GFP_HIGHMEM, unpoison them only on kmap, and poison them back on kunmap.
Generally such pages shouldn't be accessed without mapping them first.
However, this might produce false positives: a caller could check that the page is in lowmem and skip kmap in that case.
It probably isn't worthwhile either - 32-bit testing will find these bugs without kasan.
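For concreteness, a rough sketch of what that kmap/kunmap idea could look like - purely illustrative, not something proposed here. The hook names are made up, and it simply reuses the existing page hooks to flip the shadow state around the mapping calls:

#include <linux/highmem.h>
#include <linux/kasan.h>

/* Would be called where kmap()/kmap_atomic() establish a mapping. */
static inline void kasan_note_kmap(struct page *page)
{
        /* the page hooks already skip real highmem pages (no shadow) */
        kasan_alloc_pages(page, 0);   /* unpoison: page is mapped, access is ok */
}

/* Would be called where kunmap()/kunmap_atomic() tear the mapping down. */
static inline void kasan_note_kunmap(struct page *page)
{
        kasan_free_pages(page, 0);    /* poison again: catch use-after-kunmap */
}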