Every io_uring request is represented by struct io_kiocb, which is
cached locally by io_uring (not SLAB/SLUB) in the list called
submit_state.free_list. This patch enables KASAN for that free list.

The objects in this list are initially allocated from a kmem_cache
created with KMEM_CACHE(), but are afterwards managed by io_uring.
This patch poisons objects while they sit unused in the free list,
and unpoisons an object when it is allocated/removed from the list.

Touching a poisoned object while it is in the free list will trigger
a KASAN warning.
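For illustration only (not part of this patch), the same pattern can be
sketched in userspace with ASan's manual poisoning macros from
<sanitizer/asan_interface.h>; the struct and helper names below are
invented for the example, and the kernel code uses
kasan_poison_object_data()/kasan_unpoison_object_data() instead. Note
the ordering, which mirrors the change below: an object is linked into
the free list before it is poisoned, and unpoisoned again before its
link pointer is read on extraction. Build with -fsanitize=address to
see the report; without it the macros are effectively no-ops.

#include <sanitizer/asan_interface.h>
#include <stdlib.h>

struct req {
        struct req *next;       /* plays the role of comp_list */
        int data;
};

static struct req *free_list;

static void cache_put(struct req *r)
{
        /* Link first, while the object may still be written to... */
        r->next = free_list;
        free_list = r;
        /* ...then mark it off limits while it sits in the cache. */
        ASAN_POISON_MEMORY_REGION(r, sizeof(*r));
}

static struct req *cache_get(void)
{
        struct req *r = free_list;

        if (!r)
                return calloc(1, sizeof(*r));
        /* Unpoison before dereferencing r->next just below. */
        ASAN_UNPOISON_MEMORY_REGION(r, sizeof(*r));
        free_list = r->next;
        return r;
}

int main(void)
{
        struct req *r = cache_get();

        if (!r)
                return 1;
        cache_put(r);
        r->data = 1;    /* touching a cached object: reported as use-after-poison */
        return 0;
}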
Suggested-by: Jens Axboe <[email protected]>
Signed-off-by: Breno Leitao <[email protected]>
---
io_uring/io_uring.c | 3 ++-
io_uring/io_uring.h | 11 ++++++++---
2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2ac1cd8d23ea..8cc0f12034d1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -151,7 +151,7 @@ static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
static __cold void io_fallback_tw(struct io_uring_task *tctx);

-static struct kmem_cache *req_cachep;
+struct kmem_cache *req_cachep;

struct sock *io_uring_get_socket(struct file *file)
{
@@ -230,6 +230,7 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+ kasan_poison_object_data(req_cachep, req);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ab4b2a1c3b7e..0ccf62a19b65 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -3,6 +3,7 @@

#include <linux/errno.h>
#include <linux/lockdep.h>
+#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
@@ -379,12 +380,16 @@ static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
return true;
}

+extern struct kmem_cache *req_cachep;
+
static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
- struct io_wq_work_node *node;
+	struct io_kiocb *req;

-	node = wq_stack_extract(&ctx->submit_state.free_list);
- return container_of(node, struct io_kiocb, comp_list);
+ req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+ kasan_unpoison_object_data(req_cachep, req);
+ wq_stack_extract(&ctx->submit_state.free_list);
+ return req;
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
--
2.30.2
On 1/18/23 15:56, Breno Leitao wrote:
> Every io_uring request is represented by struct io_kiocb, which is
> cached locally by io_uring (not SLAB/SLUB) in the list called
> submit_state.free_list. This patch enables KASAN for that free list.
>
> The objects in this list are initially allocated from a kmem_cache
> created with KMEM_CACHE(), but are afterwards managed by io_uring.
> This patch poisons objects while they sit unused in the free list,
> and unpoisons an object when it is allocated/removed from the list.
>
> Touching a poisoned object while it is in the free list will trigger
> a KASAN warning.
Doesn't apply cleanly to for-6.3/io_uring, but otherwise looks good
Reviewed-by: Pavel Begunkov <[email protected]>
--
Pavel Begunkov
On 1/20/23 8:09 AM, Pavel Begunkov wrote:
> On 1/18/23 15:56, Breno Leitao wrote:
>> Every io_uring request is represented by struct io_kiocb, which is
>> cached locally by io_uring (not SLAB/SLUB) in the list called
>> submit_state.free_list. This patch enables KASAN for that free list.
>>
>> The objects in this list are initially allocated from a kmem_cache
>> created with KMEM_CACHE(), but are afterwards managed by io_uring.
>> This patch poisons objects while they sit unused in the free list,
>> and unpoisons an object when it is allocated/removed from the list.
>>
>> Touching a poisoned object while it is in the free list will trigger
>> a KASAN warning.
>
> Doesn't apply cleanly to for-6.3/io_uring, but otherwise looks good
>
> Reviewed-by: Pavel Begunkov <[email protected]>
I ran testing on this yesterday and noticed the same thing, just a
trivial fuzz reject. I can fix it up while applying. Thanks for
reviewing!
--
Jens Axboe
On Wed, 18 Jan 2023 07:56:30 -0800, Breno Leitao wrote:
> Every io_uring request is represented by struct io_kiocb, which is
> cached locally by io_uring (not SLAB/SLUB) in the list called
> submit_state.free_list. This patch enables KASAN for that free list.
>
> The objects in this list are initially allocated from a kmem_cache
> created with KMEM_CACHE(), but are afterwards managed by io_uring.
> This patch poisons objects while they sit unused in the free list,
> and unpoisons an object when it is allocated/removed from the list.
>
> [...]
Applied, thanks!
[1/1] io_uring: Enable KASAN for request cache
commit: 9f61fe5e410444ccecb429e69536ecd981c73c08
Best regards,
--
Jens Axboe