From: Dylan Yudaken
Date: 2022-04-21 20:05:28
Subject: [PATCH 4/6] io_uring: use constants for cq_overflow bitfield

Prepare to use this bitfield for more flags by using constants instead of
the magic value 0.

Signed-off-by: Dylan Yudaken <[email protected]>
---
fs/io_uring.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
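
As a minimal user-space illustration of the named-bit idiom the patch
adopts, the sketch below uses simplified, non-atomic stand-ins for the
kernel's <linux/bitops.h> set_bit()/clear_bit()/test_bit() (the real
helpers operate atomically on the containing unsigned long); the enum
and the check_cq word mirror the patch.

#include <stdio.h>

/* Bit indices into a flags word, replacing the old magic value 0.
 * Further constants can be appended here as more flags are needed.
 */
enum {
	IO_CHECK_CQ_OVERFLOW_BIT,	/* == 0 */
};

/* Non-atomic stand-ins for the kernel bitops (illustration only). */
static inline void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static inline void clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

static inline int test_bit(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

int main(void)
{
	unsigned long check_cq = 0;	/* stands in for ctx->check_cq */

	/* CQ ring overflowed: record the condition by name, not by 0 */
	set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq);
	printf("overflow pending: %d\n",
	       test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq));

	/* overflow list fully flushed: clear the flag again */
	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq);
	printf("overflow pending: %d\n",
	       test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &check_cq));
	return 0;
}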

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1837b3afa47f..db878c114e16 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -431,7 +431,7 @@ struct io_ring_ctx {
struct wait_queue_head sqo_sq_wait;
struct list_head sqd_list;

- unsigned long check_cq_overflow;
+ unsigned long check_cq;

struct {
/*
@@ -903,6 +903,10 @@ struct io_cqe {
};
};

+enum {
+ IO_CHECK_CQ_OVERFLOW_BIT,
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -2024,7 +2028,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)

all_flushed = list_empty(&ctx->cq_overflow_list);
if (all_flushed) {
- clear_bit(0, &ctx->check_cq_overflow);
+ clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
WRITE_ONCE(ctx->rings->sq_flags,
ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
}
@@ -2040,7 +2044,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
bool ret = true;

- if (test_bit(0, &ctx->check_cq_overflow)) {
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
/* iopoll syncs against uring_lock, not completion_lock */
if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_lock(&ctx->uring_lock);
@@ -2118,7 +2122,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
return false;
}
if (list_empty(&ctx->cq_overflow_list)) {
- set_bit(0, &ctx->check_cq_overflow);
+ set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
WRITE_ONCE(ctx->rings->sq_flags,
ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);

@@ -2961,7 +2965,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- if (test_bit(0, &ctx->check_cq_overflow))
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
__io_cqring_overflow_flush(ctx, false);
if (io_cqring_events(ctx))
return 0;
@@ -8271,7 +8275,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
* Cannot safely flush overflowed CQEs from here, ensure we wake up
* the task, and the next invocation will do it.
*/
- if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
+ if (io_should_wake(iowq) ||
+ test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
return autoremove_wake_function(curr, mode, wake_flags, key);
return -1;
}
@@ -8299,7 +8304,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (ret || io_should_wake(iowq))
return ret;
/* let the caller flush overflows, retry */
- if (test_bit(0, &ctx->check_cq_overflow))
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
return 1;

if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
@@ -10094,7 +10099,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
* Users may get EPOLLIN meanwhile seeing nothing in cqring, this
* pushs them to do the flush.
*/
- if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
+ if (io_cqring_events(ctx) ||
+ test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
mask |= EPOLLIN | EPOLLRDNORM;

return mask;
--
2.30.2