This series implements bio pcpu caching for normal / IRQ-driven I/O,
extending REQ_ALLOC_CACHE, which is currently limited to iopoll. The
allocation side still only works from non-irq context, which is why the
cache is not enabled by default, but turning it on for other users
(e.g. filesystems) is just a matter of passing a flag.
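For reference, opting in from a hypothetical filesystem user would look
roughly like the sketch below (fs_bio_set_cached and nr_vecs are made-up
names; the only real requirements are a bio_set created with
BIOSET_PERCPU_CACHE and allocation with REQ_ALLOC_CACHE from process
context):

	/* set up once: a bio_set with the percpu bio cache enabled */
	ret = bioset_init(&fs_bio_set_cached, BIO_POOL_SIZE, 0,
			  BIOSET_PERCPU_CACHE);

	/* per I/O: pass REQ_ALLOC_CACHE so bios are recycled via the cache */
	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_READ | REQ_ALLOC_CACHE,
			       GFP_KERNEL, &fs_bio_set_cached);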
t/io_uring with an Optane SSD setup showed +7% for batches of 32 requests
and +4.3% for batches of 8.
IRQ, 128/32/32, cache off
IOPS=59.08M, BW=28.84GiB/s, IOS/call=31/31
IOPS=59.30M, BW=28.96GiB/s, IOS/call=32/32
IOPS=59.97M, BW=29.28GiB/s, IOS/call=31/31
IOPS=59.92M, BW=29.26GiB/s, IOS/call=32/32
IOPS=59.81M, BW=29.20GiB/s, IOS/call=32/31
IRQ, 128/32/32, cache on
IOPS=64.05M, BW=31.27GiB/s, IOS/call=32/31
IOPS=64.22M, BW=31.36GiB/s, IOS/call=32/32
IOPS=64.04M, BW=31.27GiB/s, IOS/call=31/31
IOPS=63.16M, BW=30.84GiB/s, IOS/call=32/32
IRQ, 32/8/8, cache off
IOPS=50.60M, BW=24.71GiB/s, IOS/call=7/8
IOPS=50.22M, BW=24.52GiB/s, IOS/call=8/7
IOPS=49.54M, BW=24.19GiB/s, IOS/call=8/8
IOPS=50.07M, BW=24.45GiB/s, IOS/call=7/7
IOPS=50.46M, BW=24.64GiB/s, IOS/call=8/8
IRQ, 32/8/8, cache on
IOPS=51.39M, BW=25.09GiB/s, IOS/call=8/7
IOPS=52.52M, BW=25.64GiB/s, IOS/call=7/8
IOPS=52.57M, BW=25.67GiB/s, IOS/call=8/8
IOPS=52.58M, BW=25.67GiB/s, IOS/call=8/7
IOPS=52.61M, BW=25.69GiB/s, IOS/call=8/8
The main part is in patch 3. It would be great to take patch 1 separately
for 6.1 for extra safety.
Pavel Begunkov (4):
bio: safeguard REQ_ALLOC_CACHE bio put
bio: split pcpu cache part of bio_put into a helper
block/bio: add pcpu caching for non-polling bio_put
io_uring/rw: enable bio caches for IRQ rw
block/bio.c | 92 +++++++++++++++++++++++++++++++++++++++------------
io_uring/rw.c | 3 +-
2 files changed, 73 insertions(+), 22 deletions(-)
--
2.38.0
Now we can use IOCB_ALLOC_CACHE not only for iopoll'ed reads/writes but
also for normal IRQ-driven I/O.
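The kiocb flag only takes effect where a lower layer translates it into
REQ_ALLOC_CACHE on the bio it allocates; e.g. the bdev direct I/O path
does roughly the following (sketch for context only, not part of this
patch, exact code may differ):

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);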
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/rw.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 100de2626e47..ff609b762742 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -667,6 +667,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
ret = kiocb_set_rw_flags(kiocb, rw->flags);
if (unlikely(ret))
return ret;
+ kiocb->ki_flags |= IOCB_ALLOC_CACHE;
/*
* If the file is marked O_NONBLOCK, still allow retry for it if it
@@ -682,7 +683,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
return -EOPNOTSUPP;
kiocb->private = NULL;
- kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
+ kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
} else {
--
2.38.0
bio_put() with REQ_ALLOC_CACHE assumes that it's not executed from irq
context. Let's add a warning if the invariant is not respected,
especially since there are a couple of places removing REQ_POLLED by hand
without also clearing REQ_ALLOC_CACHE.
Signed-off-by: Pavel Begunkov <[email protected]>
---
block/bio.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/bio.c b/block/bio.c
index 7cb7d2ff139b..5b4594daa259 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -741,7 +741,7 @@ void bio_put(struct bio *bio)
return;
}
- if (bio->bi_opf & REQ_ALLOC_CACHE) {
+ if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
struct bio_alloc_cache *cache;
bio_uninit(bio);
--
2.38.0
This patch extends REQ_ALLOC_CACHE to IRQ completions, whereas currently
it's limited to iopoll. Instead of guarding the list with irq toggling on
alloc, which is expensive, it keeps an additional irq-safe list from
which bios are spliced in batches to amortise the overhead. On the put
side it toggles irqs, but in many cases they're already disabled and so
the toggle is cheap.
Signed-off-by: Pavel Begunkov <[email protected]>
---
block/bio.c | 62 ++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index ac16cc154476..75107dc27304 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -25,9 +25,15 @@
#include "blk-rq-qos.h"
#include "blk-cgroup.h"
+#define ALLOC_CACHE_THRESHOLD 16
+#define ALLOC_CACHE_SLACK 64
+#define ALLOC_CACHE_MAX 512
+
struct bio_alloc_cache {
struct bio *free_list;
+ struct bio *free_list_irq;
unsigned int nr;
+ unsigned int nr_irq;
};
static struct biovec_slab {
@@ -408,6 +414,22 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
+static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
+{
+ unsigned long flags;
+
+ /* cache->free_list must be empty */
+ if (WARN_ON_ONCE(cache->free_list))
+ return;
+
+ local_irq_save(flags);
+ cache->free_list = cache->free_list_irq;
+ cache->free_list_irq = NULL;
+ cache->nr += cache->nr_irq;
+ cache->nr_irq = 0;
+ local_irq_restore(flags);
+}
+
static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
struct bio_set *bs)
@@ -416,9 +438,13 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
struct bio *bio;
cache = per_cpu_ptr(bs->cache, get_cpu());
- if (!cache->free_list) {
- put_cpu();
- return NULL;
+ if (!cache->free_list &&
+ READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) {
+ bio_alloc_irq_cache_splice(cache);
+ if (!cache->free_list) {
+ put_cpu();
+ return NULL;
+ }
}
bio = cache->free_list;
cache->free_list = bio->bi_next;
@@ -676,11 +702,8 @@ void guard_bio_eod(struct bio *bio)
bio_truncate(bio, maxsector << 9);
}
-#define ALLOC_CACHE_MAX 512
-#define ALLOC_CACHE_SLACK 64
-
-static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
- unsigned int nr)
+static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+ unsigned int nr)
{
unsigned int i = 0;
struct bio *bio;
@@ -692,6 +715,17 @@ static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
if (++i == nr)
break;
}
+ return i;
+}
+
+static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+ unsigned int nr)
+{
+ nr -= __bio_alloc_cache_prune(cache, nr);
+ if (!READ_ONCE(cache->free_list)) {
+ bio_alloc_irq_cache_splice(cache);
+ __bio_alloc_cache_prune(cache, nr);
+ }
}
static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -728,6 +762,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
static inline void bio_put_percpu_cache(struct bio *bio)
{
struct bio_alloc_cache *cache;
+ unsigned long flags;
cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
bio_uninit(bio);
@@ -737,12 +772,15 @@ static inline void bio_put_percpu_cache(struct bio *bio)
cache->free_list = bio;
cache->nr++;
} else {
- put_cpu();
- bio_free(bio);
- return;
+ local_irq_save(flags);
+ bio->bi_next = cache->free_list_irq;
+ cache->free_list_irq = bio;
+ cache->nr_irq++;
+ local_irq_restore(flags);
}
- if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+ if (READ_ONCE(cache->nr_irq) + cache->nr >
+ ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
put_cpu();
}
--
2.38.0
Extract a helper out of bio_put() for recycling bios into percpu caches.
It's a preparation patch without functional changes.
Signed-off-by: Pavel Begunkov <[email protected]>
---
block/bio.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 5b4594daa259..ac16cc154476 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -725,6 +725,28 @@ static void bio_alloc_cache_destroy(struct bio_set *bs)
bs->cache = NULL;
}
+static inline void bio_put_percpu_cache(struct bio *bio)
+{
+ struct bio_alloc_cache *cache;
+
+ cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
+ bio_uninit(bio);
+
+ if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
+ bio->bi_next = cache->free_list;
+ cache->free_list = bio;
+ cache->nr++;
+ } else {
+ put_cpu();
+ bio_free(bio);
+ return;
+ }
+
+ if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+ bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+ put_cpu();
+}
+
/**
* bio_put - release a reference to a bio
* @bio: bio to release reference to
@@ -740,20 +762,10 @@ void bio_put(struct bio *bio)
if (!atomic_dec_and_test(&bio->__bi_cnt))
return;
}
-
- if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
- struct bio_alloc_cache *cache;
-
- bio_uninit(bio);
- cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
- bio->bi_next = cache->free_list;
- cache->free_list = bio;
- if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
- bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
- put_cpu();
- } else {
+ if (bio->bi_opf & REQ_ALLOC_CACHE)
+ bio_put_percpu_cache(bio);
+ else
bio_free(bio);
- }
}
EXPORT_SYMBOL(bio_put);
--
2.38.0
On 10/18/22 19:47, Pavel Begunkov wrote:
> This series implements bio pcpu caching for normal / IRQ-driven I/O,
> extending REQ_ALLOC_CACHE, which is currently limited to iopoll. The
> allocation side still only works from non-irq context, which is why the
> cache is not enabled by default, but turning it on for other users
> (e.g. filesystems) is just a matter of passing a flag.
Ooops, wrong version, will resend
--
Pavel Begunkov