In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the dm-bufio shrinker, so that it can be freed
asynchronously using kfree_rcu(). This way, releasing the struct
dm_bufio_client no longer has to wait for RCU read-side critical
sections to complete.
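
For reference, the shape of the new API as used below is roughly the
following (a minimal sketch, not code from this patch: my_dev, my_count,
my_scan and the "my-dev" name are hypothetical, standing in for the
dm-bufio equivalents):

	#include <linux/shrinker.h>

	struct my_dev {
		struct shrinker *shrinker;	/* was: struct shrinker shrinker; */
		/* ... per-device caches, counters, etc. ... */
	};

	static unsigned long my_count(struct shrinker *shrink,
				      struct shrink_control *sc)
	{
		/* Context now comes from ->private_data, not container_of(). */
		struct my_dev *d = shrink->private_data;

		return 0;	/* report the number of freeable objects in d */
	}

	static unsigned long my_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
	{
		struct my_dev *d = shrink->private_data;

		return 0;	/* free up to sc->nr_to_scan objects, return count freed */
	}

	static int my_dev_shrinker_init(struct my_dev *d)
	{
		/* Allocate the shrinker dynamically instead of embedding it. */
		d->shrinker = shrinker_alloc(0, "my-dev");
		if (!d->shrinker)
			return -ENOMEM;

		d->shrinker->count_objects = my_count;
		d->shrinker->scan_objects = my_scan;
		d->shrinker->seeks = 1;
		d->shrinker->private_data = d;

		/* Only now does the shrinker become visible to reclaim. */
		shrinker_register(d->shrinker);
		return 0;
	}

Because the struct shrinker is now a separately allocated object, the
shrinker core can defer freeing it with kfree_rcu() after
shrinker_unregister(), instead of making the caller wait out concurrent
RCU readers before tearing down its containing structure.
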
Signed-off-by: Qi Zheng <[email protected]>
---
drivers/md/dm-bufio.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index bc309e41d074..028e30a4b2d0 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -963,7 +963,7 @@ struct dm_bufio_client {
sector_t start;
- struct shrinker shrinker;
+ struct shrinker *shrinker;
struct work_struct shrink_work;
atomic_long_t need_shrink;
@@ -2368,7 +2368,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink
{
struct dm_bufio_client *c;
- c = container_of(shrink, struct dm_bufio_client, shrinker);
+ c = shrink->private_data;
atomic_long_add(sc->nr_to_scan, &c->need_shrink);
queue_work(dm_bufio_wq, &c->shrink_work);
@@ -2377,7 +2377,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink
static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+ struct dm_bufio_client *c = shrink->private_data;
unsigned long count = cache_total(&c->cache);
unsigned long retain_target = get_retain_buffers(c);
unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
@@ -2490,15 +2490,21 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
INIT_WORK(&c->shrink_work, shrink_work);
atomic_long_set(&c->need_shrink, 0);
- c->shrinker.count_objects = dm_bufio_shrink_count;
- c->shrinker.scan_objects = dm_bufio_shrink_scan;
- c->shrinker.seeks = 1;
- c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
- if (r)
+ c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ if (!c->shrinker) {
+ r = -ENOMEM;
goto bad;
+ }
+ c->shrinker->count_objects = dm_bufio_shrink_count;
+ c->shrinker->scan_objects = dm_bufio_shrink_scan;
+ c->shrinker->seeks = 1;
+ c->shrinker->batch = 0;
+ c->shrinker->private_data = c;
+
+ shrinker_register(c->shrinker);
+
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
list_add(&c->client_list, &dm_bufio_all_clients);
@@ -2537,7 +2543,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
drop_buffers(c);
- unregister_shrinker(&c->shrinker);
+ shrinker_unregister(c->shrinker);
flush_work(&c->shrink_work);
mutex_lock(&dm_bufio_clients_lock);
--
2.30.2
> On Jul 24, 2023, at 17:43, Qi Zheng <[email protected]> wrote:
>
> In preparation for implementing lockless slab shrink, use new APIs to
> dynamically allocate the dm-bufio shrinker, so that it can be freed
> asynchronously using kfree_rcu(). This way, releasing the struct
> dm_bufio_client no longer has to wait for RCU read-side critical
> sections to complete.
>
> Signed-off-by: Qi Zheng <[email protected]>
Reviewed-by: Muchun Song <[email protected]>