2012-06-04 14:43:04

by Tao Guo

Subject: [PATCH 1/2] block: Move general unplug callback function from md/raid to blk-core

Other components may also require an unplug callback, so move this
function from md/raid to the generic block layer.

Signed-off-by: Tao Guo <[email protected]>
Cc: Neil Brown <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: <[email protected]>
---
block/blk-core.c | 36 ++++++++++++++++++++++++++++++++-
block/blk-settings.c | 1 +
block/blk.h | 1 -
drivers/md/md.c | 51 ++++-------------------------------------------
drivers/md/md.h | 3 --
drivers/md/raid1.c | 2 +-
drivers/md/raid5.c | 4 +-
include/linux/blkdev.h | 8 ++++++-
8 files changed, 51 insertions(+), 55 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7..5639a3d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2840,6 +2840,39 @@ void blk_start_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_start_plug);

+/* Check that an unplug wakeup will come shortly.
+ */
+bool blk_check_plugged(struct request_queue *q, plug_cb_fn cb_fn)
+{
+        struct blk_plug *plug = current->plug;
+        struct blk_plug_cb *cb;
+
+        if (!plug)
+                return false;
+
+        list_for_each_entry(cb, &plug->cb_list, list) {
+                if (cb->cb_fn == cb_fn && cb->q == q) {
+                        /* Already on the list, move to top */
+                        if (cb != list_first_entry(&plug->cb_list,
+                                                   struct blk_plug_cb,
+                                                   list))
+                                list_move(&cb->list, &plug->cb_list);
+                        return true;
+                }
+        }
+        /* Not currently on the callback list */
+        cb = kmalloc(sizeof(*cb), GFP_ATOMIC);
+        if (!cb)
+                return false;
+
+        cb->q = q;
+        cb->cb_fn = cb_fn;
+        atomic_inc(&q->plug_cnt);
+        list_add(&cb->list, &plug->cb_list);
+        return true;
+}
+EXPORT_SYMBOL(blk_check_plugged);
+
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
@@ -2897,7 +2930,8 @@ static void flush_plug_callbacks(struct blk_plug *plug)
struct blk_plug_cb,
list);
list_del(&cb->list);
- cb->callback(cb);
+ cb->cb_fn(cb);
+ kfree(cb);
}
}

diff --git a/block/blk-settings.c b/block/blk-settings.c
index d3234fc..c54d603 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -181,6 +181,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
+ atomic_set(&q->plug_cnt, 0);

blk_set_default_limits(&q->limits);

diff --git a/block/blk.h b/block/blk.h
index 85f6ae4..a62195b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -33,7 +33,6 @@ bool __blk_end_bidi_request(struct request *rq, int error,
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
-void __generic_unplug_device(struct request_queue *);

/*
* Internal atomic flags for request handling
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c2f904..08b64ef 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,25 +498,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);

-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on. This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
- struct blk_plug_cb cb;
- struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+static void mddev_unplug(struct blk_plug_cb *cb)
{
- struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
- if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
- md_wakeup_thread(mdcb->mddev->thread);
- kfree(mdcb);
+ struct mddev *mddev = cb->q->queuedata;
+ if (atomic_dec_and_test(&cb->q->plug_cnt))
+ md_wakeup_thread(mddev->thread);
}

/* Check that an unplug wakeup will come shortly.
@@ -524,33 +510,7 @@ static void plugger_unplug(struct blk_plug_cb *cb)
*/
int mddev_check_plugged(struct mddev *mddev)
{
- struct blk_plug *plug = current->plug;
- struct md_plug_cb *mdcb;
-
- if (!plug)
- return 0;
-
- list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
- if (mdcb->cb.callback == plugger_unplug &&
- mdcb->mddev == mddev) {
- /* Already on the list, move to top */
- if (mdcb != list_first_entry(&plug->cb_list,
- struct md_plug_cb,
- cb.list))
- list_move(&mdcb->cb.list, &plug->cb_list);
- return 1;
- }
- }
- /* Not currently on the callback list */
- mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
- if (!mdcb)
- return 0;
-
- mdcb->mddev = mddev;
- mdcb->cb.callback = plugger_unplug;
- atomic_inc(&mddev->plug_cnt);
- list_add(&mdcb->cb.list, &plug->cb_list);
- return 1;
+ return blk_check_plugged(mddev->queue, mddev_unplug);
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);

@@ -602,7 +562,6 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
- atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7b4a3c3..91786c4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -266,9 +266,6 @@ struct mddev {
int new_chunk_sectors;
int reshape_backwards;

- atomic_t plug_cnt; /* If device is expecting
- * more bios soon.
- */
struct md_thread *thread; /* management thread */
struct md_thread *sync_thread; /* doing resync or reconstruct */
sector_t curr_resync; /* last block scheduled */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de71..6e6d65f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2170,7 +2170,7 @@ static void raid1d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {

- if (atomic_read(&mddev->plug_cnt) == 0)
+ if (atomic_read(&mddev->queue->plug_cnt) == 0)
flush_pending_writes(conf);

spin_lock_irqsave(&conf->device_lock, flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d267672..e98bf00 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4521,7 +4521,7 @@ static void raid5d(struct mddev *mddev)
while (1) {
struct bio *bio;

- if (atomic_read(&mddev->plug_cnt) == 0 &&
+ if (atomic_read(&mddev->queue->plug_cnt) == 0 &&
!list_empty(&conf->bitmap_list)) {
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
@@ -4531,7 +4531,7 @@ static void raid5d(struct mddev *mddev)
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf);
}
- if (atomic_read(&mddev->plug_cnt) == 0)
+ if (atomic_read(&mddev->queue->plug_cnt) == 0)
raid5_activate_delayed(conf);

while ((bio = remove_bio_from_retry(conf))) {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ba43f40..69945e4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -316,6 +316,9 @@ struct request_queue {
* ll_rw_blk doesn't touch it.
*/
void *queuedata;
+ atomic_t plug_cnt; /* If device is expecting
+ * more bios soon.
+ */

/*
* various queue flags, see QUEUE_* below
@@ -914,12 +917,15 @@ struct blk_plug {

struct blk_plug_cb {
struct list_head list;
- void (*callback)(struct blk_plug_cb *);
+ struct request_queue *q;
+ void (*cb_fn)(struct blk_plug_cb *);
};
+typedef void (plug_cb_fn) (struct blk_plug_cb *cb);

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);
+extern bool blk_check_plugged(struct request_queue *q, plug_cb_fn cb_fn);

static inline void blk_flush_plug(struct task_struct *tsk)
{
--
1.7.7.6
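
A minimal usage sketch, not part of the patch above, showing how another
driver could hook into the new blk_check_plugged() helper. Only
blk_check_plugged(), q->plug_cnt and the blk_plug_cb layout come from the
patch; struct my_dev, queue_bio() and wake_up_worker() are hypothetical
placeholders.

#include <linux/blkdev.h>

struct my_dev {                                 /* hypothetical driver state */
        struct request_queue *queue;            /* queue->queuedata points back here */
};

void queue_bio(struct my_dev *dev, struct bio *bio);    /* hypothetical helper */
void wake_up_worker(struct my_dev *dev);                /* hypothetical helper */

/* Called from flush_plug_callbacks(); the block core kfree()s 'cb' afterwards. */
static void my_unplug(struct blk_plug_cb *cb)
{
        struct my_dev *dev = cb->q->queuedata;

        /* blk_check_plugged() bumped plug_cnt when it registered us; drop it here. */
        if (atomic_dec_and_test(&cb->q->plug_cnt))
                wake_up_worker(dev);
}

static void my_make_request(struct request_queue *q, struct bio *bio)
{
        struct my_dev *dev = q->queuedata;

        queue_bio(dev, bio);
        /*
         * If the submitter holds a plug, my_unplug() runs when the plug is
         * flushed; otherwise kick the work immediately.
         */
        if (!blk_check_plugged(q, my_unplug))
                wake_up_worker(dev);
}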


2012-06-04 14:43:11

by Tao Guo

Subject: [PATCH 2/2] umem: fix up unplugging

An earlier block-layer change by Jens removed the whole
mm_unplug_device() function, which used to be the trigger that made
umem start processing requests.

We need to implement unplugging via the new plug callback mechanism,
or I/O will never be triggered.

Signed-off-by: Tao Guo <[email protected]>
Cc: Neil Brown <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: <[email protected]>
---
drivers/block/umem.c | 11 +++++++++++
1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa27120..89cc9a6 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,15 @@ static void process_page(unsigned long data)
}
}

+static void mm_unplug(struct blk_plug_cb *cb)
+{
+        struct cardinfo *card = cb->q->queuedata;
+
+        spin_lock_irq(&card->lock);
+        activate(card);
+        spin_unlock_irq(&card->lock);
+}
+
static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
@@ -523,6 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
+ if (bio->bi_rw & REQ_SYNC || !blk_check_plugged(q, mm_unplug))
+ activate(card);
spin_unlock_irq(&card->lock);

return;
--
1.7.7.6
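
For context, and not part of the patch: the mm_unplug() callback fires when
the submitter's plug is flushed. A rough sketch of the submitting side,
assuming a hypothetical caller that batches pre-built write bios with the
standard plugging API:

static void submit_batch(struct bio **bios, int nr)     /* hypothetical */
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                /* mm_make_request() queues the bio and registers mm_unplug() */
                submit_bio(WRITE, bios[i]);
        /*
         * Flushing the plug (explicitly here, or implicitly when the task
         * schedules) walks cb_list and calls mm_unplug(), which finally
         * activate()s the card.
         */
        blk_finish_plug(&plug);
}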

2012-06-04 14:49:39

by Tao Guo

Subject: Re: [PATCH 2/2] umem: fix up unplugging

This patch fixes a regression introduced by commit 7eaceaccab5f40
("block: remove per-queue plugging").

-Tao

On Mon, Jun 4, 2012 at 10:41 PM, Tao Guo <[email protected]> wrote:
> An earlier block-layer change by Jens removed the whole
> mm_unplug_device() function, which used to be the trigger that made
> umem start processing requests.
>
> We need to implement unplugging via the new plug callback mechanism,
> or I/O will never be triggered.
>
> Signed-off-by: Tao Guo <[email protected]>
> Cc: Neil Brown <[email protected]>
> Cc: Jens Axboe <[email protected]>
> Cc: <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: <[email protected]>
> ---
>  drivers/block/umem.c |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/block/umem.c b/drivers/block/umem.c
> index aa27120..89cc9a6 100644
> --- a/drivers/block/umem.c
> +++ b/drivers/block/umem.c
> @@ -513,6 +513,15 @@ static void process_page(unsigned long data)
>        }
>  }
>
> +static void mm_unplug(struct blk_plug_cb *cb)
> +{
> +       struct cardinfo *card = cb->q->queuedata;
> +
> +       spin_lock_irq(&card->lock);
> +       activate(card);
> +       spin_unlock_irq(&card->lock);
> +}
> +
>  static void mm_make_request(struct request_queue *q, struct bio *bio)
>  {
>        struct cardinfo *card = q->queuedata;
> @@ -523,6 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
>        *card->biotail = bio;
>        bio->bi_next = NULL;
>        card->biotail = &bio->bi_next;
> +       if (bio->bi_rw & REQ_SYNC || !blk_check_plugged(q, mm_unplug))
> +               activate(card);
>        spin_unlock_irq(&card->lock);
>
>        return;
> --
> 1.7.7.6
>

2012-06-05 01:21:26

by Shaohua Li

Subject: Re: [PATCH 1/2] block: Move general unplug callback function from md/raid to blk-core

On Mon, Jun 04, 2012 at 10:41:50AM -0400, Tao Guo wrote:
> Other components may also require an unplug callback, so move this
> function from md/raid to the generic block layer.

I see no point in making this generic code. For example, why does blk_plug_cb
handle only one request_queue? If umem needs unplugging, just do it there.

2012-06-05 01:53:27

by Tao.Guo

Subject: RE: [PATCH 1/2] block: Move general unplug callback function from md/raid to blk-core

If you ever tried to implement an unplug function in umem, you would find the code was almost identical to that in the md/raid driver.
For other components which also need such an unplug mechanism, it will be much more convenient to have this facility.

PS: What's your specific concern about blk_plug_cb handling each request_queue?

Thanks,
-Tao

> -----Original Message-----
> From: Shaohua Li [mailto:[email protected]] On Behalf Of Shaohua Li
> Sent: Tuesday, June 05, 2012 9:21 AM
> To: Tao Guo
> Cc: [email protected]; Guo, Tao; Neil Brown; Jens Axboe;
> [email protected]; Andrew Morton
> Subject: Re: [PATCH 1/2] block: Move general unplug callback function
> from md/raid to blk-core
>
> On Mon, Jun 04, 2012 at 10:41:50AM -0400, Tao Guo wrote:
> > Other components may also require an unplug callback, so move this
> > function from md/raid to the generic block layer.
>
> I see no point in making this generic code. For example, why does
> blk_plug_cb handle only one request_queue? If umem needs unplugging,
> just do it there.

2012-06-06 02:07:12

by Shaohua Li

Subject: Re: [PATCH 1/2] block: Move general unplug callback function from md/raid to blk-core

2012/6/5 <[email protected]>:
> If you ever tried to implement an unplug function in umem, you would find the code was almost identical to that in the md/raid driver.
> For other components which also need such an unplug mechanism, it will be much more convenient to have this facility.
It's just several lines of code, and your usage actually doesn't need
the counter.
It's also unclear to me why your usage requires moving the callback to
the head of the list.

> PS: What's your specific concern about blk_plug_cb handling each request_queue?
blk_plug_cb stores a request_queue, which looks incorrect to me. Why
can't one plug callback unplug two or more request_queues?
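
Purely for illustration, and not taken from either patch, one shape a
queue-agnostic callback could take: carry an owner-defined pointer instead
of a request_queue, so a single callback can cover any number of queues.

struct blk_plug_cb {
        struct list_head list;
        void (*callback)(struct blk_plug_cb *);
        void *data;             /* owner-defined: an mddev, a cardinfo, ... */
};

/* Register (or find) a callback keyed on (callback, data), not on a queue. */
bool blk_check_plugged(void (*callback)(struct blk_plug_cb *), void *data);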

2012-06-07 17:07:12

by Tao.Guo

Subject: RE: [PATCH 1/2] block: Move general unplug callback function from md/raid to blk-core

Well, I think I will take your suggestions. I am sending another version
of the patch.

Thanks,
-Tao

> -----Original Message-----
> From: [email protected] [mailto:[email protected]] On Behalf Of
> Shaohua Li
> Sent: Wednesday, June 06, 2012 10:07 AM
> To: Guo, Tao
> Cc: [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]
> Subject: Re: [PATCH 1/2] block: Move general unplug callback function
> from md/raid to blk-core
>
> 2012/6/5 <[email protected]>:
> > If you ever tried to implement an unplug function in umem, you would
> > find the code was almost identical to that in the md/raid driver.
> > For other components which also need such an unplug mechanism, it
> > will be much more convenient to have this facility.
> It's just several lines of code, and your usage actually doesn't need
> the counter.
> It's also unclear to me why your usage requires moving the callback to
> the head of the list.
>
> > PS: What's your specific concern about blk_plug_cb handling each
> > request_queue?
> blk_plug_cb stores a request_queue, which looks incorrect to me. Why
> can't one plug callback unplug two or more request_queues?