From: Bob Liu <bob.liu@oracle.com>
To: xen-devel@lists.xen.org
Cc: david.vrabel@citrix.com, linux-kernel@vger.kernel.org, roger.pau@citrix.com,
    konrad.wilk@oracle.com, felipe.franciosi@citrix.com, axboe@fb.com,
    hch@infradead.org, avanzini.arianna@gmail.com, Bob Liu <bob.liu@oracle.com>
Subject: [PATCH 04/10] xen/blkfront: separate ring information to a new struct
Date: Sun, 15 Feb 2015 16:18:59 +0800
Message-Id: <1423988345-4005-5-git-send-email-bob.liu@oracle.com>
In-Reply-To: <1423988345-4005-1-git-send-email-bob.liu@oracle.com>
References: <1423988345-4005-1-git-send-email-bob.liu@oracle.com>
X-Mailer: git-send-email 1.7.10.4

A ring is the representation of a hardware queue. This patch separates the
ring information out of blkfront_info into a new struct, blkfront_ring_info,
in preparation for supporting real multi hardware queues.

Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
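For reviewers skimming the diff, this is the resulting split in condensed
form (field lists abbreviated from the patch below; the comment about a
later conversion to multiple rings is my reading of the series cover, not
code in this patch):

        /* Per-ring state: one instance per hardware queue. */
        struct blkfront_ring_info {
                spinlock_t io_lock;
                int ring_ref;
                struct blkif_front_ring ring;
                unsigned int evtchn, irq;
                struct work_struct work;
                struct gnttab_free_callback callback;
                struct blk_shadow shadow[BLK_RING_SIZE];
                struct list_head grants;
                struct list_head indirect_pages;
                unsigned int persistent_gnts_c;
                unsigned long shadow_free;
                struct blkfront_info *info;  /* back-pointer to the device */
        };

        /* Per-device state: one instance per vbd. */
        struct blkfront_info {
                struct mutex mutex;
                struct xenbus_device *xbdev;
                struct gendisk *gd;
                /* ... device-wide features and state ... */
                struct blkfront_ring_info rinfo; /* exactly one ring here for
                                                    now; later patches can turn
                                                    this into per-queue
                                                    allocations */
        };
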
 drivers/block/xen-blkfront.c | 403 +++++++++++++++++++++++--------------------
 1 file changed, 218 insertions(+), 185 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5a90a51..aaa4a0e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -102,23 +102,15 @@ MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default
 #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
- * We have one of these per vbd, whether ide, scsi or 'other'.  They
- * hang in private_data off the gendisk structure. We may end up
- * putting all kinds of interesting stuff here :-)
+ * Per-ring info.
+ * A blkfront_info structure can associate with one or more blkfront_ring_info,
+ * depending on how many hardware queues supported.
  */
-struct blkfront_info
-{
+struct blkfront_ring_info {
         spinlock_t io_lock;
-        struct mutex mutex;
-        struct xenbus_device *xbdev;
-        struct gendisk *gd;
-        int vdevice;
-        blkif_vdev_t handle;
-        enum blkif_state connected;
         int ring_ref;
         struct blkif_front_ring ring;
         unsigned int evtchn, irq;
-        struct request_queue *rq;
         struct work_struct work;
         struct gnttab_free_callback callback;
         struct blk_shadow shadow[BLK_RING_SIZE];
@@ -126,6 +118,22 @@ struct blkfront_info
         struct list_head indirect_pages;
         unsigned int persistent_gnts_c;
         unsigned long shadow_free;
+        struct blkfront_info *info;
+};
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct blkfront_info {
+        struct mutex mutex;
+        struct xenbus_device *xbdev;
+        struct gendisk *gd;
+        int vdevice;
+        blkif_vdev_t handle;
+        enum blkif_state connected;
+        struct request_queue *rq;
         unsigned int feature_flush;
         unsigned int feature_discard:1;
         unsigned int feature_secdiscard:1;
@@ -135,6 +143,7 @@ struct blkfront_info
         unsigned int max_indirect_segments;
         int is_ready;
         struct blk_mq_tag_set tag_set;
+        struct blkfront_ring_info rinfo;
 };
 
 static unsigned int nr_minors;
@@ -167,34 +176,35 @@ static DEFINE_SPINLOCK(minor_lock);
 #define INDIRECT_GREFS(_segs) \
         ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
-static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 
-static int get_id_from_freelist(struct blkfront_info *info)
+static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
-        unsigned long free = info->shadow_free;
+        unsigned long free = rinfo->shadow_free;
         BUG_ON(free >= BLK_RING_SIZE);
-        info->shadow_free = info->shadow[free].req.u.rw.id;
-        info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
+        rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
+        rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
         return free;
 }
 
-static int add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
                                unsigned long id)
 {
-        if (info->shadow[id].req.u.rw.id != id)
+        if (rinfo->shadow[id].req.u.rw.id != id)
                 return -EINVAL;
-        if (info->shadow[id].request == NULL)
+        if (rinfo->shadow[id].request == NULL)
                 return -EINVAL;
-        info->shadow[id].req.u.rw.id  = info->shadow_free;
-        info->shadow[id].request = NULL;
-        info->shadow_free = id;
+        rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
+        rinfo->shadow[id].request = NULL;
+        rinfo->shadow_free = id;
         return 0;
 }
 
-static int fill_grant_buffer(struct blkfront_info *info, int num)
+static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 {
         struct page *granted_page;
         struct grant *gnt_list_entry, *n;
+        struct blkfront_info *info = rinfo->info;
         int i = 0;
 
         while(i < num) {
@@ -212,7 +222,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
                 }
 
                 gnt_list_entry->gref = GRANT_INVALID_REF;
-                list_add(&gnt_list_entry->node, &info->grants);
+                list_add(&gnt_list_entry->node, &rinfo->grants);
                 i++;
         }
 
@@ -220,7 +230,7 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 
 out_of_memory:
         list_for_each_entry_safe(gnt_list_entry, n,
-                                 &info->grants, node) {
+                                 &rinfo->grants, node) {
                 list_del(&gnt_list_entry->node);
                 if (info->feature_persistent)
                         __free_page(pfn_to_page(gnt_list_entry->pfn));
@@ -232,33 +242,33 @@ out_of_memory:
 }
 
 static struct grant *get_grant(grant_ref_t *gref_head,
-                               unsigned long pfn,
-                               struct blkfront_info *info)
+                               unsigned long pfn,
+                               struct blkfront_ring_info *rinfo)
 {
         struct grant *gnt_list_entry;
         unsigned long buffer_mfn;
 
-        BUG_ON(list_empty(&info->grants));
-        gnt_list_entry = list_first_entry(&info->grants, struct grant,
+        BUG_ON(list_empty(&rinfo->grants));
+        gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
                                           node);
         list_del(&gnt_list_entry->node);
 
         if (gnt_list_entry->gref != GRANT_INVALID_REF) {
-                info->persistent_gnts_c--;
+                rinfo->persistent_gnts_c--;
                 return gnt_list_entry;
         }
 
         /* Assign a gref to this page */
         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
         BUG_ON(gnt_list_entry->gref == -ENOSPC);
-        if (!info->feature_persistent) {
+        if (!rinfo->info->feature_persistent) {
                 BUG_ON(!pfn);
                 gnt_list_entry->pfn = pfn;
         }
         buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
         gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
-                                        info->xbdev->otherend_id,
-                                        buffer_mfn, 0);
+                                        rinfo->info->xbdev->otherend_id,
+                                        buffer_mfn, 0);
         return gnt_list_entry;
 }
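A note for readers new to this pattern: get_id_from_freelist() and
add_id_to_freelist() above thread the free list through the shadow array
itself -- a free slot's req.u.rw.id field holds the index of the next free
slot, so no extra allocation is needed to track free request ids. A
standalone user-space sketch of the same idea (names are illustrative, not
from the driver):

        #include <assert.h>

        #define RING_SIZE 32

        struct slot {
                unsigned long next_free;        /* overlays req.u.rw.id */
        };

        static struct slot shadow[RING_SIZE];
        static unsigned long shadow_free;       /* index of first free slot */

        static void freelist_init(void)
        {
                int i;

                for (i = 0; i < RING_SIZE; i++)
                        shadow[i].next_free = i + 1;    /* slot i -> slot i+1 */
                shadow[RING_SIZE - 1].next_free = 0x0fffffff; /* end marker */
                shadow_free = 0;
        }

        static unsigned long get_id(void)
        {
                unsigned long free = shadow_free;

                assert(free < RING_SIZE);
                shadow_free = shadow[free].next_free;   /* pop the head */
                return free;
        }

        static void put_id(unsigned long id)
        {
                shadow[id].next_free = shadow_free;     /* push back as head */
                shadow_free = id;
        }
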
@@ -328,8 +338,9 @@ static void xlbd_release_minors(unsigned int minor, unsigned int nr)
 
 static void blkif_restart_queue_callback(void *arg)
 {
-        struct blkfront_info *info = (struct blkfront_info *)arg;
-        schedule_work(&info->work);
+        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
+
+        schedule_work(&rinfo->work);
 }
 
 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
@@ -387,7 +398,8 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
  *
  * @req: a request struct
  */
-static int blkif_queue_request(struct request *req)
+static int blkif_queue_request(struct request *req,
+                               struct blkfront_ring_info *rinfo)
 {
         struct blkfront_info *info = req->rq_disk->private_data;
         struct blkif_request *ring_req;
@@ -419,15 +431,15 @@ static int blkif_queue_request(struct request *req)
                 max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
 
         /* Check if we have enough grants to allocate a requests */
-        if (info->persistent_gnts_c < max_grefs) {
+        if (rinfo->persistent_gnts_c < max_grefs) {
                 new_persistent_gnts = 1;
                 if (gnttab_alloc_grant_references(
-                    max_grefs - info->persistent_gnts_c,
+                    max_grefs - rinfo->persistent_gnts_c,
                     &gref_head) < 0) {
                         gnttab_request_free_callback(
-                                &info->callback,
+                                &rinfo->callback,
                                 blkif_restart_queue_callback,
-                                info,
+                                rinfo,
                                 max_grefs);
                         return 1;
                 }
@@ -435,9 +447,9 @@ static int blkif_queue_request(struct request *req)
                 new_persistent_gnts = 0;
 
         /* Fill out a communications ring structure. */
-        ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-        id = get_id_from_freelist(info);
-        info->shadow[id].request = req;
+        ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
+        id = get_id_from_freelist(rinfo);
+        rinfo->shadow[id].request = req;
 
         if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
                 ring_req->operation = BLKIF_OP_DISCARD;
@@ -453,7 +465,7 @@ static int blkif_queue_request(struct request *req)
                         req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
                 BUG_ON(info->max_indirect_segments &&
                        req->nr_phys_segments > info->max_indirect_segments);
-                nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+                nseg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
                 ring_req->u.rw.id = id;
                 if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                         /*
@@ -496,7 +508,7 @@ static int blkif_queue_request(struct request *req)
                         }
                         ring_req->u.rw.nr_segments = nseg;
                 }
-                for_each_sg(info->shadow[id].sg, sg, nseg, i) {
+                for_each_sg(rinfo->shadow[id].sg, sg, nseg, i) {
                         fsect = sg->offset >> 9;
                         lsect = fsect + (sg->length >> 9) - 1;
 
@@ -512,22 +524,22 @@ static int blkif_queue_request(struct request *req)
                                         struct page *indirect_page;
 
                                         /* Fetch a pre-allocated page to use for indirect grefs */
-                                        BUG_ON(list_empty(&info->indirect_pages));
-                                        indirect_page = list_first_entry(&info->indirect_pages,
+                                        BUG_ON(list_empty(&rinfo->indirect_pages));
+                                        indirect_page = list_first_entry(&rinfo->indirect_pages,
                                                                          struct page, lru);
                                         list_del(&indirect_page->lru);
                                         pfn = page_to_pfn(indirect_page);
                                 }
-                                gnt_list_entry = get_grant(&gref_head, pfn, info);
-                                info->shadow[id].indirect_grants[n] = gnt_list_entry;
+                                gnt_list_entry = get_grant(&gref_head, pfn, rinfo);
+                                rinfo->shadow[id].indirect_grants[n] = gnt_list_entry;
                                 segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
                                 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
                         }
 
-                        gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
+                        gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), rinfo);
                         ref = gnt_list_entry->gref;
 
-                        info->shadow[id].grants_used[i] = gnt_list_entry;
+                        rinfo->shadow[id].grants_used[i] = gnt_list_entry;
 
                         if (rq_data_dir(req) && info->feature_persistent) {
                                 char *bvec_data;
@@ -573,10 +585,10 @@ static int blkif_queue_request(struct request *req)
                         kunmap_atomic(segments);
         }
 
-        info->ring.req_prod_pvt++;
+        rinfo->ring.req_prod_pvt++;
 
         /* Keep a private copy so we can reissue requests when recovering. */
-        info->shadow[id].req = *ring_req;
+        rinfo->shadow[id].req = *ring_req;
 
         if (new_persistent_gnts)
                 gnttab_free_grant_references(gref_head);
@@ -585,14 +597,14 @@
 }
 
 
-static inline void flush_requests(struct blkfront_info *info)
+static inline void flush_requests(struct blkfront_ring_info *rinfo)
 {
         int notify;
 
-        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
+        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
 
         if (notify)
-                notify_remote_via_irq(info->irq);
+                notify_remote_via_irq(rinfo->irq);
 }
 
 static inline bool blkif_request_flush_invalid(struct request *req,
@@ -608,40 +620,50 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 static int blk_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *qd)
 {
-        struct blkfront_info *info = qd->rq->rq_disk->private_data;
+        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
         int ret = BLK_MQ_RQ_QUEUE_OK;
 
         blk_mq_start_request(qd->rq);
-        spin_lock_irq(&info->io_lock);
-        if (RING_FULL(&info->ring)) {
-                spin_unlock_irq(&info->io_lock);
+        spin_lock_irq(&rinfo->io_lock);
+        if (RING_FULL(&rinfo->ring)) {
+                spin_unlock_irq(&rinfo->io_lock);
                 blk_mq_stop_hw_queue(hctx);
                 ret = BLK_MQ_RQ_QUEUE_BUSY;
                 goto out;
         }
 
-        if (blkif_request_flush_invalid(qd->rq, info)) {
-                spin_unlock_irq(&info->io_lock);
+        if (blkif_request_flush_invalid(qd->rq, rinfo->info)) {
+                spin_unlock_irq(&rinfo->io_lock);
                 ret = BLK_MQ_RQ_QUEUE_ERROR;
                 goto out;
         }
 
-        if (blkif_queue_request(qd->rq)) {
-                spin_unlock_irq(&info->io_lock);
+        if (blkif_queue_request(qd->rq, rinfo)) {
+                spin_unlock_irq(&rinfo->io_lock);
                 blk_mq_stop_hw_queue(hctx);
                 ret = BLK_MQ_RQ_QUEUE_BUSY;
                 goto out;
         }
 
-        flush_requests(info);
-        spin_unlock_irq(&info->io_lock);
+        flush_requests(rinfo);
+        spin_unlock_irq(&rinfo->io_lock);
 out:
         return ret;
 }
 
+static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                            unsigned int index)
+{
+        struct blkfront_info *info = (struct blkfront_info *)data;
+
+        hctx->driver_data = &info->rinfo;
+        return 0;
+}
+
 static struct blk_mq_ops blkfront_mq_ops = {
         .queue_rq = blk_mq_queue_rq,
         .map_queue = blk_mq_map_queue,
+        .init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
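For context on the two hunks above: .init_hctx is the blk-mq callback that
runs once per hardware context when the tag set is registered, and whatever
it stashes in hctx->driver_data is handed back to .queue_rq -- that is what
lets blk_mq_queue_rq() drop the rq_disk->private_data lookup. A condensed
sketch of the pattern against the blk-mq API of this kernel generation
(my_dev, my_ring, and the 4-ring array are illustrative, not from the
driver):

        #include <linux/blk-mq.h>

        struct my_ring { int id; /* per-queue state, cf. blkfront_ring_info */ };
        struct my_dev  { struct my_ring rings[4]; };

        static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                unsigned int index)
        {
                struct my_dev *dev = data;      /* tag_set->driver_data */

                /*
                 * With a single ring per device, as in this patch, index is
                 * always 0; a multi-queue follow-up can map index to
                 * rings[index] here without touching queue_rq at all.
                 */
                hctx->driver_data = &dev->rings[index];
                return 0;
        }

        static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
                               const struct blk_mq_queue_data *qd)
        {
                struct my_ring *ring = hctx->driver_data; /* no lookup */

                /* ... queue qd->rq onto this ring ... */
                return BLK_MQ_RQ_QUEUE_OK;
        }
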
@@ -894,6 +916,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
         unsigned int minor, nr_minors;
+        struct blkfront_ring_info *rinfo = &info->rinfo;
 
         if (info->rq == NULL)
                 return;
@@ -902,10 +925,10 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
         blk_mq_stop_hw_queues(info->rq);
 
         /* No more gnttab callback work. */
-        gnttab_cancel_free_callback(&info->callback);
+        gnttab_cancel_free_callback(&rinfo->callback);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
-        flush_work(&info->work);
+        flush_work(&rinfo->work);
 
         del_gendisk(info->gd);
 
@@ -921,25 +944,25 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
         info->gd = NULL;
 }
 
-static void kick_pending_request_queues(struct blkfront_info *info)
+static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&info->io_lock, flags);
-        if (!RING_FULL(&info->ring)) {
-                spin_unlock_irqrestore(&info->io_lock, flags);
-                blk_mq_start_stopped_hw_queues(info->rq, true);
+        spin_lock_irqsave(&rinfo->io_lock, flags);
+        if (!RING_FULL(&rinfo->ring)) {
+                spin_unlock_irqrestore(&rinfo->io_lock, flags);
+                blk_mq_start_stopped_hw_queues(rinfo->info->rq, true);
                 return;
         }
-        spin_unlock_irqrestore(&info->io_lock, flags);
+        spin_unlock_irqrestore(&rinfo->io_lock, flags);
 }
 
 static void blkif_restart_queue(struct work_struct *work)
 {
-        struct blkfront_info *info = container_of(work, struct blkfront_info, work);
+        struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
 
-        if (info->connected == BLKIF_STATE_CONNECTED)
-                kick_pending_request_queues(info);
+        if (rinfo->info->connected == BLKIF_STATE_CONNECTED)
+                kick_pending_request_queues(rinfo);
 }
 
 static void blkif_free(struct blkfront_info *info, int suspend)
@@ -947,6 +970,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
         struct grant *persistent_gnt;
         struct grant *n;
         int i, j, segs;
+        struct blkfront_ring_info *rinfo = &info->rinfo;
 
         /* Prevent new requests being issued until we fix things up. */
         info->connected = suspend ?
@@ -955,33 +979,33 @@ static void blkif_free(struct blkfront_info *info, int suspend)
         if (info->rq)
                 blk_mq_stop_hw_queues(info->rq);
 
-        spin_lock_irq(&info->io_lock);
+        spin_lock_irq(&rinfo->io_lock);
 
         /* Remove all persistent grants */
-        if (!list_empty(&info->grants)) {
+        if (!list_empty(&rinfo->grants)) {
                 list_for_each_entry_safe(persistent_gnt, n,
-                                         &info->grants, node) {
+                                         &rinfo->grants, node) {
                         list_del(&persistent_gnt->node);
                         if (persistent_gnt->gref != GRANT_INVALID_REF) {
                                 gnttab_end_foreign_access(persistent_gnt->gref,
-                                                          0, 0UL);
-                                info->persistent_gnts_c--;
+                                                          0, 0UL);
+                                rinfo->persistent_gnts_c--;
                         }
                         if (info->feature_persistent)
                                 __free_page(pfn_to_page(persistent_gnt->pfn));
                         kfree(persistent_gnt);
                 }
         }
-        BUG_ON(info->persistent_gnts_c != 0);
+        BUG_ON(rinfo->persistent_gnts_c != 0);
 
         /*
          * Remove indirect pages, this only happens when using indirect
          * descriptors but not persistent grants
          */
-        if (!list_empty(&info->indirect_pages)) {
+        if (!list_empty(&rinfo->indirect_pages)) {
                 struct page *indirect_page, *n;
 
                 BUG_ON(info->feature_persistent);
-                list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+                list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
                         list_del(&indirect_page->lru);
                         __free_page(indirect_page);
                 }
@@ -992,21 +1016,21 @@ static void blkif_free(struct blkfront_info *info, int suspend)
                  * Clear persistent grants present in requests already
                  * on the shared ring
                  */
-                if (!info->shadow[i].request)
+                if (!rinfo->shadow[i].request)
                         goto free_shadow;
 
-                segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
-                       info->shadow[i].req.u.indirect.nr_segments :
-                       info->shadow[i].req.u.rw.nr_segments;
+                segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
+                       rinfo->shadow[i].req.u.indirect.nr_segments :
+                       rinfo->shadow[i].req.u.rw.nr_segments;
                 for (j = 0; j < segs; j++) {
-                        persistent_gnt = info->shadow[i].grants_used[j];
+                        persistent_gnt = rinfo->shadow[i].grants_used[j];
                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
                         if (info->feature_persistent)
                                 __free_page(pfn_to_page(persistent_gnt->pfn));
                         kfree(persistent_gnt);
                 }
 
-                if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
+                if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
                         /*
                          * If this is not an indirect operation don't try to
                          * free indirect segments
@@ -1014,42 +1038,42 @@
                         goto free_shadow;
 
                 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
-                        persistent_gnt = info->shadow[i].indirect_grants[j];
+                        persistent_gnt = rinfo->shadow[i].indirect_grants[j];
                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
                         __free_page(pfn_to_page(persistent_gnt->pfn));
                         kfree(persistent_gnt);
                 }
 
 free_shadow:
-                kfree(info->shadow[i].grants_used);
-                info->shadow[i].grants_used = NULL;
-                kfree(info->shadow[i].indirect_grants);
-                info->shadow[i].indirect_grants = NULL;
-                kfree(info->shadow[i].sg);
-                info->shadow[i].sg = NULL;
+                kfree(rinfo->shadow[i].grants_used);
+                rinfo->shadow[i].grants_used = NULL;
+                kfree(rinfo->shadow[i].indirect_grants);
+                rinfo->shadow[i].indirect_grants = NULL;
+                kfree(rinfo->shadow[i].sg);
+                rinfo->shadow[i].sg = NULL;
         }
 
         /* No more gnttab callback work. */
-        gnttab_cancel_free_callback(&info->callback);
-        spin_unlock_irq(&info->io_lock);
+        gnttab_cancel_free_callback(&rinfo->callback);
+        spin_unlock_irq(&rinfo->io_lock);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
-        flush_work(&info->work);
+        flush_work(&rinfo->work);
 
         /* Free resources associated with old device channel. */
-        if (info->ring_ref != GRANT_INVALID_REF) {
-                gnttab_end_foreign_access(info->ring_ref, 0,
-                                          (unsigned long)info->ring.sring);
-                info->ring_ref = GRANT_INVALID_REF;
-                info->ring.sring = NULL;
+        if (rinfo->ring_ref != GRANT_INVALID_REF) {
+                gnttab_end_foreign_access(rinfo->ring_ref, 0,
+                                          (unsigned long)rinfo->ring.sring);
+                rinfo->ring_ref = GRANT_INVALID_REF;
+                rinfo->ring.sring = NULL;
         }
-        if (info->irq)
-                unbind_from_irqhandler(info->irq, info);
-        info->evtchn = info->irq = 0;
+        if (rinfo->irq)
+                unbind_from_irqhandler(rinfo->irq, rinfo);
+        rinfo->evtchn = rinfo->irq = 0;
 
 }
 
-static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *rinfo,
                              struct blkif_response *bret)
 {
         int i = 0;
@@ -1057,6 +1081,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
         char *bvec_data;
         void *shared_data;
         int nseg;
+        struct blkfront_info *info = rinfo->info;
 
         nseg = s->req.operation == BLKIF_OP_INDIRECT ?
                s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
@@ -1092,8 +1117,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                         if (!info->feature_persistent)
                                 pr_alert_ratelimited("backed has not unmapped grant: %u\n",
                                                      s->grants_used[i]->gref);
-                        list_add(&s->grants_used[i]->node, &info->grants);
-                        info->persistent_gnts_c++;
+                        list_add(&s->grants_used[i]->node, &rinfo->grants);
+                        rinfo->persistent_gnts_c++;
                 } else {
                         /*
                          * If the grant is not mapped by the backend we end the
@@ -1103,7 +1128,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                          */
                         gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
                         s->grants_used[i]->gref = GRANT_INVALID_REF;
-                        list_add_tail(&s->grants_used[i]->node, &info->grants);
+                        list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
                 }
         }
         if (s->req.operation == BLKIF_OP_INDIRECT) {
@@ -1112,8 +1137,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                         if (!info->feature_persistent)
                                 pr_alert_ratelimited("backed has not unmapped grant: %u\n",
                                                      s->indirect_grants[i]->gref);
-                        list_add(&s->indirect_grants[i]->node, &info->grants);
-                        info->persistent_gnts_c++;
+                        list_add(&s->indirect_grants[i]->node, &rinfo->grants);
+                        rinfo->persistent_gnts_c++;
                 } else {
                         struct page *indirect_page;
 
@@ -1123,9 +1148,9 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                          * available pages for indirect grefs.
                          */
                         indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-                        list_add(&indirect_page->lru, &info->indirect_pages);
+                        list_add(&indirect_page->lru, &rinfo->indirect_pages);
                         s->indirect_grants[i]->gref = GRANT_INVALID_REF;
-                        list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+                        list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
                 }
         }
 }
@@ -1137,20 +1162,21 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
         struct blkif_response *bret;
         RING_IDX i, rp;
         unsigned long flags;
-        struct blkfront_info *info = (struct blkfront_info *)dev_id;
+        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
+        struct blkfront_info *info = rinfo->info;
 
         if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                 return IRQ_HANDLED;
 
-        spin_lock_irqsave(&info->io_lock, flags);
+        spin_lock_irqsave(&rinfo->io_lock, flags);
 
  again:
-        rp = info->ring.sring->rsp_prod;
+        rp = rinfo->ring.sring->rsp_prod;
         rmb(); /* Ensure we see queued responses up to 'rp'. */
 
-        for (i = info->ring.rsp_cons; i != rp; i++) {
+        for (i = rinfo->ring.rsp_cons; i != rp; i++) {
                 unsigned long id;
 
-                bret = RING_GET_RESPONSE(&info->ring, i);
+                bret = RING_GET_RESPONSE(&rinfo->ring, i);
                 id   = bret->id;
                 /*
                  * The backend has messed up and given us an id that we would
@@ -1164,12 +1190,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                          * the id is busted.
                          */
                         continue;
                 }
-                req  = info->shadow[id].request;
+                req  = rinfo->shadow[id].request;
 
                 if (bret->operation != BLKIF_OP_DISCARD)
-                        blkif_completion(&info->shadow[id], info, bret);
+                        blkif_completion(&rinfo->shadow[id], rinfo, bret);
 
-                if (add_id_to_freelist(info, id)) {
+                if (add_id_to_freelist(rinfo, id)) {
                         WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
                              info->gd->disk_name, op_name(bret->operation), id);
                         continue;
@@ -1198,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                 req->errors = -EOPNOTSUPP;
                         }
                         if (unlikely(bret->status == BLKIF_RSP_ERROR &&
-                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
+                                     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
                                 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                        info->gd->disk_name, op_name(bret->operation));
                                 req->errors = -EOPNOTSUPP;
@@ -1223,30 +1249,30 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                 }
         }
 
-        info->ring.rsp_cons = i;
+        rinfo->ring.rsp_cons = i;
 
-        if (i != info->ring.req_prod_pvt) {
+        if (i != rinfo->ring.req_prod_pvt) {
                 int more_to_do;
-                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+                RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
                 if (more_to_do)
                         goto again;
         } else
-                info->ring.sring->rsp_event = i + 1;
+                rinfo->ring.sring->rsp_event = i + 1;
 
-        spin_unlock_irqrestore(&info->io_lock, flags);
-        kick_pending_request_queues(info);
+        spin_unlock_irqrestore(&rinfo->io_lock, flags);
+        kick_pending_request_queues(rinfo);
 
         return IRQ_HANDLED;
 }
 
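blkif_interrupt() above is an instance of the standard Xen shared-ring
consumer loop from xen/interface/io/ring.h; stripped of the blkfront
completion work, the protocol it follows reduces to this sketch (same
macros the driver uses; the completion step is left as a comment):

        static void consume_responses(struct blkif_front_ring *ring)
        {
                RING_IDX i, rp;
                int more_to_do;

        again:
                rp = ring->sring->rsp_prod;
                rmb();  /* read rsp_prod before reading the responses */

                for (i = ring->rsp_cons; i != rp; i++) {
                        struct blkif_response *bret = RING_GET_RESPONSE(ring, i);
                        /* ... complete the request identified by bret->id ... */
                }
                ring->rsp_cons = i;

                if (i != ring->req_prod_pvt) {
                        /*
                         * Requests are still in flight; re-check so a response
                         * produced after the pass above is not left unhandled
                         * until the next interrupt.
                         */
                        RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
                        if (more_to_do)
                                goto again;
                } else
                        ring->sring->rsp_event = i + 1;
        }
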
 static int setup_blkring(struct xenbus_device *dev,
-                         struct blkfront_info *info)
+                         struct blkfront_ring_info *rinfo)
 {
         struct blkif_sring *sring;
         int err;
 
-        info->ring_ref = GRANT_INVALID_REF;
+        rinfo->ring_ref = GRANT_INVALID_REF;
 
         sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
         if (!sring) {
@@ -1254,32 +1280,32 @@ static int setup_blkring(struct xenbus_device *dev,
                 return -ENOMEM;
         }
         SHARED_RING_INIT(sring);
-        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+        FRONT_RING_INIT(&rinfo->ring, sring, PAGE_SIZE);
 
-        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+        err = xenbus_grant_ring(dev, virt_to_mfn(rinfo->ring.sring));
         if (err < 0) {
                 free_page((unsigned long)sring);
-                info->ring.sring = NULL;
+                rinfo->ring.sring = NULL;
                 goto fail;
         }
-        info->ring_ref = err;
+        rinfo->ring_ref = err;
 
-        err = xenbus_alloc_evtchn(dev, &info->evtchn);
+        err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
         if (err)
                 goto fail;
 
-        err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
-                                        "blkif", info);
+        err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
+                                        "blkif", rinfo);
         if (err <= 0) {
                 xenbus_dev_fatal(dev, err,
                                  "bind_evtchn_to_irqhandler failed");
                 goto fail;
         }
-        info->irq = err;
+        rinfo->irq = err;
 
         return 0;
 fail:
-        blkif_free(info, 0);
+        blkif_free(rinfo->info, 0);
         return err;
 }
 
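Once setup_blkring() has published the ring page and event channel, the
talk_to_blkback() transaction below advertises them to the backend through
xenstore. The frontend directory ends up with per-ring entries roughly like
the following (path and values illustrative; 51712 is the vdevice id for
xvda):

        device/vbd/51712/ring-ref      = "8"
        device/vbd/51712/event-channel = "17"

Keeping ring_ref and evtchn in blkfront_ring_info means a multi-queue
follow-up can write one such pair per ring without further surgery here.
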
@@ -1291,9 +1317,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
         const char *message = NULL;
         struct xenbus_transaction xbt;
         int err;
+        struct blkfront_ring_info *rinfo = &info->rinfo;
 
         /* Create shared ring, alloc event channel. */
-        err = setup_blkring(dev, info);
+        err = setup_blkring(dev, rinfo);
         if (err)
                 goto out;
 
@@ -1305,13 +1332,13 @@ again:
         }
 
         err = xenbus_printf(xbt, dev->nodename,
-                            "ring-ref", "%u", info->ring_ref);
+                            "ring-ref", "%u", rinfo->ring_ref);
         if (err) {
                 message = "writing ring-ref";
                 goto abort_transaction;
         }
         err = xenbus_printf(xbt, dev->nodename,
-                            "event-channel", "%u", info->evtchn);
+                            "event-channel", "%u", rinfo->evtchn);
         if (err) {
                 message = "writing event-channel";
                 goto abort_transaction;
@@ -1361,6 +1388,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 {
         int err, vdevice, i;
         struct blkfront_info *info;
+        struct blkfront_ring_info *rinfo;
 
         /* FIXME: Use dynamic device id if this is not set. */
         err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1410,19 +1438,21 @@ static int blkfront_probe(struct xenbus_device *dev,
                 return -ENOMEM;
         }
 
+        rinfo = &info->rinfo;
         mutex_init(&info->mutex);
-        spin_lock_init(&info->io_lock);
+        spin_lock_init(&rinfo->io_lock);
         info->xbdev = dev;
         info->vdevice = vdevice;
-        INIT_LIST_HEAD(&info->grants);
-        INIT_LIST_HEAD(&info->indirect_pages);
-        info->persistent_gnts_c = 0;
+        INIT_LIST_HEAD(&rinfo->grants);
+        INIT_LIST_HEAD(&rinfo->indirect_pages);
+        rinfo->persistent_gnts_c = 0;
         info->connected = BLKIF_STATE_DISCONNECTED;
-        INIT_WORK(&info->work, blkif_restart_queue);
+        rinfo->info = info;
+        INIT_WORK(&rinfo->work, blkif_restart_queue);
 
         for (i = 0; i < BLK_RING_SIZE; i++)
-                info->shadow[i].req.u.rw.id = i+1;
-        info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
+                rinfo->shadow[i].req.u.rw.id = i+1;
+        rinfo->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 
         /* Front end dir is a number, which is used as the id. */
         info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -1465,21 +1495,22 @@ static int blkif_recover(struct blkfront_info *info)
         int pending, size;
         struct split_bio *split_bio;
         struct list_head requests;
+        struct blkfront_ring_info *rinfo = &info->rinfo;
 
         /* Stage 1: Make a safe copy of the shadow state. */
-        copy = kmemdup(info->shadow, sizeof(info->shadow),
+        copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
                        GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
         if (!copy)
                 return -ENOMEM;
 
         /* Stage 2: Set up free list. */
-        memset(&info->shadow, 0, sizeof(info->shadow));
+        memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
         for (i = 0; i < BLK_RING_SIZE; i++)
-                info->shadow[i].req.u.rw.id = i+1;
-        info->shadow_free = info->ring.req_prod_pvt;
-        info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
+                rinfo->shadow[i].req.u.rw.id = i+1;
+        rinfo->shadow_free = rinfo->ring.req_prod_pvt;
+        rinfo->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 
-        rc = blkfront_setup_indirect(info);
+        rc = blkfront_setup_indirect(rinfo);
         if (rc) {
                 kfree(copy);
                 return rc;
@@ -1521,7 +1552,7 @@ static int blkif_recover(struct blkfront_info *info)
         info->connected = BLKIF_STATE_CONNECTED;
 
         /* Kick any other new requests queued since we resumed */
-        kick_pending_request_queues(info);
+        kick_pending_request_queues(rinfo);
 
         list_for_each_entry_safe(req, n, &requests, queuelist) {
                 /* Requeue pending requests (flush or discard) */
@@ -1654,10 +1685,11 @@ static void blkfront_setup_discard(struct blkfront_info *info)
         info->feature_secdiscard = !!discard_secure;
 }
 
-static int blkfront_setup_indirect(struct blkfront_info *info)
+static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 {
         unsigned int indirect_segments, segs;
         int err, i;
+        struct blkfront_info *info = rinfo->info;
 
         err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                             "feature-max-indirect-segments", "%u", &indirect_segments,
@@ -1671,7 +1703,7 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
                 segs = info->max_indirect_segments;
         }
 
-        err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
+        err = fill_grant_buffer(rinfo, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
         if (err)
                 goto out_of_memory;
 
@@ -1683,31 +1715,31 @@
                  */
                 int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
 
-                BUG_ON(!list_empty(&info->indirect_pages));
+                BUG_ON(!list_empty(&rinfo->indirect_pages));
                 for (i = 0; i < num; i++) {
                         struct page *indirect_page = alloc_page(GFP_NOIO);
                         if (!indirect_page)
                                 goto out_of_memory;
-                        list_add(&indirect_page->lru, &info->indirect_pages);
+                        list_add(&indirect_page->lru, &rinfo->indirect_pages);
                 }
         }
 
         for (i = 0; i < BLK_RING_SIZE; i++) {
-                info->shadow[i].grants_used = kzalloc(
-                        sizeof(info->shadow[i].grants_used[0]) * segs,
+                rinfo->shadow[i].grants_used = kzalloc(
+                        sizeof(rinfo->shadow[i].grants_used[0]) * segs,
                         GFP_NOIO);
-                info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
+                rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * segs, GFP_NOIO);
                 if (info->max_indirect_segments)
-                        info->shadow[i].indirect_grants = kzalloc(
-                                sizeof(info->shadow[i].indirect_grants[0]) *
+                        rinfo->shadow[i].indirect_grants = kzalloc(
+                                sizeof(rinfo->shadow[i].indirect_grants[0]) *
                                 INDIRECT_GREFS(segs),
                                 GFP_NOIO);
-                if ((info->shadow[i].grants_used == NULL) ||
-                        (info->shadow[i].sg == NULL) ||
+                if ((rinfo->shadow[i].grants_used == NULL) ||
+                        (rinfo->shadow[i].sg == NULL) ||
                      (info->max_indirect_segments &&
-                     (info->shadow[i].indirect_grants == NULL)))
+                     (rinfo->shadow[i].indirect_grants == NULL)))
                         goto out_of_memory;
-                sg_init_table(info->shadow[i].sg, segs);
+                sg_init_table(rinfo->shadow[i].sg, segs);
         }
 
 
@@ -1715,16 +1747,16 @@ out_of_memory:
         for (i = 0; i < BLK_RING_SIZE; i++) {
-                kfree(info->shadow[i].grants_used);
-                info->shadow[i].grants_used = NULL;
-                kfree(info->shadow[i].sg);
-                info->shadow[i].sg = NULL;
-                kfree(info->shadow[i].indirect_grants);
-                info->shadow[i].indirect_grants = NULL;
-        }
-        if (!list_empty(&info->indirect_pages)) {
+                kfree(rinfo->shadow[i].grants_used);
+                rinfo->shadow[i].grants_used = NULL;
+                kfree(rinfo->shadow[i].sg);
+                rinfo->shadow[i].sg = NULL;
+                kfree(rinfo->shadow[i].indirect_grants);
+                rinfo->shadow[i].indirect_grants = NULL;
+        }
+        if (!list_empty(&rinfo->indirect_pages)) {
                 struct page *indirect_page, *n;
-                list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+                list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
                         list_del(&indirect_page->lru);
                         __free_page(indirect_page);
                 }
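As a sanity check on the fill_grant_buffer() sizing above: the driver
pre-allocates one grant per data segment plus one per indirect-descriptor
frame, for every possible in-flight request. A worked example under this
era's x86 defaults (4 KiB pages, 8-byte indirect segment entries, a 32-slot
ring, 32 segments per request -- the concrete numbers, not the macros, are
the assumption here):

        #include <stdio.h>

        #define PAGE_SIZE               4096
        #define SEG_ENTRY_SIZE          8  /* sizeof(struct blkif_request_segment) */
        #define SEGS_PER_INDIRECT_FRAME (PAGE_SIZE / SEG_ENTRY_SIZE)  /* 512 */
        #define INDIRECT_GREFS(segs) \
                (((segs) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)
        #define BLK_RING_SIZE           32

        int main(void)
        {
                unsigned int segs = 32;  /* default max segments per request */

                /* (32 + 1) * 32 = 1056 grants pre-allocated per ring */
                printf("%u\n", (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
                return 0;
        }
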
@@ -1744,6 +1776,7 @@ static void blkfront_connect(struct blkfront_info *info)
         unsigned int binfo;
         int err;
         int barrier, flush, discard, persistent;
+        struct blkfront_ring_info *rinfo = &info->rinfo;
 
         switch (info->connected) {
         case BLKIF_STATE_CONNECTED:
@@ -1841,7 +1874,7 @@ static void blkfront_connect(struct blkfront_info *info)
         else
                 info->feature_persistent = persistent;
 
-        err = blkfront_setup_indirect(info);
+        err = blkfront_setup_indirect(rinfo);
         if (err) {
                 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                                  info->xbdev->otherend);
@@ -1860,7 +1893,7 @@ static void blkfront_connect(struct blkfront_info *info)
 
         /* Kick pending requests. */
         info->connected = BLKIF_STATE_CONNECTED;
-        kick_pending_request_queues(info);
+        kick_pending_request_queues(rinfo);
 
         add_disk(info->gd);
-- 
1.8.3.1