Implement blk_limits_max_hw_sectors() and make
blk_queue_max_hw_sectors() a wrapper around it.
DM needs this to avoid setting queue_limits' max_hw_sectors and
max_sectors directly. dm_set_device_limits() now leverages
blk_limits_max_hw_sectors() logic to establish the appropriate
max_hw_sectors minimum (PAGE_SIZE). Fixes an issue where DM was
incorrectly setting max_sectors rather than max_hw_sectors (which
caused dm_merge_bvec()'s max_hw_sectors check to be ineffective).
Signed-off-by: Mike Snitzer <[email protected]>
Cc: [email protected]
---
block/blk-settings.c | 26 ++++++++++++++++++++------
drivers/md/dm-table.c | 5 ++---
include/linux/blkdev.h | 1 +
3 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859f..6a2ac23 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
__func__, max_hw_sectors);
}
- q->limits.max_hw_sectors = max_hw_sectors;
- q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
- BLK_DEF_MAX_SECTORS);
+ limits->max_hw_sectors = max_hw_sectors;
+ limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+ BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ * See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+ blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 90267f8..3d098ce 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
*/
if (q->merge_bvec_fn && !ti->type->merge)
- limits->max_sectors =
- min_not_zero(limits->max_sectors,
- (unsigned int) (PAGE_SIZE >> 9));
+ blk_limits_max_hw_sectors(limits,
+ (unsigned int) (PAGE_SIZE >> 9));
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd..f599dbc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,6 +805,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
>>>>> "Mike" == Mike Snitzer <[email protected]> writes:
Mike> Implement blk_limits_max_hw_sectors() and make
Mike> blk_queue_max_hw_sectors() a wrapper around it.
Mike> DM needs this to avoid setting queue_limits' max_hw_sectors and
Mike> max_sectors directly. dm_set_device_limits() now leverages
Mike> blk_limits_max_hw_sectors() logic to establish the appropriate
Mike> max_hw_sectors minimum (PAGE_SIZE). Fixes issue where DM was
Mike> incorrectly setting max_sectors rather than max_hw_sectors (which
Mike> caused dm_merge_bvec()'s max_hw_sectors check to be ineffective).
Acked-by: Martin K. Petersen <[email protected]>
Jens, please apply.
--
Martin K. Petersen Oracle Linux Engineering
On 2010-12-17 00:49, Martin K. Petersen wrote:
>>>>>> "Mike" == Mike Snitzer <[email protected]> writes:
>
> Mike> Implement blk_limits_max_hw_sectors() and make
> Mike> blk_queue_max_hw_sectors() a wrapper around it.
>
> Mike> DM needs this to avoid setting queue_limits' max_hw_sectors and
> Mike> max_sectors directly. dm_set_device_limits() now leverages
> Mike> blk_limits_max_hw_sectors() logic to establish the appropriate
> Mike> max_hw_sectors minimum (PAGE_SIZE). Fixes issue where DM was
> Mike> incorrectly setting max_sectors rather than max_hw_sectors (which
> Mike> caused dm_merge_bvec()'s max_hw_sectors check to be ineffective).
>
> Acked-by: Martin K. Petersen <[email protected]>
>
> Jens, please apply.
Thanks, it's in.
--
Jens Axboe