2012-06-01 18:56:03

by Maya Erez

[permalink] [raw]
Subject: [PATCH v2 1/1] mmc: block: Add write packing control

The write packing control will ensure that read requests latency is
not increased due to long write packed commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs by writing the required value to:
/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
The trigger for disabling the write packing is fetching a read request.

---
Documentation/mmc/mmc-dev-attrs.txt | 17 ++++++
drivers/mmc/card/block.c | 100 ++++++++++++++++++++++++++++++++++-
drivers/mmc/card/queue.c | 8 +++
drivers/mmc/card/queue.h | 3 +
include/linux/mmc/host.h | 1 +
5 files changed, 128 insertions(+), 1 deletions(-)

diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 22ae844..08f7312 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,23 @@ The following attributes are read/write.

force_ro Enforce read-only access even if write protect switch is off.

+ num_wr_reqs_to_start_packing This attribute is used to determine
+ the trigger for activating the write packing, in case the write
+ packing control feature is enabled.
+
+ When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+ write requests could be packed, it enables the write packing feature.
+ This allows us to start the write packing only when it is beneficial
+ and has minimum effect on the read latency.
+
+ The number of potential packed requests that will trigger the packing
+ can be configured via sysfs by writing the required value to:
+ /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
+
+ The default value of num_wr_reqs_to_start_packing was determined by
+ running parallel lmdd write and lmdd read operations and calculating
+ the max number of packed write requests.
+
SD and MMC Device Attributes
============================

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2785fd4..ef192fb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -114,6 +114,7 @@ struct mmc_blk_data {
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+ struct device_attribute num_wr_reqs_to_start_packing;
};

static DEFINE_MUTEX(open_lock);
@@ -281,6 +282,38 @@ out:
return ret;
}

+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int num_wr_reqs_to_start_packing;
+ int ret;
+
+ num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ sscanf(buf, "%d", &value);
+ if (value >= 0)
+ md->queue.num_wr_reqs_to_start_packing = value;
+
+ mmc_blk_put(md);
+ return count;
+}
+
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}

+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
+ int data_dir;
+
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+ return;
+
+ /*
+ * In case the packing control is not supported by the host, it should
+ * not have an effect on the write packing. Therefore we have to enable
+ * the write packing
+ */
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+ if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+ data_dir = rq_data_dir(req);
+
+ if (data_dir == READ) {
+ mq->num_of_potential_packed_wr_reqs = 0;
+ mq->wr_packing_enabled = false;
+ return;
+ } else if (data_dir == WRITE) {
+ mq->num_of_potential_packed_wr_reqs++;
+ }
+
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+
+}
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
!card->ext_csd.packed_event_en)
goto no_packed;

+ if (!mq->wr_packing_enabled)
+ goto no_packed;
+
if ((rq_data_dir(cur) == WRITE) &&
(card->host->caps2 & MMC_CAP2_PACKED_WR))
max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1396,6 +1474,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
break;
}

+ if (rq_data_dir(next) == WRITE)
+ mq->num_of_potential_packed_wr_reqs++;
list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
cur = next;
reqs++;
@@ -1780,7 +1860,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}

- if (req && req->cmd_flags & REQ_DISCARD) {
+ mmc_blk_write_packing_control(mq, req);
+
+ if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -2010,6 +2092,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)

if (md) {
card = md->queue.card;
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2076,6 +2160,20 @@ static int mmc_add_disk(struct mmc_blk_data *md)
if (ret)
goto power_ro_lock_fail;
}
+
+ md->num_wr_reqs_to_start_packing.show =
+ num_wr_reqs_to_start_packing_show;
+ md->num_wr_reqs_to_start_packing.store =
+ num_wr_reqs_to_start_packing_store;
+ sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+ md->num_wr_reqs_to_start_packing.attr.name =
+ "num_wr_reqs_to_start_packing";
+ md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+ if (ret)
+ goto power_ro_lock_fail;
+
return ret;

power_ro_lock_fail:
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 165d85a..79ef91b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -25,6 +25,13 @@
#define MMC_QUEUE_SUSPENDED (1 << 0)

/*
+ * Based on benchmark tests the default num of requests to trigger the write
+ * packing was determined, to keep the read latency as low as possible and
+ * manage to keep the high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
* Prepare a MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -181,6 +188,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
+ mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;

blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d761bf1..6c29e0e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -44,6 +44,9 @@ struct mmc_queue {
struct mmc_queue_req mqrq[2];
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
+ bool wr_packing_enabled;
+ int num_of_potential_packed_wr_reqs;
+ int num_wr_reqs_to_start_packing;
};

extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9d0d946..0eb6c7b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -242,6 +242,7 @@ struct mmc_host {
#define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
MMC_CAP2_PACKED_WR) /* Allow packed commands */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 12) /* Allow write packing control */

mmc_pm_flag_t pm_caps; /* supported pm features */
unsigned int power_notify_type;
--
1.7.3.3
--
Sent by a consultant of the Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.


2012-06-08 09:37:41

by Seungwon Jeon

[permalink] [raw]
Subject: RE: [PATCH v2 1/1] mmc: block: Add write packing control

Hi,

How can we check the effect?
Do you have any result?
Please check the several comment below.

Maya Erez <[email protected]> wrote:
> The write packing control will ensure that read requests latency is
> not increased due to long write packed commands.
>
> The trigger for enabling the write packing is managing to pack several
> write requests. The number of potential packed requests that will trigger
> the packing can be configured via sysfs by writing the required value to:
> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
> The trigger for disabling the write packing is fetching a read request.
>
> ---
> Documentation/mmc/mmc-dev-attrs.txt | 17 ++++++
> drivers/mmc/card/block.c | 100 ++++++++++++++++++++++++++++++++++-
> drivers/mmc/card/queue.c | 8 +++
> drivers/mmc/card/queue.h | 3 +
> include/linux/mmc/host.h | 1 +
> 5 files changed, 128 insertions(+), 1 deletions(-)
>
> diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
> index 22ae844..08f7312 100644
> --- a/Documentation/mmc/mmc-dev-attrs.txt
> +++ b/Documentation/mmc/mmc-dev-attrs.txt
> @@ -8,6 +8,23 @@ The following attributes are read/write.
>
> force_ro Enforce read-only access even if write protect switch is off.
>
> + num_wr_reqs_to_start_packing This attribute is used to determine
> + the trigger for activating the write packing, in case the write
> + packing control feature is enabled.
> +
> + When the MMC manages to reach a point where num_wr_reqs_to_start_packing
> + write requests could be packed, it enables the write packing feature.
> + This allows us to start the write packing only when it is beneficial
> + and has minimum affect on the read latency.
> +
> + The number of potential packed requests that will trigger the packing
> + can be configured via sysfs by writing the required value to:
> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
> +
> + The default value of num_wr_reqs_to_start_packing was determined by
> + running parallel lmdd write and lmdd read operations and calculating
> + the max number of packed writes requests.
> +
> SD and MMC Device Attributes
> ============================
>
> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
> index 2785fd4..ef192fb 100644
> --- a/drivers/mmc/card/block.c
> +++ b/drivers/mmc/card/block.c
> @@ -114,6 +114,7 @@ struct mmc_blk_data {
> struct device_attribute force_ro;
> struct device_attribute power_ro_lock;
> int area_type;
> + struct device_attribute num_wr_reqs_to_start_packing;
> };
>
> static DEFINE_MUTEX(open_lock);
> @@ -281,6 +282,38 @@ out:
> return ret;
> }
>
> +static ssize_t
> +num_wr_reqs_to_start_packing_show(struct device *dev,
> + struct device_attribute *attr, char *buf)
> +{
> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
> + int num_wr_reqs_to_start_packing;
> + int ret;
> +
> + num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
> +
> + ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
> +
> + mmc_blk_put(md);
> + return ret;
> +}
> +
> +static ssize_t
> +num_wr_reqs_to_start_packing_store(struct device *dev,
> + struct device_attribute *attr,
> + const char *buf, size_t count)
> +{
> + int value;
> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
> +
> + sscanf(buf, "%d", &value);
> + if (value >= 0)
> + md->queue.num_wr_reqs_to_start_packing = value;
> +
> + mmc_blk_put(md);
> + return count;
> +}
> +
> static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
> {
> struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
> mmc_queue_bounce_pre(mqrq);
> }
>
> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
> + struct request *req)
> +{
> + struct mmc_host *host = mq->card->host;
> + int data_dir;
> +
> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
> + return;
> +
> + /*
> + * In case the packing control is not supported by the host, it should
> + * not have an effect on the write packing. Therefore we have to enable
> + * the write packing
> + */
> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
> + mq->wr_packing_enabled = true;
> + return;
> + }
> +
> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
> + if (mq->num_of_potential_packed_wr_reqs >
> + mq->num_wr_reqs_to_start_packing)
> + mq->wr_packing_enabled = true;
> + return;
> + }
> +
> + data_dir = rq_data_dir(req);
> +
> + if (data_dir == READ) {
> + mq->num_of_potential_packed_wr_reqs = 0;
> + mq->wr_packing_enabled = false;
> + return;
> + } else if (data_dir == WRITE) {
> + mq->num_of_potential_packed_wr_reqs++;
> + }
> +
> + if (mq->num_of_potential_packed_wr_reqs >
> + mq->num_wr_reqs_to_start_packing)
> + mq->wr_packing_enabled = true;
Write Packing is available only if continuing write requests are over num_wr_reqs_to_start_packing?
That means individual request(1...17) will be issued with non-packing.
Could you explain your policy more?
> +
> +}
> +
> static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
> {
> struct request_queue *q = mq->queue;
> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
> !card->ext_csd.packed_event_en)
> goto no_packed;
>
> + if (!mq->wr_packing_enabled)
> + goto no_packed;
If wr_packing_enabled is set to true, several write requests can be packed.
We don't need to consider read request since packed write?

Thanks,
Seungwon Jeon
> +
> if ((rq_data_dir(cur) == WRITE) &&
> (card->host->caps2 & MMC_CAP2_PACKED_WR))
> max_packed_rw = card->ext_csd.max_packed_writes;
> @@ -1396,6 +1474,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
> break;
> }
>
> + if (rq_data_dir(next) == WRITE)
> + mq->num_of_potential_packed_wr_reqs++;
> list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
> cur = next;
> reqs++;
> @@ -1780,7 +1860,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
> goto out;
> }
>
> - if (req && req->cmd_flags & REQ_DISCARD) {
> + mmc_blk_write_packing_control(mq, req);
> +
> + if (req && req->cmd_flags & REQ_DISCARD) {
> /* complete ongoing async transfer before issuing discard */
> if (card->host->areq)
> mmc_blk_issue_rw_rq(mq, NULL);
> @@ -2010,6 +2092,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
>
> if (md) {
> card = md->queue.card;
> + device_remove_file(disk_to_dev(md->disk),
> + &md->num_wr_reqs_to_start_packing);
> if (md->disk->flags & GENHD_FL_UP) {
> device_remove_file(disk_to_dev(md->disk), &md->force_ro);
> if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
> @@ -2076,6 +2160,20 @@ static int mmc_add_disk(struct mmc_blk_data *md)
> if (ret)
> goto power_ro_lock_fail;
> }
> +
> + md->num_wr_reqs_to_start_packing.show =
> + num_wr_reqs_to_start_packing_show;
> + md->num_wr_reqs_to_start_packing.store =
> + num_wr_reqs_to_start_packing_store;
> + sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
> + md->num_wr_reqs_to_start_packing.attr.name =
> + "num_wr_reqs_to_start_packing";
> + md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
> + ret = device_create_file(disk_to_dev(md->disk),
> + &md->num_wr_reqs_to_start_packing);
> + if (ret)
> + goto power_ro_lock_fail;
> +
> return ret;
>
> power_ro_lock_fail:
> diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
> index 165d85a..79ef91b 100644
> --- a/drivers/mmc/card/queue.c
> +++ b/drivers/mmc/card/queue.c
> @@ -25,6 +25,13 @@
> #define MMC_QUEUE_SUSPENDED (1 << 0)
>
> /*
> + * Based on benchmark tests the default num of requests to trigger the write
> + * packing was determined, to keep the read latency as low as possible and
> + * manage to keep the high write throughput.
> + */
> +#define DEFAULT_NUM_REQS_TO_START_PACK 17
> +
> +/*
> * Prepare a MMC request. This just filters out odd stuff.
> */
> static int mmc_prep_request(struct request_queue *q, struct request *req)
> @@ -181,6 +188,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
> mq->mqrq_cur = mqrq_cur;
> mq->mqrq_prev = mqrq_prev;
> mq->queue->queuedata = mq;
> + mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
>
> blk_queue_prep_rq(mq->queue, mmc_prep_request);
> queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
> diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
> index d761bf1..6c29e0e 100644
> --- a/drivers/mmc/card/queue.h
> +++ b/drivers/mmc/card/queue.h
> @@ -44,6 +44,9 @@ struct mmc_queue {
> struct mmc_queue_req mqrq[2];
> struct mmc_queue_req *mqrq_cur;
> struct mmc_queue_req *mqrq_prev;
> + bool wr_packing_enabled;
> + int num_of_potential_packed_wr_reqs;
> + int num_wr_reqs_to_start_packing;
> };
>
> extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
> diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
> index 9d0d946..0eb6c7b 100644
> --- a/include/linux/mmc/host.h
> +++ b/include/linux/mmc/host.h
> @@ -242,6 +242,7 @@ struct mmc_host {
> #define MMC_CAP2_PACKED_WR (1 << 11) /* Allow packed write */
> #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
> MMC_CAP2_PACKED_WR) /* Allow packed commands */
> +#define MMC_CAP2_PACKED_WR_CONTROL (1 << 12) /* Allow write packing control */
>
> mmc_pm_flag_t pm_caps; /* supported pm features */
> unsigned int power_notify_type;
> --
> 1.7.3.3
> --
> Sent by a consultant of the Qualcomm Innovation Center, Inc.
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum.
> --
> To unsubscribe from this list: send the line "unsubscribe linux-mmc" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

2012-06-09 14:46:21

by Maya Erez

[permalink] [raw]
Subject: RE: [PATCH v2 1/1] mmc: block: Add write packing control


> Hi,
>
> How can we check the effect?
> Do you have any result?
We ran parallel lmdd read and write operations and found out that the
write packing causes the read throughput to drop from 24MB/s to 12MB/s.
The write packing control managed to increase the read throughput back to
the original value.
We also examined "real life" scenarios, such as performing a big push
operation in parallel to launching several applications. We measured the
read latency and found out that with the write packing control the worst
case of the read latency was smaller.

> Please check the several comment below.
>
> Maya Erez <[email protected]> wrote:
>> The write packing control will ensure that read requests latency is
>> not increased due to long write packed commands.
>>
>> The trigger for enabling the write packing is managing to pack several
>> write requests. The number of potential packed requests that will
>> trigger
>> the packing can be configured via sysfs by writing the required value
>> to:
>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>> The trigger for disabling the write packing is fetching a read request.
>>
>> ---
>> Documentation/mmc/mmc-dev-attrs.txt | 17 ++++++
>> drivers/mmc/card/block.c | 100
>> ++++++++++++++++++++++++++++++++++-
>> drivers/mmc/card/queue.c | 8 +++
>> drivers/mmc/card/queue.h | 3 +
>> include/linux/mmc/host.h | 1 +
>> 5 files changed, 128 insertions(+), 1 deletions(-)
>>
>> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>> b/Documentation/mmc/mmc-dev-attrs.txt
>> index 22ae844..08f7312 100644
>> --- a/Documentation/mmc/mmc-dev-attrs.txt
>> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>> @@ -8,6 +8,23 @@ The following attributes are read/write.
>>
>> force_ro Enforce read-only access even if write protect switch is
>> off.
>>
>> + num_wr_reqs_to_start_packing This attribute is used to determine
>> + the trigger for activating the write packing, in case the write
>> + packing control feature is enabled.
>> +
>> + When the MMC manages to reach a point where
>> num_wr_reqs_to_start_packing
>> + write requests could be packed, it enables the write packing feature.
>> + This allows us to start the write packing only when it is beneficial
>> + and has minimum affect on the read latency.
>> +
>> + The number of potential packed requests that will trigger the packing
>> + can be configured via sysfs by writing the required value to:
>> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>> +
>> + The default value of num_wr_reqs_to_start_packing was determined by
>> + running parallel lmdd write and lmdd read operations and calculating
>> + the max number of packed writes requests.
>> +
>> SD and MMC Device Attributes
>> ============================
>>
>> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
>> index 2785fd4..ef192fb 100644
>> --- a/drivers/mmc/card/block.c
>> +++ b/drivers/mmc/card/block.c
>> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>> struct device_attribute force_ro;
>> struct device_attribute power_ro_lock;
>> int area_type;
>> + struct device_attribute num_wr_reqs_to_start_packing;
>> };
>>
>> static DEFINE_MUTEX(open_lock);
>> @@ -281,6 +282,38 @@ out:
>> return ret;
>> }
>>
>> +static ssize_t
>> +num_wr_reqs_to_start_packing_show(struct device *dev,
>> + struct device_attribute *attr, char *buf)
>> +{
>> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>> + int num_wr_reqs_to_start_packing;
>> + int ret;
>> +
>> + num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
>> +
>> + ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
>> +
>> + mmc_blk_put(md);
>> + return ret;
>> +}
>> +
>> +static ssize_t
>> +num_wr_reqs_to_start_packing_store(struct device *dev,
>> + struct device_attribute *attr,
>> + const char *buf, size_t count)
>> +{
>> + int value;
>> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>> +
>> + sscanf(buf, "%d", &value);
>> + if (value >= 0)
>> + md->queue.num_wr_reqs_to_start_packing = value;
>> +
>> + mmc_blk_put(md);
>> + return count;
>> +}
>> +
>> static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
>> {
>> struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
>> mmc_queue_req *mqrq,
>> mmc_queue_bounce_pre(mqrq);
>> }
>>
>> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
>> + struct request *req)
>> +{
>> + struct mmc_host *host = mq->card->host;
>> + int data_dir;
>> +
>> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>> + return;
>> +
>> + /*
>> + * In case the packing control is not supported by the host, it should
>> + * not have an effect on the write packing. Therefore we have to
>> enable
>> + * the write packing
>> + */
>> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>> + mq->wr_packing_enabled = true;
>> + return;
>> + }
>> +
>> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>> + if (mq->num_of_potential_packed_wr_reqs >
>> + mq->num_wr_reqs_to_start_packing)
>> + mq->wr_packing_enabled = true;
>> + return;
>> + }
>> +
>> + data_dir = rq_data_dir(req);
>> +
>> + if (data_dir == READ) {
>> + mq->num_of_potential_packed_wr_reqs = 0;
>> + mq->wr_packing_enabled = false;
>> + return;
>> + } else if (data_dir == WRITE) {
>> + mq->num_of_potential_packed_wr_reqs++;
>> + }
>> +
>> + if (mq->num_of_potential_packed_wr_reqs >
>> + mq->num_wr_reqs_to_start_packing)
>> + mq->wr_packing_enabled = true;
> Write Packing is available only if continuing write requests are over
> num_wr_reqs_to_start_packing?
> That means individual request(1...17) will be issued with non-packing.
> Could you explain your policy more?
We try to identify the case where there is parallel read and write
operations. In our experiments we found out that the number of write
requests between read requests in parallel read and write operations
doesn't exceed 17 requests. Therefore, we can assume that fetching more
than 17 write requests without hitting a read request can indicate that
there is no read activity.
You are right that this affects the write throughput a bit but the goal of
this algorithm is to make sure the read throughput and latency are not
decreased due to write. If this is not the desired result, this algorithm
can be disabled.
>> +
>> +}
>> +
>> static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request
>> *req)
>> {
>> struct request_queue *q = mq->queue;
>> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
>> mmc_queue *mq, struct request *req)
>> !card->ext_csd.packed_event_en)
>> goto no_packed;
>>
>> + if (!mq->wr_packing_enabled)
>> + goto no_packed;
> If wr_packing_enabled is set to true, several write requests can be
> packed.
> We don't need to consider read request since packed write?
I'm not sure I understand the question. We check if there was a read
request in the mmc_blk_write_packing_control, and in such a case set
mq->wr_packing_enabled to false.
If I didn't answer the question, please explain it again.

>
> Thanks,
> Seungwon Jeon

Thanks,
Maya Erez
--
Sent by a consultant of the Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-11 09:10:24

by Seungwon Jeon

[permalink] [raw]
Subject: RE: [PATCH v2 1/1] mmc: block: Add write packing control

Maya Erez <[email protected]> wrote:
>
> > Hi,
> >
> > How can we check the effect?
> > Do you have any result?
> We ran parallel lmdd read and write operations and found out that the
> write packing causes the read throughput to drop from 24MB/s to 12MB/s.
> The write packing control managed to increase the read throughput back to
> the original value.
> We also examined "real life" scenarios, such as performing a big push
> operation in parallel to launching several applications. We measured the
> read latency and found out that with the write packing control the worst
> case of the read latency was smaller.
>
> > Please check the several comment below.
> >
> > Maya Erez <[email protected]> wrote:
> >> The write packing control will ensure that read requests latency is
> >> not increased due to long write packed commands.
> >>
> >> The trigger for enabling the write packing is managing to pack several
> >> write requests. The number of potential packed requests that will
> >> trigger
> >> the packing can be configured via sysfs by writing the required value
> >> to:
> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
> >> The trigger for disabling the write packing is fetching a read request.
> >>
> >> ---
> >> Documentation/mmc/mmc-dev-attrs.txt | 17 ++++++
> >> drivers/mmc/card/block.c | 100
> >> ++++++++++++++++++++++++++++++++++-
> >> drivers/mmc/card/queue.c | 8 +++
> >> drivers/mmc/card/queue.h | 3 +
> >> include/linux/mmc/host.h | 1 +
> >> 5 files changed, 128 insertions(+), 1 deletions(-)
> >>
> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
> >> b/Documentation/mmc/mmc-dev-attrs.txt
> >> index 22ae844..08f7312 100644
> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
> >>
> >> force_ro Enforce read-only access even if write protect switch is
> >> off.
> >>
> >> + num_wr_reqs_to_start_packing This attribute is used to determine
> >> + the trigger for activating the write packing, in case the write
> >> + packing control feature is enabled.
> >> +
> >> + When the MMC manages to reach a point where
> >> num_wr_reqs_to_start_packing
> >> + write requests could be packed, it enables the write packing feature.
> >> + This allows us to start the write packing only when it is beneficial
> >> + and has minimum affect on the read latency.
> >> +
> >> + The number of potential packed requests that will trigger the packing
> >> + can be configured via sysfs by writing the required value to:
> >> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
> >> +
> >> + The default value of num_wr_reqs_to_start_packing was determined by
> >> + running parallel lmdd write and lmdd read operations and calculating
> >> + the max number of packed writes requests.
> >> +
> >> SD and MMC Device Attributes
> >> ============================
> >>
> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
> >> index 2785fd4..ef192fb 100644
> >> --- a/drivers/mmc/card/block.c
> >> +++ b/drivers/mmc/card/block.c
> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
> >> struct device_attribute force_ro;
> >> struct device_attribute power_ro_lock;
> >> int area_type;
> >> + struct device_attribute num_wr_reqs_to_start_packing;
> >> };
> >>
> >> static DEFINE_MUTEX(open_lock);
> >> @@ -281,6 +282,38 @@ out:
> >> return ret;
> >> }
> >>
> >> +static ssize_t
> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
> >> + struct device_attribute *attr, char *buf)
> >> +{
> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
> >> + int num_wr_reqs_to_start_packing;
> >> + int ret;
> >> +
> >> + num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
> >> +
> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
> >> +
> >> + mmc_blk_put(md);
> >> + return ret;
> >> +}
> >> +
> >> +static ssize_t
> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
> >> + struct device_attribute *attr,
> >> + const char *buf, size_t count)
> >> +{
> >> + int value;
> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
> >> +
> >> + sscanf(buf, "%d", &value);
> >> + if (value >= 0)
> >> + md->queue.num_wr_reqs_to_start_packing = value;
> >> +
> >> + mmc_blk_put(md);
> >> + return count;
> >> +}
> >> +
> >> static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
> >> {
> >> struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
> >> mmc_queue_req *mqrq,
> >> mmc_queue_bounce_pre(mqrq);
> >> }
> >>
> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
> >> + struct request *req)
> >> +{
> >> + struct mmc_host *host = mq->card->host;
> >> + int data_dir;
> >> +
> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
> >> + return;
> >> +
> >> + /*
> >> + * In case the packing control is not supported by the host, it should
> >> + * not have an effect on the write packing. Therefore we have to
> >> enable
> >> + * the write packing
> >> + */
> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
> >> + mq->wr_packing_enabled = true;
> >> + return;
> >> + }
> >> +
> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
> >> + if (mq->num_of_potential_packed_wr_reqs >
> >> + mq->num_wr_reqs_to_start_packing)
> >> + mq->wr_packing_enabled = true;
> >> + return;
> >> + }
> >> +
> >> + data_dir = rq_data_dir(req);
> >> +
> >> + if (data_dir == READ) {
> >> + mq->num_of_potential_packed_wr_reqs = 0;
> >> + mq->wr_packing_enabled = false;
> >> + return;
> >> + } else if (data_dir == WRITE) {
> >> + mq->num_of_potential_packed_wr_reqs++;
> >> + }
> >> +
> >> + if (mq->num_of_potential_packed_wr_reqs >
> >> + mq->num_wr_reqs_to_start_packing)
> >> + mq->wr_packing_enabled = true;
> > Write Packing is available only if continuing write requests are over
> > num_wr_reqs_to_start_packing?
> > That means individual request(1...17) will be issued with non-packing.
> > Could you explain your policy more?
> We try to identify the case where there is parallel read and write
> operations. In our experiments we found out that the number of write
> requests between read requests in parallel read and write operations
> doesn't exceed 17 requests. Therefore, we can assume that fetching more
> than 17 write requests without hitting a read request can indicate that
> there is no read activity.
We can apply this experiment regardless of the I/O scheduler?
Which I/O scheduler was used with this experiment?

> You are right that this affects the write throughput a bit but the goal of
> this algorithm is to make sure the read throughput and latency are not
> decreased due to write. If this is not the desired result, this algorithm
> can be disabled.
> >> +
> >> +}
> >> +
> >> static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request
> >> *req)
> >> {
> >> struct request_queue *q = mq->queue;
> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
> >> mmc_queue *mq, struct request *req)
> >> !card->ext_csd.packed_event_en)
> >> goto no_packed;
> >>
> >> + if (!mq->wr_packing_enabled)
> >> + goto no_packed;
> > If wr_packing_enabled is set to true, several write requests can be
> > packed.
> > We don't need to consider read request since packed write?
> I'm not sure I understand the question. We check if there was a read
> request in the mmc_blk_write_packing_control, and in such a case set
> mq->wr_packing_enabled to false.
> If I didn't answer the question, please explain it again.
Packed write can be possible after exceeding 17 requests.
Is it assured that read request doesn't follow immediately after packed write?
I wonder this case.

Thanks,
Seungwon Jeon.
>
> >
> > Thanks,
> > Seungwon Jeon
>
> Thanks,
> Maya Erez
> --
> Sent by a consultant of the Qualcomm Innovation Center, Inc.
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-mmc" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

2012-06-11 13:55:38

by Maya Erez

[permalink] [raw]
Subject: RE: [PATCH v2 1/1] mmc: block: Add write packing control


> Maya Erez <[email protected]> wrote:
>>
>> > Hi,
>> >
>> > How can we check the effect?
>> > Do you have any result?
>> We ran parallel lmdd read and write operations and found out that the
>> write packing causes the read throughput to drop from 24MB/s to 12MB/s.
>> The write packing control managed to increase the read throughput back
>> to
>> the original value.
>> We also examined "real life" scenarios, such as performing a big push
>> operation in parallel to launching several applications. We measured the
>> read latency and found out that with the write packing control the worst
>> case of the read latency was smaller.
>>
>> > Please check the several comment below.
>> >
>> > Maya Erez <[email protected]> wrote:
>> >> The write packing control will ensure that read requests latency is
>> >> not increased due to long write packed commands.
>> >>
>> >> The trigger for enabling the write packing is managing to pack
>> several
>> >> write requests. The number of potential packed requests that will
>> >> trigger
>> >> the packing can be configured via sysfs by writing the required value
>> >> to:
>> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>> >> The trigger for disabling the write packing is fetching a read
>> request.
>> >>
>> >> ---
>> >> Documentation/mmc/mmc-dev-attrs.txt | 17 ++++++
>> >> drivers/mmc/card/block.c | 100
>> >> ++++++++++++++++++++++++++++++++++-
>> >> drivers/mmc/card/queue.c | 8 +++
>> >> drivers/mmc/card/queue.h | 3 +
>> >> include/linux/mmc/host.h | 1 +
>> >> 5 files changed, 128 insertions(+), 1 deletions(-)
>> >>
>> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>> >> b/Documentation/mmc/mmc-dev-attrs.txt
>> >> index 22ae844..08f7312 100644
>> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
>> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
>> >>
>> >> force_ro Enforce read-only access even if write protect switch is
>> >> off.
>> >>
>> >> + num_wr_reqs_to_start_packing This attribute is used to determine
>> >> + the trigger for activating the write packing, in case the write
>> >> + packing control feature is enabled.
>> >> +
>> >> + When the MMC manages to reach a point where
>> >> num_wr_reqs_to_start_packing
>> >> + write requests could be packed, it enables the write packing
>> feature.
>> >> + This allows us to start the write packing only when it is
>> beneficial
>> >> + and has minimum effect on the read latency.
>> >> +
>> >> + The number of potential packed requests that will trigger the
>> packing
>> >> + can be configured via sysfs by writing the required value to:
>> >> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>> >> +
>> >> + The default value of num_wr_reqs_to_start_packing was determined by
>> >> + running parallel lmdd write and lmdd read operations and
>> calculating
>> >> + the max number of packed write requests.
>> >> +
>> >> SD and MMC Device Attributes
>> >> ============================
>> >>
>> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
>> >> index 2785fd4..ef192fb 100644
>> >> --- a/drivers/mmc/card/block.c
>> >> +++ b/drivers/mmc/card/block.c
>> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>> >> struct device_attribute force_ro;
>> >> struct device_attribute power_ro_lock;
>> >> int area_type;
>> >> + struct device_attribute num_wr_reqs_to_start_packing;
>> >> };
>> >>
>> >> static DEFINE_MUTEX(open_lock);
>> >> @@ -281,6 +282,38 @@ out:
>> >> return ret;
>> >> }
>> >>
>> >> +static ssize_t
>> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
>> >> + struct device_attribute *attr, char *buf)
>> >> +{
>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>> >> + int num_wr_reqs_to_start_packing;
>> >> + int ret;
>> >> +
>> >> + num_wr_reqs_to_start_packing =
>> md->queue.num_wr_reqs_to_start_packing;
>> >> +
>> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n",
>> num_wr_reqs_to_start_packing);
>> >> +
>> >> + mmc_blk_put(md);
>> >> + return ret;
>> >> +}
>> >> +
>> >> +static ssize_t
>> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
>> >> + struct device_attribute *attr,
>> >> + const char *buf, size_t count)
>> >> +{
>> >> + int value;
>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>> >> +
>> >> + sscanf(buf, "%d", &value);
>> >> + if (value >= 0)
>> >> + md->queue.num_wr_reqs_to_start_packing = value;
>> >> +
>> >> + mmc_blk_put(md);
>> >> + return count;
>> >> +}
>> >> +
>> >> static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
>> >> {
>> >> struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
>> >> mmc_queue_req *mqrq,
>> >> mmc_queue_bounce_pre(mqrq);
>> >> }
>> >>
>> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
>> >> + struct request *req)
>> >> +{
>> >> + struct mmc_host *host = mq->card->host;
>> >> + int data_dir;
>> >> +
>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>> >> + return;
>> >> +
>> >> + /*
>> >> + * In case the packing control is not supported by the host, it
>> should
>> >> + * not have an effect on the write packing. Therefore we have to
>> >> enable
>> >> + * the write packing
>> >> + */
>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>> >> + mq->wr_packing_enabled = true;
>> >> + return;
>> >> + }
>> >> +
>> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>> >> + if (mq->num_of_potential_packed_wr_reqs >
>> >> + mq->num_wr_reqs_to_start_packing)
>> >> + mq->wr_packing_enabled = true;
>> >> + return;
>> >> + }
>> >> +
>> >> + data_dir = rq_data_dir(req);
>> >> +
>> >> + if (data_dir == READ) {
>> >> + mq->num_of_potential_packed_wr_reqs = 0;
>> >> + mq->wr_packing_enabled = false;
>> >> + return;
>> >> + } else if (data_dir == WRITE) {
>> >> + mq->num_of_potential_packed_wr_reqs++;
>> >> + }
>> >> +
>> >> + if (mq->num_of_potential_packed_wr_reqs >
>> >> + mq->num_wr_reqs_to_start_packing)
>> >> + mq->wr_packing_enabled = true;
>> > Write Packing is available only if continuing write requests are over
>> > num_wr_reqs_to_start_packing?
>> > That means individual request(1...17) will be issued with non-packing.
>> > Could you explain your policy more?
>> We try to identify the case where there is parallel read and write
>> operations. In our experiments we found out that the number of write
>> requests between read requests in parallel read and write operations
>> doesn't exceed 17 requests. Therefore, we can assume that fetching more
>> than 17 write requests without hitting a read request can indicate that
>> there is no read activity.
> We can apply this experiment regardless I/O scheduler?
> Which I/O scheduler was used with this experiment?
The experiment was performed with the CFQ scheduler. Since the deadline
uses a batch of 16 requests it should also fit the deadline scheduler.
In case another value is required, this value can be changed via sysfs.
>
>> You are right that this affects the write throughput a bit but the goal
>> of
>> this algorithm is to make sure the read throughput and latency are not
>> decreased due to write. If this is not the desired result, this
>> algorithm
>> can be disabled.
>> >> +
>> >> +}
>> >> +
>> >> static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct
>> request
>> >> *req)
>> >> {
>> >> struct request_queue *q = mq->queue;
>> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
>> >> mmc_queue *mq, struct request *req)
>> >> !card->ext_csd.packed_event_en)
>> >> goto no_packed;
>> >>
>> >> + if (!mq->wr_packing_enabled)
>> >> + goto no_packed;
>> > If wr_packing_enabled is set to true, several write requests can be
>> > packed.
>> > We don't need to consider read request since packed write?
>> I'm not sure I understand the question. We check if there was a read
>> request in the mmc_blk_write_packing_control, and in such a case set
>> mq->wr_packing_enabled to false.
>> If I didn't answer the question, please explain it again.
> Packed write can be possible after exceeding 17 requests.
> Is it assured that read request doesn't follow immediately after packed
> write?
> I wonder this case.
Currently in such a case we will send the packed command followed by the
read request. The latency of this read request will be high due to waiting
for the completion of the packed write. However, since we will disable the
write packing, the latency of the following read requests will be low.
We are working on a solution where the read request will bypass the write
requests in such a case. This change requires modification of the
scheduler in order to re-insert the write requests to the scheduler.
>
> Thanks,
> Seungwon Jeon.
>>
>> >
>> > Thanks,
>> > Seungwon Jeon
>>
>> Thanks,
>> Maya Erez
>> --
>> Sent by a consultant of the Qualcomm Innovation Center, Inc.
>> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-mmc" in
>> the body of a message to [email protected]
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
>

Thanks,
Maya Erez
Consultant for Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-11 14:39:46

by Venkatraman S

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Mon, Jun 11, 2012 at 7:25 PM, <[email protected]> wrote:
>
>> Maya Erez <[email protected]> wrote:
>>>
>>> > Hi,
>>> >
>>> > How can we check the effect?
>>> > Do you have any result?
>>> We ran parallel lmdd read and write operations and found out that the
>>> write packing causes the read throughput to drop from 24MB/s to 12MB/s.
>>> The write packing control managed to increase the read throughput back
>>> to
>>> the original value.
>>> We also examined "real life" scenarios, such as performing a big push
>>> operation in parallel to launching several applications. We measured the
>>> read latency and found out that with the write packing control the worst
>>> case of the read latency was smaller.
>>>
>>> > Please check the several comment below.
>>> >
>>> > Maya Erez <[email protected]> wrote:
>>> >> The write packing control will ensure that read requests latency is
>>> >> not increased due to long write packed commands.
>>> >>
>>> >> The trigger for enabling the write packing is managing to pack
>>> several
>>> >> write requests. The number of potential packed requests that will
>>> >> trigger
>>> >> the packing can be configured via sysfs by writing the required value
>>> >> to:
>>> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>> >> The trigger for disabling the write packing is fetching a read
>>> request.
>>> >>
>>> >> ---
>>> >>  Documentation/mmc/mmc-dev-attrs.txt |   17 ++++++
>>> >>  drivers/mmc/card/block.c            |  100
>>> >> ++++++++++++++++++++++++++++++++++-
>>> >>  drivers/mmc/card/queue.c            |    8 +++
>>> >>  drivers/mmc/card/queue.h            |    3 +
>>> >>  include/linux/mmc/host.h            |    1 +
>>> >>  5 files changed, 128 insertions(+), 1 deletions(-)
>>> >>
>>> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>>> >> b/Documentation/mmc/mmc-dev-attrs.txt
>>> >> index 22ae844..08f7312 100644
>>> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
>>> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>>> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
>>> >>
>>> >> ? force_ro ? ? ? ? ? ? ? ?Enforce read-only access even if write protect switch is
>>> >> off.
>>> >>
>>> >> + num_wr_reqs_to_start_packing ? ?This attribute is used to determine
>>> >> + the trigger for activating the write packing, in case the write
>>> >> + packing control feature is enabled.
>>> >> +
>>> >> + When the MMC manages to reach a point where
>>> >> num_wr_reqs_to_start_packing
>>> >> + write requests could be packed, it enables the write packing
>>> feature.
>>> >> + This allows us to start the write packing only when it is
>>> beneficial
>>> >> + and has minimum affect on the read latency.
>>> >> +
>>> >> + The number of potential packed requests that will trigger the
>>> packing
>>> >> + can be configured via sysfs by writing the required value to:
>>> >> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>> >> +
>>> >> + The default value of num_wr_reqs_to_start_packing was determined by
>>> >> + running parallel lmdd write and lmdd read operations and
>>> calculating
>>> >> + the max number of packed writes requests.
>>> >> +
>>> >> ?SD and MMC Device Attributes
>>> >> ?============================
>>> >>
>>> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
>>> >> index 2785fd4..ef192fb 100644
>>> >> --- a/drivers/mmc/card/block.c
>>> >> +++ b/drivers/mmc/card/block.c
>>> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>>> >> ? struct device_attribute force_ro;
>>> >> ? struct device_attribute power_ro_lock;
>>> >> ? int ? ? area_type;
>>> >> + struct device_attribute num_wr_reqs_to_start_packing;
>>> >> ?};
>>> >>
>>> >> ?static DEFINE_MUTEX(open_lock);
>>> >> @@ -281,6 +282,38 @@ out:
>>> >> ? return ret;
>>> >> ?}
>>> >>
>>> >> +static ssize_t
>>> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? struct device_attribute *attr, char *buf)
>>> >> +{
>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>> >> + int num_wr_reqs_to_start_packing;
>>> >> + int ret;
>>> >> +
>>> >> + num_wr_reqs_to_start_packing =
>>> md->queue.num_wr_reqs_to_start_packing;
>>> >> +
>>> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n",
>>> num_wr_reqs_to_start_packing);
>>> >> +
>>> >> + mmc_blk_put(md);
>>> >> + return ret;
>>> >> +}
>>> >> +
>>> >> +static ssize_t
>>> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?struct device_attribute *attr,
>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t count)
>>> >> +{
>>> >> + int value;
>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>> >> +
>>> >> + sscanf(buf, "%d", &value);
>>> >> + if (value >= 0)
>>> >> + ? ? ? ? md->queue.num_wr_reqs_to_start_packing = value;
>>> >> +
>>> >> + mmc_blk_put(md);
>>> >> + return count;
>>> >> +}
>>> >> +
>>> >> ?static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
>>> >> ?{
>>> >> ? struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>>> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
>>> >> mmc_queue_req *mqrq,
>>> >> ? mmc_queue_bounce_pre(mqrq);
>>> >> ?}
>>> >>
>>> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? struct request *req)
>>> >> +{
>>> >> + struct mmc_host *host = mq->card->host;
>>> >> + int data_dir;
>>> >> +
>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>>> >> + ? ? ? ? return;
>>> >> +
>>> >> + /*
>>> >> + ?* In case the packing control is not supported by the host, it
>>> should
>>> >> + ?* not have an effect on the write packing. Therefore we have to
>>> >> enable
>>> >> + ?* the write packing
>>> >> + ?*/
>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>> >> + ? ? ? ? return;
>>> >> + }
>>> >> +
>>> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>>> >> + ? ? ? ? if (mq->num_of_potential_packed_wr_reqs >
>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>> >> + ? ? ? ? ? ? ? ? mq->wr_packing_enabled = true;
>>> >> + ? ? ? ? return;
>>> >> + }
>>> >> +
>>> >> + data_dir = rq_data_dir(req);
>>> >> +
>>> >> + if (data_dir == READ) {
>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs = 0;
>>> >> + ? ? ? ? mq->wr_packing_enabled = false;
>>> >> + ? ? ? ? return;
>>> >> + } else if (data_dir == WRITE) {
>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs++;
>>> >> + }
>>> >> +
>>> >> + if (mq->num_of_potential_packed_wr_reqs >
>>> >> + ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>> > Write Packing is available only if continuing write requests are over
>>> > num_wr_reqs_to_start_packing?
>>> > That means individual request(1...17) will be issued with non-packing.
>>> > Could you explain your policy more?
>>> We try to identify the case where there is parallel read and write
>>> operations. In our experiments we found out that the number of write
>>> requests between read requests in parallel read and write operations
>>> doesn't exceed 17 requests. Therefore, we can assume that fetching more
>>> than 17 write requests without hitting a read request can indicate that
>>> there is no read activity.
>> We can apply this experiment regardless I/O scheduler?
>> Which I/O scheduler was used with this experiment?
> The experiment was performed with the CFQ scheduler. Since the deadline
> uses a batch of 16 requests it should also fit the deadline scheduler.
> In case another value is required, this value can be changed via sysfs.
>>
>>> You are right that this affects the write throughput a bit but the goal
>>> of
>>> this algorithm is to make sure the read throughput and latency are not
>>> decreased due to write. If this is not the desired result, this
>>> algorithm
>>> can be disabled.
>>> >> +
>>> >> +}
>>> >> +
>>> >> ?static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct
>>> request
>>> >> *req)
>>> >> ?{
>>> >> ? struct request_queue *q = mq->queue;
>>> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
>>> >> mmc_queue *mq, struct request *req)
>>> >> ? ? ? ? ? ? ? ? ? !card->ext_csd.packed_event_en)
>>> >> ? ? ? ? ? goto no_packed;
>>> >>
>>> >> + if (!mq->wr_packing_enabled)
>>> >> + ? ? ? ? goto no_packed;
>>> > If wr_packing_enabled is set to true, several write requests can be
>>> > packed.
>>> > We don't need to consider read request since packed write?
>>> I'm not sure I understand the question. We check if there was a read
>>> request in the mmc_blk_write_packing_control, and in such a case set
>>> mq->wr_packing_enabled to false.
>>> If I didn't answer the question, please explain it again.
>> Packed write can be possible after exceeding 17 requests.
>> Is it assured that read request doesn't follow immediately after packed
>> write?
>> I wonder this case.
> Currently in such a case we will send the packed command followed by the
> read request. The latency of this read request will be high due to waiting
> for the completion of the packed write. However, since we will disable the
> write packing, the latency of the following read requests will be low.
> We are working on a solution where the read request will bypass the write
> requests in such a case. This change requires modification of the
> scheduler in order to re-insert the write requests to the scheduler.
>>

Thats the precise reason for using foreground HPI (shameless plug :-))
I understand the intent of write packing control, but using the number
of requests
as a metric is too coarse. Some writes could be for only one sector
(512B) and others
could be in 512KB or more, giving a 1000x variance.

Foreground HPI solves this problem by interrupting only on a wait threshold.

Another aspect is that if a packed write is in progress, and you have
a read request,
you will most likely disable packing for the _next_ write, not the
ongoing one, right ?
That's too late an intervention IMHO.

2012-06-11 17:20:19

by Venkatraman S

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Sat, Jun 9, 2012 at 8:16 PM, <[email protected]> wrote:
>
>> Hi,
>>
>> How can we check the effect?
>> Do you have any result?
> We ran parallel lmdd read and write operations and found out that the
> write packing causes the read throughput to drop from 24MB/s to 12MB/s.

Whoa! That's a big drop.
BTW, is there a problem with throughput or latency, or both ?
If these numbers are over long duration (>5 seconds), then where are
the cycles going?
It would be nice to see some blktrace figures for the issue, and then fix it,
rather than apply a band aid like the write-packing-control on top..


> The write packing control managed to increase the read throughput back to
> the original value.
> We also examined "real life" scenarios, such as performing a big push
> operation in parallel to launching several applications. We measured the
> read latency and found out that with the write packing control the worst
> case of the read latency was smaller.
>
>> Please check the several comment below.
>>
>> Maya Erez <[email protected]> wrote:
>>> The write packing control will ensure that read requests latency is
>>> not increased due to long write packed commands.
>>>
>>> The trigger for enabling the write packing is managing to pack several
>>> write requests. The number of potential packed requests that will
>>> trigger
>>> the packing can be configured via sysfs by writing the required value
>>> to:
>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>> The trigger for disabling the write packing is fetching a read request.
>>>

2012-06-11 20:10:45

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


> On Mon, Jun 11, 2012 at 7:25 PM, <[email protected]> wrote:
>>
>>> Maya Erez <[email protected]> wrote:
>>>>
>>>> > Hi,
>>>> >
>>>> > How can we check the effect?
>>>> > Do you have any result?
>>>> We ran parallel lmdd read and write operations and found out that the
>>>> write packing causes the read throughput to drop from 24MB/s to
>>>> 12MB/s.
>>>> The write packing control managed to increase the read throughput back
>>>> to
>>>> the original value.
>>>> We also examined "real life" scenarios, such as performing a big push
>>>> operation in parallel to launching several applications. We measured
>>>> the
>>>> read latency and found out that with the write packing control the
>>>> worst
>>>> case of the read latency was smaller.
>>>>
>>>> > Please check the several comment below.
>>>> >
>>>> > Maya Erez <[email protected]> wrote:
>>>> >> The write packing control will ensure that read requests latency is
>>>> >> not increased due to long write packed commands.
>>>> >>
>>>> >> The trigger for enabling the write packing is managing to pack
>>>> several
>>>> >> write requests. The number of potential packed requests that will
>>>> >> trigger
>>>> >> the packing can be configured via sysfs by writing the required
>>>> value
>>>> >> to:
>>>> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>> >> The trigger for disabling the write packing is fetching a read
>>>> request.
>>>> >>
>>>> >> ---
>>>> >>  Documentation/mmc/mmc-dev-attrs.txt |   17 ++++++
>>>> >>  drivers/mmc/card/block.c            |  100
>>>> >> ++++++++++++++++++++++++++++++++++-
>>>> >>  drivers/mmc/card/queue.c            |    8 +++
>>>> >>  drivers/mmc/card/queue.h            |    3 +
>>>> >>  include/linux/mmc/host.h            |    1 +
>>>> >>  5 files changed, 128 insertions(+), 1 deletions(-)
>>>> >>
>>>> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>>>> >> b/Documentation/mmc/mmc-dev-attrs.txt
>>>> >> index 22ae844..08f7312 100644
>>>> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
>>>> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>>>> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
>>>> >>
>>>> >> ? force_ro ? ? ? ? ? ? ? ?Enforce read-only access even if write
>>>> protect switch is
>>>> >> off.
>>>> >>
>>>> >> + num_wr_reqs_to_start_packing ? ?This attribute is used to
>>>> determine
>>>> >> + the trigger for activating the write packing, in case the write
>>>> >> + packing control feature is enabled.
>>>> >> +
>>>> >> + When the MMC manages to reach a point where
>>>> >> num_wr_reqs_to_start_packing
>>>> >> + write requests could be packed, it enables the write packing
>>>> feature.
>>>> >> + This allows us to start the write packing only when it is
>>>> beneficial
>>>> >> + and has minimum affect on the read latency.
>>>> >> +
>>>> >> + The number of potential packed requests that will trigger the
>>>> packing
>>>> >> + can be configured via sysfs by writing the required value to:
>>>> >> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>> >> +
>>>> >> + The default value of num_wr_reqs_to_start_packing was determined
>>>> by
>>>> >> + running parallel lmdd write and lmdd read operations and
>>>> calculating
>>>> >> + the max number of packed writes requests.
>>>> >> +
>>>> >> ?SD and MMC Device Attributes
>>>> >> ?============================
>>>> >>
>>>> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
>>>> >> index 2785fd4..ef192fb 100644
>>>> >> --- a/drivers/mmc/card/block.c
>>>> >> +++ b/drivers/mmc/card/block.c
>>>> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>>>> >> ? struct device_attribute force_ro;
>>>> >> ? struct device_attribute power_ro_lock;
>>>> >> ? int ? ? area_type;
>>>> >> + struct device_attribute num_wr_reqs_to_start_packing;
>>>> >> ?};
>>>> >>
>>>> >> ?static DEFINE_MUTEX(open_lock);
>>>> >> @@ -281,6 +282,38 @@ out:
>>>> >> ? return ret;
>>>> >> ?}
>>>> >>
>>>> >> +static ssize_t
>>>> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? struct device_attribute *attr, char
>>>> *buf)
>>>> >> +{
>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>>> >> + int num_wr_reqs_to_start_packing;
>>>> >> + int ret;
>>>> >> +
>>>> >> + num_wr_reqs_to_start_packing =
>>>> md->queue.num_wr_reqs_to_start_packing;
>>>> >> +
>>>> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n",
>>>> num_wr_reqs_to_start_packing);
>>>> >> +
>>>> >> + mmc_blk_put(md);
>>>> >> + return ret;
>>>> >> +}
>>>> >> +
>>>> >> +static ssize_t
>>>> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?struct device_attribute *attr,
>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t count)
>>>> >> +{
>>>> >> + int value;
>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>>> >> +
>>>> >> + sscanf(buf, "%d", &value);
>>>> >> + if (value >= 0)
>>>> >> + ? ? ? ? md->queue.num_wr_reqs_to_start_packing = value;
>>>> >> +
>>>> >> + mmc_blk_put(md);
>>>> >> + return count;
>>>> >> +}
>>>> >> +
>>>> >> ?static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
>>>> >> ?{
>>>> >> ? struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>>>> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
>>>> >> mmc_queue_req *mqrq,
>>>> >> ? mmc_queue_bounce_pre(mqrq);
>>>> >> ?}
>>>> >>
>>>> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? struct request *req)
>>>> >> +{
>>>> >> + struct mmc_host *host = mq->card->host;
>>>> >> + int data_dir;
>>>> >> +
>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>>>> >> + ? ? ? ? return;
>>>> >> +
>>>> >> + /*
>>>> >> + ?* In case the packing control is not supported by the host, it
>>>> should
>>>> >> + ?* not have an effect on the write packing. Therefore we have to
>>>> >> enable
>>>> >> + ?* the write packing
>>>> >> + ?*/
>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>> >> + ? ? ? ? return;
>>>> >> + }
>>>> >> +
>>>> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>>>> >> + ? ? ? ? if (mq->num_of_potential_packed_wr_reqs >
>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>>> >> + ? ? ? ? ? ? ? ? mq->wr_packing_enabled = true;
>>>> >> + ? ? ? ? return;
>>>> >> + }
>>>> >> +
>>>> >> + data_dir = rq_data_dir(req);
>>>> >> +
>>>> >> + if (data_dir == READ) {
>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs = 0;
>>>> >> + ? ? ? ? mq->wr_packing_enabled = false;
>>>> >> + ? ? ? ? return;
>>>> >> + } else if (data_dir == WRITE) {
>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs++;
>>>> >> + }
>>>> >> +
>>>> >> + if (mq->num_of_potential_packed_wr_reqs >
>>>> >> + ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>> > Write Packing is available only if continuing write requests are
>>>> over
>>>> > num_wr_reqs_to_start_packing?
>>>> > That means individual request(1...17) will be issued with
>>>> non-packing.
>>>> > Could you explain your policy more?
>>>> We try to identify the case where there is parallel read and write
>>>> operations. In our experiments we found out that the number of write
>>>> requests between read requests in parallel read and write operations
>>>> doesn't exceed 17 requests. Therefore, we can assume that fetching
>>>> more
>>>> than 17 write requests without hitting a read request can indicate
>>>> that
>>>> there is no read activity.
>>> We can apply this experiment regardless I/O scheduler?
>>> Which I/O scheduler was used with this experiment?
>> The experiment was performed with the CFQ scheduler. Since the deadline
>> uses a batch of 16 requests it should also fit the deadline scheduler.
>> In case another value is required, this value can be changed via sysfs.
>>>
>>>> You are right that this affects the write throughput a bit but the
>>>> goal
>>>> of
>>>> this algorithm is to make sure the read throughput and latency are not
>>>> decreased due to write. If this is not the desired result, this
>>>> algorithm
>>>> can be disabled.
>>>> >> +
>>>> >> +}
>>>> >> +
>>>> >> ?static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct
>>>> request
>>>> >> *req)
>>>> >> ?{
>>>> >> ? struct request_queue *q = mq->queue;
>>>> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
>>>> >> mmc_queue *mq, struct request *req)
>>>> >> ? ? ? ? ? ? ? ? ? !card->ext_csd.packed_event_en)
>>>> >> ? ? ? ? ? goto no_packed;
>>>> >>
>>>> >> + if (!mq->wr_packing_enabled)
>>>> >> + ? ? ? ? goto no_packed;
>>>> > If wr_packing_enabled is set to true, several write requests can be
>>>> > packed.
>>>> > We don't need to consider read request since packed write?
>>>> I'm not sure I understand the question. We check if there was a read
>>>> request in the mmc_blk_write_packing_control, and in such a case set
>>>> mq->wr_packing_enabled to false.
>>>> If I didn't answer the question, please explain it again.
>>> Packed write can be possible after exceeding 17 requests.
>>> Is it assured that read request doesn't follow immediately after packed
>>> write?
>>> I wonder this case.
>> Currently in such a case we will send the packed command followed by the
>> read request. The latency of this read request will be high due to
>> waiting
>> for the completion of the packed write. However, since we will disable
>> the
>> write packing, the latency of the following read requests will be low.
>> We are working on a solution where the read request will bypass the
>> write
>> requests in such a case. This change requires modification of the
>> scheduler in order to re-insert the write requests to the scheduler.
>>>
>
> Thats the precise reason for using foreground HPI (shameless plug :-))
> I understand the intent of write packing control, but using the number
> of requests
> as a metric is too coarse. Some writes could be for only one sector
> (512B) and others
> could be in 512KB or more, giving a 1000x variance.
>
> Foreground HPI solves this problem by interrupting only on a wait
> threshold.
>
> Another aspect is that if a packed write is in progress, and you have
> a read request,
> you will most likely disable packing for the _next_ write, not the
> ongoing one, right ?
> That's too late an intervention IMHO.
>
If a write request is in progress and a read is fetched we plan to use HPI
to stop it and re-insert the remainder of the packed write command back to
the scheduler for a later dispatch.
Regarding the packing control trigger, we also tried using a trigger of an
amount of write bytes between read. However, the number of potential
packed requests seemed like a reasonable trigger since we would like to
activate the packing only when it will be beneficial, regardless of the
write request sizes.

Thanks,
Maya Erez
Consultant for Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-11 20:20:00

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


> On Sat, Jun 9, 2012 at 8:16 PM, <[email protected]> wrote:
>>
>>> Hi,
>>>
>>> How can we check the effect?
>>> Do you have any result?
>> We ran parallel lmdd read and write operations and found out that the
>> write packing causes the read throughput to drop from 24MB/s to 12MB/s.
>
> Whoa! That's a big drop.
> BTW, is there a problem with throughput or latency, or both ?
> If these numbers are over long duration (>5 seconds), then where are
> the cycles going?
> It would be nice to see some blktrace figures for the issue, and then fix
> it,
> rather than apply a band aid like the write-packing-control on top..
I believe this is because the write packing changes the dispatching policy
of the scheduler. Without write packing only 2 write requests were
fetched, giving the read requests a chance to be inserted into the
scheduler while we wait for the completion of the first write request.
Then when the next fetch was performed the read request would be the
chosen one. When write packing is enabled we keep fetching all the write
requests that are queued (assuming there are no read requests inserted
yet) and when the read is inserted and fetched it has to wait for the
completion of a larger number of write requests.

>
>
>> The write packing control managed to increase the read throughput back
>> to
>> the original value.
>> We also examined "real life" scenarios, such as performing a big push
>> operation in parallel to launching several applications. We measured the
>> read latency and found out that with the write packing control the worst
>> case of the read latency was smaller.
>>
>>> Please check the several comment below.
>>>
>>> Maya Erez <[email protected]> wrote:
>>>> The write packing control will ensure that read requests latency is
>>>> not increased due to long write packed commands.
>>>>
>>>> The trigger for enabling the write packing is managing to pack several
>>>> write requests. The number of potential packed requests that will
>>>> trigger
>>>> the packing can be configured via sysfs by writing the required value
>>>> to:
>>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>> The trigger for disabling the write packing is fetching a read
>>>> request.
>>>>
>

Thanks,
Maya Erez
Consultant for Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-11 21:19:50

by Muthu Kumar

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]> wrote:
> The write packing control will ensure that read requests latency is
> not increased due to long write packed commands.
>
> The trigger for enabling the write packing is managing to pack several
> write requests. The number of potential packed requests that will trigger
> the packing can be configured via sysfs by writing the required value to:
> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
> The trigger for disabling the write packing is fetching a read request.
>

If it is applicable only to MMC why do we have this sysfs attr for all
block devices?

2012-06-12 00:28:47

by Muthu Kumar

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Mon, Jun 11, 2012 at 2:19 PM, Muthu Kumar <[email protected]> wrote:
> On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]> wrote:
>> The write packing control will ensure that read requests latency is
>> not increased due to long write packed commands.
>>
>> The trigger for enabling the write packing is managing to pack several
>> write requests. The number of potential packed requests that will trigger
>> the packing can be configured via sysfs by writing the required value to:
>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>> The trigger for disabling the write packing is fetching a read request.
>>
>
> If it is applicable only to MMC why do we have this sysfs attr for all
> block devices?

Just to be clear, please create a directory, say mmc, under
/sys/block/<dev>/ and create the attr inside that.

You can refer to dm (dm-sysfs.c) for sample implementation.

Regards,
Muthu

2012-06-12 04:08:16

by Venkatraman S

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Tue, Jun 12, 2012 at 1:49 AM, <[email protected]> wrote:
>
>> On Sat, Jun 9, 2012 at 8:16 PM, ?<[email protected]> wrote:
>>>
>>>> Hi,
>>>>
>>>> How can we check the effect?
>>>> Do you have any result?
>>> We ran parallel lmdd read and write operations and found out that the
>>> write packing causes the read throughput to drop from 24MB/s to 12MB/s.
>>
>> Whoa! That's a big drop.
>> BTW, is there a problem with throughput or latency, or both ?
>> If these numbers are over long duration (>5 seconds), then where are
>> the cycles going?
>> It would be nice to see some blktrace figures for the issue, and then fix
>> it,
>> rather than apply a band aid like the write-packing-control on top..
> I believe this is because the write packing changes the dispatching policy
> of the scheduler. Without write packing only 2 write requests were
> fetched, giving the read requests a chance to be inserted into the
> scheduler while we wait for the completion of the first write request.

Which I/O scheduler are you using ? Both CFQ and deadline would do the
balancing
act to prevent writes overwhelming reads. Writes are async and reads
are sync (usually),
so this imbalance would have existed otherwise, packed command or not.

> Then when the next fetch was performed the read request would be the
> chosen one. When write packing is enabled we keep fetching all the write
> requests that are queued (assuming there are no read requests inserted
> yet) and when the read is inserted and fetched it has to wait for the
> completion of a larger number of write requests.
>

Yes - but that should introduce latency, not bandwidth drop - unless you are
using the no-op scheduler.

>>
>>
>>> The write packing control managed to increase the read throughput back
>>> to
>>> the original value.
>>> We also examined "real life" scenarios, such as performing a big push
>>> operation in parallel to launching several applications. We measured the
>>> read latency and found out that with the write packing control the worst
>>> case of the read latency was smaller.
>>>
>>>> Please check the several comment below.
>>>>
>>>> Maya Erez <[email protected]> wrote:
>>>>> The write packing control will ensure that read requests latency is
>>>>> not increased due to long write packed commands.
>>>>>
>>>>> The trigger for enabling the write packing is managing to pack several
>>>>> write requests. The number of potential packed requests that will
>>>>> trigger
>>>>> the packing can be configured via sysfs by writing the required value
>>>>> to:
>>>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>>> The trigger for disabling the write packing is fetching a read
>>>>> request.
>>>>>
>>
>
> Thanks,
> Maya Erez
> Consultant for Qualcomm Innovation Center, Inc.
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
>

2012-06-12 04:16:31

by Venkatraman S

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Tue, Jun 12, 2012 at 1:40 AM, <[email protected]> wrote:
>
>> On Mon, Jun 11, 2012 at 7:25 PM, ?<[email protected]> wrote:
>>>
>>>> Maya Erez <[email protected]> wrote:
>>>>>
>>>>> > Hi,
>>>>> >
>>>>> > How can we check the effect?
>>>>> > Do you have any result?
>>>>> We ran parallel lmdd read and write operations and found out that the
>>>>> write packing causes the read throughput to drop from 24MB/s to
>>>>> 12MB/s.
>>>>> The write packing control managed to increase the read throughput back
>>>>> to
>>>>> the original value.
>>>>> We also examined "real life" scenarios, such as performing a big push
>>>>> operation in parallel to launching several applications. We measured
>>>>> the
>>>>> read latency and found out that with the write packing control the
>>>>> worst
>>>>> case of the read latency was smaller.
>>>>>
>>>>> > Please check the several comment below.
>>>>> >
>>>>> > Maya Erez <[email protected]> wrote:
>>>>> >> The write packing control will ensure that read requests latency is
>>>>> >> not increased due to long write packed commands.
>>>>> >>
>>>>> >> The trigger for enabling the write packing is managing to pack
>>>>> several
>>>>> >> write requests. The number of potential packed requests that will
>>>>> >> trigger
>>>>> >> the packing can be configured via sysfs by writing the required
>>>>> value
>>>>> >> to:
>>>>> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>>> >> The trigger for disabling the write packing is fetching a read
>>>>> request.
>>>>> >>
>>>>> >> ---
>>>>> >> ?Documentation/mmc/mmc-dev-attrs.txt | ? 17 ++++++
>>>>> >> ?drivers/mmc/card/block.c ? ? ? ? ? ?| ?100
>>>>> >> ++++++++++++++++++++++++++++++++++-
>>>>> >> ?drivers/mmc/card/queue.c ? ? ? ? ? ?| ? ?8 +++
>>>>> >> ?drivers/mmc/card/queue.h ? ? ? ? ? ?| ? ?3 +
>>>>> >> ?include/linux/mmc/host.h ? ? ? ? ? ?| ? ?1 +
>>>>> >> ?5 files changed, 128 insertions(+), 1 deletions(-)
>>>>> >>
>>>>> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>>>>> >> b/Documentation/mmc/mmc-dev-attrs.txt
>>>>> >> index 22ae844..08f7312 100644
>>>>> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
>>>>> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>>>>> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
>>>>> >>
>>>>> >> ? force_ro ? ? ? ? ? ? ? ?Enforce read-only access even if write
>>>>> protect switch is
>>>>> >> off.
>>>>> >>
>>>>> >> + num_wr_reqs_to_start_packing ? ?This attribute is used to
>>>>> determine
>>>>> >> + the trigger for activating the write packing, in case the write
>>>>> >> + packing control feature is enabled.
>>>>> >> +
>>>>> >> + When the MMC manages to reach a point where
>>>>> >> num_wr_reqs_to_start_packing
>>>>> >> + write requests could be packed, it enables the write packing
>>>>> feature.
>>>>> >> + This allows us to start the write packing only when it is
>>>>> beneficial
>>>>> >> + and has minimum affect on the read latency.
>>>>> >> +
>>>>> >> + The number of potential packed requests that will trigger the
>>>>> packing
>>>>> >> + can be configured via sysfs by writing the required value to:
>>>>> >> + /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>>> >> +
>>>>> >> + The default value of num_wr_reqs_to_start_packing was determined
>>>>> by
>>>>> >> + running parallel lmdd write and lmdd read operations and
>>>>> calculating
>>>>> >> + the max number of packed writes requests.
>>>>> >> +
>>>>> >> ?SD and MMC Device Attributes
>>>>> >> ?============================
>>>>> >>
>>>>> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
>>>>> >> index 2785fd4..ef192fb 100644
>>>>> >> --- a/drivers/mmc/card/block.c
>>>>> >> +++ b/drivers/mmc/card/block.c
>>>>> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>>>>> >> ? struct device_attribute force_ro;
>>>>> >> ? struct device_attribute power_ro_lock;
>>>>> >> ? int ? ? area_type;
>>>>> >> + struct device_attribute num_wr_reqs_to_start_packing;
>>>>> >> ?};
>>>>> >>
>>>>> >> ?static DEFINE_MUTEX(open_lock);
>>>>> >> @@ -281,6 +282,38 @@ out:
>>>>> >> ? return ret;
>>>>> >> ?}
>>>>> >>
>>>>> >> +static ssize_t
>>>>> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? struct device_attribute *attr, char
>>>>> *buf)
>>>>> >> +{
>>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>>>> >> + int num_wr_reqs_to_start_packing;
>>>>> >> + int ret;
>>>>> >> +
>>>>> >> + num_wr_reqs_to_start_packing =
>>>>> md->queue.num_wr_reqs_to_start_packing;
>>>>> >> +
>>>>> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n",
>>>>> num_wr_reqs_to_start_packing);
>>>>> >> +
>>>>> >> + mmc_blk_put(md);
>>>>> >> + return ret;
>>>>> >> +}
>>>>> >> +
>>>>> >> +static ssize_t
>>>>> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?struct device_attribute *attr,
>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t count)
>>>>> >> +{
>>>>> >> + int value;
>>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
>>>>> >> +
>>>>> >> + sscanf(buf, "%d", &value);
>>>>> >> + if (value >= 0)
>>>>> >> + ? ? ? ? md->queue.num_wr_reqs_to_start_packing = value;
>>>>> >> +
>>>>> >> + mmc_blk_put(md);
>>>>> >> + return count;
>>>>> >> +}
>>>>> >> +
>>>>> >> ?static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
>>>>> >> ?{
>>>>> >> ? struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>>>>> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
>>>>> >> mmc_queue_req *mqrq,
>>>>> >> ? mmc_queue_bounce_pre(mqrq);
>>>>> >> ?}
>>>>> >>
>>>>> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? struct request *req)
>>>>> >> +{
>>>>> >> + struct mmc_host *host = mq->card->host;
>>>>> >> + int data_dir;
>>>>> >> +
>>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>>>>> >> + ? ? ? ? return;
>>>>> >> +
>>>>> >> + /*
>>>>> >> + ?* In case the packing control is not supported by the host, it
>>>>> should
>>>>> >> + ?* not have an effect on the write packing. Therefore we have to
>>>>> >> enable
>>>>> >> + ?* the write packing
>>>>> >> + ?*/
>>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>>> >> + ? ? ? ? return;
>>>>> >> + }
>>>>> >> +
>>>>> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>>>>> >> + ? ? ? ? if (mq->num_of_potential_packed_wr_reqs >
>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>>>> >> + ? ? ? ? ? ? ? ? mq->wr_packing_enabled = true;
>>>>> >> + ? ? ? ? return;
>>>>> >> + }
>>>>> >> +
>>>>> >> + data_dir = rq_data_dir(req);
>>>>> >> +
>>>>> >> + if (data_dir == READ) {
>>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs = 0;
>>>>> >> + ? ? ? ? mq->wr_packing_enabled = false;
>>>>> >> + ? ? ? ? return;
>>>>> >> + } else if (data_dir == WRITE) {
>>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs++;
>>>>> >> + }
>>>>> >> +
>>>>> >> + if (mq->num_of_potential_packed_wr_reqs >
>>>>> >> + ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>>> > Write Packing is available only if continuing write requests are
>>>>> over
>>>>> > num_wr_reqs_to_start_packing?
>>>>> > That means individual request(1...17) will be issued with
>>>>> non-packing.
>>>>> > Could you explain your policy more?
>>>>> We try to identify the case where there is parallel read and write
>>>>> operations. In our experiments we found out that the number of write
>>>>> requests between read requests in parallel read and write operations
>>>>> doesn't exceed 17 requests. Therefore, we can assume that fetching
>>>>> more
>>>>> than 17 write requests without hitting a read request can indicate
>>>>> that
>>>>> there is no read activity.
>>>> We can apply this experiment regardless I/O scheduler?
>>>> Which I/O scheduler was used with this experiment?
>>> The experiment was performed with the CFQ scheduler. Since the deadline
>>> uses a batch of 16 requests it should also fit the deadline scheduler.
>>> In case another value is required, this value can be changed via sysfs.
>>>>
>>>>> You are right that this affects the write throughput a bit but the
>>>>> goal
>>>>> of
>>>>> this algorithm is to make sure the read throughput and latency are not
>>>>> decreased due to write. If this is not the desired result, this
>>>>> algorithm
>>>>> can be disabled.
>>>>> >> +
>>>>> >> +}
>>>>> >> +
>>>>> >> ?static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct
>>>>> request
>>>>> >> *req)
>>>>> >> ?{
>>>>> >> ? struct request_queue *q = mq->queue;
>>>>> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
>>>>> >> mmc_queue *mq, struct request *req)
>>>>> >> ? ? ? ? ? ? ? ? ? !card->ext_csd.packed_event_en)
>>>>> >> ? ? ? ? ? goto no_packed;
>>>>> >>
>>>>> >> + if (!mq->wr_packing_enabled)
>>>>> >> + ? ? ? ? goto no_packed;
>>>>> > If wr_packing_enabled is set to true, several write requests can be
>>>>> > packed.
>>>>> > We don't need to consider read request since packed write?
>>>>> I'm not sure I understand the question. We check if there was a read
>>>>> request in the mmc_blk_write_packing_control, and in such a case set
>>>>> mq->wr_packing_enabled to false.
>>>>> If I didn't answer the question, please explain it again.
>>>> Packed write can be possible after exceeding 17 requests.
>>>> Is it assured that read request doesn't follow immediately after packed
>>>> write?
>>>> I wonder this case.
>>> Currently in such a case we will send the packed command followed by the
>>> read request. The latency of this read request will be high due to
>>> waiting
>>> for the completion of the packed write. However, since we will disable
>>> the
>>> write packing, the latency of the following read requests will be low.
>>> We are working on a solution where the read request will bypass the
>>> write
>>> requests in such a case. This change requires modification of the
>>> scheduler in order to re-insert the write requests to the scheduler.
>>>>
>>
>> Thats the precise reason for using foreground HPI (shameless plug :-))
>> I understand the intent of write packing control, but using the number
>> of requests
>> as a metric is too coarse. Some writes could be for only one sector
>> (512B) and others
>> could be in 512KB or more, giving a 1000x variance.
>>
>> Foreground HPI solves this problem by interrupting only on a wait
>> threshold.
>>
>> Another aspect is that if a packed write is in progress, and you have
>> a read request,
>> you will most likely disable packing for the _next_ write, not the
>> ongoing one, right ?
>> That's too late an intervention IMHO.
>>
> If a write request is in progress and a read is fetched we pln to use HPI
> to stop it and re-insert the remider of the write packed command back to
> the scheduler for a later dispatch.
IIUC, there were 2 reasons mentioned by you for introducing write
packing control -
1) Read bandwidth drop
2) Use case "latency" or if I were to guess, "sluggish UI".

So if (2) is solved by HPI, we can investigate the reason for (1) and
fix that, rather
than adding another functionality (which belongs in the I/O scheduler
anyway) to MMC.

> Regarding the packing control trigger, we also tried using a trigger of an
> amount of write bytes between read. However, the number of potential
> packed requests seemed like the reasonable trigger since we would like to
> activate the packing only when it will be beneficial, regardless of the
> write requests sizes.
>
Why ? How do you know "when it will be beneficial" ? As I mentioned,
the number of
blocks per request would vary over time, and also depends on the
filesystem. OTOH, even small
writes could take a lot longer than usual (>500ms) due to garbage
collection etc.

2012-06-12 19:06:08

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

> On Tue, Jun 12, 2012 at 1:40 AM, <[email protected]> wrote:
>>> On Mon, Jun 11, 2012 at 7:25 PM, ?<[email protected]> wrote:
>>>>> Maya Erez <[email protected]> wrote:
>>>>>> > Hi,
>>>>>> >
>>>>>> > How can we check the effect?
>>>>>> > Do you have any result?
>>>>>> We ran parallel lmdd read and write operations and found out that
the
>>>>>> write packing causes the read throughput to drop from 24MB/s to
12MB/s.
>>>>>> The write packing control managed to increase the read throughput
back
>>>>>> to
>>>>>> the original value.
>>>>>> We also examined "real life" scenarios, such as performing a big
push
>>>>>> operation in parallel to launching several applications. We
measured
>>>>>> the
>>>>>> read latency and found out that with the write packing control the
worst
>>>>>> case of the read latency was smaller.
>>>>>> > Please check the several comment below.
>>>>>> >
>>>>>> > Maya Erez <[email protected]> wrote:
>>>>>> >> The write packing control will ensure that read requests latency
>>>>>> is
>>>>>> >> not increased due to long write packed commands.
>>>>>> >>
>>>>>> >> The trigger for enabling the write packing is managing to pack
>>>>>> several
>>>>>> >> write requests. The number of potential packed requests that
will
>>>>>> >> trigger
>>>>>> >> the packing can be configured via sysfs by writing the required
>>>>>> value
>>>>>> >> to:
>>>>>> >> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing. The
trigger for disabling the write packing is fetching a read
>>>>>> request.
>>>>>> >>
>>>>>> >> ---
>>>>>> >> ?Documentation/mmc/mmc-dev-attrs.txt | ? 17 ++++++
>>>>>> >> ?drivers/mmc/card/block.c ? ? ? ? ? ?| ?100
>>>>>> >> ++++++++++++++++++++++++++++++++++-
>>>>>> >> ?drivers/mmc/card/queue.c ? ? ? ? ? ?| ? ?8 +++
>>>>>> >> ?drivers/mmc/card/queue.h ? ? ? ? ? ?| ? ?3 +
>>>>>> >> ?include/linux/mmc/host.h ? ? ? ? ? ?| ? ?1 +
>>>>>> >> ?5 files changed, 128 insertions(+), 1 deletions(-)
>>>>>> >>
>>>>>> >> diff --git a/Documentation/mmc/mmc-dev-attrs.txt
>>>>>> >> b/Documentation/mmc/mmc-dev-attrs.txt
>>>>>> >> index 22ae844..08f7312 100644
>>>>>> >> --- a/Documentation/mmc/mmc-dev-attrs.txt
>>>>>> >> +++ b/Documentation/mmc/mmc-dev-attrs.txt
>>>>>> >> @@ -8,6 +8,23 @@ The following attributes are read/write.
>>>>>> >>
>>>>>> >> ? force_ro ? ? ? ? ? ? ? ?Enforce read-only access even if write
>>>>>> protect switch is
>>>>>> >> off.
>>>>>> >>
>>>>>> >> + num_wr_reqs_to_start_packing ? ?This attribute is used to
>>>>>> determine
>>>>>> >> + the trigger for activating the write packing, in case the
write
>>>>>> >> + packing control feature is enabled.
>>>>>> >> +
>>>>>> >> + When the MMC manages to reach a point where
>>>>>> >> num_wr_reqs_to_start_packing
>>>>>> >> + write requests could be packed, it enables the write packing
>>>>>> feature.
>>>>>> >> + This allows us to start the write packing only when it is
>>>>>> beneficial
>>>>>> >> + and has minimum affect on the read latency.
>>>>>> >> +
>>>>>> >> + The number of potential packed requests that will trigger the
>>>>>> packing
>>>>>> >> + can be configured via sysfs by writing the required value to:
+ /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing. +
>>>>>> >> + The default value of num_wr_reqs_to_start_packing was
>>>>>> determined
>>>>>> by
>>>>>> >> + running parallel lmdd write and lmdd read operations and
>>>>>> calculating
>>>>>> >> + the max number of packed writes requests.
>>>>>> >> +
>>>>>> >> ?SD and MMC Device Attributes
>>>>>> >> ?============================
>>>>>> >>
>>>>>> >> diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2785fd4..ef192fb 100644
>>>>>> >> --- a/drivers/mmc/card/block.c
>>>>>> >> +++ b/drivers/mmc/card/block.c
>>>>>> >> @@ -114,6 +114,7 @@ struct mmc_blk_data {
>>>>>> >> ? struct device_attribute force_ro;
>>>>>> >> ? struct device_attribute power_ro_lock;
>>>>>> >> ? int ? ? area_type;
>>>>>> >> + struct device_attribute num_wr_reqs_to_start_packing;
>>>>>> >> ?};
>>>>>> >>
>>>>>> >> ?static DEFINE_MUTEX(open_lock);
>>>>>> >> @@ -281,6 +282,38 @@ out:
>>>>>> >> ? return ret;
>>>>>> >> ?}
>>>>>> >>
>>>>>> >> +static ssize_t
>>>>>> >> +num_wr_reqs_to_start_packing_show(struct device *dev,
>>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ? struct device_attribute *attr, char
>>>>>> *buf)
>>>>>> >> +{
>>>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int
num_wr_reqs_to_start_packing;
>>>>>> >> + int ret;
>>>>>> >> +
>>>>>> >> + num_wr_reqs_to_start_packing =
>>>>>> md->queue.num_wr_reqs_to_start_packing;
>>>>>> >> +
>>>>>> >> + ret = snprintf(buf, PAGE_SIZE, "%d\n",
>>>>>> num_wr_reqs_to_start_packing);
>>>>>> >> +
>>>>>> >> + mmc_blk_put(md);
>>>>>> >> + return ret;
>>>>>> >> +}
>>>>>> >> +
>>>>>> >> +static ssize_t
>>>>>> >> +num_wr_reqs_to_start_packing_store(struct device *dev,
>>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? ?struct device_attribute *attr, + ? ?
? ? ? ? ? ? ? ? ? ? ?const char *buf, size_t count) +{
>>>>>> >> + int value;
>>>>>> >> + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + +
sscanf(buf, "%d", &value);
>>>>>> >> + if (value >= 0)
>>>>>> >> + ? ? ? ? md->queue.num_wr_reqs_to_start_packing = value; + +
mmc_blk_put(md);
>>>>>> >> + return count;
>>>>>> >> +}
>>>>>> >> +
>>>>>> >> ?static int mmc_blk_open(struct block_device *bdev, fmode_t
mode)
>>>>>> >> ?{
>>>>>> >> ? struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
>>>>>> >> @@ -1313,6 +1346,48 @@ static void mmc_blk_rw_rq_prep(struct
mmc_queue_req *mqrq,
>>>>>> >> ? mmc_queue_bounce_pre(mqrq);
>>>>>> >> ?}
>>>>>> >>
>>>>>> >> +static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? struct request *req) +{
>>>>>> >> + struct mmc_host *host = mq->card->host;
>>>>>> >> + int data_dir;
>>>>>> >> +
>>>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR))
>>>>>> >> + ? ? ? ? return;
>>>>>> >> +
>>>>>> >> + /*
>>>>>> >> + ?* In case the packing control is not supported by the host,
it
>>>>>> should
>>>>>> >> + ?* not have an effect on the write packing. Therefore we have
>>>>>> to
>>>>>> >> enable
>>>>>> >> + ?* the write packing
>>>>>> >> + ?*/
>>>>>> >> + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
>>>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>>>> >> + ? ? ? ? return;
>>>>>> >> + }
>>>>>> >> +
>>>>>> >> + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
>>>>>> >> + ? ? ? ? if (mq->num_of_potential_packed_wr_reqs >
>>>>>> >> + ? ? ? ? ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing) + ?
? ? ? ? ? ? ? mq->wr_packing_enabled = true;
>>>>>> >> + ? ? ? ? return;
>>>>>> >> + }
>>>>>> >> +
>>>>>> >> + data_dir = rq_data_dir(req);
>>>>>> >> +
>>>>>> >> + if (data_dir == READ) {
>>>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs = 0;
>>>>>> >> + ? ? ? ? mq->wr_packing_enabled = false;
>>>>>> >> + ? ? ? ? return;
>>>>>> >> + } else if (data_dir == WRITE) {
>>>>>> >> + ? ? ? ? mq->num_of_potential_packed_wr_reqs++;
>>>>>> >> + }
>>>>>> >> +
>>>>>> >> + if (mq->num_of_potential_packed_wr_reqs >
>>>>>> >> + ? ? ? ? ? ? ? ? mq->num_wr_reqs_to_start_packing)
>>>>>> >> + ? ? ? ? mq->wr_packing_enabled = true;
>>>>>> > Write Packing is available only if continuing write requests are
>>>>>> over
>>>>>> > num_wr_reqs_to_start_packing?
>>>>>> > That means individual request(1...17) will be issued with
>>>>>> non-packing.
>>>>>> > Could you explain your policy more?
>>>>>> We try to identify the case where there is parallel read and write
operations. In our experiments we found out that the number of
write
>>>>>> requests between read requests in parallel read and write
operations
>>>>>> doesn't exceed 17 requests. Therefore, we can assume that fetching
more
>>>>>> than 17 write requests without hitting a read request can indicate
that
>>>>>> there is no read activity.
>>>>> We can apply this experiment regardless I/O scheduler?
>>>>> Which I/O scheduler was used with this experiment?
>>>> The experiment was performed with the CFQ scheduler. Since the
deadline
>>>> uses a batch of 16 requests it should also fit the deadline
scheduler.
>>>> In case another value is required, this value can be changed via
sysfs.
>>>>>> You are right that this affects the write throughput a bit but the
goal
>>>>>> of
>>>>>> this algorithm is to make sure the read throughput and latency are
not
>>>>>> decreased due to write. If this is not the desired result, this
algorithm
>>>>>> can be disabled.
>>>>>> >> +
>>>>>> >> +}
>>>>>> >> +
>>>>>> >> ?static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct
>>>>>> request
>>>>>> >> *req)
>>>>>> >> ?{
>>>>>> >> ? struct request_queue *q = mq->queue;
>>>>>> >> @@ -1332,6 +1407,9 @@ static u8 mmc_blk_prep_packed_list(struct
mmc_queue *mq, struct request *req)
>>>>>> >> ? ? ? ? ? ? ? ? ? !card->ext_csd.packed_event_en)
>>>>>> >> ? ? ? ? ? goto no_packed;
>>>>>> >>
>>>>>> >> + if (!mq->wr_packing_enabled)
>>>>>> >> + ? ? ? ? goto no_packed;
>>>>>> > If wr_packing_enabled is set to true, several write requests can
>>>>>> be
>>>>>> > packed.
>>>>>> > We don't need to consider read request since packed write?
>>>>>> I'm not sure I understand the question. We check if there was a
read
>>>>>> request in the mmc_blk_write_packing_control, and in such a case
set
>>>>>> mq->wr_packing_enabled to false.
>>>>>> If I didn't answer the question, please explain it again.
>>>>> Packed write can be possible after exceeding 17 requests.
>>>>> Is it assured that read request doesn't follow immediately after
packed
>>>>> write?
>>>>> I wonder this case.
>>>> Currently in such a case we will send the packed command followed by
the
>>>> read request. The latency of this read request will be high due to
waiting
>>>> for the completion of the packed write. However, since we will
disable
>>>> the
>>>> write packing, the latency of the following read requests will be
low.
>>>> We are working on a solution where the read request will bypass the
write
>>>> requests in such a case. This change requires modification of the
scheduler in order to re-insert the write requests to the scheduler.
>>> Thats the precise reason for using foreground HPI (shameless plug :-))
I understand the intent of write packing control, but using the number of
requests
>>> as a metric is too coarse. Some writes could be for only one sector
(512B) and others
>>> could be in 512KB or more, giving a 1000x variance.
>>> Foreground HPI solves this problem by interrupting only on a wait
threshold.
>>> Another aspect is that if a packed write is in progress, and you have
a read request,
>>> you will most likely disable packing for the _next_ write, not the
ongoing one, right ?
>>> That's too late an intervention IMHO.
>> If a write request is in progress and a read is fetched we plan to use
HPI
>> to stop it and re-insert the remainder of the write packed command back
to
>> the scheduler for a later dispatch.
> IIUC, there were 2 reasons mentioned by you for introducing write
packing control -
> 1) Read bandwidth drop
> 2) Use case "latency" or if I were to guess, "sluggish UI".
>
> So if (2) is solved by HPI, we can investigate the reason for (1) and
fix that, rather
> than adding another functionality (which belongs in the I/O scheduler
anyway) to MMC.
According to our measurements the stop transmission (CMD12) + HPI is a
heavy operation that takes several milliseconds. Intensive usage of HPI
will cause degradation of the performance.
When there is a packed write followed by a read request, we will stop the
packed write and issue the read request. Disabling the write packing due
to this read request (by the packing control function) will eliminate the
need for additional HPI operation to stop the next packed write request,
in case of a flow of read requests. When the flow of read requests ends,
the write packing will be enabled again once there is a flow of write
requests.

Regarding the degradation of the read throughput in case of mix read/write
operations:
Our test showed that there is a degradation of ~40% in the read throughput
in mix read/write operations even without packing. Enabling the write
packing increases this degradation to ~60%. Those numbers are applicable
to CFQ scheduler while the deadline scheduler resulted in even lower read
throughput in mix operations.
While investigating this degradation we found out that the main cause for
it is the way the application issues the read requests and the policy of
the scheduler.
Therefore, the write packing control is only one piece of the complete
solution that we are working on. The complete solution will be released in
a later phase and will include:
- Usage of stop transmission in order to stop a packed write in case of a
read request
- Issue a read request that is fetched at the end of a packed list
preparation before the packed write
- The ability to re-insert the write requests back to the scheduler in
case of the above cases (re-queuing them back to the dispatch queue will
not give a real solution in case of a flow of read requests).
- Modify the scheduler to ensure preferring of reads over writes in mix
read/write scenarios

I hope this makes things a bit clearer. Let me know if you have additional
questions.
>
>> Regarding the packing control trigger, we also tried using a trigger of
an
>> amount of write bytes between read. However, the number of potential
packed requests seemed like the reasonable trigger since we would like to
>> activate the packing only when it will be beneficial, regardless of the
write requests sizes.
> Why ? How do you know "when it will be beneficial" ? As I mentioned, the
number of
> blocks per request would vary over time, and also depends on the
filesystem. OTOH, even small
> writes could take a lot longer than usual (>500ms) due to garbage
collection etc.
>
Based on our experiments the write packing is mostly beneficial in long
sequential write scenarios. Therefore, we would still like the write
packing to be enabled in such cases (as long as there are no parallel
reads). The trigger of number of potential packed requests ensures that
the packing will be enabled in case of a flow of writes.

Can you please explain why you refer to the number of blocks per request
when the write packing solution doesn't take into account the request
size?

Thanks,
Maya Erez

--
Sent by consultant of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum




2012-06-12 20:08:13

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


On Mon, June 11, 2012 5:28 pm, Muthu Kumar wrote:
> On Mon, Jun 11, 2012 at 2:19 PM, Muthu Kumar <[email protected]> wrote:
>> On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]> wrote:
>>> The write packing control will ensure that read requests latency is
>>> not increased due to long write packed commands.
>>>
>>> The trigger for enabling the write packing is managing to pack several
>>> write requests. The number of potential packed requests that will
>>> trigger
>>> the packing can be configured via sysfs by writing the required value
>>> to:
>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>> The trigger for disabling the write packing is fetching a read request.
>>>
>>
>> If it is applicable only to MMC why do we have this sysfs attr for all
>> block devices?
>
> Just to be clear, please create a directory, say mmc, under
> /sys/block/<dev>/ and create the attr inside that.
>
> You can refer to dm (dm-sysfs.c) for sample implementation.
>
> Regards,
> Muthu
>

I will apply this change in the next patch.

Thanks,
Maya Erez

--
Sent by consultant of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-13 19:53:04

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


On Mon, June 11, 2012 5:28 pm, Muthu Kumar wrote:
> On Mon, Jun 11, 2012 at 2:19 PM, Muthu Kumar <[email protected]> wrote:
>> On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]> wrote:
>>> The write packing control will ensure that read requests latency is
>>> not increased due to long write packed commands.
>>>
>>> The trigger for enabling the write packing is managing to pack several
>>> write requests. The number of potential packed requests that will
>>> trigger
>>> the packing can be configured via sysfs by writing the required value
>>> to:
>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>> The trigger for disabling the write packing is fetching a read request.
>>>
>>
>> If it is applicable only to MMC why do we have this sysfs attr for all
>> block devices?
>
> Just to be clear, please create a directory, say mmc, under
> /sys/block/<dev>/ and create the attr inside that.
>
> You can refer to dm (dm-sysfs.c) for sample implementation.
>
> Regards,
> Muthu
>
Hi Muthu,

I released a new version of this patch which doesn't include this change yet.

I understand why you think it would be best to distinguish the MMC
specific attribute from the general block devices attributes.
However, since this attribute is created only for the MMC block device,
other block devices won't be aware of it. Therefore, it doesn't
necessarily require a separation to a different folder.
Currently there is another MMC specific attribute (force_ro) which is also
created in the root directory. I think it would be better to also create
the num_wr_reqs_to_start_packing in the same folder as force_ro and not
make it an exceptional attribute in its location and the code that handles
it.
I would appreciate your opinion on that.

Thanks,
Maya
--
Sent by consultant of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-06-13 22:21:54

by Muthu Kumar

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Wed, Jun 13, 2012 at 12:52 PM, <[email protected]> wrote:
>
> On Mon, June 11, 2012 5:28 pm, Muthu Kumar wrote:
>> On Mon, Jun 11, 2012 at 2:19 PM, Muthu Kumar <[email protected]> wrote:
>>> On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]> wrote:
>>>> trigger
>>>> the packing can be configured via sysfs by writing the required value
>>>> to:
>>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>> The trigger for disabling the write packing is fetching a read request.
>>>>
>>>
>>> If it is applicable only to MMC why do we have this sysfs attr for all
>>> block devices?
>>
>> Just to be clear, please create a directory, say mmc, under
>> /sys/block/<dev>/ and create the attr inside that.
>>
>> You can refer to dm (dm-sysfs.c) for sample implementation.
> I understand why you think it would be best to distinguish the MMC
> specific attribute from the general block devices attributes.
> However, since this attribute is created only for the MMC block device,
> other block devices won't be aware of it.

I understand its created by the MMC code so will not be there for
other block devices. But having the device specific attributes inside
one <device> directory is better/cleaner. And since we are already
following that model for other devices, why not follow that for MMC
also?

> Therefore, it doesn't
> necessarily require a separation to a different folder.
> Currently there is another MMC specific attribute (force_ro) which is also
> created in the root directory. I think it would be better to also create
> the num_wr_reqs_to_start_packing in the same folder as force_ro and not
> make it an exceptional attribute in its location and the code that handles
> it.

Then time to move that as well to "mmc" directory.

Regards,
Muthu


> I would appreciate your opinion on that.
>
> Thanks,
> Maya
> --
> Sent by consultant of Qualcomm Innovation Center, Inc.
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
>

2012-06-14 07:46:33

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


On Wed, June 13, 2012 3:21 pm, Muthu Kumar wrote:
> On Wed, Jun 13, 2012 at 12:52 PM, <[email protected]> wrote:
>>
>> On Mon, June 11, 2012 5:28 pm, Muthu Kumar wrote:
>>> On Mon, Jun 11, 2012 at 2:19 PM, Muthu Kumar <[email protected]>
>>> wrote:
>>>> On Fri, Jun 1, 2012 at 11:55 AM, Maya Erez <[email protected]>
>>>> wrote:
>>>>> trigger
>>>>> the packing can be configured via sysfs by writing the required value
>>>>> to:
>>>>> /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
>>>>> The trigger for disabling the write packing is fetching a read
>>>>> request.
>>>>>
>>>>
>>>> If it is applicable only to MMC why do we have this sysfs attr for all
>>>> block devices?
>>>
>>> Just to be clear, please create a directory, say mmc, under
>>> /sys/block/<dev>/ and create the attr inside that.
>>>
>>> You can refer to dm (dm-sysfs.c) for sample implementation.
>> I understand why you think it would be best to distinguish the MMC
>> specific attribute from the general block devices attributes.
>> However, since this attribute is created only for the MMC block device,
>> other block devices won't be aware of it.
>
> I understand its created by the MMC code so will not be there for
> other block devices. But having the device specific attributes inside
> one <device> directory is better/cleaner. And since we are already
> following that model for other devices, why not follow that for MMC
> also?
>
>> Therefore, it doesn't
>> necessarily require a separation to a different folder.
>> Currently there is another MMC specific attribute (force_ro) which is
>> also
>> created in the root directory. I think it would be better to also create
>> the num_wr_reqs_to_start_packing in the same folder as force_ro and not
>> make it an exceptional attribute in its location and the code that
>> handles
>> it.
>
> Then time to move that as well to "mmc" directory.
>
> Regards,
> Muthu

I will make this change for the new attribute and for force_ro as well.

Thanks,
Maya

>
>
>> I would appreciate your opinion on that.
>>
>> Thanks,
>> Maya
>> --
>> Sent by consultant of Qualcomm Innovation Center, Inc.
>> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum
>>
>


--
Sent by consultant of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-08-27 18:28:24

by Maya Erez

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control


On Fri, July 27, 2012 2:07 am, S, Venkatraman wrote:
> On Fri, Jul 27, 2012 at 12:24 AM, <[email protected]> wrote:
>>
>> On Thu, July 26, 2012 8:28 am, S, Venkatraman wrote:
>>> On Tue, Jul 24, 2012 at 2:14 PM, <[email protected]> wrote:
>>>> On Mon, July 23, 2012 5:22 am, S, Venkatraman wrote:
>>>>> On Mon, Jul 23, 2012 at 5:13 PM, <[email protected]> wrote:
>>>>>> On Wed, July 18, 2012 12:26 am, Chris Ball wrote:
>>>>>>> Hi, [removing Jens and the documentation list, since now we're
>>>> talking about the MMC side only]
>>>>>>> On Wed, Jul 18 2012, [email protected] wrote:
>>>>>>>> Is there anything else that holds this patch from being pushed to
>>>>>> mmc-next?
>>>>>>> Yes, I'm still uncomfortable with the write packing patchsets for a
>>>>>> couple of reasons, and I suspect that the sum of those reasons means
>>>>>> that
>>>>>> we should probably plan on holding off merging it until after 3.6.
>>>>>>> Here are the open issues; please correct any misunderstandings:
>>>>>>> With
>>>> Seungwon's patchset ("Support packed write command"):
>>>>>>> * I still don't have a good set of representative benchmarks
>>>>>>> showing
>>>>>>> what kind of performance changes come with this patchset. It
>>>>>>> seems
>>>>>> like we've had a small amount of testing on one controller/eMMC part
>>>>>> combo
>>>>>> from Seungwon, and an entirely different test from Maya, and the
>>>> results
>>>>>> aren't documented fully anywhere to the level of describing what the
>>>> hardware was, what the test was, and what the results were before and
>>>> after the patchset.
>>>>>> Currently, there is only one card vendor that supports packed
>>>>>> commands.
>>>> Following are our sequential write (LMDD) test results on 2 of our
>>>> targets
>>>>>> (in MB/s):
>>>>>> No packing packing
>>>>>> Target 1 (SDR 50MHz) 15 25
>>>>>> Target 2 (DDR 50MHz) 20 30
>>>>>>> With the reads-during-writes regression:
>>>>>>> * Venkat still has open questions about the nature of the read
>>>>>>> regression, and thinks we should understand it with blktrace
>>>>>>> before
>>>>>> trying to fix it. Maya has a theory about writes overwhelming
>>>>>> reads,
>>>>>> but
>>>>>> Venkat doesn't understand why this would explain the observed
>>>>>> bandwidth drop.
>>>>>> The degradation of read due to writes is not a new behavior and
>>>>>> exists
>>>> also without the write packing feature (which only increases the
>>>> degradation). Our investigation of this phenomenon led us to the
>>>> conclusion that a new scheduling policy should be used for mobile
>>>> devices,
>>>>>> but this is not related to the current discussion of the write
>>>>>> packing
>>>> feature.
>>>>>> The write packing feature increases the degradation of read due to
>>>> write
>>>>>> since it allows the MMC to fetch many write requests in a row,
>>>>>> instead
>>>>>> of
>>>>>> fetching only one at a time. Therefore some of the read requests
>>>>>> will
>>>> have to wait for the completion of more write requests before they can
>>>> be
>>>>>> issued.
>>>>>
>>>>> I am a bit puzzled by this claim. One thing I checked carefully when
>>>> reviewing write packing patches from SJeon was that the code didn't
>>>> plough through a mixed list of reads and writes and selected only
>>>> writes.
>>>>> This section of the code in "mmc_blk_prep_packed_list()", from v8
>>>> patchset..
>>>>> <Quote>
>>>>> + if (rq_data_dir(cur) != rq_data_dir(next)) {
>>>>> + put_back = 1;
>>>>> + break;
>>>>> + }
>>>>> </Quote>
>>>>>
>>>>> means that once a read is encountered in the middle of write packing,
>>>> the packing is stopped at that point and it is executed. Then the next
>>>> blk_fetch_request should get the next read and continue as before.
>>>>>
>>>>> IOW, the ordering of reads and writes is _not_ altered when using
>>>>> packed
>>>> commands.
>>>>> For example if there were 5 write requests, followed by 1 read,
>>>>> followed by 5 more write requests in the request_queue, the first 5
>>>> writes will be executed as one "packed command", then the read will be
>>>> executed, and then the remaining 5 writes will be executed as one
>>>> "packed command". So the read does not have to wait any more than it
>>>> waited before (packing feature)
>>>>
>>>> Let me try to better explain with your example.
>>>> Without packing the MMC layer will fetch 2 write requests and wait for
>>>> the
>>>> first write request completion before fetching another write request.
>>>> During this time the read request could be inserted into the CFQ and
>>>> since
>>>> it has higher priority than the async write it will be dispatched in
>>>> the
>>>> next fetch. So, the result would be 2 write requests followed by one
>>>> read
>>>> request and the read would have to wait for completion of only 2 write
>>>> requests.
>>>> With packing, all the 5 write requests will be fetched in a row, and
>>>> then
>>>> the read will arrive and be dispatched in the next fetch. Then the
>>>> read
>>>> will have to wait for the completion of 5 write requests.
>>>>
>>>> Few more clarifications:
>>>> Due to the plug list mechanism in the block layer the applications can
>>>> "aggregate" several requests to be inserted into the scheduler before
>>>> waking the MMC queue thread.
>>>> This leads to a situation where there are several write requests in
>>>> the
>>>> CFQ queue when MMC starts to do the fetches.
>>>>
>>>> If the read was inserted while we are building the packed command then
>>>> I
>>>> agree that we should have seen less effect on the read performance.
>>>> However, the write packing statistics show that in most of the cases
>>>> the
>>>> packing stopped due to an empty queue, meaning that the read was
>>>> inserted
>>>> to the CFQ after all the pending write requests were fetched and
>>>> packed.
>>>>
>>>> Following is an example for write packing statistics of a READ/WRITE
>>>> parallel scenario:
>>>> write packing statistics:
>>>> Packed 1 reqs - 448 times
>>>> Packed 2 reqs - 38 times
>>>> Packed 3 reqs - 23 times
>>>> Packed 4 reqs - 30 times
>>>> Packed 5 reqs - 14 times
>>>> Packed 6 reqs - 8 times
>>>> Packed 7 reqs - 4 times
>>>> Packed 8 reqs - 1 times
>>>> Packed 10 reqs - 1 times
>>>> Packed 34 reqs - 1 times
>>>> stopped packing due to the following reasons:
>>>> 2 times: wrong data direction (meaning a READ was fetched and stopped
>>>> the
>>>> packing)
>>>> 1 times: flush or discard
>>>> 565 times: empty queue (meaning blk_fetch_request returned NULL)
>>>>
>>>>>
>>>>> And I requested blktrace to confirm that this is indeed the
>>>>> behaviour.
>>>>
>>>> The trace logs show that in case of no packing, there are maximum of
>>>> 3-4
>>>> requests issued before a read request, while with packing there are
>>>> also
>>>> cases of 6 and 7 requests dispatched before a read request.
>>>>
>>>> I'm waiting for an approval for sharing the block trace logs.
>>>> Since this is a simple test to run you can collect the trace logs and
>>>> let
>>>> us know if you reach other conclusions.
>>>>
>>> Thanks for the brief. I don't have the eMMC4.5 device with me yet, so
>>> I can't reproduce the result.
>>
>> I sent the trace logs of both packing and non packing. Please let me
>> know
>> if you have additional questions after reviewing them.
>>
>> The problem you describe is most likely
>>> applicable
>>> to any block device driver with a large queue depth ( any queue depth
>>> >1).
>>> I'll check to see what knobs in block affect the result.
>>> Speaking of it, what is the host controller you use to test this ?
>>
>> The controller I use is msm_sdcc.
>>
>>> I was wondering if host->max_seg_size is taken into account while
>>> packed
>>> command
>>> is in use. If not, shouldn't it be ? - it could act as a better
>>> throttle for "packing density".
>>
>> The max segments (which is calculated from host->max_seg_size) is taken
>> into account when preparing the packed list (so that the whole packed
>> won't exceed the max number of segments).
>> I'm not sure I understand how host->max_seg_size can be used as a
>> throttle
>> for "packing density". Can you please explain?
>>
> Ok - I overlooked that max_segments is indeed used to limit the number
> of requests
> that are packed.(And this corresponds to max_seg_size, which is what I
> intended)
> I should be getting my MMC4.5 test gear in a couple of days - I'll run
> it through
> on some hosts and can either provide more feedback or Ack this patch.
> Regards,
> Venkat.

Hi Venkat,

Do you have additional questions/comments?

Thanks,
Maya
--
Sent by consultant of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum

2012-08-28 17:41:00

by Venkatraman S

[permalink] [raw]
Subject: Re: [PATCH v2 1/1] mmc: block: Add write packing control

On Mon, Aug 27, 2012 at 11:58 PM, <[email protected]> wrote:
>
> On Fri, July 27, 2012 2:07 am, S, Venkatraman wrote:
>> On Fri, Jul 27, 2012 at 12:24 AM, <[email protected]> wrote:
>>>
>>> On Thu, July 26, 2012 8:28 am, S, Venkatraman wrote:
>>>> On Tue, Jul 24, 2012 at 2:14 PM, <[email protected]> wrote:
>>>>> On Mon, July 23, 2012 5:22 am, S, Venkatraman wrote:
>>>>>> On Mon, Jul 23, 2012 at 5:13 PM, <[email protected]> wrote:
>>>>>>> On Wed, July 18, 2012 12:26 am, Chris Ball wrote:
>>>>>>>> Hi, [removing Jens and the documentation list, since now we're
>>>>> talking about the MMC side only]
>>>>>>>> On Wed, Jul 18 2012, [email protected] wrote:
>>>>>>>>> Is there anything else that holds this patch from being pushed to
>>>>>>> mmc-next?
>>>>>>>> Yes, I'm still uncomfortable with the write packing patchsets for a
>>>>>>> couple of reasons, and I suspect that the sum of those reasons means
>>>>>>> that
>>>>>>> we should probably plan on holding off merging it until after 3.6.
>>>>>>>> Here are the open issues; please correct any misunderstandings:
>>>>>>>> With
>>>>> Seungwon's patchset ("Support packed write command"):
>>>>>>>> * I still don't have a good set of representative benchmarks
>>>>>>>> showing
>>>>>>>> what kind of performance changes come with this patchset. It
>>>>>>>> seems
>>>>>>> like we've had a small amount of testing on one controller/eMMC part
>>>>>>> combo
>>>>>>> from Seungwon, and an entirely different test from Maya, and the
>>>>> results
>>>>>>> aren't documented fully anywhere to the level of describing what the
>>>>> hardware was, what the test was, and what the results were before and
>>>>> after the patchset.
>>>>>>> Currently, there is only one card vendor that supports packed
>>>>>>> commands.
>>>>> Following are our sequential write (LMDD) test results on 2 of our
>>>>> targets
>>>>>>> (in MB/s):
>>>>>>> No packing packing
>>>>>>> Target 1 (SDR 50MHz) 15 25
>>>>>>> Target 2 (DDR 50MHz) 20 30
>>>>>>>> With the reads-during-writes regression:
>>>>>>>> * Venkat still has open questions about the nature of the read
>>>>>>>> regression, and thinks we should understand it with blktrace
>>>>>>>> before
>>>>>>> trying to fix it. Maya has a theory about writes overwhelming
>>>>>>> reads,
>>>>>>> but
>>>>>>> Venkat doesn't understand why this would explain the observed
>>>>>>> bandwidth drop.
>>>>>>> The degradation of read due to writes is not a new behavior and
>>>>>>> exists
>>>>> also without the write packing feature (which only increases the
>>>>> degradation). Our investigation of this phenomenon led us to the
>>>>> conclusion that a new scheduling policy should be used for mobile
>>>>> devices,
>>>>>>> but this is not related to the current discussion of the write
>>>>>>> packing
>>>>> feature.
>>>>>>> The write packing feature increases the degradation of read due to
>>>>> write
>>>>>>> since it allows the MMC to fetch many write requests in a row,
>>>>>>> instead
>>>>>>> of
>>>>>>> fetching only one at a time. Therefore some of the read requests
>>>>>>> will
>>>>> have to wait for the completion of more write requests before they can
>>>>> be
>>>>>>> issued.
>>>>>>
>>>>>> I am a bit puzzled by this claim. One thing I checked carefully when
>>>>> reviewing write packing patches from SJeon was that the code didn't
>>>>> plough through a mixed list of reads and writes and selected only
>>>>> writes.
>>>>>> This section of the code in "mmc_blk_prep_packed_list()", from v8
>>>>> patchset..
>>>>>> <Quote>
>>>>>> + if (rq_data_dir(cur) != rq_data_dir(next)) {
>>>>>> + put_back = 1;
>>>>>> + break;
>>>>>> + }
>>>>>> </Quote>
>>>>>>
>>>>>> means that once a read is encountered in the middle of write packing,
>>>>> the packing is stopped at that point and it is executed. Then the next
>>>>> blk_fetch_request should get the next read and continue as before.
>>>>>>
>>>>>> IOW, the ordering of reads and writes is _not_ altered when using
>>>>>> packed
>>>>> commands.
>>>>>> For example if there were 5 write requests, followed by 1 read,
>>>>>> followed by 5 more write requests in the request_queue, the first 5
>>>>> writes will be executed as one "packed command", then the read will be
>>>>> executed, and then the remaining 5 writes will be executed as one
>>>>> "packed command". So the read does not have to wait any more than it
>>>>> waited before (packing feature)
>>>>>
>>>>> Let me try to better explain with your example.
>>>>> Without packing the MMC layer will fetch 2 write requests and wait for
>>>>> the
>>>>> first write request completion before fetching another write request.
>>>>> During this time the read request could be inserted into the CFQ and
>>>>> since
>>>>> it has higher priority than the async write it will be dispatched in
>>>>> the
>>>>> next fetch. So, the result would be 2 write requests followed by one
>>>>> read
>>>>> request and the read would have to wait for completion of only 2 write
>>>>> requests.
>>>>> With packing, all the 5 write requests will be fetched in a row, and
>>>>> then
>>>>> the read will arrive and be dispatched in the next fetch. Then the
>>>>> read
>>>>> will have to wait for the completion of 5 write requests.
>>>>>
>>>>> Few more clarifications:
>>>>> Due to the plug list mechanism in the block layer the applications can
>>>>> "aggregate" several requests to be inserted into the scheduler before
>>>>> waking the MMC queue thread.
>>>>> This leads to a situation where there are several write requests in
>>>>> the
>>>>> CFQ queue when MMC starts to do the fetches.
>>>>>
>>>>> If the read was inserted while we are building the packed command then
>>>>> I
>>>>> agree that we should have seen less effect on the read performance.
>>>>> However, the write packing statistics show that in most of the cases
>>>>> the
>>>>> packing stopped due to an empty queue, meaning that the read was
>>>>> inserted
>>>>> to the CFQ after all the pending write requests were fetched and
>>>>> packed.
>>>>>
>>>>> Following is an example for write packing statistics of a READ/WRITE
>>>>> parallel scenario:
>>>>> write packing statistics:
>>>>> Packed 1 reqs - 448 times
>>>>> Packed 2 reqs - 38 times
>>>>> Packed 3 reqs - 23 times
>>>>> Packed 4 reqs - 30 times
>>>>> Packed 5 reqs - 14 times
>>>>> Packed 6 reqs - 8 times
>>>>> Packed 7 reqs - 4 times
>>>>> Packed 8 reqs - 1 times
>>>>> Packed 10 reqs - 1 times
>>>>> Packed 34 reqs - 1 times
>>>>> stopped packing due to the following reasons:
>>>>> 2 times: wrong data direction (meaning a READ was fetched and stopped
>>>>> the
>>>>> packing)
>>>>> 1 times: flush or discard
>>>>> 565 times: empty queue (meaning blk_fetch_request returned NULL)
>>>>>
>>>>>>
>>>>>> And I requested blktrace to confirm that this is indeed the
>>>>>> behaviour.
>>>>>
>>>>> The trace logs show that in case of no packing, there are maximum of
>>>>> 3-4
>>>>> requests issued before a read request, while with packing there are
>>>>> also
>>>>> cases of 6 and 7 requests dispatched before a read request.
>>>>>
>>>>> I'm waiting for an approval for sharing the block trace logs.
>>>>> Since this is a simple test to run you can collect the trace logs and
>>>>> let
>>>>> us know if you reach other conclusions.
>>>>>
>>>> Thanks for the brief. I don't have the eMMC4.5 device with me yet, so
>>>> I can't reproduce the result.
>>>
>>> I sent the trace logs of both packing and non packing. Please let me
>>> know
>>> if you have additional questions after reviewing them.
>>>
>>> The problem you describe is most likely
>>>> applicable
>>>> to any block device driver with a large queue depth ( any queue depth
>>>> >1).
>>>> I'll check to see what knobs in block affect the result.
>>>> Speaking of it, what is the host controller you use to test this ?
>>>
>>> The controller I use is msm_sdcc.
>>>
>>>> I was wondering if host->max_seg_size is taken into account while
>>>> packed
>>>> command
>>>> is in use. If not, shouldn't it be ? - it could act as a better
>>>> throttle for "packing density".
>>>
>>> The max segments (which is calculated from host->max_seg_size) is taken
>>> into account when preparing the packed list (so that the whole packed
>>> won't exceed the max number of segments).
>>> I'm not sure I understand how host->max_seg_size can be used as a
>>> throttle
>>> for "packing density". Can you please explain?
>>>
>> Ok - I overlooked that max_segments is indeed used to limit the number
>> of requests
>> that are packed.(And this corresponds to max_seg_size, which is what I
>> intended)
>> I should be getting my MMC4.5 test gear in a couple of days - I'll run
>> it through
>> on some hosts and can either provide more feedback or Ack this patch.
>> Regards,
>> Venkat.
>
> Hi Venkat,
>
> Do you have additional questions/comments?
>
None. I am just running some stress tests on BKOPS patches right now
and after tomorrow I'll start testing packed command. Will all the
patches apply on top of current mmc-next ? If not, it would be great
if
you can send an updated version.

Thanks,
Venkat.