Cards with 4k native sector size may only be read 4k-aligned;
accommodate this in the single-read recovery path and use it.
Fixes: 81196976ed946 (mmc: block: Add blk-mq support)
Signed-off-by: Christian Loehle <[email protected]>
---
drivers/mmc/core/block.c | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f4a1281658db..a75a208ce203 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
@@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1372,8 +1372,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = mmc_large_sector(card) ? 8 : 1;
/*
* Some controllers have HW issues while operating
@@ -1590,7 +1590,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1599,7 +1599,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1690,7 +1690,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1698,6 +1698,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
+ size_t bytes_per_read = mmc_large_sector(card) ? 4096 : 512;
do {
u32 status;
@@ -1732,13 +1733,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1879,10 +1880,8 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks > 1) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
--
2.36.1
Hyperstone GmbH | Reichenaustr. 39a | 78467 Konstanz
Managing Director: Dr. Jan Peter Berns.
Commercial register of local courts: Freiburg HRB381782
On 24/06/22 18:29, Christian Löhle wrote:
> Cards with 4k native sector size may only be read 4k-aligned;
> accommodate this in the single-read recovery path and use it.
Thanks for the patch.
>
> Fixes: 81196976ed946 (mmc: block: Add blk-mq support)
> Signed-off-by: Christian Loehle <[email protected]>
FYI checkpatch says:
WARNING: From:/Signed-off-by: email name mismatch: 'From: "Christian Löhle" <[email protected]>' != 'Signed-off-by: Christian Loehle <[email protected]>'
> ---
> drivers/mmc/core/block.c | 25 ++++++++++++-------------
> 1 file changed, 12 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index f4a1281658db..a75a208ce203 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
> unsigned int part_type);
> static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
> struct mmc_card *card,
> - int disable_multi,
> + int recovery_mode,
> struct mmc_queue *mq);
> static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
>
> @@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
> }
>
> static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
> - int disable_multi, bool *do_rel_wr_p,
> + int recovery_mode, bool *do_rel_wr_p,
> bool *do_data_tag_p)
> {
> struct mmc_blk_data *md = mq->blkdata;
> @@ -1372,8 +1372,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
> * at a time in order to accurately determine which
> * sectors can be read successfully.
> */
> - if (disable_multi)
> - brq->data.blocks = 1;
> + if (recovery_mode)
> + brq->data.blocks = mmc_large_sector(card) ? 8 : 1;
I suggest changing to use queue_physical_block_size() here and further below
brq->data.blocks = queue_physical_block_size(req->q) >> SECTOR_SHIFT;
>
> /*
> * Some controllers have HW issues while operating
> @@ -1590,7 +1590,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
>
> static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
> struct mmc_card *card,
> - int disable_multi,
> + int recovery_mode,
> struct mmc_queue *mq)
> {
> u32 readcmd, writecmd;
> @@ -1599,7 +1599,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
> struct mmc_blk_data *md = mq->blkdata;
> bool do_rel_wr, do_data_tag;
>
> - mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
> + mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
>
> brq->mrq.cmd = &brq->cmd;
>
> @@ -1690,7 +1690,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
>
> #define MMC_READ_SINGLE_RETRIES 2
>
> -/* Single sector read during recovery */
> +/* Single (native) sector read during recovery */
> static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
> {
> struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
> @@ -1698,6 +1698,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
> struct mmc_card *card = mq->card;
> struct mmc_host *host = card->host;
> blk_status_t error = BLK_STS_OK;
> + size_t bytes_per_read = mmc_large_sector(card) ? 4096 : 512;
size_t bytes_per_read = queue_physical_block_size(req->q);
>
> do {
> u32 status;
> @@ -1732,13 +1733,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
> else
> error = BLK_STS_OK;
>
> - } while (blk_update_request(req, error, 512));
> + } while (blk_update_request(req, error, bytes_per_read));
>
> return;
>
> error_exit:
> mrq->data->bytes_xfered = 0;
> - blk_update_request(req, BLK_STS_IOERR, 512);
> + blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
> /* Let it try the remaining request again */
> if (mqrq->retries > MMC_MAX_RETRIES - 1)
> mqrq->retries = MMC_MAX_RETRIES - 1;
> @@ -1879,10 +1880,8 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
> return;
> }
>
> - /* FIXME: Missing single sector read for large sector size */
> - if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
> - brq->data.blocks > 1) {
> - /* Read one sector at a time */
> + if (rq_data_dir(req) == READ && brq->data.blocks > 1) {
> + /* Read one (native) sector at a time */
> mmc_blk_read_single(mq, req);
> return;
> }
> Cards with 4k native sector size may only be read 4k-aligned;
>> accommodate this in the single-read recovery path and use it.
>
>Thanks for the patch.
>
>>
>> Fixes: 81196976ed946 (mmc: block: Add blk-mq support)
>> Signed-off-by: Christian Loehle <[email protected]>
>
>FYI checkpatch says:
>
>WARNING: From:/Signed-off-by: email name mismatch: 'From: "Christian Löhle" <[email protected]>' != 'Signed-off-by: Christian Loehle <[email protected]>'
Will be fixed in my future patches, thanks for the hint.
>
>> ---
>> drivers/mmc/core/block.c | 25 ++++++++++++-------------
>> 1 file changed, 12 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
>> index f4a1281658db..a75a208ce203 100644
>> --- a/drivers/mmc/core/block.c
>> +++ b/drivers/mmc/core/block.c
>> @@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
>> unsigned int part_type);
>> static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
>> struct mmc_card *card,
>> - int disable_multi,
>> + int recovery_mode,
>> struct mmc_queue *mq);
>> static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
>>
>> @@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
>> }
>>
>> static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
>> - int disable_multi, bool *do_rel_wr_p,
>> + int recovery_mode, bool *do_rel_wr_p,
>> bool *do_data_tag_p)
>> {
>> struct mmc_blk_data *md = mq->blkdata;
>> @@ -1372,8 +1372,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
>> * at a time in order to accurately determine which
>> * sectors can be read successfully.
>> */
>> - if (disable_multi)
>> - brq->data.blocks = 1;
>> + if (recovery_mode)
>> + brq->data.blocks = mmc_large_sector(card) ? 8 : 1;
>
>I suggest changing to use queue_physical_block_size() here and further below
I don't feel strongly about this part — I'm not sure it makes the code more readable, and hopefully we never have to support another "native sector size" apart from the two.
Anyway, I will send the next version of the patch using queue_physical_block_size().
>
> brq->data.blocks = queue_physical_block_size(req->q) >> SECTOR_SHIFT;
Do we want to switch to SECTOR_SHIFT instead of 9? So far SECTOR_SHIFT is not used at all in mmc core.
If so I would go ahead and change all the others in another patch:
queue.c:187: q->limits.discard_granularity = card->pref_erase << 9;
core.c:103: data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
mmc.c:792:MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
mmc.c:793:MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
mmc_test.c:1557: sz = (unsigned long)test->card->pref_erase << 9;
mmc_test.c:1570: t->max_tfr = test->card->host->max_blk_count << 9;
mmc_test.c:2461: if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
sd.c:707:MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
sd.c:708:MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
block.c:1417: int i, data_size = brq->data.blocks << 9;
block.c:1851: brq->data.bytes_xfered = blocks << 9;
Hyperstone GmbH | Reichenaustr. 39a | 78467 Konstanz
Managing Director: Dr. Jan Peter Berns.
Commercial register of local courts: Freiburg HRB381782
On 28/06/22 12:08, Christian Löhle wrote:
>> Cards with 4k native sector size may only be read 4k-aligned;
>>> accommodate this in the single-read recovery path and use it.
>>
>> Thanks for the patch.
>>
>>>
>>> Fixes: 81196976ed946 (mmc: block: Add blk-mq support)
>>> Signed-off-by: Christian Loehle <[email protected]>
>>
>> FYI checkpatch says:
>>
>> WARNING: From:/Signed-off-by: email name mismatch: 'From: "Christian Löhle" <[email protected]>' != 'Signed-off-by: Christian Loehle <[email protected]>'
>
> Will be fixed in my future patches, thanks for the hint.
>
>>
>>> ---
>>> drivers/mmc/core/block.c | 25 ++++++++++++-------------
>>> 1 file changed, 12 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
>>> index f4a1281658db..a75a208ce203 100644
>>> --- a/drivers/mmc/core/block.c
>>> +++ b/drivers/mmc/core/block.c
>>> @@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
>>> unsigned int part_type);
>>> static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
>>> struct mmc_card *card,
>>> - int disable_multi,
>>> + int recovery_mode,
>>> struct mmc_queue *mq);
>>> static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
>>>
>>> @@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
>>> }
>>>
>>> static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
>>> - int disable_multi, bool *do_rel_wr_p,
>>> + int recovery_mode, bool *do_rel_wr_p,
>>> bool *do_data_tag_p)
>>> {
>>> struct mmc_blk_data *md = mq->blkdata;
>>> @@ -1372,8 +1372,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
>>> * at a time in order to accurately determine which
>>> * sectors can be read successfully.
>>> */
>>> - if (disable_multi)
>>> - brq->data.blocks = 1;
>>> + if (recovery_mode)
>>> + brq->data.blocks = mmc_large_sector(card) ? 8 : 1;
>>
>> I suggest changing to use queue_physical_block_size() here and further below
>
> I don't feel strongly about this part — I'm not sure it makes the code more readable, and hopefully we never have to support another "native sector size" apart from the two.
> Anyway, I will send the next version of the patch using queue_physical_block_size().
>
>>
>> brq->data.blocks = queue_physical_block_size(req->q) >> SECTOR_SHIFT;
>
> Do we want to switch to SECTOR_SHIFT instead of 9? So far SECTOR_SHIFT is not used at all in mmc core.
I guess '9' is more consistent
> If so I would go ahead and change all the others in another patch:
> queue.c:187: q->limits.discard_granularity = card->pref_erase << 9;
> core.c:103: data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
> mmc.c:792:MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
> mmc.c:793:MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
> mmc_test.c:1557: sz = (unsigned long)test->card->pref_erase << 9;
> mmc_test.c:1570: t->max_tfr = test->card->host->max_blk_count << 9;
> mmc_test.c:2461: if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
> sd.c:707:MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
> sd.c:708:MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
> block.c:1417: int i, data_size = brq->data.blocks << 9;
> block.c:1851: brq->data.bytes_xfered = blocks << 9;
>
>
>
>
> Hyperstone GmbH | Reichenaustr. 39a | 78467 Konstanz
> Managing Director: Dr. Jan Peter Berns.
> Commercial register of local courts: Freiburg HRB381782
...
> >> brq->data.blocks = queue_physical_block_size(req->q) >> SECTOR_SHIFT;
> >
> > Do we want to switch to SECTOR_SHIFT instead of 9? So far SECTOR_SHIFT is not used at all in mmc
> core.
>
> I guess '9' is more consistent
You can just multiply/divide by 512u (or, better, SECTOR_SIZE).
(In some senses 512u is actually clearer!)
It is more obvious still, and the compiler will generate a shift anyway.
David
-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)