2024-03-05 21:01:54

by Avri Altman

Subject: [PATCH v2 1/4] scsi: ufs: Re-use device management locking code

Group the three calls that are repeated for every device management
command into lock and unlock handlers.

Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Avri Altman <[email protected]>
---
drivers/ufs/core/ufshcd.c | 69 ++++++++++++++++++---------------------
1 file changed, 32 insertions(+), 37 deletions(-)
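
For reference, a condensed sketch of the change (extracted from the diff
below and trimmed; shown only to illustrate the pattern, not meant to be
applied):

    /* New helpers: take/release everything a device management command needs. */
    static void ufshcd_dev_man_lock(struct ufs_hba *hba)
    {
            ufshcd_hold(hba);
            mutex_lock(&hba->dev_cmd.lock);
            down_read(&hba->clk_scaling_lock);
    }

    static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
    {
            up_read(&hba->clk_scaling_lock);
            mutex_unlock(&hba->dev_cmd.lock);
            ufshcd_release(hba);
    }

    /*
     * Callers such as ufshcd_query_flag() or ufshcd_verify_dev_init() then
     * replace the open-coded hold/lock ... unlock/release sequence with:
     */
    ufshcd_dev_man_lock(hba);
    /* ... compose and issue the device management command ... */
    ufshcd_dev_man_unlock(hba);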

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 21429eec1b82..983b7b8e3c7c 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3272,6 +3272,20 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
return err;
}

+static void ufshcd_dev_man_lock(struct ufs_hba *hba)
+{
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
+{
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+}
+
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
@@ -3294,8 +3308,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);

- down_read(&hba->clk_scaling_lock);
-
lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -3312,7 +3324,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
- up_read(&hba->clk_scaling_lock);
return err;
}

@@ -3383,8 +3394,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,

BUG_ON(!hba);

- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);

@@ -3426,8 +3437,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}

@@ -3457,9 +3467,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}

- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);

- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);

@@ -3489,8 +3498,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}

@@ -3553,9 +3561,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}

- ufshcd_hold(hba);
+ ufshcd_dev_man_lock(hba);

- mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
hba->dev_cmd.query.descriptor = desc_buf;
@@ -3588,8 +3595,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,

out_unlock:
hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
return err;
}

@@ -5070,8 +5076,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;

- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
+
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
hba->nop_out_timeout);
@@ -5081,8 +5087,8 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)

dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+
+ ufshcd_dev_man_unlock(hba);

if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -7209,8 +7215,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);

- down_read(&hba->clk_scaling_lock);
-
lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
lrbp->task_tag = tag;
@@ -7275,7 +7279,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

- up_read(&hba->clk_scaling_lock);
return err;
}

@@ -7314,13 +7317,11 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);

break;
case UPIU_TRANSACTION_TASK_REQ:
@@ -7380,9 +7381,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;

/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba);
- mutex_lock(&hba->dev_cmd.lock);
- down_read(&hba->clk_scaling_lock);
+ ufshcd_dev_man_lock(hba);

lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
@@ -7448,9 +7447,8 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
}
}

- up_read(&hba->clk_scaling_lock);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
+
return err ? : result;
}

@@ -8713,9 +8711,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
if (dev_info->wspecversion < 0x400)
return;

- ufshcd_hold(hba);
-
- mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_dev_man_lock(hba);

ufshcd_init_query(hba, &request, &response,
UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -8733,8 +8729,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
__func__, err);

- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_dev_man_unlock(hba);
}

/**
--
2.42.0



2024-03-06 21:21:34

by Bean Huo

Subject: Re: [PATCH v2 1/4] scsi: ufs: Re-use device management locking code

On Tue, 2024-03-05 at 23:00 +0200, Avri Altman wrote:
> Group the three calls that are repeated for every device management
> command into lock and unlock handlers.
>
> Reviewed-by: Bart Van Assche <[email protected]>
> Signed-off-by: Avri Altman <[email protected]>

Reviewed-by: Bean Huo <[email protected]>