From: Hannes Reinecke <[email protected]>
Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
latency tracker to provide latencies for each node, and schedules
I/O on the path with the least latency for the submitting node.
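As with the existing policies, 'latency' can be selected per subsystem via
the iopolicy sysfs attribute or set as the default with the
nvme_core.iopolicy module parameter.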
Signed-off-by: Hannes Reinecke <[email protected]>
Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
Advertise the 'latency' iopolicy in modinfo.
Signed-off-by: John Meneghini <[email protected]>
---
drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
drivers/nvme/host/nvme.h | 1 +
2 files changed, 55 insertions(+), 9 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d916a5ddf5d4..e9330bb1990b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
[NVME_IOPOLICY_NUMA] = "numa",
[NVME_IOPOLICY_RR] = "round-robin",
[NVME_IOPOLICY_QD] = "queue-depth",
+ [NVME_IOPOLICY_LAT] = "latency",
};
static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -32,6 +33,10 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
iopolicy = NVME_IOPOLICY_RR;
else if (!strncmp(val, "queue-depth", 11))
iopolicy = NVME_IOPOLICY_QD;
+#ifdef CONFIG_BLK_NODE_LATENCY
+ else if (!strncmp(val, "latency", 7))
+ iopolicy = NVME_IOPOLICY_LAT;
+#endif
else
return -EINVAL;
@@ -43,10 +48,36 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}
+static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
+{
+ struct nvme_ns_head *h;
+ struct nvme_ns *ns;
+ bool enable = iopolicy == NVME_IOPOLICY_LAT;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ list_for_each_entry_rcu(ns, &h->list, siblings) {
+ if (enable) {
+ ret = blk_nlat_enable(ns->disk);
+ if (ret)
+ break;
+ } else
+ blk_nlat_disable(ns->disk);
+ }
+ }
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
+#if defined(CONFIG_BLK_NODE_LATENCY)
+ "Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
+#else
"Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
+#endif
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
@@ -250,14 +281,16 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
struct nvme_ns *found = NULL, *fallback = NULL, *ns;
+ int iopolicy = READ_ONCE(head->subsys->iopolicy);
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (nvme_path_is_disabled(ns))
continue;
- if (ns->ctrl->numa_node != NUMA_NO_NODE &&
- READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ if (iopolicy == NVME_IOPOLICY_NUMA)
distance = node_distance(node, ns->ctrl->numa_node);
+ else if (iopolicy == NVME_IOPOLICY_LAT)
+ distance = blk_nlat_latency(ns->disk, node);
else
distance = LOCAL_DISTANCE;
@@ -381,8 +414,8 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
- int iopolicy = READ_ONCE(head->subsys->iopolicy);
int node;
+ int iopolicy = READ_ONCE(head->subsys->iopolicy);
struct nvme_ns *ns;
/*
@@ -401,8 +434,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
if (iopolicy == NVME_IOPOLICY_RR)
return nvme_round_robin_path(head, node, ns);
-
- if (unlikely(!nvme_path_is_optimized(ns)))
+ if (iopolicy == NVME_IOPOLICY_LAT ||
+ unlikely(!nvme_path_is_optimized(ns)))
return __nvme_find_path(head, node);
return ns;
}
@@ -872,15 +905,18 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
{
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- int i;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
- nvme_subsys_iopolicy_update(subsys, i);
- return count;
+ ret = nvme_activate_iopolicy(subsys, i);
+ if (!ret) {
+ nvme_subsys_iopolicy_update(subsys, i);
+ return count;
+ }
+ return ret;
}
}
-
return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
@@ -916,6 +952,15 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
+ if (!blk_nlat_init(ns->disk) &&
+ READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
+ int ret = blk_nlat_enable(ns->disk);
+
+ if (unlikely(ret))
+ pr_warn("%s: Failed to enable latency tracking, error %d\n",
+ ns->disk->disk_name, ret);
+ }
+
if (nvme_ctrl_use_ana(ns->ctrl)) {
struct nvme_ana_group_desc desc = {
.grpid = anagrpid,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a557b4577c01..66bf003a6c48 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -411,6 +411,7 @@ enum nvme_iopolicy {
NVME_IOPOLICY_NUMA,
NVME_IOPOLICY_RR,
NVME_IOPOLICY_QD,
+ NVME_IOPOLICY_LAT,
};
struct nvme_subsystem {
--
2.39.3
On 5/10/24 05:43, John Meneghini wrote:
> From: Hannes Reinecke <[email protected]>
>
> Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
> latency tracker to provide latencies for each node, and schedules
> I/O on the path with the least latency for the submitting node.
>
> Signed-off-by: Hannes Reinecke <[email protected]>
>
> Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
> Advertise the 'latency' iopolicy in modinfo.
>
> Signed-off-by: John Meneghini <[email protected]>
> ---
> drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
> drivers/nvme/host/nvme.h | 1 +
> 2 files changed, 55 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index d916a5ddf5d4..e9330bb1990b 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
> [NVME_IOPOLICY_NUMA] = "numa",
> [NVME_IOPOLICY_RR] = "round-robin",
> [NVME_IOPOLICY_QD] = "queue-depth",
> + [NVME_IOPOLICY_LAT] = "latency",
> };
>
> static int iopolicy = NVME_IOPOLICY_NUMA;
> @@ -32,6 +33,10 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
> iopolicy = NVME_IOPOLICY_RR;
> else if (!strncmp(val, "queue-depth", 11))
> iopolicy = NVME_IOPOLICY_QD;
> +#ifdef CONFIG_BLK_NODE_LATENCY
> + else if (!strncmp(val, "latency", 7))
> + iopolicy = NVME_IOPOLICY_LAT;
> +#endif
> else
> return -EINVAL;
>
> @@ -43,10 +48,36 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
> return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
> }
>
> +static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
> +{
> + struct nvme_ns_head *h;
> + struct nvme_ns *ns;
> + bool enable = iopolicy == NVME_IOPOLICY_LAT;
> + int ret = 0;
> +
> + mutex_lock(&subsys->lock);
> + list_for_each_entry(h, &subsys->nsheads, entry) {
> + list_for_each_entry_rcu(ns, &h->list, siblings) {
> + if (enable) {
> + ret = blk_nlat_enable(ns->disk);
> + if (ret)
> + break;
> + } else
> + blk_nlat_disable(ns->disk);
Missing curly brackets for the else.
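I.e. kernel coding style wants braces on both branches once one of them
needs them, something like:

			if (enable) {
				ret = blk_nlat_enable(ns->disk);
				if (ret)
					break;
			} else {
				blk_nlat_disable(ns->disk);
			}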
> + }
> + }
> + mutex_unlock(&subsys->lock);
> + return ret;
> +}
> +
> module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
> &iopolicy, 0644);
> MODULE_PARM_DESC(iopolicy,
> +#if defined(CONFIG_BLK_NODE_LATENCY)
What is so special about the latency policy that it needs to be conditionally
defined ? I missed that point. Why not drop CONFIG_BLK_NODE_LATENCY ?
> + "Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
> +#else
> "Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
> +#endif
>
> void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
> {
> @@ -250,14 +281,16 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
> {
> int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
> struct nvme_ns *found = NULL, *fallback = NULL, *ns;
> + int iopolicy = READ_ONCE(head->subsys->iopolicy);
>
> list_for_each_entry_rcu(ns, &head->list, siblings) {
> if (nvme_path_is_disabled(ns))
> continue;
>
> - if (ns->ctrl->numa_node != NUMA_NO_NODE &&
> - READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
> + if (iopolicy == NVME_IOPOLICY_NUMA)
> distance = node_distance(node, ns->ctrl->numa_node);
> + else if (iopolicy == NVME_IOPOLICY_LAT)
> + distance = blk_nlat_latency(ns->disk, node);
> else
> distance = LOCAL_DISTANCE;
>
> @@ -381,8 +414,8 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>
> inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
> {
> - int iopolicy = READ_ONCE(head->subsys->iopolicy);
> int node;
> + int iopolicy = READ_ONCE(head->subsys->iopolicy);
No need to move this line.
> struct nvme_ns *ns;
>
> /*
> @@ -401,8 +434,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>
> if (iopolicy == NVME_IOPOLICY_RR)
> return nvme_round_robin_path(head, node, ns);
> -
> - if (unlikely(!nvme_path_is_optimized(ns)))
> + if (iopolicy == NVME_IOPOLICY_LAT ||
> + unlikely(!nvme_path_is_optimized(ns)))
> return __nvme_find_path(head, node);
> return ns;
> }
> @@ -872,15 +905,18 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
> {
> struct nvme_subsystem *subsys =
> container_of(dev, struct nvme_subsystem, dev);
> - int i;
> + int i, ret;
>
> for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
> if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
> - nvme_subsys_iopolicy_update(subsys, i);
> - return count;
> + ret = nvme_activate_iopolicy(subsys, i);
> + if (!ret) {
> + nvme_subsys_iopolicy_update(subsys, i);
> + return count;
> + }
> + return ret;
It would be nicer to have this as:
	if (ret)
		break;
	nvme_subsys_iopolicy_update(subsys, i);
	return count;
> }
> }
> -
Unrelated whitespace-only change.
> return -EINVAL;
And "return ret;" here with ret initialized to -EINVAL when declared.
> }
> SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
> @@ -916,6 +952,15 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
>
> void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
> {
> + if (!blk_nlat_init(ns->disk) &&
> + READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
> + int ret = blk_nlat_enable(ns->disk);
> +
> + if (unlikely(ret))
> + pr_warn("%s: Failed to enable latency tracking, error %d\n",
> + ns->disk->disk_name, ret);
> + }
> +
> if (nvme_ctrl_use_ana(ns->ctrl)) {
> struct nvme_ana_group_desc desc = {
> .grpid = anagrpid,
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index a557b4577c01..66bf003a6c48 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -411,6 +411,7 @@ enum nvme_iopolicy {
> NVME_IOPOLICY_NUMA,
> NVME_IOPOLICY_RR,
> NVME_IOPOLICY_QD,
> + NVME_IOPOLICY_LAT,
> };
>
> struct nvme_subsystem {
--
Damien Le Moal
Western Digital Research
On 5/10/24 09:17, Damien Le Moal wrote:
> On 5/10/24 05:43, John Meneghini wrote:
>> From: Hannes Reinecke <[email protected]>
>>
>> Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
>> latency tracker to provide latencies for each node, and schedules
>> I/O on the path with the least latency for the submitting node.
>>
>> Signed-off-by: Hannes Reinecke <[email protected]>
>>
>> Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
>> Advertise the 'latency' iopolicy in modinfo.
>>
>> Signed-off-by: John Meneghini <[email protected]>
>> ---
>> drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
>> drivers/nvme/host/nvme.h | 1 +
>> 2 files changed, 55 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
>> index d916a5ddf5d4..e9330bb1990b 100644
>> --- a/drivers/nvme/host/multipath.c
>> +++ b/drivers/nvme/host/multipath.c
>> @@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
>> [NVME_IOPOLICY_NUMA] = "numa",
>> [NVME_IOPOLICY_RR] = "round-robin",
>> [NVME_IOPOLICY_QD] = "queue-depth",
>> + [NVME_IOPOLICY_LAT] = "latency",
>> };
>>
>> static int iopolicy = NVME_IOPOLICY_NUMA;
>> @@ -32,6 +33,10 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
>> iopolicy = NVME_IOPOLICY_RR;
>> else if (!strncmp(val, "queue-depth", 11))
>> iopolicy = NVME_IOPOLICY_QD;
>> +#ifdef CONFIG_BLK_NODE_LATENCY
>> + else if (!strncmp(val, "latency", 7))
>> + iopolicy = NVME_IOPOLICY_LAT;
>> +#endif
>> else
>> return -EINVAL;
>>
>> @@ -43,10 +48,36 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
>> return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
>> }
>>
>> +static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
>> +{
>> + struct nvme_ns_head *h;
>> + struct nvme_ns *ns;
>> + bool enable = iopolicy == NVME_IOPOLICY_LAT;
>> + int ret = 0;
>> +
>> + mutex_lock(&subsys->lock);
>> + list_for_each_entry(h, &subsys->nsheads, entry) {
>> + list_for_each_entry_rcu(ns, &h->list, siblings) {
>> + if (enable) {
>> + ret = blk_nlat_enable(ns->disk);
>> + if (ret)
>> + break;
>> + } else
>> + blk_nlat_disable(ns->disk);
>
> Missing curly brackets for the else.
>
Ok.
>> + }
>> + }
>> + mutex_unlock(&subsys->lock);
>> + return ret;
>> +}
>> +
>> module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
>> &iopolicy, 0644);
>> MODULE_PARM_DESC(iopolicy,
>> +#if defined(CONFIG_BLK_NODE_LATENCY)
>
> What is so special about the latency policy that it needs to be conditionally
> defined ? I missed that point. Why not drop CONFIG_BLK_NODE_LATENCY ?
>
The 'latency' policy uses the blk-rqos infrastructure, which itself might
not be compiled in. So we don't want to give the user a false impression
here.
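For the !CONFIG_BLK_NODE_LATENCY case the blk-nodelat patch is expected to
provide no-op stubs, roughly along these lines (sketch only, the actual
prototypes live in the blk-nodelat patch):

static inline int blk_nlat_init(struct gendisk *disk) { return -EOPNOTSUPP; }
static inline int blk_nlat_enable(struct gendisk *disk) { return -EOPNOTSUPP; }
static inline void blk_nlat_disable(struct gendisk *disk) { }
static inline int blk_nlat_latency(struct gendisk *disk, int node) { return 0; }

With that, writing 'latency' to the iopolicy attribute simply fails instead
of silently selecting a policy that cannot work.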
>> + "Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
>> +#else
>> "Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
>> +#endif
>>
>> void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
>> {
>> @@ -250,14 +281,16 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
>> {
>> int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
>> struct nvme_ns *found = NULL, *fallback = NULL, *ns;
>> + int iopolicy = READ_ONCE(head->subsys->iopolicy);
>>
>> list_for_each_entry_rcu(ns, &head->list, siblings) {
>> if (nvme_path_is_disabled(ns))
>> continue;
>>
>> - if (ns->ctrl->numa_node != NUMA_NO_NODE &&
>> - READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
>> + if (iopolicy == NVME_IOPOLICY_NUMA)
>> distance = node_distance(node, ns->ctrl->numa_node);
>> + else if (iopolicy == NVME_IOPOLICY_LAT)
>> + distance = blk_nlat_latency(ns->disk, node);
>> else
>> distance = LOCAL_DISTANCE;
>>
>> @@ -381,8 +414,8 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>>
>> inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>> {
>> - int iopolicy = READ_ONCE(head->subsys->iopolicy);
>> int node;
>> + int iopolicy = READ_ONCE(head->subsys->iopolicy);
>
> No need to move this line.
>
Sure.
>> struct nvme_ns *ns;
>>
>> /*
>> @@ -401,8 +434,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>>
>> if (iopolicy == NVME_IOPOLICY_RR)
>> return nvme_round_robin_path(head, node, ns);
>> -
>> - if (unlikely(!nvme_path_is_optimized(ns)))
>> + if (iopolicy == NVME_IOPOLICY_LAT ||
>> + unlikely(!nvme_path_is_optimized(ns)))
>> return __nvme_find_path(head, node);
>> return ns;
>> }
>> @@ -872,15 +905,18 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>> {
>> struct nvme_subsystem *subsys =
>> container_of(dev, struct nvme_subsystem, dev);
>> - int i;
>> + int i, ret;
>>
>> for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
>> if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
>> - nvme_subsys_iopolicy_update(subsys, i);
>> - return count;
>> + ret = nvme_activate_iopolicy(subsys, i);
>> + if (!ret) {
>> + nvme_subsys_iopolicy_update(subsys, i);
>> + return count;
>> + }
>> + return ret;
>
> It would be nicer to have this as:
>
> 	if (ret)
> 		break;
> 	nvme_subsys_iopolicy_update(subsys, i);
> 	return count;
>
Ok.
>> }
>> }
>> -
>
> Unrelated whitespace-only change.
>
>> return -EINVAL;
>
> And "return ret;" here with ret initialized to -EINVAL when declared.
>
Ok.
>> }
>> SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
>> @@ -916,6 +952,15 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
>>
>> void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
>> {
>> + if (!blk_nlat_init(ns->disk) &&
>> + READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
>> + int ret = blk_nlat_enable(ns->disk);
>> +
>> + if (unlikely(ret))
>> + pr_warn("%s: Failed to enable latency tracking, error %d\n",
>> + ns->disk->disk_name, ret);
>> + }
>> +
>> if (nvme_ctrl_use_ana(ns->ctrl)) {
>> struct nvme_ana_group_desc desc = {
>> .grpid = anagrpid,
>> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
>> index a557b4577c01..66bf003a6c48 100644
>> --- a/drivers/nvme/host/nvme.h
>> +++ b/drivers/nvme/host/nvme.h
>> @@ -411,6 +411,7 @@ enum nvme_iopolicy {
>> NVME_IOPOLICY_NUMA,
>> NVME_IOPOLICY_RR,
>> NVME_IOPOLICY_QD,
>> + NVME_IOPOLICY_LAT,
>> };
>>
>> struct nvme_subsystem {
>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
[email protected] +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Ivo Totev, Andrew McDonald,
Werner Knoblich