This series of six patches adds support for reading vendor statistics
from a vdpa device.
The first patch is just a cleanup fix.
The second patch lays the groundwork for an upstream driver to provide
statistics in the form of attribute name/attribute value pairs.
The third and fourth patches change the locking scheme to use a
readers/writers semaphore instead of a mutex.
The fifth patch implements this for mlx5_vdpa, reporting the number of
received descriptors and completed descriptors for each of the
virtqueues.
The sixth patch changes the reslock mutex to a readers/writers
semaphore, achieving the same goal as in the vdpa core.
v7 -> v8:
Convert cf_mutex to a rw_semaphore
Eli Cohen (6):
vdpa: Fix error logic in vdpa_nl_cmd_dev_get_doit
vdpa: Add support for querying vendor statistics
net/vdpa: Use readers/writers semaphore instead of vdpa_dev_mutex
net/vdpa: Use readers/writers semaphore instead of cf_mutex
vdpa/mlx5: Add support for reading descriptor statistics
vdpa/mlx5: Use readers/writers semaphore instead of mutex
drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2 +
drivers/vdpa/mlx5/net/mlx5_vnet.c | 186 ++++++++++++++++++---
drivers/vdpa/vdpa.c | 252 ++++++++++++++++++++++++-----
include/linux/mlx5/mlx5_ifc.h | 1 +
include/linux/mlx5/mlx5_ifc_vdpa.h | 39 +++++
include/linux/vdpa.h | 15 +-
include/uapi/linux/vdpa.h | 6 +
7 files changed, 432 insertions(+), 69 deletions(-)
--
2.35.1
Replace cf_mutex with an rw_semaphore to reflect the fact that some
calls can run concurrently and need only the read lock.
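As an illustration, the resulting read/write split looks as follows.
This is a minimal sketch of the kernel rwsem API under made-up names
(cfg_lock, cfg_query, cfg_update), not the exact driver code:

  #include <linux/rwsem.h>

  static DECLARE_RWSEM(cfg_lock);	/* stand-in for vdev->cf_lock */

  /* Config readers may now run concurrently with each other. */
  static void cfg_query(void *buf, unsigned int len)
  {
          down_read(&cfg_lock);
          /* ... copy the configuration into buf ... */
          up_read(&cfg_lock);
  }

  /* Config writers remain exclusive against readers and writers. */
  static void cfg_update(const void *buf, unsigned int len)
  {
          down_write(&cfg_lock);
          /* ... apply the new configuration ... */
          up_write(&cfg_lock);
  }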
Suggested-by: Si-Wei Liu <[email protected]>
Signed-off-by: Eli Cohen <[email protected]>
---
drivers/vdpa/vdpa.c | 25 ++++++++++++-------------
include/linux/vdpa.h | 12 ++++++------
2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 2ff7de5e6b2f..9d3534a0bc5f 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -23,9 +23,9 @@ static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_status(vdev, status);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
@@ -148,7 +148,6 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index);
- mutex_destroy(&vdev->cf_mutex);
kfree(vdev->driver_override);
kfree(vdev);
}
@@ -211,7 +210,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (err)
goto err_name;
- mutex_init(&vdev->cf_mutex);
+ init_rwsem(&vdev->cf_lock);
device_initialize(&vdev->dev);
return vdev;
@@ -407,9 +406,9 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
vdpa_get_config_unlocked(vdev, offset, buf, len);
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
@@ -423,9 +422,9 @@ EXPORT_SYMBOL_GPL(vdpa_get_config);
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
const void *buf, unsigned int length)
{
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->config->set_config(vdev, offset, buf, length);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
@@ -866,7 +865,7 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
u8 status;
int err;
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
status = vdev->config->get_status(vdev);
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
@@ -903,14 +902,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
out:
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
return err;
}
@@ -954,7 +953,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
{
int err;
- mutex_lock(&vdev->cf_mutex);
+ down_read(&vdev->cf_lock);
if (!vdev->config->get_vendor_vq_stats) {
err = -EOPNOTSUPP;
goto out;
@@ -962,7 +961,7 @@ static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
- mutex_unlock(&vdev->cf_mutex);
+ up_read(&vdev->cf_lock);
return err;
}
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 2ae8443331e1..2cb14847831e 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -66,7 +66,7 @@ struct vdpa_mgmt_dev;
* @dma_dev: the actual device that is performing DMA
* @driver_override: driver name to force a match
* @config: the configuration ops for this device.
- * @cf_mutex: Protects get and set access to configuration layout.
+ * @cf_lock: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
* @use_va: indicate whether virtual address must be used by this device
@@ -79,7 +79,7 @@ struct vdpa_device {
struct device *dma_dev;
const char *driver_override;
const struct vdpa_config_ops *config;
- struct mutex cf_mutex; /* Protects get/set config */
+ struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index;
bool features_valid;
bool use_va;
@@ -398,10 +398,10 @@ static inline int vdpa_reset(struct vdpa_device *vdev)
const struct vdpa_config_ops *ops = vdev->config;
int ret;
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
vdev->features_valid = false;
ret = ops->reset(vdev);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
return ret;
}
@@ -420,9 +420,9 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
int ret;
- mutex_lock(&vdev->cf_mutex);
+ down_write(&vdev->cf_lock);
ret = vdpa_set_features_unlocked(vdev, features);
- mutex_unlock(&vdev->cf_mutex);
+ up_write(&vdev->cf_lock);
return ret;
}
--
2.35.1
Use an rw_semaphore instead of a mutex to control access to vdpa
devices. This can be especially beneficial when several processes poll
statistics information concurrently.
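For example (illustrative; assumes an iproute2 vdpa tool with vstats
support), two monitors polling statistics in parallel now take the lock
for read and no longer serialize against each other:

  $ vdpa dev vstats show vdpa-a qidx 0 &
  $ vdpa dev vstats show vdpa-a qidx 1 &

Device add/remove and (un)registration paths still take the lock for
write.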
Suggested-by: Si-Wei Liu <[email protected]>
Reviewed-by: Si-Wei Liu <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: Eli Cohen <[email protected]>
---
drivers/vdpa/vdpa.c | 64 ++++++++++++++++++++++-----------------------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 31b5eb2c0778..2ff7de5e6b2f 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -18,7 +18,7 @@
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
-static DEFINE_MUTEX(vdpa_dev_mutex);
+static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
@@ -238,7 +238,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
vdev->nvqs = nvqs;
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
if (dev) {
put_device(dev);
@@ -278,9 +278,9 @@ int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
err = __vdpa_register_device(vdev, nvqs);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(vdpa_register_device);
*/
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
- lockdep_assert_held(&vdpa_dev_mutex);
+ lockdep_assert_held(&vdpa_dev_lock);
WARN_ON(!vdev->mdev);
device_unregister(&vdev->dev);
}
@@ -305,9 +305,9 @@ EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
*/
void vdpa_unregister_device(struct vdpa_device *vdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
device_unregister(&vdev->dev);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
@@ -352,9 +352,9 @@ int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
return -EINVAL;
INIT_LIST_HEAD(&mdev->list);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_add_tail(&mdev->list, &mdev_head);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
@@ -371,14 +371,14 @@ static int vdpa_match_remove(struct device *dev, void *data)
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
list_del(&mdev->list);
/* Filter out all the entries belong to this management device and delete it. */
bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
@@ -532,17 +532,17 @@ static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *i
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
err = PTR_ERR(mdev);
goto out;
}
err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
goto out;
err = genlmsg_reply(msg, info);
@@ -561,7 +561,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
int idx = 0;
int err;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
list_for_each_entry(mdev, &mdev_head, list) {
if (idx < start) {
idx++;
@@ -574,7 +574,7 @@ vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
idx++;
}
out:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = idx;
return msg->len;
}
@@ -627,7 +627,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
@@ -643,7 +643,7 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
err = mdev->ops->dev_add(mdev, name, &config);
err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -659,7 +659,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
return -EINVAL;
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
- mutex_lock(&vdpa_dev_mutex);
+ down_write(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -677,7 +677,7 @@ static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *i
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_write(&vdpa_dev_lock);
return err;
}
@@ -743,7 +743,7 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -761,13 +761,13 @@ static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
err = genlmsg_reply(msg, info);
put_device(dev);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
return err;
mdev_err:
put_device(dev);
err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
nlmsg_free(msg);
return err;
}
@@ -809,9 +809,9 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
@@ -1031,7 +1031,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
if (!msg)
return -ENOMEM;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -1052,7 +1052,7 @@ static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info
mdev_err:
put_device(dev);
dev_err:
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
if (err)
nlmsg_free(msg);
return err;
@@ -1090,9 +1090,9 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
info.start_idx = cb->args[0];
info.idx = 0;
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
cb->args[0] = info.idx;
return msg->len;
}
@@ -1119,7 +1119,7 @@ static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
return -ENOMEM;
index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
- mutex_lock(&vdpa_dev_mutex);
+ down_read(&vdpa_dev_lock);
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
if (!dev) {
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
@@ -1139,7 +1139,7 @@ static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
err = genlmsg_reply(msg, info);
put_device(dev);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
return err;
@@ -1147,7 +1147,7 @@ static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
put_device(dev);
dev_err:
nlmsg_free(msg);
- mutex_unlock(&vdpa_dev_mutex);
+ up_read(&vdpa_dev_lock);
return err;
}
--
2.35.1
Implement the get_vendor_vq_stats callback of vdpa_config_ops to return
the statistics for a virtqueue.
The statistics are provided as vendor-specific statistics, where the
driver provides attribute name/attribute value pairs.
Currently supported are received descriptors and completed descriptors.
Signed-off-by: Eli Cohen <[email protected]>
---
drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2 +
drivers/vdpa/mlx5/net/mlx5_vnet.c | 149 +++++++++++++++++++++++++++++
include/linux/mlx5/mlx5_ifc.h | 1 +
include/linux/mlx5/mlx5_ifc_vdpa.h | 39 ++++++++
4 files changed, 191 insertions(+)
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index daaf7b503677..44104093163b 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -61,6 +61,8 @@ struct mlx5_control_vq {
struct vringh_kiov riov;
struct vringh_kiov wiov;
unsigned short head;
+ unsigned int received_desc;
+ unsigned int completed_desc;
};
struct mlx5_vdpa_wq_ent {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e0de44000d92..2b815ef850c8 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -119,6 +119,7 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vdpa_umem umem2;
struct mlx5_vdpa_umem umem3;
+ u32 counter_set_id;
bool initialized;
int index;
u32 virtq_id;
@@ -818,6 +819,12 @@ static u16 get_features_12_3(u64 features)
(!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
+static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
+{
+ return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
+ BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+}
+
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -872,6 +879,8 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+ if (counters_supported(&ndev->mvdev))
+ MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1135,6 +1144,47 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
return err;
}
+static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return 0;
+
+ cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
+
+ if (!counters_supported(&ndev->mvdev))
+ return;
+
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
+ MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+ mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
+}
+
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u16 idx = mvq->index;
@@ -1162,6 +1212,10 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (err)
goto err_connect;
+ err = counter_set_alloc(ndev, mvq);
+ if (err)
+ goto err_counter;
+
err = create_virtqueue(ndev, mvq);
if (err)
goto err_connect;
@@ -1179,6 +1233,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
return 0;
err_connect:
+ counter_set_dealloc(ndev, mvq);
+err_counter:
qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
qp_destroy(ndev, &mvq->fwqp);
@@ -1223,6 +1279,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
suspend_vq(ndev, mvq);
destroy_virtqueue(ndev, mvq);
+ counter_set_dealloc(ndev, mvq);
qp_destroy(ndev, &mvq->vqqp);
qp_destroy(ndev, &mvq->fwqp);
cq_destroy(ndev, mvq->index);
@@ -1659,6 +1716,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (read != sizeof(ctrl))
break;
+ cvq->received_desc++;
switch (ctrl.class) {
case VIRTIO_NET_CTRL_MAC:
status = handle_ctrl_mac(mvdev, ctrl.cmd);
@@ -1682,6 +1740,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
if (vringh_need_notify_iotlb(&cvq->vring))
vringh_notify(&cvq->vring);
+ cvq->completed_desc++;
queue_work(mvdev->wq, &wqent->work);
break;
}
@@ -2303,6 +2362,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->cur_num_vqs = 0;
+ ndev->mvdev.cvq.received_desc = 0;
+ ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
@@ -2422,6 +2483,93 @@ static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
return mvdev->actual_features;
}
+static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+ u64 *received_desc, u64 *completed_desc)
+{
+ u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
+ void *cmd_hdr;
+ void *ctx;
+ int err;
+
+ if (!counters_supported(&ndev->mvdev))
+ return -EOPNOTSUPP;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return -EAGAIN;
+
+ cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
+
+ err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
+ *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
+ *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
+ return 0;
+}
+
+static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_control_vq *cvq;
+ u64 received_desc;
+ u64 completed_desc;
+ int err = 0;
+
+ mutex_lock(&ndev->reslock);
+ if (!is_index_valid(mvdev, idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (idx == ctrl_vq_idx(mvdev)) {
+ cvq = &mvdev->cvq;
+ received_desc = cvq->received_desc;
+ completed_desc = cvq->completed_desc;
+ goto out;
+ }
+
+ mvq = &ndev->vqs[idx];
+ err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
+ goto out_err;
+ }
+
+out:
+ err = -EMSGSIZE;
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
+ goto out_err;
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
+ VDPA_ATTR_PAD))
+ goto out_err;
+
+ err = 0;
+out_err:
+ mutex_unlock(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2431,6 +2579,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_ready = mlx5_vdpa_get_vq_ready,
.set_vq_state = mlx5_vdpa_set_vq_state,
.get_vq_state = mlx5_vdpa_get_vq_state,
+ .get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 49a48d7709ac..1d193d9b6029 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -94,6 +94,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 1a9c9d94cb59..4414ed5b6ed2 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -165,4 +165,43 @@ struct mlx5_ifc_modify_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
#endif /* __MLX5_IFC_VDPA_H_ */
--
2.35.1
Allow reading vendor statistics of a vdpa device. The specific
statistics data are received from the upstream driver in the form of
(attribute name, attribute value) pairs.
Example statistics for an mlx5_vdpa device are:
received_desc - number of descriptors received by the virtqueue
completed_desc - number of descriptors completed by the virtqueue
A descriptor using indirect buffers is still counted as 1. In addition,
N chained descriptors are correctly counted N times, as one would expect.
A new callback was added to vdpa_config_ops, providing the means for
the vdpa driver to return statistics results.
The interface allows for reading all the supported virtqueues, including
the control virtqueue if it exists.
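To illustrate the contract, a minimal driver-side implementation could
look like the sketch below; foo_dev, vdpa_to_foo() and the "kicks"
counter are hypothetical, only the ops signature and the attribute pair
scheme come from this patch:

  #include <linux/vdpa.h>
  #include <net/netlink.h>
  #include <uapi/linux/vdpa.h>

  /* Hypothetical driver state; not part of the vdpa core. */
  struct foo_dev {
          struct vdpa_device vdev;
          u64 vq_kicks[16];
  };

  static struct foo_dev *vdpa_to_foo(struct vdpa_device *vdev)
  {
          return container_of(vdev, struct foo_dev, vdev);
  }

  static int foo_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
                                     struct sk_buff *msg,
                                     struct netlink_ext_ack *extack)
  {
          struct foo_dev *foo = vdpa_to_foo(vdev);

          /* Each statistic is an (attribute name, attribute value) pair. */
          if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "kicks"))
                  return -EMSGSIZE;
          if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,
                                foo->vq_kicks[idx], VDPA_ATTR_PAD))
                  return -EMSGSIZE;
          return 0;
  }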
Below are some examples taken from mlx5_vdpa, for which support is
introduced in a following patch:
1. Read statistics for the virtqueue at index 1
$ vdpa dev vstats show vdpa-a qidx 1
vdpa-a:
queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
2. Read statistics for the virtqueue at index 32
$ vdpa dev vstats show vdpa-a qidx 32
vdpa-a:
queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
3. Read statistics for the virtqueue at index 0 with JSON output
$ vdpa -j dev vstats show vdpa-a qidx 0
{"vstats":{"vdpa-a":{
"queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
"name":"completed_desc","value":417548}}}
4. Read statistics for the virtqueue at index 0 with pretty JSON output
$ vdpa -jp dev vstats show vdpa-a qidx 0
{
"vstats": {
"vdpa-a": {
"queue_type": "rx",
"queue_index": 0,
"name": "received_desc",
"value": 417776,
"name": "completed_desc",
"value": 417548
}
}
}
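For completeness, the query can also be issued directly over generic
netlink. Below is a minimal userspace sketch against the new uapi
(assumes libnl-3; link with -lnl-3 -lnl-genl-3 and run as root; error
handling and parsing of the reply attributes are omitted):

  #include <netlink/netlink.h>
  #include <netlink/genl/genl.h>
  #include <netlink/genl/ctrl.h>
  #include <linux/vdpa.h>

  int main(void)
  {
          struct nl_sock *sk = nl_socket_alloc();
          struct nl_msg *msg;
          int family;

          genl_connect(sk);
          family = genl_ctrl_resolve(sk, VDPA_GENL_NAME);

          msg = nlmsg_alloc();
          genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                      VDPA_CMD_DEV_VSTATS_GET, VDPA_GENL_VERSION);
          nla_put_string(msg, VDPA_ATTR_DEV_NAME, "vdpa-a");
          nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, 0);

          nl_send_auto(sk, msg);
          nl_recvmsgs_default(sk);  /* reply carries the name/value pairs */

          nlmsg_free(msg);
          nl_socket_free(sk);
          return 0;
  }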
Signed-off-by: Eli Cohen <[email protected]>
---
drivers/vdpa/vdpa.c | 162 ++++++++++++++++++++++++++++++++++++++
include/linux/vdpa.h | 3 +
include/uapi/linux/vdpa.h | 6 ++
3 files changed, 171 insertions(+)
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index fac89a0d8178..31b5eb2c0778 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -914,6 +914,108 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
return err;
}
+static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ struct virtio_net_config config = {};
+ u64 features;
+ u16 max_vqp;
+ u8 status;
+ int err;
+
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
+ return -EAGAIN;
+ }
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
+
+ max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
+ return -EMSGSIZE;
+
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
+ features, VDPA_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
+ return -EMSGSIZE;
+
+ err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ int err;
+
+ mutex_lock(&vdev->cf_mutex);
+ if (!vdev->config->get_vendor_vq_stats) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = vdpa_fill_stats_rec(vdev, msg, info, index);
+out:
+ mutex_unlock(&vdev->cf_mutex);
+ return err;
+}
+
+static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg,
+ struct genl_info *info, u32 index)
+{
+ u32 device_id;
+ void *hdr;
+ int err;
+ u32 portid = info->snd_portid;
+ u32 seq = info->snd_seq;
+ u32 flags = 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+ VDPA_CMD_DEV_VSTATS_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ device_id = vdev->config->get_device_id(vdev);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+ err = -EMSGSIZE;
+ goto undo_msg;
+ }
+
+ switch (device_id) {
+ case VIRTIO_ID_NET:
+ if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+ NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
+ err = -ERANGE;
+ break;
+ }
+
+ err = vendor_stats_fill(vdev, msg, info, index);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ genlmsg_end(msg, hdr);
+
+ return err;
+
+undo_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct vdpa_device *vdev;
@@ -995,6 +1097,60 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
return msg->len;
}
+static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct sk_buff *msg;
+ const char *devname;
+ struct device *dev;
+ u32 index;
+ int err;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
+ return -EINVAL;
+
+ devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
+ mutex_lock(&vdpa_dev_mutex);
+ dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
+ if (err)
+ goto mdev_err;
+
+ err = genlmsg_reply(msg, info);
+
+ put_device(dev);
+ mutex_unlock(&vdpa_dev_mutex);
+
+ return err;
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ nlmsg_free(msg);
+ mutex_unlock(&vdpa_dev_mutex);
+ return err;
+}
+
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
@@ -1035,6 +1191,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_config_get_doit,
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
},
+ {
+ .cmd = VDPA_CMD_DEV_VSTATS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_stats_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 8943a209202e..2ae8443331e1 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -276,6 +276,9 @@ struct vdpa_config_ops {
const struct vdpa_vq_state *state);
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
struct vdpa_vq_state *state);
+ int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack);
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 1061d8d2d09d..25c55cab3d7c 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -18,6 +18,7 @@ enum vdpa_command {
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
+ VDPA_CMD_DEV_VSTATS_GET,
};
enum vdpa_attr {
@@ -46,6 +47,11 @@ enum vdpa_attr {
VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
+
+ VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
+ VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
+ VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
+
/* new attributes must be added above here */
VDPA_ATTR_MAX,
};
--
2.35.1
Statistics may be read intensively and by several processes
concurrently; a reader's lock is sufficient in this case.
Change reslock from a mutex to an rwsem.
Suggested-by: Si-Wei Liu <[email protected]>
Signed-off-by: Eli Cohen <[email protected]>
---
drivers/vdpa/mlx5/net/mlx5_vnet.c | 41 ++++++++++++++-----------------
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 2b815ef850c8..57cfc64248b7 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -155,7 +155,7 @@ struct mlx5_vdpa_net {
* since memory map might change and we need to destroy and create
* resources while driver in operational.
*/
- struct mutex reslock;
+ struct rw_semaphore reslock;
struct mlx5_flow_table *rxft;
struct mlx5_fc *rx_counter;
struct mlx5_flow_handle *rx_rule_ucast;
@@ -1695,7 +1695,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
@@ -1746,7 +1746,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
}
out:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -2244,7 +2244,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (ndev->setup) {
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
@@ -2292,7 +2292,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
static void teardown_driver(struct mlx5_vdpa_net *ndev)
{
- WARN_ON(!mutex_is_locked(&ndev->reslock));
+ WARN_ON(!rwsem_is_locked(&ndev->reslock));
if (!ndev->setup)
return;
@@ -2322,7 +2322,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
print_status(mvdev, status, true);
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
@@ -2338,14 +2338,14 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
}
ndev->mvdev.status = status;
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return;
err_setup:
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
}
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2356,7 +2356,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
teardown_driver(ndev);
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2371,7 +2371,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return 0;
}
@@ -2411,7 +2411,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
bool change_map;
int err;
- mutex_lock(&ndev->reslock);
+ down_write(&ndev->reslock);
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
@@ -2423,7 +2423,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
err = mlx5_vdpa_change_map(mvdev, iotlb);
err:
- mutex_unlock(&ndev->reslock);
+ up_write(&ndev->reslock);
return err;
}
@@ -2442,7 +2442,6 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
mlx5_vdpa_free_resources(&ndev->mvdev);
- mutex_destroy(&ndev->reslock);
kfree(ndev->event_cbs);
kfree(ndev->vqs);
}
@@ -2527,7 +2526,7 @@ static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
u64 completed_desc;
int err = 0;
- mutex_lock(&ndev->reslock);
+ down_read(&ndev->reslock);
if (!is_index_valid(mvdev, idx)) {
NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
err = -EINVAL;
@@ -2566,7 +2565,7 @@ static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
err = 0;
out_err:
- mutex_unlock(&ndev->reslock);
+ up_read(&ndev->reslock);
return err;
}
@@ -2835,18 +2834,18 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
init_mvqs(ndev);
- mutex_init(&ndev->reslock);
+ init_rwsem(&ndev->reslock);
config = &ndev->config;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
err = config_func_mtu(mdev, add_config->net.mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
err = query_mtu(mdev, &mtu);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
@@ -2860,14 +2859,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
} else {
err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
}
if (!is_zero_ether_addr(config->mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
- goto err_mtu;
+ goto err_alloc;
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
@@ -2917,8 +2916,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
err_mpfs:
if (!is_zero_ether_addr(config->mac))
mlx5_mpfs_del_mac(pfmdev, config->mac);
-err_mtu:
- mutex_destroy(&ndev->reslock);
err_alloc:
put_device(&mvdev->vdev.dev);
return err;
--
2.35.1