Hi all:
This series tries to implement a vDPA driver for virtio-pci devices,
which bridges between the vDPA bus and the virtio-pci device.
This could be used for future feature prototyping and testing.
Please review.
Changes since V2:
- tweak config prompt
- switch from 'cb' to 'config_cb' for vp_vdpa config interrupt
- use a macro for vp_vdpa msix name length
Changes since V1:
- don't try to use devres for virtio-pci core
- tweak the commit log
- split the patches further to ease reviewing
Changes since RFC:
- Split common code from virtio-pci and share it with the vDPA driver
- Use dynamic id in order to be less confusing with virtio-pci driver
- No feature whitelist, supporting any features (mq, config etc)
Thanks
Jason Wang (19):
virtio-pci: do not access iomem via struct virtio_pci_device directly
virtio-pci: split out modern device
virtio-pci-modern: factor out modern device initialization logic
virtio-pci-modern: introduce vp_modern_remove()
virtio-pci-modern: introduce helper to set config vector
virtio-pci-modern: introduce helpers for setting and getting status
virtio-pci-modern: introduce helpers for setting and getting features
virtio-pci-modern: introduce vp_modern_generation()
virtio-pci-modern: introduce vp_modern_set_queue_vector()
virtio-pci-modern: introduce vp_modern_queue_address()
virtio-pci-modern: introduce helper to set/get queue_enable
virtio-pci-modern: introduce helper for setting/getting queue size
virtio-pci-modern: introduce helper for getting queue nums
virtio-pci-modern: introduce helper to get notification offset
virtio-pci-modern: rename map_capability() to
vp_modern_map_capability()
virtio-pci: introduce modern device module
vdpa: set the virtqueue num during register
virtio_vdpa: don't warn when fail to disable vq
vdpa: introduce virtio pci driver
drivers/vdpa/Kconfig | 6 +
drivers/vdpa/Makefile | 1 +
drivers/vdpa/ifcvf/ifcvf_main.c | 5 +-
drivers/vdpa/mlx5/net/mlx5_vnet.c | 5 +-
drivers/vdpa/vdpa.c | 8 +-
drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 +-
drivers/vdpa/virtio_pci/Makefile | 2 +
drivers/vdpa/virtio_pci/vp_vdpa.c | 456 +++++++++++++++++++
drivers/virtio/Kconfig | 10 +-
drivers/virtio/Makefile | 1 +
drivers/virtio/virtio_pci_common.h | 22 +-
drivers/virtio/virtio_pci_modern.c | 506 +++------------------
drivers/virtio/virtio_pci_modern_dev.c | 599 +++++++++++++++++++++++++
drivers/virtio/virtio_vdpa.c | 3 +-
include/linux/vdpa.h | 7 +-
include/linux/virtio_pci_modern.h | 111 +++++
16 files changed, 1274 insertions(+), 472 deletions(-)
create mode 100644 drivers/vdpa/virtio_pci/Makefile
create mode 100644 drivers/vdpa/virtio_pci/vp_vdpa.c
create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
create mode 100644 include/linux/virtio_pci_modern.h
--
2.25.1
This patch introduces helpers to get and set the device status.
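For illustration, a device reset built only on these two helpers looks
roughly like the following (a minimal sketch of what vp_reset() below
does; mdev is assumed to point at an already probed modern device):

  vp_modern_set_status(mdev, 0);      /* writing 0 resets the device */
  while (vp_modern_get_status(mdev))  /* wait for the reset to complete */
          msleep(1);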
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 37 +++++++++++++++++++++++-------
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 2e37bfc89655..ccde0a41209a 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -275,41 +275,62 @@ static u32 vp_generation(struct virtio_device *vdev)
return vp_ioread8(&cfg->config_generation);
}
+/*
+ * vp_modern_get_status - get the device status
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the status read from the device
+ */
+static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ return vp_ioread8(&cfg->device_status);
+}
+
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+
+ return vp_modern_get_status(&vp_dev->mdev);
+}
+
+/*
+ * vp_modern_set_status - set status to device
+ * @mdev: the modern virtio-pci device
+ * @status: the status set to device
+ */
+static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status)
+{
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
- return vp_ioread8(&cfg->device_status);
+ vp_iowrite8(status, &cfg->device_status);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- vp_iowrite8(status, &cfg->device_status);
+ vp_modern_set_status(&vp_dev->mdev, status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* 0 status means a reset. */
- vp_iowrite8(0, &cfg->device_status);
+ vp_modern_set_status(mdev, 0);
/* After writing 0 to device_status, the driver MUST wait for a read of
* device_status to return 0 before reinitializing the device.
* This will flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any.
*/
- while (vp_ioread8(&cfg->device_status))
+ while (vp_modern_get_status(mdev))
msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
--
2.25.1
This patch introduces vp_modern_config_vector() for setting the config
vector.
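As elsewhere in virtio-pci, the caller is expected to check the value
read back to see whether the device accepted the vector, e.g. (sketch;
"vector" is whatever MSI-X vector the caller picked):

  vector = vp_modern_config_vector(mdev, vector);
  if (vector == VIRTIO_MSI_NO_VECTOR)
          return -EBUSY;  /* the device rejected the vector */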
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 4be9afad547e..2e37bfc89655 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -315,9 +315,16 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
-static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
+/*
+ * vp_modern_config_vector - set the vector for config interrupt
+ * @mdev: the modern virtio-pci device
+ * @vector: the config vector
+ *
+ * Returns the config vector read from the device
+ */
+static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector)
{
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* Setup the vector used for configuration events */
@@ -327,6 +334,11 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
return vp_ioread16(&cfg->msix_config);
}
+static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
+{
+ return vp_modern_config_vector(&vp_dev->mdev, vector);
+}
+
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
unsigned index,
--
2.25.1
This patch splits out the virtio-pci modern-device-only attributes
into another structure. While at it, a dedicated probe method for
modern-only attributes is introduced. This may help to split the
logic into a dedicated module.
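With the split in place, the modern-only resources are reached through
the embedded mdev, following the pattern used throughout this patch
(sketch):

  struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

  vp_iowrite8(status, &mdev->common->device_status);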
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_common.h | 25 +++--
drivers/virtio/virtio_pci_modern.c | 159 ++++++++++++++++-------------
2 files changed, 105 insertions(+), 79 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b2f0eb4067cb..f35ff5b6b467 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -39,22 +39,16 @@ struct virtio_pci_vq_info {
unsigned msix_vector;
};
-/* Our device structure */
-struct virtio_pci_device {
- struct virtio_device vdev;
+struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
- /* In legacy mode, these two point to within ->legacy. */
- /* Where to read and clear interrupt */
- u8 __iomem *isr;
-
- /* Modern only fields */
- /* The IO mapping for the PCI config space (non-legacy mode) */
struct virtio_pci_common_cfg __iomem *common;
/* Device-specific data (non-legacy mode) */
void __iomem *device;
/* Base of vq notifications (non-legacy mode). */
void __iomem *notify_base;
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
/* So we can sanity-check accesses. */
size_t notify_len;
@@ -68,6 +62,19 @@ struct virtio_pci_device {
int modern_bars;
+ struct virtio_device_id id;
+};
+
+/* Our device structure */
+struct virtio_pci_device {
+ struct virtio_device vdev;
+ struct pci_dev *pci_dev;
+ struct virtio_pci_modern_device mdev;
+
+ /* In legacy mode, these two point to within ->legacy. */
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+
/* Legacy only field */
/* the IO mapping for the PCI config space */
void __iomem *ioaddr;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index df1481fd400c..524490a94ca4 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -141,7 +141,8 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
u64 features;
vp_iowrite32(0, &cfg->device_feature_select);
@@ -166,7 +167,8 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
static int vp_finalize_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
u64 features = vdev->features;
/* Give virtio_ring a chance to accept features. */
@@ -194,12 +196,13 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *device = vp_dev->device;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ void __iomem *device = mdev->device;
u8 b;
__le16 w;
__le32 l;
- BUG_ON(offset + len > vp_dev->device_len);
+ BUG_ON(offset + len > mdev->device_len);
switch (len) {
case 1:
@@ -231,12 +234,13 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *device = vp_dev->device;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ void __iomem *device = mdev->device;
u8 b;
__le16 w;
__le32 l;
- BUG_ON(offset + len > vp_dev->device_len);
+ BUG_ON(offset + len > mdev->device_len);
switch (len) {
case 1:
@@ -265,7 +269,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
return vp_ioread8(&cfg->config_generation);
}
@@ -274,7 +279,8 @@ static u32 vp_generation(struct virtio_device *vdev)
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
return vp_ioread8(&cfg->device_status);
}
@@ -282,7 +288,8 @@ static u8 vp_get_status(struct virtio_device *vdev)
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* We should never be setting status to 0. */
BUG_ON(status == 0);
@@ -292,7 +299,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* 0 status means a reset. */
vp_iowrite8(0, &cfg->device_status);
@@ -309,7 +317,8 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
/* Setup the vector used for configuration events */
vp_iowrite16(vector, &cfg->msix_config);
@@ -326,7 +335,9 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
bool ctx,
u16 msix_vec)
{
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
struct virtqueue *vq;
u16 num, off;
int err;
@@ -369,25 +380,25 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
&cfg->queue_used_lo, &cfg->queue_used_hi);
- if (vp_dev->notify_base) {
+ if (mdev->notify_base) {
/* offset should not wrap */
- if ((u64)off * vp_dev->notify_offset_multiplier + 2
- > vp_dev->notify_len) {
- dev_warn(&vp_dev->pci_dev->dev,
+ if ((u64)off * mdev->notify_offset_multiplier + 2
+ > mdev->notify_len) {
+ dev_warn(&mdev->pci_dev->dev,
"bad notification offset %u (x %u) "
"for queue %u > %zd",
- off, vp_dev->notify_offset_multiplier,
- index, vp_dev->notify_len);
+ off, mdev->notify_offset_multiplier,
+ index, mdev->notify_len);
err = -EINVAL;
goto err_map_notify;
}
- vq->priv = (void __force *)vp_dev->notify_base +
- off * vp_dev->notify_offset_multiplier;
+ vq->priv = (void __force *)mdev->notify_base +
+ off * mdev->notify_offset_multiplier;
} else {
- vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
- vp_dev->notify_map_cap, 2, 2,
- off * vp_dev->notify_offset_multiplier, 2,
- NULL);
+ vq->priv = (void __force *)map_capability(mdev->pci_dev,
+ mdev->notify_map_cap, 2, 2,
+ off * mdev->notify_offset_multiplier, 2,
+ NULL);
}
if (!vq->priv) {
@@ -407,8 +418,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return vq;
err_assign_vector:
- if (!vp_dev->notify_base)
- pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
+ if (!mdev->notify_base)
+ pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
vring_del_virtqueue(vq);
return ERR_PTR(err);
@@ -421,7 +432,7 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->mdev.common;
struct virtqueue *vq;
int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
@@ -443,7 +454,9 @@ static void del_vq(struct virtio_pci_vq_info *info)
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
vp_iowrite16(vq->index, &cfg->queue_select);
@@ -454,8 +467,8 @@ static void del_vq(struct virtio_pci_vq_info *info)
vp_ioread16(&cfg->queue_msix_vector);
}
- if (!vp_dev->notify_base)
- pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
+ if (!mdev->notify_base)
+ pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);
vring_del_virtqueue(vq);
}
@@ -693,6 +706,7 @@ static inline void check_offsets(void)
/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct pci_dev *pci_dev = vp_dev->pci_dev;
int err, common, isr, notify, device;
u32 notify_length;
@@ -700,6 +714,8 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
check_offsets();
+ mdev->pci_dev = pci_dev;
+
/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
return -ENODEV;
@@ -708,17 +724,17 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
/* Transitional devices: use the PCI subsystem device id as
* virtio device id, same as legacy driver always did.
*/
- vp_dev->vdev.id.device = pci_dev->subsystem_device;
+ mdev->id.device = pci_dev->subsystem_device;
} else {
/* Modern devices: simply use PCI device id, but start from 0x1040. */
- vp_dev->vdev.id.device = pci_dev->device - 0x1040;
+ mdev->id.device = pci_dev->device - 0x1040;
}
- vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
+ mdev->id.vendor = pci_dev->subsystem_vendor;
/* check for a common config: if not, use legacy mode (bar 0). */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
+ &mdev->modern_bars);
if (!common) {
dev_info(&pci_dev->dev,
"virtio_pci: leaving for legacy driver\n");
@@ -728,10 +744,10 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
/* If common is there, these should be too... */
isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
+ &mdev->modern_bars);
notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
+ &mdev->modern_bars);
if (!isr || !notify) {
dev_err(&pci_dev->dev,
"virtio_pci: missing capabilities %i/%i/%i\n",
@@ -751,31 +767,31 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
*/
device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
IORESOURCE_IO | IORESOURCE_MEM,
- &vp_dev->modern_bars);
+ &mdev->modern_bars);
- err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
+ err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
"virtio-pci-modern");
if (err)
return err;
err = -EINVAL;
- vp_dev->common = map_capability(pci_dev, common,
- sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
- NULL);
- if (!vp_dev->common)
+ mdev->common = map_capability(pci_dev, common,
+ sizeof(struct virtio_pci_common_cfg), 4,
+ 0, sizeof(struct virtio_pci_common_cfg),
+ NULL);
+ if (!mdev->common)
goto err_map_common;
- vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
- 0, 1,
- NULL);
- if (!vp_dev->isr)
+ mdev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
+ 0, 1,
+ NULL);
+ if (!mdev->isr)
goto err_map_isr;
/* Read notify_off_multiplier from config space. */
pci_read_config_dword(pci_dev,
notify + offsetof(struct virtio_pci_notify_cap,
notify_off_multiplier),
- &vp_dev->notify_offset_multiplier);
+ &mdev->notify_offset_multiplier);
/* Read notify length and offset from config space. */
pci_read_config_dword(pci_dev,
notify + offsetof(struct virtio_pci_notify_cap,
@@ -792,23 +808,23 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
* Otherwise, map each VQ individually later.
*/
if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
- vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
- 0, notify_length,
- &vp_dev->notify_len);
- if (!vp_dev->notify_base)
+ mdev->notify_base = map_capability(pci_dev, notify, 2, 2,
+ 0, notify_length,
+ &mdev->notify_len);
+ if (!mdev->notify_base)
goto err_map_notify;
} else {
- vp_dev->notify_map_cap = notify;
+ mdev->notify_map_cap = notify;
}
/* Again, we don't know how much we should map, but PAGE_SIZE
* is more than enough for all existing devices.
*/
if (device) {
- vp_dev->device = map_capability(pci_dev, device, 0, 4,
- 0, PAGE_SIZE,
- &vp_dev->device_len);
- if (!vp_dev->device)
+ mdev->device = map_capability(pci_dev, device, 0, 4,
+ 0, PAGE_SIZE,
+ &mdev->device_len);
+ if (!mdev->device)
goto err_map_device;
vp_dev->vdev.config = &virtio_pci_config_ops;
@@ -819,29 +835,32 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->isr = mdev->isr;
+ vp_dev->vdev.id = mdev->id;
return 0;
err_map_device:
- if (vp_dev->notify_base)
- pci_iounmap(pci_dev, vp_dev->notify_base);
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
- pci_iounmap(pci_dev, vp_dev->isr);
+ pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
- pci_iounmap(pci_dev, vp_dev->common);
+ pci_iounmap(pci_dev, mdev->common);
err_map_common:
return err;
}
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
- struct pci_dev *pci_dev = vp_dev->pci_dev;
-
- if (vp_dev->device)
- pci_iounmap(pci_dev, vp_dev->device);
- if (vp_dev->notify_base)
- pci_iounmap(pci_dev, vp_dev->notify_base);
- pci_iounmap(pci_dev, vp_dev->isr);
- pci_iounmap(pci_dev, vp_dev->common);
- pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct pci_dev *pci_dev = mdev->pci_dev;
+
+ if (mdev->device)
+ pci_iounmap(pci_dev, mdev->device);
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
+ pci_iounmap(pci_dev, mdev->isr);
+ pci_iounmap(pci_dev, mdev->common);
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
--
2.25.1
This patch factors out the modern device initialization logic into a
helper. Note that it still depends on the caller to enable the PCI
device, which allows the caller to use e.g. devres.
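A user outside of virtio-pci is then expected to look roughly like
this (hypothetical caller; vp_modern_probe() is still static at this
point and only shared by a later patch):

  ret = pcim_enable_device(pdev);  /* the caller enables the PCI device */
  if (ret)
          return ret;

  mdev->pci_dev = pdev;
  ret = vp_modern_probe(mdev);
  if (ret)
          return ret;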
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 50 +++++++++++++++++++++---------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 524490a94ca4..5d2d2ae0dfdb 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -703,11 +703,16 @@ static inline void check_offsets(void)
offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}
-/* the PCI probing function */
-int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
+/*
+ * vp_modern_probe: probe the modern virtio pci device. Note that the
+ * caller is required to enable the PCI device before calling this function.
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns 0 on success, a negative error code on failure
+ */
+static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct pci_dev *pci_dev = vp_dev->pci_dev;
+ struct pci_dev *pci_dev = mdev->pci_dev;
int err, common, isr, notify, device;
u32 notify_length;
u32 notify_offset;
@@ -826,18 +831,8 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
&mdev->device_len);
if (!mdev->device)
goto err_map_device;
-
- vp_dev->vdev.config = &virtio_pci_config_ops;
- } else {
- vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
}
- vp_dev->config_vector = vp_config_vector;
- vp_dev->setup_vq = setup_vq;
- vp_dev->del_vq = del_vq;
- vp_dev->isr = mdev->isr;
- vp_dev->vdev.id = mdev->id;
-
return 0;
err_map_device:
@@ -851,6 +846,33 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
return err;
}
+/* the PCI probing function */
+int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct pci_dev *pci_dev = vp_dev->pci_dev;
+ int err;
+
+ mdev->pci_dev = pci_dev;
+
+ err = vp_modern_probe(mdev);
+ if (err)
+ return err;
+
+ if (mdev->device)
+ vp_dev->vdev.config = &virtio_pci_config_ops;
+ else
+ vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
+
+ vp_dev->config_vector = vp_config_vector;
+ vp_dev->setup_vq = setup_vq;
+ vp_dev->del_vq = del_vq;
+ vp_dev->isr = mdev->isr;
+ vp_dev->vdev.id = mdev->id;
+
+ return 0;
+}
+
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
--
2.25.1
This patch introduces vp_modern_remove() which does the device
resource cleanup, so that it can be reused.
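Together with vp_modern_probe() this forms a balanced pair for future
users, e.g. in a PCI remove callback (sketch; foo is a hypothetical
driver):

  static void foo_remove(struct pci_dev *pdev)
  {
          struct foo_dev *foo = pci_get_drvdata(pdev);

          vp_modern_remove(&foo->mdev);  /* unmap capabilities, release BARs */
  }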
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5d2d2ae0dfdb..4be9afad547e 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -873,9 +873,12 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
return 0;
}
-void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
+/*
+ * vp_modern_remove: remove and clean up the modern virtio pci device
+ * @mdev: the modern virtio-pci device
+ */
+static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct pci_dev *pci_dev = mdev->pci_dev;
if (mdev->device)
@@ -886,3 +889,10 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
pci_iounmap(pci_dev, mdev->common);
pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
+
+void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+
+ vp_modern_remove(mdev);
+}
--
2.25.1
This patch introduces helpers to set/get queue_enable for the modern device.
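With the pair in place, the "select and activate all queues" loop in
vp_modern_find_vqs() collapses to (see the hunk below):

  list_for_each_entry(vq, &vdev->vqs, list)
          vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);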
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 37 +++++++++++++++++++++++++-----
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 05b21e18f46c..0e62820b83ff 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -234,6 +234,34 @@ static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
&cfg->queue_used_hi);
}
+/*
+ * vp_modern_set_queue_enable - enable/disable a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @enable: whether the virtqueue should be enabled or not
+ */
+static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index, bool enable)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(enable, &mdev->common->queue_enable);
+}
+
+/*
+ * vp_modern_get_queue_enable - get the enable status of a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns whether a virtqueue is enabled or not
+ */
+static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_enable);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -460,7 +488,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
/* Check if queue is either not available or already active. */
num = vp_ioread16(&cfg->queue_size);
- if (!num || vp_ioread16(&cfg->queue_enable))
+ if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
@@ -538,7 +566,6 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->mdev.common;
struct virtqueue *vq;
int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
@@ -548,10 +575,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* Select and activate all queues. Has to be done last: once we do
* this, there's no way to go back except reset.
*/
- list_for_each_entry(vq, &vdev->vqs, list) {
- vp_iowrite16(vq->index, &cfg->queue_select);
- vp_iowrite16(1, &cfg->queue_enable);
- }
+ list_for_each_entry(vq, &vdev->vqs, list)
+ vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
return 0;
}
--
2.25.1
To ease the split, map_capability() was renamed to
vp_modern_map_capability(). While at it, add comments for the
arguments and switch to using virtio_pci_modern_device as the first
parameter.
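For example, mapping the ISR capability in the probe path now reads:

  mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
                                       0, 1, NULL);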
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 46 +++++++++++++++++++-----------
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 217573f2588d..a5e3a5e40323 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -63,12 +63,25 @@ static void vp_iowrite64_twopart(u64 val,
vp_iowrite32(val >> 32, hi);
}
-static void __iomem *map_capability(struct pci_dev *dev, int off,
- size_t minlen,
- u32 align,
- u32 start, u32 size,
- size_t *len)
+/*
+ * vp_modern_map_capability - map a part of virtio pci capability
+ * @mdev: the modern virtio-pci device
+ * @off: offset of the capability
+ * @minlen: minimal length of the capability
+ * @align: align requirement
+ * @start: start from the capability
+ * @size: map size
+ * @len: the length that is actually mapped
+ *
+ * Returns the io address for the part of the capability
+ */
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen,
+ u32 align,
+ u32 start, u32 size,
+ size_t *len)
{
+ struct pci_dev *dev = mdev->pci_dev;
u8 bar;
u32 offset, length;
void __iomem *p;
@@ -582,7 +595,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
vq->priv = (void __force *)mdev->notify_base +
off * mdev->notify_offset_multiplier;
} else {
- vq->priv = (void __force *)map_capability(mdev->pci_dev,
+ vq->priv = (void __force *)vp_modern_map_capability(mdev,
mdev->notify_map_cap, 2, 2,
off * mdev->notify_offset_multiplier, 2,
NULL);
@@ -956,15 +969,15 @@ static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
return err;
err = -EINVAL;
- mdev->common = map_capability(pci_dev, common,
+ mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
0, sizeof(struct virtio_pci_common_cfg),
NULL);
if (!mdev->common)
goto err_map_common;
- mdev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
- 0, 1,
- NULL);
+ mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
+ 0, 1,
+ NULL);
if (!mdev->isr)
goto err_map_isr;
@@ -989,9 +1002,10 @@ static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
* Otherwise, map each VQ individually later.
*/
if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
- mdev->notify_base = map_capability(pci_dev, notify, 2, 2,
- 0, notify_length,
- &mdev->notify_len);
+ mdev->notify_base = vp_modern_map_capability(mdev, notify,
+ 2, 2,
+ 0, notify_length,
+ &mdev->notify_len);
if (!mdev->notify_base)
goto err_map_notify;
} else {
@@ -1002,9 +1016,9 @@ static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
* is more than enough for all existing devices.
*/
if (device) {
- mdev->device = map_capability(pci_dev, device, 0, 4,
- 0, PAGE_SIZE,
- &mdev->device_len);
+ mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
+ 0, PAGE_SIZE,
+ &mdev->device_len);
if (!mdev->device)
goto err_map_device;
}
--
2.25.1
This patch introduces a helper to set the virtqueue address for the modern device.
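A vDPA parent can later forward its set_vq_address() callback directly
to this helper (sketch, matching the bridge driver at the end of the
series):

  vp_modern_queue_address(mdev, qid, desc_area,
                          driver_area, device_area);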
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 33 ++++++++++++++++++++++++------
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 05cd409c0731..05b21e18f46c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -210,6 +210,30 @@ static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
return vp_ioread16(&cfg->queue_msix_vector);
}
+/*
+ * vp_modern_queue_address - set the virtqueue address
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @desc_addr: address of the descriptor area
+ * @driver_addr: address of the driver area
+ * @device_addr: address of the device area
+ */
+static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+
+ vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -459,12 +483,9 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
/* activate the queue */
vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
- vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
- &cfg->queue_desc_lo, &cfg->queue_desc_hi);
- vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
- &cfg->queue_avail_lo, &cfg->queue_avail_hi);
- vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
- &cfg->queue_used_lo, &cfg->queue_used_hi);
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
if (mdev->notify_base) {
/* offset should not wrap */
--
2.25.1
This patch introduces vp_modern_generation() to get the device generation.
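The typical use is a read-until-stable loop around config space
accesses, e.g. (a sketch of the pattern used by config readers such as
the vDPA bridge in the last patch):

  do {
          gen = vp_modern_generation(mdev);
          /* read the config fields of interest here */
  } while (gen != vp_modern_generation(mdev));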
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index cb14fc334a9c..a128e5814045 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -289,15 +289,26 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
}
}
-static u32 vp_generation(struct virtio_device *vdev)
+/*
+ * vp_modern_generation - get the device generation
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the generation read from the device
+ */
+static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
return vp_ioread8(&cfg->config_generation);
}
+static u32 vp_generation(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return vp_modern_generation(&vp_dev->mdev);
+}
+
/*
* vp_modern_get_status - get the device status
* @mdev: the modern virtio-pci device
--
2.25.1
This patch delays the queue number setting until vDPA device
registration. This allows us to probe the virtqueue number between
device allocation and registration.
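With this change a parent driver allocates first, probes its hardware,
and only passes the virtqueue number at registration time, e.g.
(sketch; the foo_* names are made up):

  foo = vdpa_alloc_device(struct foo_vdpa, vdpa, parent, &foo_vdpa_ops);
  nvqs = foo_probe_num_vqs(foo);  /* now possible before registering */
  err = vdpa_register_device(&foo->vdpa, nvqs);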
Reviewed-by: Stefano Garzarella <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
---
drivers/vdpa/ifcvf/ifcvf_main.c | 5 ++---
drivers/vdpa/mlx5/net/mlx5_vnet.c | 5 ++---
drivers/vdpa/vdpa.c | 8 ++++----
drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++--
include/linux/vdpa.h | 7 +++----
5 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 8b4028556cb6..d65f3221d8ed 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -438,8 +438,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops,
- IFCVF_MAX_QUEUE_PAIRS * 2);
+ dev, &ifc_vdpa_ops);
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
@@ -463,7 +462,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
vf->vring[i].irq = -EINVAL;
- ret = vdpa_register_device(&adapter->vdpa);
+ ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
if (ret) {
IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
goto err;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index f1d54814db97..a1b9260bf04d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1958,8 +1958,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
- ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 2 * mlx5_vdpa_max_qps(max_vqs));
+ ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -1986,7 +1985,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
if (err)
goto err_res;
- err = vdpa_register_device(&mvdev->vdev);
+ err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
if (err)
goto err_reg;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index a69ffc991e13..ba89238f9898 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -61,7 +61,6 @@ static void vdpa_release_dev(struct device *d)
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that is supported by this device
- * @nvqs: number of virtqueues supported by this device
* @size: size of the parent structure that contains private data
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -72,7 +71,6 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs,
size_t size)
{
struct vdpa_device *vdev;
@@ -99,7 +97,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->index = err;
vdev->config = config;
vdev->features_valid = false;
- vdev->nvqs = nvqs;
err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
if (err)
@@ -122,11 +119,14 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
* vdpa_register_device - register a vDPA device
* Callers must have a succeed call of vdpa_alloc_device() before.
* @vdev: the vdpa device to be registered to vDPA bus
+ * @nvqs: number of virtqueues supported by this device
*
* Returns an error when fail to add to vDPA bus
*/
-int vdpa_register_device(struct vdpa_device *vdev)
+int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
+ vdev->nvqs = nvqs;
+
return device_add(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6a90fdb9cbfc..b129cb4dd013 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -357,7 +357,7 @@ static struct vdpasim *vdpasim_create(void)
else
ops = &vdpasim_net_config_ops;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops);
if (!vdpasim)
goto err_alloc;
@@ -393,7 +393,7 @@ static struct vdpasim *vdpasim_create(void)
vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
vdpasim->vdpa.dma_dev = dev;
- ret = vdpa_register_device(&vdpasim->vdpa);
+ ret = vdpa_register_device(&vdpasim->vdpa, VDPASIM_VQ_NUM);
if (ret)
goto err_iommu;
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 30bc7a7223bb..d9e9d17b9083 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -244,18 +244,17 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs,
size_t size);
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
+#define vdpa_alloc_device(dev_struct, member, parent, config) \
container_of(__vdpa_alloc_device( \
- parent, config, nvqs, \
+ parent, config, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member))), \
dev_struct, member)
-int vdpa_register_device(struct vdpa_device *vdev);
+int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);
/**
--
2.25.1
This patch introduces helpers for setting/getting the queue size of
the modern device.
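setup_vq() below then uses the pair like this (sketch):

  /* Check if queue is either not available or already active. */
  num = vp_modern_get_queue_size(mdev, index);
  /* ... */
  vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));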
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 34 ++++++++++++++++++++++++++++--
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 0e62820b83ff..7a89226135af 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -262,6 +262,36 @@ static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
return vp_ioread16(&mdev->common->queue_enable);
}
+/*
+ * vp_modern_set_queue_size - set size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @size: the size of the virtqueue
+ */
+static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 size)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(size, &mdev->common->queue_size);
+
+}
+
+/*
+ * vp_modern_get_queue_size - get size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the size of the virtqueue
+ */
+static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_size);
+
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -487,7 +517,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
vp_iowrite16(index, &cfg->queue_select);
/* Check if queue is either not available or already active. */
- num = vp_ioread16(&cfg->queue_size);
+ num = vp_modern_get_queue_size(mdev, index);
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -510,7 +540,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return ERR_PTR(-ENOMEM);
/* activate the queue */
- vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
virtqueue_get_avail_addr(vq),
virtqueue_get_used_addr(vq));
--
2.25.1
There's no guarantee that the device can disable a specific virtqueue
through set_vq_ready(). One example is the modern virtio-pci
device. So this patch removes the warning.
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_vdpa.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 4a9ddb44b2a7..e28acf482e0c 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vd_dev->lock, flags);
- /* Select and deactivate the queue */
+ /* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
- WARN_ON(ops->get_vq_ready(vdpa, index));
vring_del_virtqueue(vq);
--
2.25.1
This patch introduces a helper for getting the number of virtqueues of the modern device.
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7a89226135af..bccad1329871 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -292,6 +292,17 @@ static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
}
+/*
+ * vp_modern_get_num_queues - get the number of virtqueues
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the number of virtqueues
+ */
+static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
+{
+ return vp_ioread16(&mdev->common->num_queues);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -510,7 +521,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 num, off;
int err;
- if (index >= vp_ioread16(&cfg->num_queues))
+ if (index >= vp_modern_get_num_queues(mdev))
return ERR_PTR(-ENOENT);
/* Select the queue we're interested in */
--
2.25.1
This patch introduces a helper to get the notification offset of the modern device.
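Combined with notify_base and notify_offset_multiplier this lets a
caller compute the doorbell address of each queue, e.g. (sketch of
what the vDPA bridge in the last patch does):

  notify_off = vp_modern_get_queue_notify_off(mdev, i);
  vring[i].notify = mdev->notify_base +
                    notify_off * mdev->notify_offset_multiplier;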
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index bccad1329871..217573f2588d 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -303,6 +303,21 @@ static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
return vp_ioread16(&mdev->common->num_queues);
}
+/*
+ * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the notification offset for a virtqueue
+ */
+static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_notify_off);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -516,7 +531,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
struct virtqueue *vq;
u16 num, off;
int err;
@@ -524,9 +538,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (index >= vp_modern_get_num_queues(mdev))
return ERR_PTR(-ENOENT);
- /* Select the queue we're interested in */
- vp_iowrite16(index, &cfg->queue_select);
-
/* Check if queue is either not available or already active. */
num = vp_modern_get_queue_size(mdev, index);
if (!num || vp_modern_get_queue_enable(mdev, index))
@@ -538,7 +549,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* get offset of notification word for this vq */
- off = vp_ioread16(&cfg->queue_notify_off);
+ off = vp_modern_get_queue_notify_off(mdev, index);
info->msix_vector = msix_vec;
--
2.25.1
This patch introduces a vDPA driver for modern virtio-pci devices. It
bridges virtio-pci control commands to the vDPA bus. This will be used
for feature prototyping and testing.
Note that getting/restoring the virtqueue state is not supported,
which needs an extension to the virtio specification.
Signed-off-by: Jason Wang <[email protected]>
---
drivers/vdpa/Kconfig | 6 +
drivers/vdpa/Makefile | 1 +
drivers/vdpa/virtio_pci/Makefile | 2 +
drivers/vdpa/virtio_pci/vp_vdpa.c | 456 ++++++++++++++++++++++++++++++
4 files changed, 465 insertions(+)
create mode 100644 drivers/vdpa/virtio_pci/Makefile
create mode 100644 drivers/vdpa/virtio_pci/vp_vdpa.c
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 6caf539091e5..81c6a3520813 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -49,4 +49,10 @@ config MLX5_VDPA_NET
be executed by the hardware. It also supports a variety of stateless
offloads depending on the actual device used and firmware version.
+config VP_VDPA
+ tristate "Virtio PCI bridge vDPA driver"
+ depends on PCI_MSI && VIRTIO_PCI_MODERN
+ help
+ This kernel module bridges virtio PCI devices to the vDPA bus.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index d160e9b63a66..67fe7f3d6943 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
+obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/virtio_pci/Makefile b/drivers/vdpa/virtio_pci/Makefile
new file mode 100644
index 000000000000..231088d3af7d
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VP_VDPA) += vp_vdpa.o
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
new file mode 100644
index 000000000000..4eda926493d9
--- /dev/null
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bridge driver for modern virtio-pci device
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <[email protected]>
+ *
+ * Based on virtio_pci_modern.c.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
+
+#define VP_VDPA_QUEUE_MAX 256
+#define VP_VDPA_DRIVER_NAME "vp_vdpa"
+#define VP_VDPA_NAME_SIZE 256
+
+struct vp_vring {
+ void __iomem *notify;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ struct vdpa_callback cb;
+ int irq;
+};
+
+struct vp_vdpa {
+ struct vdpa_device vdpa;
+ struct virtio_pci_modern_device mdev;
+ struct vp_vring *vring;
+ struct vdpa_callback config_cb;
+ char msix_name[VP_VDPA_NAME_SIZE];
+ int config_irq;
+ int queues;
+ int vectors;
+};
+
+static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct vp_vdpa, vdpa);
+}
+
+static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ return &vp_vdpa->mdev;
+}
+
+static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_features(mdev);
+}
+
+static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_features(mdev, features);
+
+ return 0;
+}
+
+static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_status(mdev);
+}
+
+static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i;
+
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
+ &vp_vdpa->vring[i]);
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ }
+ }
+
+ if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+ }
+
+ if (vp_vdpa->vectors) {
+ pci_free_irq_vectors(pdev);
+ vp_vdpa->vectors = 0;
+ }
+}
+
+static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
+{
+ struct vp_vring *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
+{
+ struct vp_vdpa *vp_vdpa = arg;
+
+ if (vp_vdpa->config_cb.callback)
+ return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
+{
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ struct pci_dev *pdev = mdev->pci_dev;
+ int i, ret, irq;
+ int queues = vp_vdpa->queues;
+ int vectors = queues + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
+ if (ret != vectors) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: failed to allocate irq vectors, want %d but got %d\n",
+ vectors, ret);
+ return ret;
+ }
+
+ vp_vdpa->vectors = vectors;
+
+ for (i = 0; i < queues; i++) {
+ snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
+ "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
+ irq = pci_irq_vector(pdev, i);
+ ret = devm_request_irq(&pdev->dev, irq,
+ vp_vdpa_vq_handler,
+ 0, vp_vdpa->vring[i].msix_name,
+ &vp_vdpa->vring[i]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: fail to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_modern_queue_vector(mdev, i, i);
+ vp_vdpa->vring[i].irq = irq;
+ }
+
+ snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
+ pci_name(pdev));
+ irq = pci_irq_vector(pdev, queues);
+ ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
+ vp_vdpa->msix_name, vp_vdpa);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vp_vdpa: failed to request irq for config interrupt\n");
+ goto err;
+ }
+ vp_modern_config_vector(mdev, queues);
+ vp_vdpa->config_irq = irq;
+
+ return 0;
+err:
+ vp_vdpa_free_irq(vp_vdpa);
+ return ret;
+}
+
+static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
+ !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ vp_vdpa_request_irq(vp_vdpa);
+ }
+
+ vp_modern_set_status(mdev, status);
+
+ if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ (s & VIRTIO_CONFIG_S_DRIVER_OK))
+ vp_vdpa_free_irq(vp_vdpa);
+}
+
+static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ return VP_VDPA_QUEUE_MAX;
+}
+
+static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by virtio specification, so
+ * we return -EOPNOTSUPP here. This means we can't support live
+ * migration, vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ /* Note that this is not supported by virtio specification, so
+ * we return -EOPNOTSUPP here. This means we can't support live
+ * migration, vhost device start/stop.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->vring[qid].cb = *cb;
+}
+
+static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 qid, bool ready)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_enable(mdev, qid, ready);
+}
+
+static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_queue_enable(mdev, qid);
+}
+
+static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
+ u32 num)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_set_queue_size(mdev, qid, num);
+}
+
+static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ vp_modern_queue_address(mdev, qid, desc_area,
+ driver_area, device_area);
+
+ return 0;
+}
+
+static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
+}
+
+static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_generation(mdev);
+}
+
+static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.device;
+}
+
+static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return mdev->id.vendor;
+}
+
+static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ return PAGE_SIZE;
+}
+
+static void vp_vdpa_get_config(struct vdpa_device *vdpa,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 old, new;
+ u8 *p;
+ int i;
+
+ do {
+ old = vp_ioread8(&mdev->common->config_generation);
+ p = buf;
+ for (i = 0; i < len; i++)
+ *p++ = vp_ioread8(mdev->device + offset + i);
+
+ new = vp_ioread8(&mdev->common->config_generation);
+ } while (old != new);
+}
+
+static void vp_vdpa_set_config(struct vdpa_device *vdpa,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ vp_iowrite8(*p++, mdev->device + offset + i);
+}
+
+static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+
+ vp_vdpa->config_cb = *cb;
+}
+
+static const struct vdpa_config_ops vp_vdpa_ops = {
+ .get_features = vp_vdpa_get_features,
+ .set_features = vp_vdpa_set_features,
+ .get_status = vp_vdpa_get_status,
+ .set_status = vp_vdpa_set_status,
+ .get_vq_num_max = vp_vdpa_get_vq_num_max,
+ .get_vq_state = vp_vdpa_get_vq_state,
+ .set_vq_state = vp_vdpa_set_vq_state,
+ .set_vq_cb = vp_vdpa_set_vq_cb,
+ .set_vq_ready = vp_vdpa_set_vq_ready,
+ .get_vq_ready = vp_vdpa_get_vq_ready,
+ .set_vq_num = vp_vdpa_set_vq_num,
+ .set_vq_address = vp_vdpa_set_vq_address,
+ .kick_vq = vp_vdpa_kick_vq,
+ .get_generation = vp_vdpa_get_generation,
+ .get_device_id = vp_vdpa_get_device_id,
+ .get_vendor_id = vp_vdpa_get_vendor_id,
+ .get_vq_align = vp_vdpa_get_vq_align,
+ .get_config = vp_vdpa_get_config,
+ .set_config = vp_vdpa_set_config,
+ .set_config_cb = vp_vdpa_set_config_cb,
+};
+
+static void vp_vdpa_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct virtio_pci_modern_device *mdev;
+ struct device *dev = &pdev->dev;
+ struct vp_vdpa *vp_vdpa;
+ u16 notify_off;
+ int ret, i;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
+ dev, &vp_vdpa_ops);
+ if (vp_vdpa == NULL) {
+ dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
+ return -ENOMEM;
+ }
+
+ mdev = &vp_vdpa->mdev;
+ mdev->pci_dev = pdev;
+
+ ret = vp_modern_probe(mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, vp_vdpa);
+
+ vp_vdpa->vdpa.dma_dev = &pdev->dev;
+ vp_vdpa->queues = vp_modern_get_num_queues(mdev);
+
+ ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to add devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
+ sizeof(*vp_vdpa->vring),
+ GFP_KERNEL);
+ if (!vp_vdpa->vring) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
+ goto err;
+ }
+
+ for (i = 0; i < vp_vdpa->queues; i++) {
+ notify_off = vp_modern_get_queue_notify_off(mdev, i);
+ vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ vp_vdpa->vring[i].notify = mdev->notify_base +
+ notify_off * mdev->notify_offset_multiplier;
+ }
+ vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+
+ ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&vp_vdpa->vdpa.dev);
+ return ret;
+}
+
+static void vp_vdpa_remove(struct pci_dev *pdev)
+{
+ struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&vp_vdpa->vdpa);
+ vp_modern_remove(&vp_vdpa->mdev);
+}
+
+static struct pci_driver vp_vdpa_driver = {
+ .name = "vp-vdpa",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vp_vdpa_probe,
+ .remove = vp_vdpa_remove,
+};
+
+module_pci_driver(vp_vdpa_driver);
+
+MODULE_AUTHOR("Jason Wang <[email protected]>");
+MODULE_DESCRIPTION("vp-vdpa");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
--
2.25.1
This patch introduces helpers for setting and getting features.
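As an illustration only (not part of this patch), once these helpers are
exported and moved to a shared header later in this series, a caller could
negotiate features roughly like this; the function name below is made up:

#include <linux/virtio_pci_modern.h>

/* Accept only the subset of features both sides understand. */
static u64 example_negotiate_features(struct virtio_pci_modern_device *mdev,
				      u64 driver_features)
{
	/* Read the 64-bit device feature bits in two 32-bit halves. */
	u64 device_features = vp_modern_get_features(mdev);
	u64 negotiated = device_features & driver_features;

	/* Write back what the driver acknowledges. */
	vp_modern_set_features(mdev, negotiated);

	return negotiated;
}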
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 43 +++++++++++++++++++++++-------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index ccde0a41209a..cb14fc334a9c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -137,12 +137,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
return p;
}
-/* virtio config->get_features() implementation */
-static u64 vp_get_features(struct virtio_device *vdev)
+/*
+ * vp_modern_get_features - get features from device
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the features read from the device
+ */
+static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
u64 features;
vp_iowrite32(0, &cfg->device_feature_select);
@@ -153,6 +157,14 @@ static u64 vp_get_features(struct virtio_device *vdev)
return features;
}
+/* virtio config->get_features() implementation */
+static u64 vp_get_features(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return vp_modern_get_features(&vp_dev->mdev);
+}
+
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -163,12 +175,26 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}
+/*
+ * vp_modern_set_features - set features to device
+ * @mdev: the modern virtio-pci device
+ * @features: the features set to device
+ */
+static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ vp_iowrite32((u32)features, &cfg->guest_feature);
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ vp_iowrite32(features >> 32, &cfg->guest_feature);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
u64 features = vdev->features;
/* Give virtio_ring a chance to accept features. */
@@ -183,10 +209,7 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- vp_iowrite32(0, &cfg->guest_feature_select);
- vp_iowrite32((u32)vdev->features, &cfg->guest_feature);
- vp_iowrite32(1, &cfg->guest_feature_select);
- vp_iowrite32(vdev->features >> 32, &cfg->guest_feature);
+ vp_modern_set_features(&vp_dev->mdev, vdev->features);
return 0;
}
--
2.25.1
This patch introduces a helper to set the virtqueue MSI vector.
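As an illustration only (not part of this patch), once the helper is
exported later in this series, a caller could assign an MSI-X vector to a
virtqueue and detect failure like this; the function name below is made up:

#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

static int example_assign_vq_vector(struct virtio_pci_modern_device *mdev,
				    u16 index, u16 msix_vec)
{
	/* The device reads back VIRTIO_MSI_NO_VECTOR when it lacks
	 * resources for the requested vector.
	 */
	if (vp_modern_queue_vector(mdev, index, msix_vec) ==
	    VIRTIO_MSI_NO_VECTOR)
		return -EBUSY;

	return 0;
}

Passing VIRTIO_MSI_NO_VECTOR as the vector unbinds it again, which is what
the del_vq() hunk below does.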
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 35 ++++++++++++++++++++----------
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index a128e5814045..05cd409c0731 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -191,6 +191,25 @@ static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
vp_iowrite32(features >> 32, &cfg->guest_feature);
}
+/*
+ * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ * @vector: the config vector
+ *
+ * Returns the config vector read from the device
+ */
+static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(vector, &cfg->queue_msix_vector);
+ /* Flush the write out to device */
+ return vp_ioread16(&cfg->queue_msix_vector);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -474,8 +493,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
- msix_vec = vp_ioread16(&cfg->queue_msix_vector);
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
@@ -522,17 +540,10 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- vp_iowrite16(vq->index, &cfg->queue_select);
-
- if (vp_dev->msix_enabled) {
- vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
- &cfg->queue_msix_vector);
- /* Flush the write out to device */
- vp_ioread16(&cfg->queue_msix_vector);
- }
+ if (vp_dev->msix_enabled)
+ vp_modern_queue_vector(mdev, vq->index,
+ VIRTIO_MSI_NO_VECTOR);
if (!mdev->notify_base)
pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);
--
2.25.1
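This patch moves the modern device core out of virtio_pci_modern.c into a
separate virtio_pci_modern_dev module and exports the helpers, so that
drivers other than virtio-pci (e.g. the vp_vdpa driver later in this
series) can reuse them.

As a rough sketch only, a consumer that has already enabled the PCI device
could drive the exported API like this; the function name below is made up
and error handling is trimmed:

#include <linux/pci.h>
#include <linux/virtio_pci_modern.h>

static int example_probe(struct pci_dev *pdev)
{
	/* Normally embedded in the driver's private structure. */
	struct virtio_pci_modern_device mdev = { .pci_dev = pdev };
	int ret;

	/* Locate and map the common/ISR/notify/device capabilities. */
	ret = vp_modern_probe(&mdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "virtio device id %u with %u virtqueues\n",
		 mdev.id.device, vp_modern_get_num_queues(&mdev));

	/* Unmap everything and release the regions again. */
	vp_modern_remove(&mdev);

	return 0;
}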
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/Kconfig | 10 +-
drivers/virtio/Makefile | 1 +
drivers/virtio/virtio_pci_common.h | 27 +-
drivers/virtio/virtio_pci_modern.c | 617 -------------------------
drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
include/linux/virtio_pci_modern.h | 111 +++++
6 files changed, 721 insertions(+), 644 deletions(-)
create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
create mode 100644 include/linux/virtio_pci_modern.h
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 7b41130d3f35..6b9b81f4b8c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
This option is selected if the architecture may need to enforce
VIRTIO_F_ACCESS_PLATFORM
+config VIRTIO_PCI_MODERN
+ tristate "Modern Virtio PCI Device"
+ depends on PCI
+ help
+	  Modern PCI device implementation. This module implements the
+	  basic probe and control for devices which are based on the
+	  modern PCI device layout, with possible vendor specific extensions.
+
menuconfig VIRTIO_MENU
bool "Virtio drivers"
default y
@@ -20,7 +28,7 @@ if VIRTIO_MENU
config VIRTIO_PCI
tristate "PCI driver for virtio devices"
- depends on PCI
+ depends on VIRTIO_PCI_MODERN
select VIRTIO
help
This driver provides support for virtio based paravirtual device
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 591e6f72aa54..f097578aaa8f 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_PCI_MODERN) += virtio_pci_modern_dev.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index f35ff5b6b467..beec047a8f8d 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -25,6 +25,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
@@ -39,32 +40,6 @@ struct virtio_pci_vq_info {
unsigned msix_vector;
};
-struct virtio_pci_modern_device {
- struct pci_dev *pci_dev;
-
- struct virtio_pci_common_cfg __iomem *common;
- /* Device-specific data (non-legacy mode) */
- void __iomem *device;
- /* Base of vq notifications (non-legacy mode). */
- void __iomem *notify_base;
- /* Where to read and clear interrupt */
- u8 __iomem *isr;
-
- /* So we can sanity-check accesses. */
- size_t notify_len;
- size_t device_len;
-
- /* Capability for when we need to map notifications per-vq. */
- int notify_map_cap;
-
- /* Multiply queue_notify_off by this value. (non-legacy mode). */
- u32 notify_offset_multiplier;
-
- int modern_bars;
-
- struct virtio_device_id id;
-};
-
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index a5e3a5e40323..fbd4ebc00eb6 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -19,158 +19,6 @@
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
-/*
- * Type-safe wrappers for io accesses.
- * Use these to enforce at compile time the following spec requirement:
- *
- * The driver MUST access each field using the “natural” access
- * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
- * for 16-bit fields and 8-bit accesses for 8-bit fields.
- */
-static inline u8 vp_ioread8(const u8 __iomem *addr)
-{
- return ioread8(addr);
-}
-static inline u16 vp_ioread16 (const __le16 __iomem *addr)
-{
- return ioread16(addr);
-}
-
-static inline u32 vp_ioread32(const __le32 __iomem *addr)
-{
- return ioread32(addr);
-}
-
-static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
-{
- iowrite8(value, addr);
-}
-
-static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
-{
- iowrite16(value, addr);
-}
-
-static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
-{
- iowrite32(value, addr);
-}
-
-static void vp_iowrite64_twopart(u64 val,
- __le32 __iomem *lo, __le32 __iomem *hi)
-{
- vp_iowrite32((u32)val, lo);
- vp_iowrite32(val >> 32, hi);
-}
-
-/*
- * vp_modern_map_capability - map a part of virtio pci capability
- * @mdev: the modern virtio-pci device
- * @off: offset of the capability
- * @minlen: minimal length of the capability
- * @align: align requirement
- * @start: start from the capability
- * @size: map size
- * @len: the length that is actually mapped
- *
- * Returns the io address of for the part of the capability
- */
-void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
- size_t minlen,
- u32 align,
- u32 start, u32 size,
- size_t *len)
-{
- struct pci_dev *dev = mdev->pci_dev;
- u8 bar;
- u32 offset, length;
- void __iomem *p;
-
- pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
- bar),
- &bar);
- pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
- &offset);
- pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
- &length);
-
- if (length <= start) {
- dev_err(&dev->dev,
- "virtio_pci: bad capability len %u (>%u expected)\n",
- length, start);
- return NULL;
- }
-
- if (length - start < minlen) {
- dev_err(&dev->dev,
- "virtio_pci: bad capability len %u (>=%zu expected)\n",
- length, minlen);
- return NULL;
- }
-
- length -= start;
-
- if (start + offset < offset) {
- dev_err(&dev->dev,
- "virtio_pci: map wrap-around %u+%u\n",
- start, offset);
- return NULL;
- }
-
- offset += start;
-
- if (offset & (align - 1)) {
- dev_err(&dev->dev,
- "virtio_pci: offset %u not aligned to %u\n",
- offset, align);
- return NULL;
- }
-
- if (length > size)
- length = size;
-
- if (len)
- *len = length;
-
- if (minlen + offset < minlen ||
- minlen + offset > pci_resource_len(dev, bar)) {
- dev_err(&dev->dev,
- "virtio_pci: map virtio %zu@%u "
- "out of range on bar %i length %lu\n",
- minlen, offset,
- bar, (unsigned long)pci_resource_len(dev, bar));
- return NULL;
- }
-
- p = pci_iomap_range(dev, bar, offset, length);
- if (!p)
- dev_err(&dev->dev,
- "virtio_pci: unable to map virtio %u@%u on bar %i\n",
- length, offset, bar);
- return p;
-}
-
-/*
- * vp_modern_get_features - get features from device
- * @mdev: the modern virtio-pci device
- *
- * Returns the features read from the device
- */
-static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- u64 features;
-
- vp_iowrite32(0, &cfg->device_feature_select);
- features = vp_ioread32(&cfg->device_feature);
- vp_iowrite32(1, &cfg->device_feature_select);
- features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
-
- return features;
-}
-
-/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -188,149 +36,6 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}
-/*
- * vp_modern_set_features - set features to device
- * @mdev: the modern virtio-pci device
- * @features: the features set to device
- */
-static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
- u64 features)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- vp_iowrite32(0, &cfg->guest_feature_select);
- vp_iowrite32((u32)features, &cfg->guest_feature);
- vp_iowrite32(1, &cfg->guest_feature_select);
- vp_iowrite32(features >> 32, &cfg->guest_feature);
-}
-
-/*
- * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
- * @mdev: the modern virtio-pci device
- * @index: queue index
- * @vector: the config vector
- *
- * Returns the config vector read from the device
- */
-static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
- u16 index, u16 vector)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- vp_iowrite16(index, &cfg->queue_select);
- vp_iowrite16(vector, &cfg->queue_msix_vector);
- /* Flush the write out to device */
- return vp_ioread16(&cfg->queue_msix_vector);
-}
-
-/*
- * vp_modern_queue_address - set the virtqueue address
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- * @desc_addr: address of the descriptor area
- * @driver_addr: address of the driver area
- * @device_addr: address of the device area
- */
-static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
- u16 index, u64 desc_addr, u64 driver_addr,
- u64 device_addr)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- vp_iowrite16(index, &cfg->queue_select);
-
- vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
- &cfg->queue_desc_hi);
- vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
- &cfg->queue_avail_hi);
- vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
- &cfg->queue_used_hi);
-}
-
-/*
- * vp_modern_set_queue_enable - enable a virtqueue
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- * @enable: whether the virtqueue is enable or not
- */
-static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
- u16 index, bool enable)
-{
- vp_iowrite16(index, &mdev->common->queue_select);
- vp_iowrite16(enable, &mdev->common->queue_enable);
-}
-
-/*
- * vp_modern_get_queue_enable - enable a virtqueue
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- *
- * Returns whether a virtqueue is enabled or not
- */
-static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
- u16 index)
-{
- vp_iowrite16(index, &mdev->common->queue_select);
-
- return vp_ioread16(&mdev->common->queue_enable);
-}
-
-/*
- * vp_modern_set_queue_size - set size for a virtqueue
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- * @size: the size of the virtqueue
- */
-static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
- u16 index, u16 size)
-{
- vp_iowrite16(index, &mdev->common->queue_select);
- vp_iowrite16(size, &mdev->common->queue_size);
-
-}
-
-/*
- * vp_modern_get_queue_size - get size for a virtqueue
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- *
- * Returns the size of the virtqueue
- */
-static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
- u16 index)
-{
- vp_iowrite16(index, &mdev->common->queue_select);
-
- return vp_ioread16(&mdev->common->queue_size);
-
-}
-
-/*
- * vp_modern_get_num_queues - get the number of virtqueues
- * @mdev: the modern virtio-pci device
- *
- * Returns the number of virtqueues
- */
-static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
-{
- return vp_ioread16(&mdev->common->num_queues);
-}
-
-/*
- * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
- * @mdev: the modern virtio-pci device
- * @index: the queue index
- *
- * Returns the notification offset for a virtqueue
- */
-static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
- u16 index)
-{
- vp_iowrite16(index, &mdev->common->queue_select);
-
- return vp_ioread16(&mdev->common->queue_notify_off);
-}
-
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
@@ -429,19 +134,6 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
}
}
-/*
- * vp_modern_generation - get the device genreation
- * @mdev: the modern virtio-pci device
- *
- * Returns the genreation read from device
- */
-static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- return vp_ioread8(&cfg->config_generation);
-}
-
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -449,19 +141,6 @@ static u32 vp_generation(struct virtio_device *vdev)
return vp_modern_generation(&vp_dev->mdev);
}
-/*
- * vp_modern_get_status - get the device status
- * @mdev: the modern virtio-pci device
- *
- * Returns the status read from device
- */
-static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- return vp_ioread8(&cfg->device_status);
-}
-
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
@@ -470,19 +149,6 @@ static u8 vp_get_status(struct virtio_device *vdev)
return vp_modern_get_status(&vp_dev->mdev);
}
-/*
- * vp_modern_set_status - set status to device
- * @mdev: the modern virtio-pci device
- * @status: the status set to device
- */
-static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
- u8 status)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- vp_iowrite8(status, &cfg->device_status);
-}
-
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -510,25 +176,6 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
-/*
- * vp_modern_config_vector - set the vector for config interrupt
- * @mdev: the modern virtio-pci device
- * @vector: the config vector
- *
- * Returns the config vector read from the device
- */
-static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
- u16 vector)
-{
- struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
-
- /* Setup the vector used for configuration events */
- vp_iowrite16(vector, &cfg->msix_config);
- /* Verify we had enough resources to assign the vector */
- /* Will also flush the write out to device */
- return vp_ioread16(&cfg->msix_config);
-}
-
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -789,253 +436,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
};
-/**
- * virtio_pci_find_capability - walk capabilities to find device info.
- * @dev: the pci device
- * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
- * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
- * @bars: the bitmask of BARs
- *
- * Returns offset of the capability, or 0.
- */
-static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
- u32 ioresource_types, int *bars)
-{
- int pos;
-
- for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
- pos > 0;
- pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
- u8 type, bar;
- pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
- cfg_type),
- &type);
- pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
- bar),
- &bar);
-
- /* Ignore structures with reserved BAR values */
- if (bar > 0x5)
- continue;
-
- if (type == cfg_type) {
- if (pci_resource_len(dev, bar) &&
- pci_resource_flags(dev, bar) & ioresource_types) {
- *bars |= (1 << bar);
- return pos;
- }
- }
- }
- return 0;
-}
-
-/* This is part of the ABI. Don't screw with it. */
-static inline void check_offsets(void)
-{
- /* Note: disk space was harmed in compilation of this function. */
- BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
- offsetof(struct virtio_pci_cap, cap_vndr));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
- offsetof(struct virtio_pci_cap, cap_next));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
- offsetof(struct virtio_pci_cap, cap_len));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
- offsetof(struct virtio_pci_cap, cfg_type));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
- offsetof(struct virtio_pci_cap, bar));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
- offsetof(struct virtio_pci_cap, offset));
- BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
- offsetof(struct virtio_pci_cap, length));
- BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
- offsetof(struct virtio_pci_notify_cap,
- notify_off_multiplier));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
- offsetof(struct virtio_pci_common_cfg,
- device_feature_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
- offsetof(struct virtio_pci_common_cfg, device_feature));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
- offsetof(struct virtio_pci_common_cfg,
- guest_feature_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
- offsetof(struct virtio_pci_common_cfg, guest_feature));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
- offsetof(struct virtio_pci_common_cfg, msix_config));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
- offsetof(struct virtio_pci_common_cfg, num_queues));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
- offsetof(struct virtio_pci_common_cfg, device_status));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
- offsetof(struct virtio_pci_common_cfg, config_generation));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
- offsetof(struct virtio_pci_common_cfg, queue_select));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
- offsetof(struct virtio_pci_common_cfg, queue_size));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
- offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
- offsetof(struct virtio_pci_common_cfg, queue_enable));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
- offsetof(struct virtio_pci_common_cfg, queue_notify_off));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
- offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
- offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
- offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
- offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
- offsetof(struct virtio_pci_common_cfg, queue_used_lo));
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
- offsetof(struct virtio_pci_common_cfg, queue_used_hi));
-}
-
-/*
- * vp_modern_probe: probe the modern virtio pci device, note that the
- * caller is required to enable PCI device before calling this function.
- * @mdev: the modern virtio-pci device
- *
- * Return 0 on succeed otherwise fail
- */
-static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
-{
- struct pci_dev *pci_dev = mdev->pci_dev;
- int err, common, isr, notify, device;
- u32 notify_length;
- u32 notify_offset;
-
- check_offsets();
-
- mdev->pci_dev = pci_dev;
-
- /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
- return -ENODEV;
-
- if (pci_dev->device < 0x1040) {
- /* Transitional devices: use the PCI subsystem device id as
- * virtio device id, same as legacy driver always did.
- */
- mdev->id.device = pci_dev->subsystem_device;
- } else {
- /* Modern devices: simply use PCI device id, but start from 0x1040. */
- mdev->id.device = pci_dev->device - 0x1040;
- }
- mdev->id.vendor = pci_dev->subsystem_vendor;
-
- /* check for a common config: if not, use legacy mode (bar 0). */
- common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &mdev->modern_bars);
- if (!common) {
- dev_info(&pci_dev->dev,
- "virtio_pci: leaving for legacy driver\n");
- return -ENODEV;
- }
-
- /* If common is there, these should be too... */
- isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &mdev->modern_bars);
- notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &mdev->modern_bars);
- if (!isr || !notify) {
- dev_err(&pci_dev->dev,
- "virtio_pci: missing capabilities %i/%i/%i\n",
- common, isr, notify);
- return -EINVAL;
- }
-
- err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
- if (err)
- dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
-
- /* Device capability is only mandatory for devices that have
- * device-specific configuration.
- */
- device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
- IORESOURCE_IO | IORESOURCE_MEM,
- &mdev->modern_bars);
-
- err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
- "virtio-pci-modern");
- if (err)
- return err;
-
- err = -EINVAL;
- mdev->common = vp_modern_map_capability(mdev, common,
- sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
- NULL);
- if (!mdev->common)
- goto err_map_common;
- mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
- 0, 1,
- NULL);
- if (!mdev->isr)
- goto err_map_isr;
-
- /* Read notify_off_multiplier from config space. */
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- notify_off_multiplier),
- &mdev->notify_offset_multiplier);
- /* Read notify length and offset from config space. */
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- cap.length),
- &notify_length);
-
- pci_read_config_dword(pci_dev,
- notify + offsetof(struct virtio_pci_notify_cap,
- cap.offset),
- &notify_offset);
-
- /* We don't know how many VQs we'll map, ahead of the time.
- * If notify length is small, map it all now.
- * Otherwise, map each VQ individually later.
- */
- if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
- mdev->notify_base = vp_modern_map_capability(mdev, notify,
- 2, 2,
- 0, notify_length,
- &mdev->notify_len);
- if (!mdev->notify_base)
- goto err_map_notify;
- } else {
- mdev->notify_map_cap = notify;
- }
-
- /* Again, we don't know how much we should map, but PAGE_SIZE
- * is more than enough for all existing devices.
- */
- if (device) {
- mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
- 0, PAGE_SIZE,
- &mdev->device_len);
- if (!mdev->device)
- goto err_map_device;
- }
-
- return 0;
-
-err_map_device:
- if (mdev->notify_base)
- pci_iounmap(pci_dev, mdev->notify_base);
-err_map_notify:
- pci_iounmap(pci_dev, mdev->isr);
-err_map_isr:
- pci_iounmap(pci_dev, mdev->common);
-err_map_common:
- return err;
-}
-
/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
@@ -1063,23 +463,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
return 0;
}
-/*
- * vp_modern_probe: remove and cleanup the modern virtio pci device
- * @mdev: the modern virtio-pci device
- */
-static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
-{
- struct pci_dev *pci_dev = mdev->pci_dev;
-
- if (mdev->device)
- pci_iounmap(pci_dev, mdev->device);
- if (mdev->notify_base)
- pci_iounmap(pci_dev, mdev->notify_base);
- pci_iounmap(pci_dev, mdev->isr);
- pci_iounmap(pci_dev, mdev->common);
- pci_release_selected_regions(pci_dev, mdev->modern_bars);
-}
-
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
new file mode 100644
index 000000000000..cbd667496bb1
--- /dev/null
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/virtio_pci_modern.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/*
+ * vp_modern_map_capability - map a part of virtio pci capability
+ * @mdev: the modern virtio-pci device
+ * @off: offset of the capability
+ * @minlen: minimal length of the capability
+ * @align: align requirement
+ * @start: start from the capability
+ * @size: map size
+ * @len: the length that is actually mapped
+ *
+ * Returns the io address for the part of the capability
+ */
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen,
+ u32 align,
+ u32 start, u32 size,
+ size_t *len)
+{
+ struct pci_dev *dev = mdev->pci_dev;
+ u8 bar;
+ u32 offset, length;
+ void __iomem *p;
+
+ pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
+ bar),
+ &bar);
+ pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
+ &offset);
+ pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
+ &length);
+
+ if (length <= start) {
+ dev_err(&dev->dev,
+ "virtio_pci: bad capability len %u (>%u expected)\n",
+ length, start);
+ return NULL;
+ }
+
+ if (length - start < minlen) {
+ dev_err(&dev->dev,
+ "virtio_pci: bad capability len %u (>=%zu expected)\n",
+ length, minlen);
+ return NULL;
+ }
+
+ length -= start;
+
+ if (start + offset < offset) {
+ dev_err(&dev->dev,
+ "virtio_pci: map wrap-around %u+%u\n",
+ start, offset);
+ return NULL;
+ }
+
+ offset += start;
+
+ if (offset & (align - 1)) {
+ dev_err(&dev->dev,
+ "virtio_pci: offset %u not aligned to %u\n",
+ offset, align);
+ return NULL;
+ }
+
+ if (length > size)
+ length = size;
+
+ if (len)
+ *len = length;
+
+ if (minlen + offset < minlen ||
+ minlen + offset > pci_resource_len(dev, bar)) {
+ dev_err(&dev->dev,
+ "virtio_pci: map virtio %zu@%u "
+ "out of range on bar %i length %lu\n",
+ minlen, offset,
+ bar, (unsigned long)pci_resource_len(dev, bar));
+ return NULL;
+ }
+
+ p = pci_iomap_range(dev, bar, offset, length);
+ if (!p)
+ dev_err(&dev->dev,
+ "virtio_pci: unable to map virtio %u@%u on bar %i\n",
+ length, offset, bar);
+ return p;
+}
+EXPORT_SYMBOL_GPL(vp_modern_map_capability);
+
+/**
+ * virtio_pci_find_capability - walk capabilities to find device info.
+ * @dev: the pci device
+ * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
+ * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ * @bars: the bitmask of BARs
+ *
+ * Returns offset of the capability, or 0.
+ */
+static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
+ u32 ioresource_types, int *bars)
+{
+ int pos;
+
+ for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ pos > 0;
+ pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
+ u8 type, bar;
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ cfg_type),
+ &type);
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ bar),
+ &bar);
+
+ /* Ignore structures with reserved BAR values */
+ if (bar > 0x5)
+ continue;
+
+ if (type == cfg_type) {
+ if (pci_resource_len(dev, bar) &&
+ pci_resource_flags(dev, bar) & ioresource_types) {
+ *bars |= (1 << bar);
+ return pos;
+ }
+ }
+ }
+ return 0;
+}
+
+/* This is part of the ABI. Don't screw with it. */
+static inline void check_offsets(void)
+{
+ /* Note: disk space was harmed in compilation of this function. */
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
+ offsetof(struct virtio_pci_cap, cap_vndr));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
+ offsetof(struct virtio_pci_cap, cap_next));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
+ offsetof(struct virtio_pci_cap, cap_len));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
+ offsetof(struct virtio_pci_cap, cfg_type));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
+ offsetof(struct virtio_pci_cap, bar));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
+ offsetof(struct virtio_pci_cap, offset));
+ BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
+ offsetof(struct virtio_pci_cap, length));
+ BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
+ offsetof(struct virtio_pci_notify_cap,
+ notify_off_multiplier));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
+ offsetof(struct virtio_pci_common_cfg,
+ device_feature_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
+ offsetof(struct virtio_pci_common_cfg, device_feature));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
+ offsetof(struct virtio_pci_common_cfg,
+ guest_feature_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
+ offsetof(struct virtio_pci_common_cfg, guest_feature));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
+ offsetof(struct virtio_pci_common_cfg, msix_config));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
+ offsetof(struct virtio_pci_common_cfg, num_queues));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
+ offsetof(struct virtio_pci_common_cfg, device_status));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
+ offsetof(struct virtio_pci_common_cfg, config_generation));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
+ offsetof(struct virtio_pci_common_cfg, queue_select));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
+ offsetof(struct virtio_pci_common_cfg, queue_size));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
+ offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
+ offsetof(struct virtio_pci_common_cfg, queue_enable));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
+ offsetof(struct virtio_pci_common_cfg, queue_notify_off));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
+ offsetof(struct virtio_pci_common_cfg, queue_used_lo));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
+ offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+}
+
+/*
+ * vp_modern_probe: probe the modern virtio pci device, note that the
+ * caller is required to enable the PCI device before calling this function.
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns 0 on success, a negative errno on failure
+ */
+int vp_modern_probe(struct virtio_pci_modern_device *mdev)
+{
+ struct pci_dev *pci_dev = mdev->pci_dev;
+ int err, common, isr, notify, device;
+ u32 notify_length;
+ u32 notify_offset;
+
+ check_offsets();
+
+ mdev->pci_dev = pci_dev;
+
+ /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
+ return -ENODEV;
+
+ if (pci_dev->device < 0x1040) {
+ /* Transitional devices: use the PCI subsystem device id as
+ * virtio device id, same as legacy driver always did.
+ */
+ mdev->id.device = pci_dev->subsystem_device;
+ } else {
+ /* Modern devices: simply use PCI device id, but start from 0x1040. */
+ mdev->id.device = pci_dev->device - 0x1040;
+ }
+ mdev->id.vendor = pci_dev->subsystem_vendor;
+
+ /* check for a common config: if not, use legacy mode (bar 0). */
+ common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ if (!common) {
+ dev_info(&pci_dev->dev,
+ "virtio_pci: leaving for legacy driver\n");
+ return -ENODEV;
+ }
+
+ /* If common is there, these should be too... */
+ isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+ if (!isr || !notify) {
+ dev_err(&pci_dev->dev,
+ "virtio_pci: missing capabilities %i/%i/%i\n",
+ common, isr, notify);
+ return -EINVAL;
+ }
+
+ err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
+ /* Device capability is only mandatory for devices that have
+ * device-specific configuration.
+ */
+ device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
+ IORESOURCE_IO | IORESOURCE_MEM,
+ &mdev->modern_bars);
+
+ err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
+ "virtio-pci-modern");
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ mdev->common = vp_modern_map_capability(mdev, common,
+ sizeof(struct virtio_pci_common_cfg), 4,
+ 0, sizeof(struct virtio_pci_common_cfg),
+ NULL);
+ if (!mdev->common)
+ goto err_map_common;
+ mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
+ 0, 1,
+ NULL);
+ if (!mdev->isr)
+ goto err_map_isr;
+
+ /* Read notify_off_multiplier from config space. */
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ notify_off_multiplier),
+ &mdev->notify_offset_multiplier);
+ /* Read notify length and offset from config space. */
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ cap.length),
+ &notify_length);
+
+ pci_read_config_dword(pci_dev,
+ notify + offsetof(struct virtio_pci_notify_cap,
+ cap.offset),
+ &notify_offset);
+
+ /* We don't know ahead of time how many VQs we'll map.
+ * If notify length is small, map it all now.
+ * Otherwise, map each VQ individually later.
+ */
+ if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
+ mdev->notify_base = vp_modern_map_capability(mdev, notify,
+ 2, 2,
+ 0, notify_length,
+ &mdev->notify_len);
+ if (!mdev->notify_base)
+ goto err_map_notify;
+ } else {
+ mdev->notify_map_cap = notify;
+ }
+
+ /* Again, we don't know how much we should map, but PAGE_SIZE
+ * is more than enough for all existing devices.
+ */
+ if (device) {
+ mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
+ 0, PAGE_SIZE,
+ &mdev->device_len);
+ if (!mdev->device)
+ goto err_map_device;
+ }
+
+ return 0;
+
+err_map_device:
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
+err_map_notify:
+ pci_iounmap(pci_dev, mdev->isr);
+err_map_isr:
+ pci_iounmap(pci_dev, mdev->common);
+err_map_common:
+ return err;
+}
+EXPORT_SYMBOL_GPL(vp_modern_probe);
+
+/*
+ * vp_modern_remove: remove and cleanup the modern virtio pci device
+ * @mdev: the modern virtio-pci device
+ */
+void vp_modern_remove(struct virtio_pci_modern_device *mdev)
+{
+ struct pci_dev *pci_dev = mdev->pci_dev;
+
+ if (mdev->device)
+ pci_iounmap(pci_dev, mdev->device);
+ if (mdev->notify_base)
+ pci_iounmap(pci_dev, mdev->notify_base);
+ pci_iounmap(pci_dev, mdev->isr);
+ pci_iounmap(pci_dev, mdev->common);
+ pci_release_selected_regions(pci_dev, mdev->modern_bars);
+}
+EXPORT_SYMBOL_GPL(vp_modern_remove);
+
+/*
+ * vp_modern_get_features - get features from device
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the features read from the device
+ */
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ u64 features;
+
+ vp_iowrite32(0, &cfg->device_feature_select);
+ features = vp_ioread32(&cfg->device_feature);
+ vp_iowrite32(1, &cfg->device_feature_select);
+ features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
+
+ return features;
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_features);
+
+/*
+ * vp_modern_set_features - set features to device
+ * @mdev: the modern virtio-pci device
+ * @features: the features set to device
+ */
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ vp_iowrite32((u32)features, &cfg->guest_feature);
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ vp_iowrite32(features >> 32, &cfg->guest_feature);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_features);
+
+/*
+ * vp_modern_generation - get the device generation
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the generation read from the device
+ */
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ return vp_ioread8(&cfg->config_generation);
+}
+EXPORT_SYMBOL_GPL(vp_modern_generation);
+
+/*
+ * vp_modern_get_status - get the device status
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the status read from device
+ */
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ return vp_ioread8(&cfg->device_status);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_status);
+
+/*
+ * vp_modern_set_status - set status to device
+ * @mdev: the modern virtio-pci device
+ * @status: the status set to device
+ */
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite8(status, &cfg->device_status);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_status);
+
+/*
+ * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ * @vector: the msix vector
+ *
+ * Returns the msix vector read from the device
+ */
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(vector, &cfg->queue_msix_vector);
+ /* Flush the write out to device */
+ return vp_ioread16(&cfg->queue_msix_vector);
+}
+EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
+
+/*
+ * vp_modern_config_vector - set the vector for config interrupt
+ * @mdev: the modern virtio-pci device
+ * @vector: the config vector
+ *
+ * Returns the config vector read from the device
+ */
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ /* Setup the vector used for configuration events */
+ vp_iowrite16(vector, &cfg->msix_config);
+ /* Verify we had enough resources to assign the vector */
+ /* Will also flush the write out to device */
+ return vp_ioread16(&cfg->msix_config);
+}
+EXPORT_SYMBOL_GPL(vp_modern_config_vector);
+
+/*
+ * vp_modern_queue_address - set the virtqueue address
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @desc_addr: address of the descriptor area
+ * @driver_addr: address of the driver area
+ * @device_addr: address of the device area
+ */
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
+
+ vp_iowrite16(index, &cfg->queue_select);
+
+ vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+}
+EXPORT_SYMBOL_GPL(vp_modern_queue_address);
+
+/*
+ * vp_modern_set_queue_enable - enable a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @enable: whether the virtqueue is enabled or not
+ */
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index, bool enable)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(enable, &mdev->common->queue_enable);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
+
+/*
+ * vp_modern_get_queue_enable - get the enable status of a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns whether a virtqueue is enabled or not
+ */
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_enable);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
+
+/*
+ * vp_modern_set_queue_size - set size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @size: the size of the virtqueue
+ */
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index, u16 size)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(size, &mdev->common->queue_size);
+
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
+
+/*
+ * vp_modern_get_queue_size - get size for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the size of the virtqueue
+ */
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_size);
+
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
+
+/*
+ * vp_modern_get_num_queues - get the number of virtqueues
+ * @mdev: the modern virtio-pci device
+ *
+ * Returns the number of virtqueues
+ */
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
+{
+ return vp_ioread16(&mdev->common->num_queues);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
+
+/*
+ * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the notification offset for a virtqueue
+ */
+u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_notify_off);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
+
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Modern Virtio PCI Device");
+MODULE_AUTHOR("Jason Wang <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
new file mode 100644
index 000000000000..f26acbeec965
--- /dev/null
+++ b/include/linux/virtio_pci_modern.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_MODERN_H
+#define _LINUX_VIRTIO_PCI_MODERN_H
+
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_modern_device {
+ struct pci_dev *pci_dev;
+
+ struct virtio_pci_common_cfg __iomem *common;
+ /* Device-specific data (non-legacy mode) */
+ void __iomem *device;
+ /* Base of vq notifications (non-legacy mode). */
+ void __iomem *notify_base;
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+
+ /* So we can sanity-check accesses. */
+ size_t notify_len;
+ size_t device_len;
+
+ /* Capability for when we need to map notifications per-vq. */
+ int notify_map_cap;
+
+ /* Multiply queue_notify_off by this value. (non-legacy mode). */
+ u32 notify_offset_multiplier;
+
+ int modern_bars;
+
+ struct virtio_device_id id;
+};
+
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(const u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(const __le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static inline void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo,
+ __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features);
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status);
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 vector);
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector);
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr);
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx, bool enable);
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 size);
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen,
+ u32 align,
+ u32 start, u32 size,
+ size_t *len);
+int vp_modern_probe(struct virtio_pci_modern_device *mdev);
+void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+#endif
--
2.25.1
Instead of accessing iomem via struct virtio_pci_device directly,
tweak the code to call the io accessors through a local pointer to the
iomem structure. This will ease splitting out the modern virtio device logic.
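For example, the status accessor ends up following this pattern (a sketch
of the resulting shape, not an excerpt from the diff; it assumes the
declarations from virtio_pci_common.h):

static u8 example_get_status(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;

	return vp_ioread8(&cfg->device_status);
}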
Signed-off-by: Jason Wang <[email protected]>
---
drivers/virtio/virtio_pci_modern.c | 76 ++++++++++++++++++------------
1 file changed, 46 insertions(+), 30 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 3d6ae5a5e252..df1481fd400c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -141,12 +141,13 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
u64 features;
- vp_iowrite32(0, &vp_dev->common->device_feature_select);
- features = vp_ioread32(&vp_dev->common->device_feature);
- vp_iowrite32(1, &vp_dev->common->device_feature_select);
- features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
+ vp_iowrite32(0, &cfg->device_feature_select);
+ features = vp_ioread32(&cfg->device_feature);
+ vp_iowrite32(1, &cfg->device_feature_select);
+ features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
return features;
}
@@ -165,6 +166,7 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
static int vp_finalize_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
u64 features = vdev->features;
/* Give virtio_ring a chance to accept features. */
@@ -179,10 +181,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- vp_iowrite32(0, &vp_dev->common->guest_feature_select);
- vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
- vp_iowrite32(1, &vp_dev->common->guest_feature_select);
- vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+ vp_iowrite32(0, &cfg->guest_feature_select);
+ vp_iowrite32((u32)vdev->features, &cfg->guest_feature);
+ vp_iowrite32(1, &cfg->guest_feature_select);
+ vp_iowrite32(vdev->features >> 32, &cfg->guest_feature);
return 0;
}
@@ -192,6 +194,7 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void __iomem *device = vp_dev->device;
u8 b;
__le16 w;
__le32 l;
@@ -200,21 +203,21 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
switch (len) {
case 1:
- b = ioread8(vp_dev->device + offset);
+ b = ioread8(device + offset);
memcpy(buf, &b, sizeof b);
break;
case 2:
- w = cpu_to_le16(ioread16(vp_dev->device + offset));
+ w = cpu_to_le16(ioread16(device + offset));
memcpy(buf, &w, sizeof w);
break;
case 4:
- l = cpu_to_le32(ioread32(vp_dev->device + offset));
+ l = cpu_to_le32(ioread32(device + offset));
memcpy(buf, &l, sizeof l);
break;
case 8:
- l = cpu_to_le32(ioread32(vp_dev->device + offset));
+ l = cpu_to_le32(ioread32(device + offset));
memcpy(buf, &l, sizeof l);
- l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
+ l = cpu_to_le32(ioread32(device + offset + sizeof l));
memcpy(buf + sizeof l, &l, sizeof l);
break;
default:
@@ -228,6 +231,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ void __iomem *device = vp_dev->device;
u8 b;
__le16 w;
__le32 l;
@@ -237,21 +241,21 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
switch (len) {
case 1:
memcpy(&b, buf, sizeof b);
- iowrite8(b, vp_dev->device + offset);
+ iowrite8(b, device + offset);
break;
case 2:
memcpy(&w, buf, sizeof w);
- iowrite16(le16_to_cpu(w), vp_dev->device + offset);
+ iowrite16(le16_to_cpu(w), device + offset);
break;
case 4:
memcpy(&l, buf, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+ iowrite32(le32_to_cpu(l), device + offset);
break;
case 8:
memcpy(&l, buf, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset);
+ iowrite32(le32_to_cpu(l), device + offset);
memcpy(&l, buf + sizeof l, sizeof l);
- iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
+ iowrite32(le32_to_cpu(l), device + offset + sizeof l);
break;
default:
BUG();
@@ -261,35 +265,43 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return vp_ioread8(&vp_dev->common->config_generation);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
+ return vp_ioread8(&cfg->config_generation);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return vp_ioread8(&vp_dev->common->device_status);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
+ return vp_ioread8(&cfg->device_status);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- vp_iowrite8(status, &vp_dev->common->device_status);
+ vp_iowrite8(status, &cfg->device_status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
/* 0 status means a reset. */
- vp_iowrite8(0, &vp_dev->common->device_status);
+ vp_iowrite8(0, &cfg->device_status);
/* After writing 0 to device_status, the driver MUST wait for a read of
* device_status to return 0 before reinitializing the device.
* This will flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any.
*/
- while (vp_ioread8(&vp_dev->common->device_status))
+ while (vp_ioread8(&cfg->device_status))
msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
@@ -297,11 +309,13 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
+
/* Setup the vector used for configuration events */
- vp_iowrite16(vector, &vp_dev->common->msix_config);
+ vp_iowrite16(vector, &cfg->msix_config);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
- return vp_ioread16(&vp_dev->common->msix_config);
+ return vp_ioread16(&cfg->msix_config);
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
@@ -407,6 +421,7 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
struct virtqueue *vq;
int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
@@ -417,8 +432,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* this, there's no way to go back except reset.
*/
list_for_each_entry(vq, &vdev->vqs, list) {
- vp_iowrite16(vq->index, &vp_dev->common->queue_select);
- vp_iowrite16(1, &vp_dev->common->queue_enable);
+ vp_iowrite16(vq->index, &cfg->queue_select);
+ vp_iowrite16(1, &cfg->queue_enable);
}
return 0;
@@ -428,14 +443,15 @@ static void del_vq(struct virtio_pci_vq_info *info)
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
- vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(vq->index, &cfg->queue_select);
if (vp_dev->msix_enabled) {
vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
- &vp_dev->common->queue_msix_vector);
+ &cfg->queue_msix_vector);
/* Flush the write out to device */
- vp_ioread16(&vp_dev->common->queue_msix_vector);
+ vp_ioread16(&cfg->queue_msix_vector);
}
if (!vp_dev->notify_base)
--
2.25.1
On 2021/1/4 2:54 PM, Jason Wang wrote:
> Hi all:
>
> This series tries to implement a vDPA driver for virtio-pci device
> which will bridge between vDPA bus and virtio-pci device.
>
> This could be used for future feature prototyping and testing.
>
> Please review
>
> Changes since V2:
>
> - tweak config prompt
> - switch from 'cb' to 'config_cb' for vp_vdpa config interrupt
> - use a macro for vp_vdpa msix name length
Hi Michael:
Any comments on this series?
Thanks
>
> Changes since V1:
>
> - don't try to use devres for virtio-pci core
> - tweak the commit log
> - split the patches furtherly to ease the reviewing
>
> Changes since RFC:
>
> - Split common codes from virito-pci and share it with vDPA driver
> - Use dynamic id in order to be less confusing with virtio-pci driver
> - No feature whitelist, supporting any features (mq, config etc)
>
> Thanks
>
> Jason Wang (19):
> virtio-pci: do not access iomem via struct virtio_pci_device directly
> virtio-pci: split out modern device
> virtio-pci-modern: factor out modern device initialization logic
> virtio-pci-modern: introduce vp_modern_remove()
> virtio-pci-modern: introduce helper to set config vector
> virtio-pci-modern: introduce helpers for setting and getting status
> virtio-pci-modern: introduce helpers for setting and getting features
> virtio-pci-modern: introduce vp_modern_generation()
> virtio-pci-modern: introduce vp_modern_set_queue_vector()
> virtio-pci-modern: introduce vp_modern_queue_address()
> virtio-pci-modern: introduce helper to set/get queue_enable
> virtio-pci-modern: introduce helper for setting/geting queue size
> virtio-pci-modern: introduce helper for getting queue nums
> virtio-pci-modern: introduce helper to get notification offset
> virito-pci-modern: rename map_capability() to
> vp_modern_map_capability()
> virtio-pci: introduce modern device module
> vdpa: set the virtqueue num during register
> virtio_vdpa: don't warn when fail to disable vq
> vdpa: introduce virtio pci driver
>
> drivers/vdpa/Kconfig | 6 +
> drivers/vdpa/Makefile | 1 +
> drivers/vdpa/ifcvf/ifcvf_main.c | 5 +-
> drivers/vdpa/mlx5/net/mlx5_vnet.c | 5 +-
> drivers/vdpa/vdpa.c | 8 +-
> drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 +-
> drivers/vdpa/virtio_pci/Makefile | 2 +
> drivers/vdpa/virtio_pci/vp_vdpa.c | 456 +++++++++++++++++++
> drivers/virtio/Kconfig | 10 +-
> drivers/virtio/Makefile | 1 +
> drivers/virtio/virtio_pci_common.h | 22 +-
> drivers/virtio/virtio_pci_modern.c | 506 +++------------------
> drivers/virtio/virtio_pci_modern_dev.c | 599 +++++++++++++++++++++++++
> drivers/virtio/virtio_vdpa.c | 3 +-
> include/linux/vdpa.h | 7 +-
> include/linux/virtio_pci_modern.h | 111 +++++
> 16 files changed, 1274 insertions(+), 472 deletions(-)
> create mode 100644 drivers/vdpa/virtio_pci/Makefile
> create mode 100644 drivers/vdpa/virtio_pci/vp_vdpa.c
> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> create mode 100644 include/linux/virtio_pci_modern.h
>
On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> Signed-off-by: Jason Wang <[email protected]>
I don't exactly get why we need to split the modern driver out,
and it can confuse people who are used to seeing virtio-pci.
The vdpa thing so far looks like a development tool, so why do
we care that it depends on a bit of extra code?
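For reference, once the core is split out another module can reuse it roughly as in the sketch below (a hypothetical example_probe() only; the vp_modern_*() calls are the helpers exported by this patch, and a real driver would embed mdev in its own device state rather than keep it on the stack):

#include <linux/pci.h>
#include <linux/virtio_pci_modern.h>

static int example_probe(struct pci_dev *pci_dev)
{
        struct virtio_pci_modern_device mdev = { .pci_dev = pci_dev };
        int err;

        /* vp_modern_probe() expects the PCI device to be enabled already. */
        err = pci_enable_device(pci_dev);
        if (err)
                return err;

        /* Locate and map the common, ISR, notify and device capabilities. */
        err = vp_modern_probe(&mdev);
        if (err) {
                pci_disable_device(pci_dev);
                return err;
        }

        /* From here the device is driven purely through vp_modern_*() helpers. */
        dev_info(&pci_dev->dev, "device features: %llx\n",
                 (unsigned long long)vp_modern_get_features(&mdev));

        /* Tear everything down again; a real driver would keep the mappings. */
        vp_modern_remove(&mdev);
        pci_disable_device(pci_dev);
        return 0;
}

The vp_vdpa driver later in this series reuses the same probe/remove core.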
> ---
> drivers/virtio/Kconfig | 10 +-
> drivers/virtio/Makefile | 1 +
> drivers/virtio/virtio_pci_common.h | 27 +-
> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
> include/linux/virtio_pci_modern.h | 111 +++++
> 6 files changed, 721 insertions(+), 644 deletions(-)
> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> create mode 100644 include/linux/virtio_pci_modern.h
>
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 7b41130d3f35..6b9b81f4b8c2 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> This option is selected if the architecture may need to enforce
> VIRTIO_F_ACCESS_PLATFORM
>
> +config VIRTIO_PCI_MODERN
> + tristate "Modern Virtio PCI Device"
> + depends on PCI
> + help
> + Modern PCI device implementation. This module implements the
> + basic probe and control for devices which are based on the
> + modern PCI layout, with possible vendor-specific extensions.
> +
> menuconfig VIRTIO_MENU
> bool "Virtio drivers"
> default y
> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>
> config VIRTIO_PCI
> tristate "PCI driver for virtio devices"
> - depends on PCI
> + depends on VIRTIO_PCI_MODERN
> select VIRTIO
> help
> This driver provides support for virtio based paravirtual device
> diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
> index 591e6f72aa54..f097578aaa8f 100644
> --- a/drivers/virtio/Makefile
> +++ b/drivers/virtio/Makefile
> @@ -1,5 +1,6 @@
> # SPDX-License-Identifier: GPL-2.0
> obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
> +obj-$(CONFIG_VIRTIO_PCI_MODERN) += virtio_pci_modern_dev.o
> obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
> obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
> virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index f35ff5b6b467..beec047a8f8d 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -25,6 +25,7 @@
> #include <linux/virtio_config.h>
> #include <linux/virtio_ring.h>
> #include <linux/virtio_pci.h>
> +#include <linux/virtio_pci_modern.h>
> #include <linux/highmem.h>
> #include <linux/spinlock.h>
>
> @@ -39,32 +40,6 @@ struct virtio_pci_vq_info {
> unsigned msix_vector;
> };
>
> -struct virtio_pci_modern_device {
> - struct pci_dev *pci_dev;
> -
> - struct virtio_pci_common_cfg __iomem *common;
> - /* Device-specific data (non-legacy mode) */
> - void __iomem *device;
> - /* Base of vq notifications (non-legacy mode). */
> - void __iomem *notify_base;
> - /* Where to read and clear interrupt */
> - u8 __iomem *isr;
> -
> - /* So we can sanity-check accesses. */
> - size_t notify_len;
> - size_t device_len;
> -
> - /* Capability for when we need to map notifications per-vq. */
> - int notify_map_cap;
> -
> - /* Multiply queue_notify_off by this value. (non-legacy mode). */
> - u32 notify_offset_multiplier;
> -
> - int modern_bars;
> -
> - struct virtio_device_id id;
> -};
> -
> /* Our device structure */
> struct virtio_pci_device {
> struct virtio_device vdev;
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index a5e3a5e40323..fbd4ebc00eb6 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -19,158 +19,6 @@
> #define VIRTIO_RING_NO_LEGACY
> #include "virtio_pci_common.h"
>
> -/*
> - * Type-safe wrappers for io accesses.
> - * Use these to enforce at compile time the following spec requirement:
> - *
> - * The driver MUST access each field using the “natural” access
> - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
> - * for 16-bit fields and 8-bit accesses for 8-bit fields.
> - */
> -static inline u8 vp_ioread8(const u8 __iomem *addr)
> -{
> - return ioread8(addr);
> -}
> -static inline u16 vp_ioread16 (const __le16 __iomem *addr)
> -{
> - return ioread16(addr);
> -}
> -
> -static inline u32 vp_ioread32(const __le32 __iomem *addr)
> -{
> - return ioread32(addr);
> -}
> -
> -static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
> -{
> - iowrite8(value, addr);
> -}
> -
> -static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
> -{
> - iowrite16(value, addr);
> -}
> -
> -static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
> -{
> - iowrite32(value, addr);
> -}
> -
> -static void vp_iowrite64_twopart(u64 val,
> - __le32 __iomem *lo, __le32 __iomem *hi)
> -{
> - vp_iowrite32((u32)val, lo);
> - vp_iowrite32(val >> 32, hi);
> -}
> -
> -/*
> - * vp_modern_map_capability - map a part of virtio pci capability
> - * @mdev: the modern virtio-pci device
> - * @off: offset of the capability
> - * @minlen: minimal length of the capability
> - * @align: align requirement
> - * @start: start from the capability
> - * @size: map size
> - * @len: the length that is actually mapped
> - *
> - * Returns the io address of for the part of the capability
> - */
> -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> - size_t minlen,
> - u32 align,
> - u32 start, u32 size,
> - size_t *len)
> -{
> - struct pci_dev *dev = mdev->pci_dev;
> - u8 bar;
> - u32 offset, length;
> - void __iomem *p;
> -
> - pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
> - bar),
> - &bar);
> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
> - &offset);
> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
> - &length);
> -
> - if (length <= start) {
> - dev_err(&dev->dev,
> - "virtio_pci: bad capability len %u (>%u expected)\n",
> - length, start);
> - return NULL;
> - }
> -
> - if (length - start < minlen) {
> - dev_err(&dev->dev,
> - "virtio_pci: bad capability len %u (>=%zu expected)\n",
> - length, minlen);
> - return NULL;
> - }
> -
> - length -= start;
> -
> - if (start + offset < offset) {
> - dev_err(&dev->dev,
> - "virtio_pci: map wrap-around %u+%u\n",
> - start, offset);
> - return NULL;
> - }
> -
> - offset += start;
> -
> - if (offset & (align - 1)) {
> - dev_err(&dev->dev,
> - "virtio_pci: offset %u not aligned to %u\n",
> - offset, align);
> - return NULL;
> - }
> -
> - if (length > size)
> - length = size;
> -
> - if (len)
> - *len = length;
> -
> - if (minlen + offset < minlen ||
> - minlen + offset > pci_resource_len(dev, bar)) {
> - dev_err(&dev->dev,
> - "virtio_pci: map virtio %zu@%u "
> - "out of range on bar %i length %lu\n",
> - minlen, offset,
> - bar, (unsigned long)pci_resource_len(dev, bar));
> - return NULL;
> - }
> -
> - p = pci_iomap_range(dev, bar, offset, length);
> - if (!p)
> - dev_err(&dev->dev,
> - "virtio_pci: unable to map virtio %u@%u on bar %i\n",
> - length, offset, bar);
> - return p;
> -}
> -
> -/*
> - * vp_modern_get_features - get features from device
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the features read from the device
> - */
> -static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - u64 features;
> -
> - vp_iowrite32(0, &cfg->device_feature_select);
> - features = vp_ioread32(&cfg->device_feature);
> - vp_iowrite32(1, &cfg->device_feature_select);
> - features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
> -
> - return features;
> -}
> -
> -/* virtio config->get_features() implementation */
> static u64 vp_get_features(struct virtio_device *vdev)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -188,149 +36,6 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> }
>
> -/*
> - * vp_modern_set_features - set features to device
> - * @mdev: the modern virtio-pci device
> - * @features: the features set to device
> - */
> -static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> - u64 features)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite32(0, &cfg->guest_feature_select);
> - vp_iowrite32((u32)features, &cfg->guest_feature);
> - vp_iowrite32(1, &cfg->guest_feature_select);
> - vp_iowrite32(features >> 32, &cfg->guest_feature);
> -}
> -
> -/*
> - * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: queue index
> - * @vector: the config vector
> - *
> - * Returns the config vector read from the device
> - */
> -static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> - u16 index, u16 vector)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite16(index, &cfg->queue_select);
> - vp_iowrite16(vector, &cfg->queue_msix_vector);
> - /* Flush the write out to device */
> - return vp_ioread16(&cfg->queue_msix_vector);
> -}
> -
> -/*
> - * vp_modern_queue_address - set the virtqueue address
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @desc_addr: address of the descriptor area
> - * @driver_addr: address of the driver area
> - * @device_addr: address of the device area
> - */
> -static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> - u16 index, u64 desc_addr, u64 driver_addr,
> - u64 device_addr)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite16(index, &cfg->queue_select);
> -
> - vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
> - &cfg->queue_desc_hi);
> - vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
> - &cfg->queue_avail_hi);
> - vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
> - &cfg->queue_used_hi);
> -}
> -
> -/*
> - * vp_modern_set_queue_enable - enable a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @enable: whether the virtqueue is enable or not
> - */
> -static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> - u16 index, bool enable)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> - vp_iowrite16(enable, &mdev->common->queue_enable);
> -}
> -
> -/*
> - * vp_modern_get_queue_enable - enable a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns whether a virtqueue is enabled or not
> - */
> -static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_enable);
> -}
> -
> -/*
> - * vp_modern_set_queue_size - set size for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @size: the size of the virtqueue
> - */
> -static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> - u16 index, u16 size)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> - vp_iowrite16(size, &mdev->common->queue_size);
> -
> -}
> -
> -/*
> - * vp_modern_get_queue_size - get size for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns the size of the virtqueue
> - */
> -static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_size);
> -
> -}
> -
> -/*
> - * vp_modern_get_num_queues - get the number of virtqueues
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the number of virtqueues
> - */
> -static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
> -{
> - return vp_ioread16(&mdev->common->num_queues);
> -}
> -
> -/*
> - * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns the notification offset for a virtqueue
> - */
> -static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_notify_off);
> -}
> -
> /* virtio config->finalize_features() implementation */
> static int vp_finalize_features(struct virtio_device *vdev)
> {
> @@ -429,19 +134,6 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
> }
> }
>
> -/*
> - * vp_modern_generation - get the device genreation
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the genreation read from device
> - */
> -static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - return vp_ioread8(&cfg->config_generation);
> -}
> -
> static u32 vp_generation(struct virtio_device *vdev)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -449,19 +141,6 @@ static u32 vp_generation(struct virtio_device *vdev)
> return vp_modern_generation(&vp_dev->mdev);
> }
>
> -/*
> - * vp_modern_get_status - get the device status
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the status read from device
> - */
> -static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - return vp_ioread8(&cfg->device_status);
> -}
> -
> /* config->{get,set}_status() implementations */
> static u8 vp_get_status(struct virtio_device *vdev)
> {
> @@ -470,19 +149,6 @@ static u8 vp_get_status(struct virtio_device *vdev)
> return vp_modern_get_status(&vp_dev->mdev);
> }
>
> -/*
> - * vp_modern_set_status - set status to device
> - * @mdev: the modern virtio-pci device
> - * @status: the status set to device
> - */
> -static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> - u8 status)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite8(status, &cfg->device_status);
> -}
> -
> static void vp_set_status(struct virtio_device *vdev, u8 status)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -510,25 +176,6 @@ static void vp_reset(struct virtio_device *vdev)
> vp_synchronize_vectors(vdev);
> }
>
> -/*
> - * vp_modern_config_vector - set the vector for config interrupt
> - * @mdev: the modern virtio-pci device
> - * @vector: the config vector
> - *
> - * Returns the config vector read from the device
> - */
> -static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> - u16 vector)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - /* Setup the vector used for configuration events */
> - vp_iowrite16(vector, &cfg->msix_config);
> - /* Verify we had enough resources to assign the vector */
> - /* Will also flush the write out to device */
> - return vp_ioread16(&cfg->msix_config);
> -}
> -
> static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> {
> return vp_modern_config_vector(&vp_dev->mdev, vector);
> @@ -789,253 +436,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
> .get_shm_region = vp_get_shm_region,
> };
>
> -/**
> - * virtio_pci_find_capability - walk capabilities to find device info.
> - * @dev: the pci device
> - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
> - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
> - * @bars: the bitmask of BARs
> - *
> - * Returns offset of the capability, or 0.
> - */
> -static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
> - u32 ioresource_types, int *bars)
> -{
> - int pos;
> -
> - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
> - pos > 0;
> - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
> - u8 type, bar;
> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> - cfg_type),
> - &type);
> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> - bar),
> - &bar);
> -
> - /* Ignore structures with reserved BAR values */
> - if (bar > 0x5)
> - continue;
> -
> - if (type == cfg_type) {
> - if (pci_resource_len(dev, bar) &&
> - pci_resource_flags(dev, bar) & ioresource_types) {
> - *bars |= (1 << bar);
> - return pos;
> - }
> - }
> - }
> - return 0;
> -}
> -
> -/* This is part of the ABI. Don't screw with it. */
> -static inline void check_offsets(void)
> -{
> - /* Note: disk space was harmed in compilation of this function. */
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
> - offsetof(struct virtio_pci_cap, cap_vndr));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
> - offsetof(struct virtio_pci_cap, cap_next));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
> - offsetof(struct virtio_pci_cap, cap_len));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
> - offsetof(struct virtio_pci_cap, cfg_type));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
> - offsetof(struct virtio_pci_cap, bar));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
> - offsetof(struct virtio_pci_cap, offset));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
> - offsetof(struct virtio_pci_cap, length));
> - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
> - offsetof(struct virtio_pci_notify_cap,
> - notify_off_multiplier));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
> - offsetof(struct virtio_pci_common_cfg,
> - device_feature_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
> - offsetof(struct virtio_pci_common_cfg, device_feature));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
> - offsetof(struct virtio_pci_common_cfg,
> - guest_feature_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
> - offsetof(struct virtio_pci_common_cfg, guest_feature));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
> - offsetof(struct virtio_pci_common_cfg, msix_config));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
> - offsetof(struct virtio_pci_common_cfg, num_queues));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
> - offsetof(struct virtio_pci_common_cfg, device_status));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
> - offsetof(struct virtio_pci_common_cfg, config_generation));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
> - offsetof(struct virtio_pci_common_cfg, queue_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
> - offsetof(struct virtio_pci_common_cfg, queue_size));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
> - offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
> - offsetof(struct virtio_pci_common_cfg, queue_enable));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
> - offsetof(struct virtio_pci_common_cfg, queue_notify_off));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_used_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_used_hi));
> -}
> -
> -/*
> - * vp_modern_probe: probe the modern virtio pci device, note that the
> - * caller is required to enable PCI device before calling this function.
> - * @mdev: the modern virtio-pci device
> - *
> - * Return 0 on succeed otherwise fail
> - */
> -static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
> -{
> - struct pci_dev *pci_dev = mdev->pci_dev;
> - int err, common, isr, notify, device;
> - u32 notify_length;
> - u32 notify_offset;
> -
> - check_offsets();
> -
> - mdev->pci_dev = pci_dev;
> -
> - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
> - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
> - return -ENODEV;
> -
> - if (pci_dev->device < 0x1040) {
> - /* Transitional devices: use the PCI subsystem device id as
> - * virtio device id, same as legacy driver always did.
> - */
> - mdev->id.device = pci_dev->subsystem_device;
> - } else {
> - /* Modern devices: simply use PCI device id, but start from 0x1040. */
> - mdev->id.device = pci_dev->device - 0x1040;
> - }
> - mdev->id.vendor = pci_dev->subsystem_vendor;
> -
> - /* check for a common config: if not, use legacy mode (bar 0). */
> - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - if (!common) {
> - dev_info(&pci_dev->dev,
> - "virtio_pci: leaving for legacy driver\n");
> - return -ENODEV;
> - }
> -
> - /* If common is there, these should be too... */
> - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - if (!isr || !notify) {
> - dev_err(&pci_dev->dev,
> - "virtio_pci: missing capabilities %i/%i/%i\n",
> - common, isr, notify);
> - return -EINVAL;
> - }
> -
> - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
> - if (err)
> - err = dma_set_mask_and_coherent(&pci_dev->dev,
> - DMA_BIT_MASK(32));
> - if (err)
> - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
> -
> - /* Device capability is only mandatory for devices that have
> - * device-specific configuration.
> - */
> - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> -
> - err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
> - "virtio-pci-modern");
> - if (err)
> - return err;
> -
> - err = -EINVAL;
> - mdev->common = vp_modern_map_capability(mdev, common,
> - sizeof(struct virtio_pci_common_cfg), 4,
> - 0, sizeof(struct virtio_pci_common_cfg),
> - NULL);
> - if (!mdev->common)
> - goto err_map_common;
> - mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
> - 0, 1,
> - NULL);
> - if (!mdev->isr)
> - goto err_map_isr;
> -
> - /* Read notify_off_multiplier from config space. */
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - notify_off_multiplier),
> - &mdev->notify_offset_multiplier);
> - /* Read notify length and offset from config space. */
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - cap.length),
> - &notify_length);
> -
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - cap.offset),
> - &notify_offset);
> -
> - /* We don't know how many VQs we'll map, ahead of the time.
> - * If notify length is small, map it all now.
> - * Otherwise, map each VQ individually later.
> - */
> - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
> - mdev->notify_base = vp_modern_map_capability(mdev, notify,
> - 2, 2,
> - 0, notify_length,
> - &mdev->notify_len);
> - if (!mdev->notify_base)
> - goto err_map_notify;
> - } else {
> - mdev->notify_map_cap = notify;
> - }
> -
> - /* Again, we don't know how much we should map, but PAGE_SIZE
> - * is more than enough for all existing devices.
> - */
> - if (device) {
> - mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
> - 0, PAGE_SIZE,
> - &mdev->device_len);
> - if (!mdev->device)
> - goto err_map_device;
> - }
> -
> - return 0;
> -
> -err_map_device:
> - if (mdev->notify_base)
> - pci_iounmap(pci_dev, mdev->notify_base);
> -err_map_notify:
> - pci_iounmap(pci_dev, mdev->isr);
> -err_map_isr:
> - pci_iounmap(pci_dev, mdev->common);
> -err_map_common:
> - return err;
> -}
> -
> /* the PCI probing function */
> int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
> {
> @@ -1063,23 +463,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
> return 0;
> }
>
> -/*
> - * vp_modern_probe: remove and cleanup the modern virtio pci device
> - * @mdev: the modern virtio-pci device
> - */
> -static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
> -{
> - struct pci_dev *pci_dev = mdev->pci_dev;
> -
> - if (mdev->device)
> - pci_iounmap(pci_dev, mdev->device);
> - if (mdev->notify_base)
> - pci_iounmap(pci_dev, mdev->notify_base);
> - pci_iounmap(pci_dev, mdev->isr);
> - pci_iounmap(pci_dev, mdev->common);
> - pci_release_selected_regions(pci_dev, mdev->modern_bars);
> -}
> -
> void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
> {
> struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
> new file mode 100644
> index 000000000000..cbd667496bb1
> --- /dev/null
> +++ b/drivers/virtio/virtio_pci_modern_dev.c
> @@ -0,0 +1,599 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +
> +#include <linux/virtio_pci_modern.h>
> +#include <linux/module.h>
> +#include <linux/pci.h>
> +
> +/*
> + * vp_modern_map_capability - map a part of virtio pci capability
> + * @mdev: the modern virtio-pci device
> + * @off: offset of the capability
> + * @minlen: minimal length of the capability
> + * @align: align requirement
> + * @start: start from the capability
> + * @size: map size
> + * @len: the length that is actually mapped
> + *
> + * Returns the io address for the part of the capability
> + */
> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> + size_t minlen,
> + u32 align,
> + u32 start, u32 size,
> + size_t *len)
> +{
> + struct pci_dev *dev = mdev->pci_dev;
> + u8 bar;
> + u32 offset, length;
> + void __iomem *p;
> +
> + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
> + bar),
> + &bar);
> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
> + &offset);
> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
> + &length);
> +
> + if (length <= start) {
> + dev_err(&dev->dev,
> + "virtio_pci: bad capability len %u (>%u expected)\n",
> + length, start);
> + return NULL;
> + }
> +
> + if (length - start < minlen) {
> + dev_err(&dev->dev,
> + "virtio_pci: bad capability len %u (>=%zu expected)\n",
> + length, minlen);
> + return NULL;
> + }
> +
> + length -= start;
> +
> + if (start + offset < offset) {
> + dev_err(&dev->dev,
> + "virtio_pci: map wrap-around %u+%u\n",
> + start, offset);
> + return NULL;
> + }
> +
> + offset += start;
> +
> + if (offset & (align - 1)) {
> + dev_err(&dev->dev,
> + "virtio_pci: offset %u not aligned to %u\n",
> + offset, align);
> + return NULL;
> + }
> +
> + if (length > size)
> + length = size;
> +
> + if (len)
> + *len = length;
> +
> + if (minlen + offset < minlen ||
> + minlen + offset > pci_resource_len(dev, bar)) {
> + dev_err(&dev->dev,
> + "virtio_pci: map virtio %zu@%u "
> + "out of range on bar %i length %lu\n",
> + minlen, offset,
> + bar, (unsigned long)pci_resource_len(dev, bar));
> + return NULL;
> + }
> +
> + p = pci_iomap_range(dev, bar, offset, length);
> + if (!p)
> + dev_err(&dev->dev,
> + "virtio_pci: unable to map virtio %u@%u on bar %i\n",
> + length, offset, bar);
> + return p;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_map_capability);
> +
> +/**
> + * virtio_pci_find_capability - walk capabilities to find device info.
> + * @dev: the pci device
> + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
> + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
> + * @bars: the bitmask of BARs
> + *
> + * Returns offset of the capability, or 0.
> + */
> +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
> + u32 ioresource_types, int *bars)
> +{
> + int pos;
> +
> + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
> + pos > 0;
> + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
> + u8 type, bar;
> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> + cfg_type),
> + &type);
> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> + bar),
> + &bar);
> +
> + /* Ignore structures with reserved BAR values */
> + if (bar > 0x5)
> + continue;
> +
> + if (type == cfg_type) {
> + if (pci_resource_len(dev, bar) &&
> + pci_resource_flags(dev, bar) & ioresource_types) {
> + *bars |= (1 << bar);
> + return pos;
> + }
> + }
> + }
> + return 0;
> +}
> +
> +/* This is part of the ABI. Don't screw with it. */
> +static inline void check_offsets(void)
> +{
> + /* Note: disk space was harmed in compilation of this function. */
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
> + offsetof(struct virtio_pci_cap, cap_vndr));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
> + offsetof(struct virtio_pci_cap, cap_next));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
> + offsetof(struct virtio_pci_cap, cap_len));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
> + offsetof(struct virtio_pci_cap, cfg_type));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
> + offsetof(struct virtio_pci_cap, bar));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
> + offsetof(struct virtio_pci_cap, offset));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
> + offsetof(struct virtio_pci_cap, length));
> + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
> + offsetof(struct virtio_pci_notify_cap,
> + notify_off_multiplier));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
> + offsetof(struct virtio_pci_common_cfg,
> + device_feature_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
> + offsetof(struct virtio_pci_common_cfg, device_feature));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
> + offsetof(struct virtio_pci_common_cfg,
> + guest_feature_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
> + offsetof(struct virtio_pci_common_cfg, guest_feature));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
> + offsetof(struct virtio_pci_common_cfg, msix_config));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
> + offsetof(struct virtio_pci_common_cfg, num_queues));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
> + offsetof(struct virtio_pci_common_cfg, device_status));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
> + offsetof(struct virtio_pci_common_cfg, config_generation));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
> + offsetof(struct virtio_pci_common_cfg, queue_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
> + offsetof(struct virtio_pci_common_cfg, queue_size));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
> + offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
> + offsetof(struct virtio_pci_common_cfg, queue_enable));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
> + offsetof(struct virtio_pci_common_cfg, queue_notify_off));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_used_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_used_hi));
> +}
> +
> +/*
> + * vp_modern_probe: probe the modern virtio pci device, note that the
> + * caller is required to enable PCI device before calling this function.
> + * @mdev: the modern virtio-pci device
> + *
> + * Return 0 on success, otherwise fail
> + */
> +int vp_modern_probe(struct virtio_pci_modern_device *mdev)
> +{
> + struct pci_dev *pci_dev = mdev->pci_dev;
> + int err, common, isr, notify, device;
> + u32 notify_length;
> + u32 notify_offset;
> +
> + check_offsets();
> +
> + mdev->pci_dev = pci_dev;
> +
> + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
> + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
> + return -ENODEV;
> +
> + if (pci_dev->device < 0x1040) {
> + /* Transitional devices: use the PCI subsystem device id as
> + * virtio device id, same as legacy driver always did.
> + */
> + mdev->id.device = pci_dev->subsystem_device;
> + } else {
> + /* Modern devices: simply use PCI device id, but start from 0x1040. */
> + mdev->id.device = pci_dev->device - 0x1040;
> + }
> + mdev->id.vendor = pci_dev->subsystem_vendor;
> +
> + /* check for a common config: if not, use legacy mode (bar 0). */
> + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + if (!common) {
> + dev_info(&pci_dev->dev,
> + "virtio_pci: leaving for legacy driver\n");
> + return -ENODEV;
> + }
> +
> + /* If common is there, these should be too... */
> + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + if (!isr || !notify) {
> + dev_err(&pci_dev->dev,
> + "virtio_pci: missing capabilities %i/%i/%i\n",
> + common, isr, notify);
> + return -EINVAL;
> + }
> +
> + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
> + if (err)
> + err = dma_set_mask_and_coherent(&pci_dev->dev,
> + DMA_BIT_MASK(32));
> + if (err)
> + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
> +
> + /* Device capability is only mandatory for devices that have
> + * device-specific configuration.
> + */
> + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> +
> + err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
> + "virtio-pci-modern");
> + if (err)
> + return err;
> +
> + err = -EINVAL;
> + mdev->common = vp_modern_map_capability(mdev, common,
> + sizeof(struct virtio_pci_common_cfg), 4,
> + 0, sizeof(struct virtio_pci_common_cfg),
> + NULL);
> + if (!mdev->common)
> + goto err_map_common;
> + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
> + 0, 1,
> + NULL);
> + if (!mdev->isr)
> + goto err_map_isr;
> +
> + /* Read notify_off_multiplier from config space. */
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + notify_off_multiplier),
> + &mdev->notify_offset_multiplier);
> + /* Read notify length and offset from config space. */
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + cap.length),
> + &notify_length);
> +
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + cap.offset),
> + &notify_offset);
> +
> + /* We don't know how many VQs we'll map, ahead of the time.
> + * If notify length is small, map it all now.
> + * Otherwise, map each VQ individually later.
> + */
> + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
> + mdev->notify_base = vp_modern_map_capability(mdev, notify,
> + 2, 2,
> + 0, notify_length,
> + &mdev->notify_len);
> + if (!mdev->notify_base)
> + goto err_map_notify;
> + } else {
> + mdev->notify_map_cap = notify;
> + }
> +
> + /* Again, we don't know how much we should map, but PAGE_SIZE
> + * is more than enough for all existing devices.
> + */
> + if (device) {
> + mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
> + 0, PAGE_SIZE,
> + &mdev->device_len);
> + if (!mdev->device)
> + goto err_map_device;
> + }
> +
> + return 0;
> +
> +err_map_device:
> + if (mdev->notify_base)
> + pci_iounmap(pci_dev, mdev->notify_base);
> +err_map_notify:
> + pci_iounmap(pci_dev, mdev->isr);
> +err_map_isr:
> + pci_iounmap(pci_dev, mdev->common);
> +err_map_common:
> + return err;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_probe);
> +
> +/*
> + * vp_modern_remove: remove and clean up the modern virtio pci device
> + * @mdev: the modern virtio-pci device
> + */
> +void vp_modern_remove(struct virtio_pci_modern_device *mdev)
> +{
> + struct pci_dev *pci_dev = mdev->pci_dev;
> +
> + if (mdev->device)
> + pci_iounmap(pci_dev, mdev->device);
> + if (mdev->notify_base)
> + pci_iounmap(pci_dev, mdev->notify_base);
> + pci_iounmap(pci_dev, mdev->isr);
> + pci_iounmap(pci_dev, mdev->common);
> + pci_release_selected_regions(pci_dev, mdev->modern_bars);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_remove);
> +
> +/*
> + * vp_modern_get_features - get features from device
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the features read from the device
> + */
> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + u64 features;
> +
> + vp_iowrite32(0, &cfg->device_feature_select);
> + features = vp_ioread32(&cfg->device_feature);
> + vp_iowrite32(1, &cfg->device_feature_select);
> + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
> +
> + return features;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_features);
> +
> +/*
> + * vp_modern_set_features - set features to device
> + * @mdev: the modern virtio-pci device
> + * @features: the features set to device
> + */
> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> + u64 features)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite32(0, &cfg->guest_feature_select);
> + vp_iowrite32((u32)features, &cfg->guest_feature);
> + vp_iowrite32(1, &cfg->guest_feature_select);
> + vp_iowrite32(features >> 32, &cfg->guest_feature);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_features);
> +
> +/*
> + * vp_modern_generation - get the device generation
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the generation read from device
> + */
> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + return vp_ioread8(&cfg->config_generation);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_generation);
> +
> +/*
> + * vp_modern_get_status - get the device status
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the status read from device
> + */
> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + return vp_ioread8(&cfg->device_status);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_status);
> +
> +/*
> + * vp_modern_set_status - set status to device
> + * @mdev: the modern virtio-pci device
> + * @status: the status set to device
> + */
> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> + u8 status)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite8(status, &cfg->device_status);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_status);
> +
> +/*
> + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: queue index
> + * @vector: the config vector
> + *
> + * Returns the config vector read from the device
> + */
> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> + u16 index, u16 vector)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite16(index, &cfg->queue_select);
> + vp_iowrite16(vector, &cfg->queue_msix_vector);
> + /* Flush the write out to device */
> + return vp_ioread16(&cfg->queue_msix_vector);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
> +
> +/*
> + * vp_modern_config_vector - set the vector for config interrupt
> + * @mdev: the modern virtio-pci device
> + * @vector: the config vector
> + *
> + * Returns the config vector read from the device
> + */
> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> + u16 vector)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + /* Setup the vector used for configuration events */
> + vp_iowrite16(vector, &cfg->msix_config);
> + /* Verify we had enough resources to assign the vector */
> + /* Will also flush the write out to device */
> + return vp_ioread16(&cfg->msix_config);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_config_vector);
> +
> +/*
> + * vp_modern_queue_address - set the virtqueue address
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @desc_addr: address of the descriptor area
> + * @driver_addr: address of the driver area
> + * @device_addr: address of the device area
> + */
> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> + u16 index, u64 desc_addr, u64 driver_addr,
> + u64 device_addr)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite16(index, &cfg->queue_select);
> +
> + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
> + &cfg->queue_used_hi);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_queue_address);
> +
> +/*
> + * vp_modern_set_queue_enable - enable a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @enable: whether the virtqueue is enabled or not
> + */
> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 index, bool enable)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> + vp_iowrite16(enable, &mdev->common->queue_enable);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
> +
> +/*
> + * vp_modern_get_queue_enable - check whether a virtqueue is enabled
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns whether a virtqueue is enabled or not
> + */
> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_enable);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
> +
> +/*
> + * vp_modern_set_queue_size - set size for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @size: the size of the virtqueue
> + */
> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 index, u16 size)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> + vp_iowrite16(size, &mdev->common->queue_size);
> +
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
> +
> +/*
> + * vp_modern_get_queue_size - get size for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns the size of the virtqueue
> + */
> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_size);
> +
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
> +
> +/*
> + * vp_modern_get_num_queues - get the number of virtqueues
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the number of virtqueues
> + */
> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
> +{
> + return vp_ioread16(&mdev->common->num_queues);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
> +
> +/*
> + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns the notification offset for a virtqueue
> + */
> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_notify_off);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
> +
> +MODULE_VERSION("0.1");
> +MODULE_DESCRIPTION("Modern Virtio PCI Device");
> +MODULE_AUTHOR("Jason Wang <[email protected]>");
> +MODULE_LICENSE("GPL");
> diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
> new file mode 100644
> index 000000000000..f26acbeec965
> --- /dev/null
> +++ b/include/linux/virtio_pci_modern.h
> @@ -0,0 +1,111 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _LINUX_VIRTIO_PCI_MODERN_H
> +#define _LINUX_VIRTIO_PCI_MODERN_H
> +
> +#include <linux/pci.h>
> +#include <linux/virtio_pci.h>
> +
> +struct virtio_pci_modern_device {
> + struct pci_dev *pci_dev;
> +
> + struct virtio_pci_common_cfg __iomem *common;
> + /* Device-specific data (non-legacy mode) */
> + void __iomem *device;
> + /* Base of vq notifications (non-legacy mode). */
> + void __iomem *notify_base;
> + /* Where to read and clear interrupt */
> + u8 __iomem *isr;
> +
> + /* So we can sanity-check accesses. */
> + size_t notify_len;
> + size_t device_len;
> +
> + /* Capability for when we need to map notifications per-vq. */
> + int notify_map_cap;
> +
> + /* Multiply queue_notify_off by this value. (non-legacy mode). */
> + u32 notify_offset_multiplier;
> +
> + int modern_bars;
> +
> + struct virtio_device_id id;
> +};
> +
> +/*
> + * Type-safe wrappers for io accesses.
> + * Use these to enforce at compile time the following spec requirement:
> + *
> + * The driver MUST access each field using the “natural” access
> + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
> + * for 16-bit fields and 8-bit accesses for 8-bit fields.
> + */
> +static inline u8 vp_ioread8(const u8 __iomem *addr)
> +{
> + return ioread8(addr);
> +}
> +static inline u16 vp_ioread16(const __le16 __iomem *addr)
> +{
> + return ioread16(addr);
> +}
> +
> +static inline u32 vp_ioread32(const __le32 __iomem *addr)
> +{
> + return ioread32(addr);
> +}
> +
> +static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
> +{
> + iowrite8(value, addr);
> +}
> +
> +static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
> +{
> + iowrite16(value, addr);
> +}
> +
> +static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
> +{
> + iowrite32(value, addr);
> +}
> +
> +static inline void vp_iowrite64_twopart(u64 val,
> + __le32 __iomem *lo,
> + __le32 __iomem *hi)
> +{
> + vp_iowrite32((u32)val, lo);
> + vp_iowrite32(val >> 32, hi);
> +}
> +
> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> + u64 features);
> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> + u8 status);
> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> + u16 idx, u16 vector);
> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> + u16 vector);
> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> + u16 index, u64 desc_addr, u64 driver_addr,
> + u64 device_addr);
> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 idx, bool enable);
> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 idx, u16 size);
> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> + size_t minlen,
> + u32 align,
> + u32 start, u32 size,
> + size_t *len);
> +int vp_modern_probe(struct virtio_pci_modern_device *mdev);
> +void vp_modern_remove(struct virtio_pci_modern_device *mdev);
> +#endif
> --
> 2.25.1
On Mon, Jan 04, 2021 at 02:55:01PM +0800, Jason Wang wrote:
> This patch delays the queue number setting until vDPA device
> registration. This allows us to probe the virtqueue numbers between
> device allocation and registration.
>
> Reviewed-by: Stefano Garzarella <[email protected]>
> Signed-off-by: Jason Wang <[email protected]>
Conflicts with other patches in the vhost tree.
Can you rebase please?
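To make the interface change concrete, a minimal before/after sketch (my_adapter, my_ops, MY_NVQS and probe_num_vqs() are placeholders, not part of this patch):

/* Before: the virtqueue number is fixed at allocation time. */
adapter = vdpa_alloc_device(struct my_adapter, vdpa, dev, &my_ops, MY_NVQS);
err = vdpa_register_device(&adapter->vdpa);

/* After: allocate first, probe the device, then pass nvqs at registration. */
adapter = vdpa_alloc_device(struct my_adapter, vdpa, dev, &my_ops);
nvqs = probe_num_vqs(adapter); /* placeholder for device-specific probing */
err = vdpa_register_device(&adapter->vdpa, nvqs);

This matches how ifcvf, mlx5_vnet and vdpa_sim are converted in the diff below.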
> ---
> drivers/vdpa/ifcvf/ifcvf_main.c | 5 ++---
> drivers/vdpa/mlx5/net/mlx5_vnet.c | 5 ++---
> drivers/vdpa/vdpa.c | 8 ++++----
> drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++--
> include/linux/vdpa.h | 7 +++----
> 5 files changed, 13 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 8b4028556cb6..d65f3221d8ed 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -438,8 +438,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> }
>
> adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
> - dev, &ifc_vdpa_ops,
> - IFCVF_MAX_QUEUE_PAIRS * 2);
> + dev, &ifc_vdpa_ops);
> if (adapter == NULL) {
> IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
> return -ENOMEM;
> @@ -463,7 +462,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
> vf->vring[i].irq = -EINVAL;
>
> - ret = vdpa_register_device(&adapter->vdpa);
> + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
> if (ret) {
> IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
> goto err;
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index f1d54814db97..a1b9260bf04d 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1958,8 +1958,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
> max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
> max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
>
> - ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
> - 2 * mlx5_vdpa_max_qps(max_vqs));
> + ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops);
> if (IS_ERR(ndev))
> return PTR_ERR(ndev);
>
> @@ -1986,7 +1985,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
> if (err)
> goto err_res;
>
> - err = vdpa_register_device(&mvdev->vdev);
> + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
> if (err)
> goto err_reg;
>
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index a69ffc991e13..ba89238f9898 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -61,7 +61,6 @@ static void vdpa_release_dev(struct device *d)
> * initialized but before registered.
> * @parent: the parent device
> * @config: the bus operations that is supported by this device
> - * @nvqs: number of virtqueues supported by this device
> * @size: size of the parent structure that contains private data
> *
> * Driver should use vdpa_alloc_device() wrapper macro instead of
> @@ -72,7 +71,6 @@ static void vdpa_release_dev(struct device *d)
> */
> struct vdpa_device *__vdpa_alloc_device(struct device *parent,
> const struct vdpa_config_ops *config,
> - int nvqs,
> size_t size)
> {
> struct vdpa_device *vdev;
> @@ -99,7 +97,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
> vdev->index = err;
> vdev->config = config;
> vdev->features_valid = false;
> - vdev->nvqs = nvqs;
>
> err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
> if (err)
> @@ -122,11 +119,14 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
> * vdpa_register_device - register a vDPA device
> * Callers must have a succeed call of vdpa_alloc_device() before.
> * @vdev: the vdpa device to be registered to vDPA bus
> + * @nvqs: number of virtqueues supported by this device
> *
> * Returns an error when fail to add to vDPA bus
> */
> -int vdpa_register_device(struct vdpa_device *vdev)
> +int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
> {
> + vdev->nvqs = nvqs;
> +
> return device_add(&vdev->dev);
> }
> EXPORT_SYMBOL_GPL(vdpa_register_device);
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index 6a90fdb9cbfc..b129cb4dd013 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -357,7 +357,7 @@ static struct vdpasim *vdpasim_create(void)
> else
> ops = &vdpasim_net_config_ops;
>
> - vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
> + vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops);
> if (!vdpasim)
> goto err_alloc;
>
> @@ -393,7 +393,7 @@ static struct vdpasim *vdpasim_create(void)
> vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
>
> vdpasim->vdpa.dma_dev = dev;
> - ret = vdpa_register_device(&vdpasim->vdpa);
> + ret = vdpa_register_device(&vdpasim->vdpa, VDPASIM_VQ_NUM);
> if (ret)
> goto err_iommu;
>
> diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
> index 30bc7a7223bb..d9e9d17b9083 100644
> --- a/include/linux/vdpa.h
> +++ b/include/linux/vdpa.h
> @@ -244,18 +244,17 @@ struct vdpa_config_ops {
>
> struct vdpa_device *__vdpa_alloc_device(struct device *parent,
> const struct vdpa_config_ops *config,
> - int nvqs,
> size_t size);
>
> -#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
> +#define vdpa_alloc_device(dev_struct, member, parent, config) \
> container_of(__vdpa_alloc_device( \
> - parent, config, nvqs, \
> + parent, config, \
> sizeof(dev_struct) + \
> BUILD_BUG_ON_ZERO(offsetof( \
> dev_struct, member))), \
> dev_struct, member)
>
> -int vdpa_register_device(struct vdpa_device *vdev);
> +int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
> void vdpa_unregister_device(struct vdpa_device *vdev);
>
> /**
> --
> 2.25.1
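For reference, from a parent driver's point of view the new calling convention looks roughly like the sketch below (the my_* names are hypothetical, and real drivers differ in how they check the result of vdpa_alloc_device()): the queue count is no longer fixed at allocation time and can be probed from the hardware before registration.

#include <linux/vdpa.h>

struct my_adapter {
	struct vdpa_device vdpa;	/* at offset 0, as the alloc macro enforces */
	/* driver private state ... */
};

static int my_probe(struct device *parent, const struct vdpa_config_ops *ops)
{
	struct my_adapter *a;
	int nvqs;

	/* No queue count is needed at allocation time any more ... */
	a = vdpa_alloc_device(struct my_adapter, vdpa, parent, ops);
	if (!a)
		return -ENOMEM;

	/* ... so it can be probed from the device here ... */
	nvqs = my_probe_num_queues(a);	/* hypothetical helper */

	/* ... and is only passed in when joining the vDPA bus. */
	return vdpa_register_device(&a->vdpa, nvqs);
}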
On Mon, Jan 04, 2021 at 02:55:02PM +0800, Jason Wang wrote:
> There's no guarantee that the device can disable a specific virtqueue
> through set_vq_ready(). One example is the modern virtio-pci
> device. So this patch removes the warning.
>
> Signed-off-by: Jason Wang <[email protected]>
Do we need the read as a kind of flush though?
> ---
> drivers/virtio/virtio_vdpa.c | 3 +--
> 1 file changed, 1 insertion(+), 2 deletions(-)
>
> diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> index 4a9ddb44b2a7..e28acf482e0c 100644
> --- a/drivers/virtio/virtio_vdpa.c
> +++ b/drivers/virtio/virtio_vdpa.c
> @@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
> list_del(&info->node);
> spin_unlock_irqrestore(&vd_dev->lock, flags);
>
> - /* Select and deactivate the queue */
> + /* Select and deactivate the queue (best effort) */
> ops->set_vq_ready(vdpa, index, 0);
> - WARN_ON(ops->get_vq_ready(vdpa, index));
>
> vring_del_virtqueue(vq);
>
> --
> 2.25.1
On 2021/2/5 11:24 PM, Michael S. Tsirkin wrote:
> On Mon, Jan 04, 2021 at 02:55:02PM +0800, Jason Wang wrote:
>> There's no guarantee that the device can disable a specific virtqueue
>> through set_vq_ready(). One example is the modern virtio-pci
>> device. So this patch removes the warning.
>>
>> Signed-off-by: Jason Wang <[email protected]>
>
> Do we need the read as a kind of flush though?
The problem is that PCI forbids writing 0 to queue_enable, so I'm not
sure what kind of flush we would need here.
Thanks
>
>> ---
>> drivers/virtio/virtio_vdpa.c | 3 +--
>> 1 file changed, 1 insertion(+), 2 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
>> index 4a9ddb44b2a7..e28acf482e0c 100644
>> --- a/drivers/virtio/virtio_vdpa.c
>> +++ b/drivers/virtio/virtio_vdpa.c
>> @@ -225,9 +225,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
>> list_del(&info->node);
>> spin_unlock_irqrestore(&vd_dev->lock, flags);
>>
>> - /* Select and deactivate the queue */
>> + /* Select and deactivate the queue (best effort) */
>> ops->set_vq_ready(vdpa, index, 0);
>> - WARN_ON(ops->get_vq_ready(vdpa, index));
>>
>> vring_del_virtqueue(vq);
>>
>> --
>> 2.25.1
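To make the "best effort" comment above concrete: a vDPA parent built on the modern virtio-pci transport can only honour the enable direction of set_vq_ready(), since queue_enable cannot be written back to 0. A rough sketch of such an implementation (hypothetical my_* wrappers over the vp_modern_* helpers, not code from the series):

#include <linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

/* my_to_mdev() is a hypothetical accessor from the vdpa device to its
 * struct virtio_pci_modern_device. */
static void my_set_vq_ready(struct vdpa_device *vdpa, u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = my_to_mdev(vdpa);

	/*
	 * Modern devices only accept writing 1 to queue_enable, so a
	 * disable request is dropped here; this is why the caller can
	 * no longer WARN when get_vq_ready() still reports true.
	 */
	if (ready)
		vp_modern_set_queue_enable(mdev, qid, true);
}

static bool my_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	return vp_modern_get_queue_enable(my_to_mdev(vdpa), qid);
}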
On 2021/2/5 11:27 PM, Michael S. Tsirkin wrote:
> On Mon, Jan 04, 2021 at 02:55:01PM +0800, Jason Wang wrote:
>> This patch delay the queue number setting to vDPA device
>> registering. This allows us to probe the virtqueue numbers between
>> device allocation and registering.
>>
>> Reviewed-by: Stefano Garzarella <[email protected]>
>> Signed-off-by: Jason Wang <[email protected]>
> Conflicts with other patches in the vhost tree.
> Can you rebase please?
Will do.
Thanks
>
>> ---
>> drivers/vdpa/ifcvf/ifcvf_main.c | 5 ++---
>> drivers/vdpa/mlx5/net/mlx5_vnet.c | 5 ++---
>> drivers/vdpa/vdpa.c | 8 ++++----
>> drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++--
>> include/linux/vdpa.h | 7 +++----
>> 5 files changed, 13 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
>> index 8b4028556cb6..d65f3221d8ed 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
>> @@ -438,8 +438,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>> }
>>
>> adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
>> - dev, &ifc_vdpa_ops,
>> - IFCVF_MAX_QUEUE_PAIRS * 2);
>> + dev, &ifc_vdpa_ops);
>> if (adapter == NULL) {
>> IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
>> return -ENOMEM;
>> @@ -463,7 +462,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>> for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
>> vf->vring[i].irq = -EINVAL;
>>
>> - ret = vdpa_register_device(&adapter->vdpa);
>> + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
>> if (ret) {
>> IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
>> goto err;
>> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> index f1d54814db97..a1b9260bf04d 100644
>> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> @@ -1958,8 +1958,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
>> max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
>> max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
>>
>> - ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
>> - 2 * mlx5_vdpa_max_qps(max_vqs));
>> + ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops);
>> if (IS_ERR(ndev))
>> return PTR_ERR(ndev);
>>
>> @@ -1986,7 +1985,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
>> if (err)
>> goto err_res;
>>
>> - err = vdpa_register_device(&mvdev->vdev);
>> + err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
>> if (err)
>> goto err_reg;
>>
>> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
>> index a69ffc991e13..ba89238f9898 100644
>> --- a/drivers/vdpa/vdpa.c
>> +++ b/drivers/vdpa/vdpa.c
>> @@ -61,7 +61,6 @@ static void vdpa_release_dev(struct device *d)
>> * initialized but before registered.
>> * @parent: the parent device
>> * @config: the bus operations that is supported by this device
>> - * @nvqs: number of virtqueues supported by this device
>> * @size: size of the parent structure that contains private data
>> *
>> * Driver should use vdpa_alloc_device() wrapper macro instead of
>> @@ -72,7 +71,6 @@ static void vdpa_release_dev(struct device *d)
>> */
>> struct vdpa_device *__vdpa_alloc_device(struct device *parent,
>> const struct vdpa_config_ops *config,
>> - int nvqs,
>> size_t size)
>> {
>> struct vdpa_device *vdev;
>> @@ -99,7 +97,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
>> vdev->index = err;
>> vdev->config = config;
>> vdev->features_valid = false;
>> - vdev->nvqs = nvqs;
>>
>> err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
>> if (err)
>> @@ -122,11 +119,14 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
>> * vdpa_register_device - register a vDPA device
>> * Callers must have a succeed call of vdpa_alloc_device() before.
>> * @vdev: the vdpa device to be registered to vDPA bus
>> + * @nvqs: number of virtqueues supported by this device
>> *
>> * Returns an error when fail to add to vDPA bus
>> */
>> -int vdpa_register_device(struct vdpa_device *vdev)
>> +int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
>> {
>> + vdev->nvqs = nvqs;
>> +
>> return device_add(&vdev->dev);
>> }
>> EXPORT_SYMBOL_GPL(vdpa_register_device);
>> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> index 6a90fdb9cbfc..b129cb4dd013 100644
>> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> @@ -357,7 +357,7 @@ static struct vdpasim *vdpasim_create(void)
>> else
>> ops = &vdpasim_net_config_ops;
>>
>> - vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
>> + vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops);
>> if (!vdpasim)
>> goto err_alloc;
>>
>> @@ -393,7 +393,7 @@ static struct vdpasim *vdpasim_create(void)
>> vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
>>
>> vdpasim->vdpa.dma_dev = dev;
>> - ret = vdpa_register_device(&vdpasim->vdpa);
>> + ret = vdpa_register_device(&vdpasim->vdpa, VDPASIM_VQ_NUM);
>> if (ret)
>> goto err_iommu;
>>
>> diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
>> index 30bc7a7223bb..d9e9d17b9083 100644
>> --- a/include/linux/vdpa.h
>> +++ b/include/linux/vdpa.h
>> @@ -244,18 +244,17 @@ struct vdpa_config_ops {
>>
>> struct vdpa_device *__vdpa_alloc_device(struct device *parent,
>> const struct vdpa_config_ops *config,
>> - int nvqs,
>> size_t size);
>>
>> -#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
>> +#define vdpa_alloc_device(dev_struct, member, parent, config) \
>> container_of(__vdpa_alloc_device( \
>> - parent, config, nvqs, \
>> + parent, config, \
>> sizeof(dev_struct) + \
>> BUILD_BUG_ON_ZERO(offsetof( \
>> dev_struct, member))), \
>> dev_struct, member)
>>
>> -int vdpa_register_device(struct vdpa_device *vdev);
>> +int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
>> void vdpa_unregister_device(struct vdpa_device *vdev);
>>
>> /**
>> --
>> 2.25.1
On 2021/2/5 11:34 PM, Michael S. Tsirkin wrote:
> On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
>> Signed-off-by: Jason Wang<[email protected]>
> I don't exactly get why we need to split the modern driver out,
> and it can confuse people who are used to be seeing virtio-pci.
The virtio-pci module is still there, with no user-visible changes. Just some
code that could be shared with other drivers was split out.
>
> The vdpa thing so far looks like a development tool, why do
> we care that it depends on a bit of extra code?
If I'm not misunderstanding, sharing this code was proposed by you here:
https://lkml.org/lkml/2020/6/10/232
We also plan to convert IFCVF to use this library.
Thanks
>
On Mon, Feb 08, 2021 at 01:42:27PM +0800, Jason Wang wrote:
>
> On 2021/2/5 下午11:34, Michael S. Tsirkin wrote:
> > On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> > > Signed-off-by: Jason Wang<[email protected]>
> > I don't exactly get why we need to split the modern driver out,
> > and it can confuse people who are used to be seeing virtio-pci.
>
>
> The virtio-pci module still there. No user visible changes. Just some codes
> that could be shared with other driver were split out.
>
What I am saying is this: we can have virtio-vdpa depend on
virtio-pci without splitting the common code out to an
extra module.
> >
> > The vdpa thing so far looks like a development tool, why do
> > we care that it depends on a bit of extra code?
>
>
> If I'm not misunderstanding, trying to share codes is proposed by you here:
>
> https://lkml.org/lkml/2020/6/10/232
>
> We also had the plan to convert IFCVF to use this library.
>
> Thanks
If that happens then an extra module might become useful.
--
MST
On 2021/2/8 8:04 PM, Michael S. Tsirkin wrote:
> On Mon, Feb 08, 2021 at 01:42:27PM +0800, Jason Wang wrote:
>> On 2021/2/5 下午11:34, Michael S. Tsirkin wrote:
>>> On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
>>>> Signed-off-by: Jason Wang<[email protected]>
>>> I don't exactly get why we need to split the modern driver out,
>>> and it can confuse people who are used to be seeing virtio-pci.
>>
>> The virtio-pci module still there. No user visible changes. Just some codes
>> that could be shared with other driver were split out.
>>
> What I am saying is this: we can have virtio-vdpa depend on
> virtio-pci without splitting the common code out to an
> extra module.
Ok.
>
>>> The vdpa thing so far looks like a development tool, why do
>>> we care that it depends on a bit of extra code?
>>
>> If I'm not misunderstanding, trying to share codes is proposed by you here:
>>
>> https://lkml.org/lkml/2020/6/10/232
>>
>> We also had the plan to convert IFCVF to use this library.
>>
>> Thanks
> If that happens then an extra module might become useful.
So does it make sense for me to post a new version and have it merged
first? Then Intel or I can convert IFCVF to use the library.
Thanks
>
On Tue, Feb 09, 2021 at 11:29:46AM +0800, Jason Wang wrote:
>
> On 2021/2/8 下午8:04, Michael S. Tsirkin wrote:
> > On Mon, Feb 08, 2021 at 01:42:27PM +0800, Jason Wang wrote:
> > > On 2021/2/5 下午11:34, Michael S. Tsirkin wrote:
> > > > On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> > > > > Signed-off-by: Jason Wang<[email protected]>
> > > > I don't exactly get why we need to split the modern driver out,
> > > > and it can confuse people who are used to be seeing virtio-pci.
> > >
> > > The virtio-pci module still there. No user visible changes. Just some codes
> > > that could be shared with other driver were split out.
> > >
> > What I am saying is this: we can have virtio-vdpa depend on
> > virtio-pci without splitting the common code out to an
> > extra module.
>
>
> Ok.
>
>
> >
> > > > The vdpa thing so far looks like a development tool, why do
> > > > we care that it depends on a bit of extra code?
> > >
> > > If I'm not misunderstanding, trying to share codes is proposed by you here:
> > >
> > > https://lkml.org/lkml/2020/6/10/232
> > >
> > > We also had the plan to convert IFCVF to use this library.
> > >
> > > Thanks
> > If that happens then an extra module might become useful.
>
>
> So does it make sense that I post a new version and let's merge it first.
> Then Intel or I can convert IFCVF to use the library?
>
> Thanks
Generally it's best if we actually have a couple of users before we bother
with refactoring - it's hard to predict the future,
so we don't really know what kind of refactoring will work for IFCVF ...
>
> >
Hi Jason,
On Mon, 4 Jan 2021 at 12:28, Jason Wang <[email protected]> wrote:
>
> Signed-off-by: Jason Wang <[email protected]>
> ---
> drivers/virtio/Kconfig | 10 +-
> drivers/virtio/Makefile | 1 +
> drivers/virtio/virtio_pci_common.h | 27 +-
> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
> include/linux/virtio_pci_modern.h | 111 +++++
> 6 files changed, 721 insertions(+), 644 deletions(-)
> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> create mode 100644 include/linux/virtio_pci_modern.h
>
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 7b41130d3f35..6b9b81f4b8c2 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> This option is selected if the architecture may need to enforce
> VIRTIO_F_ACCESS_PLATFORM
>
> +config VIRTIO_PCI_MODERN
> + tristate "Modern Virtio PCI Device"
> + depends on PCI
> + help
> + Modern PCI device implementation. This module implements the
> + basic probe and control for devices which are based on modern
> + PCI device with possible vendor specific extensions.
> +
> menuconfig VIRTIO_MENU
> bool "Virtio drivers"
> default y
> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>
> config VIRTIO_PCI
> tristate "PCI driver for virtio devices"
> - depends on PCI
> + depends on VIRTIO_PCI_MODERN
While booting a Linux next tag 20210208 kernel on qemu_arm64 and qemu_arm,
mounting the rootfs failed. The root cause seems to be the now-missing configs
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
Reported-by: Naresh Kamboju <[email protected]>
I then had to explicitly enable the new MODERN config
CONFIG_VIRTIO_PCI_MODERN=y
which in turn enabled
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
and the qemu_arm64 and qemu_arm boots pass.
New build link,
https://builds.tuxbuild.com/1oEse4EFsoQr1FkKBfiLmhMCe7j/
--
Linaro LKFT
https://lkft.linaro.org
On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> Signed-off-by: Jason Wang <[email protected]>
> ---
> drivers/virtio/Kconfig | 10 +-
> drivers/virtio/Makefile | 1 +
> drivers/virtio/virtio_pci_common.h | 27 +-
> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
> include/linux/virtio_pci_modern.h | 111 +++++
> 6 files changed, 721 insertions(+), 644 deletions(-)
> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> create mode 100644 include/linux/virtio_pci_modern.h
>
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 7b41130d3f35..6b9b81f4b8c2 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> This option is selected if the architecture may need to enforce
> VIRTIO_F_ACCESS_PLATFORM
>
> +config VIRTIO_PCI_MODERN
> + tristate "Modern Virtio PCI Device"
> + depends on PCI
> + help
> + Modern PCI device implementation. This module implements the
> + basic probe and control for devices which are based on modern
> + PCI device with possible vendor specific extensions.
> +
> menuconfig VIRTIO_MENU
> bool "Virtio drivers"
> default y
> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>
> config VIRTIO_PCI
> tristate "PCI driver for virtio devices"
> - depends on PCI
> + depends on VIRTIO_PCI_MODERN
> select VIRTIO
> help
> This driver provides support for virtio based paravirtual device
Looks like VIRTIO_PCI_MODERN is actually just a library that
virtio pci uses. Is that right? In that case just select it
automatically, let's not make users enable it manually.
--
MST
On 2021/2/9 10:20 PM, Michael S. Tsirkin wrote:
> On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
>> Signed-off-by: Jason Wang <[email protected]>
>> ---
>> drivers/virtio/Kconfig | 10 +-
>> drivers/virtio/Makefile | 1 +
>> drivers/virtio/virtio_pci_common.h | 27 +-
>> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
>> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
>> include/linux/virtio_pci_modern.h | 111 +++++
>> 6 files changed, 721 insertions(+), 644 deletions(-)
>> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
>> create mode 100644 include/linux/virtio_pci_modern.h
>>
>> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
>> index 7b41130d3f35..6b9b81f4b8c2 100644
>> --- a/drivers/virtio/Kconfig
>> +++ b/drivers/virtio/Kconfig
>> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
>> This option is selected if the architecture may need to enforce
>> VIRTIO_F_ACCESS_PLATFORM
>>
>> +config VIRTIO_PCI_MODERN
>> + tristate "Modern Virtio PCI Device"
>> + depends on PCI
>> + help
>> + Modern PCI device implementation. This module implements the
>> + basic probe and control for devices which are based on modern
>> + PCI device with possible vendor specific extensions.
>> +
>> menuconfig VIRTIO_MENU
>> bool "Virtio drivers"
>> default y
>> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>>
>> config VIRTIO_PCI
>> tristate "PCI driver for virtio devices"
>> - depends on PCI
>> + depends on VIRTIO_PCI_MODERN
>> select VIRTIO
>> help
>> This driver provides support for virtio based paravirtual device
> Looks like VIRTIO_PCI_MODERN is actually just a library that
> virtio pci uses. Is that right?
Right.
> In that case just select it
> automatically, let's not make users enable it manually.
I've considered doing this, but the problem is that the module depends on
PCI, so I don't think it can be selected.
Thanks
>
On 2021/2/9 6:15 PM, Naresh Kamboju wrote:
> Hi Jason,
>
> On Mon, 4 Jan 2021 at 12:28, Jason Wang <[email protected]> wrote:
>> Signed-off-by: Jason Wang <[email protected]>
>> ---
>> drivers/virtio/Kconfig | 10 +-
>> drivers/virtio/Makefile | 1 +
>> drivers/virtio/virtio_pci_common.h | 27 +-
>> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
>> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
>> include/linux/virtio_pci_modern.h | 111 +++++
>> 6 files changed, 721 insertions(+), 644 deletions(-)
>> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
>> create mode 100644 include/linux/virtio_pci_modern.h
>>
>> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
>> index 7b41130d3f35..6b9b81f4b8c2 100644
>> --- a/drivers/virtio/Kconfig
>> +++ b/drivers/virtio/Kconfig
>> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
>> This option is selected if the architecture may need to enforce
>> VIRTIO_F_ACCESS_PLATFORM
>>
>> +config VIRTIO_PCI_MODERN
>> + tristate "Modern Virtio PCI Device"
>> + depends on PCI
>> + help
>> + Modern PCI device implementation. This module implements the
>> + basic probe and control for devices which are based on modern
>> + PCI device with possible vendor specific extensions.
>> +
>> menuconfig VIRTIO_MENU
>> bool "Virtio drivers"
>> default y
>> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>>
>> config VIRTIO_PCI
>> tristate "PCI driver for virtio devices"
>> - depends on PCI
>> + depends on VIRTIO_PCI_MODERN
> While booting Linux next tag 20210208 kernel on qemu_arm64 and qemu_arm
> mount rootfs failed. The root cause seems to be due to missing configs
> CONFIG_VIRTIO_PCI=y
> CONFIG_VIRTIO_PCI_LEGACY=y
>
> Reported-by: Naresh Kamboju <[email protected]>
>
> Then I have to force to enable this MODERN config
> CONFIG_VIRTIO_PCI_MODERN=y
> and which enabled
> CONFIG_VIRTIO_PCI=y
> CONFIG_VIRTIO_PCI_LEGACY=y
>
> and the qemu_arm64 and qemu_arm boot pass.
>
>
> New build link,
> https://builds.tuxbuild.com/1oEse4EFsoQr1FkKBfiLmhMCe7j/
Thanks for the report.
I will post a patch to fix the defconfig to enable VIRTIO_PCI_MODERN.
Thanks
>
>
On Wed, Feb 10, 2021 at 12:44:03PM +0800, Jason Wang wrote:
>
> On 2021/2/9 下午10:20, Michael S. Tsirkin wrote:
> > On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> > > Signed-off-by: Jason Wang <[email protected]>
> > > ---
> > > drivers/virtio/Kconfig | 10 +-
> > > drivers/virtio/Makefile | 1 +
> > > drivers/virtio/virtio_pci_common.h | 27 +-
> > > drivers/virtio/virtio_pci_modern.c | 617 -------------------------
> > > drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
> > > include/linux/virtio_pci_modern.h | 111 +++++
> > > 6 files changed, 721 insertions(+), 644 deletions(-)
> > > create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> > > create mode 100644 include/linux/virtio_pci_modern.h
> > >
> > > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> > > index 7b41130d3f35..6b9b81f4b8c2 100644
> > > --- a/drivers/virtio/Kconfig
> > > +++ b/drivers/virtio/Kconfig
> > > @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> > > This option is selected if the architecture may need to enforce
> > > VIRTIO_F_ACCESS_PLATFORM
> > > +config VIRTIO_PCI_MODERN
> > > + tristate "Modern Virtio PCI Device"
> > > + depends on PCI
> > > + help
> > > + Modern PCI device implementation. This module implements the
> > > + basic probe and control for devices which are based on modern
> > > + PCI device with possible vendor specific extensions.
> > > +
> > > menuconfig VIRTIO_MENU
> > > bool "Virtio drivers"
> > > default y
> > > @@ -20,7 +28,7 @@ if VIRTIO_MENU
> > > config VIRTIO_PCI
> > > tristate "PCI driver for virtio devices"
> > > - depends on PCI
> > > + depends on VIRTIO_PCI_MODERN
> > > select VIRTIO
> > > help
> > > This driver provides support for virtio based paravirtual device
> > Looks like VIRTIO_PCI_MODERN is actually just a library that
> > virtio pci uses. Is that right?
>
>
> Right.
>
>
> > In that case just select it
> > automatically, let's not make users enable it manually.
>
>
> I've considered to do this but the problem is that the module depends on PCI
> so it can't be selected I think.
Drop the dependency, document that whoever selects it must depend on PCI.
> Thanks
>
>
> >
On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
> Signed-off-by: Jason Wang <[email protected]>
> Reported-by: Naresh Kamboju <[email protected]>
I don't really see the point of making users enable VIRTIO_PCI_MODERN
just to keep VIRTIO_PCI working. If VIRTIO_PCI now
requires VIRTIO_PCI_MODERN, maybe it should select it instead of
depending on it? Alternatively, you could just drop the new
configuration flag entirely and build virtio_pci_modern_dev.o as part of
VIRTIO_PCI. One doesn't work without the other anyway, after all.
Guenter
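For reference, the "select" variant being suggested would look roughly like this in drivers/virtio/Kconfig (a sketch, not a tested patch): VIRTIO_PCI_MODERN loses its prompt and its PCI dependency, and anything that selects it is documented to depend on PCI itself.

config VIRTIO_PCI_MODERN
	tristate
	help
	  Modern PCI device implementation library. This is selected by
	  drivers that need it; any driver selecting this symbol must
	  itself depend on PCI.

config VIRTIO_PCI
	tristate "PCI driver for virtio devices"
	depends on PCI
	select VIRTIO_PCI_MODERN
	select VIRTIO
	help
	  This driver provides support for virtio based paravirtual device
	  drivers over PCI.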
> ---
> drivers/virtio/Kconfig | 10 +-
> drivers/virtio/Makefile | 1 +
> drivers/virtio/virtio_pci_common.h | 27 +-
> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
> include/linux/virtio_pci_modern.h | 111 +++++
> 6 files changed, 721 insertions(+), 644 deletions(-)
> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
> create mode 100644 include/linux/virtio_pci_modern.h
>
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 7b41130d3f35..6b9b81f4b8c2 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
> This option is selected if the architecture may need to enforce
> VIRTIO_F_ACCESS_PLATFORM
>
> +config VIRTIO_PCI_MODERN
> + tristate "Modern Virtio PCI Device"
> + depends on PCI
> + help
> + Modern PCI device implementation. This module implements the
> + basic probe and control for devices which are based on modern
> + PCI device with possible vendor specific extensions.
> +
> menuconfig VIRTIO_MENU
> bool "Virtio drivers"
> default y
> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>
> config VIRTIO_PCI
> tristate "PCI driver for virtio devices"
> - depends on PCI
> + depends on VIRTIO_PCI_MODERN
> select VIRTIO
> help
> This driver provides support for virtio based paravirtual device
> diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
> index 591e6f72aa54..f097578aaa8f 100644
> --- a/drivers/virtio/Makefile
> +++ b/drivers/virtio/Makefile
> @@ -1,5 +1,6 @@
> # SPDX-License-Identifier: GPL-2.0
> obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
> +obj-$(CONFIG_VIRTIO_PCI_MODERN) += virtio_pci_modern_dev.o
> obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
> obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
> virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index f35ff5b6b467..beec047a8f8d 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -25,6 +25,7 @@
> #include <linux/virtio_config.h>
> #include <linux/virtio_ring.h>
> #include <linux/virtio_pci.h>
> +#include <linux/virtio_pci_modern.h>
> #include <linux/highmem.h>
> #include <linux/spinlock.h>
>
> @@ -39,32 +40,6 @@ struct virtio_pci_vq_info {
> unsigned msix_vector;
> };
>
> -struct virtio_pci_modern_device {
> - struct pci_dev *pci_dev;
> -
> - struct virtio_pci_common_cfg __iomem *common;
> - /* Device-specific data (non-legacy mode) */
> - void __iomem *device;
> - /* Base of vq notifications (non-legacy mode). */
> - void __iomem *notify_base;
> - /* Where to read and clear interrupt */
> - u8 __iomem *isr;
> -
> - /* So we can sanity-check accesses. */
> - size_t notify_len;
> - size_t device_len;
> -
> - /* Capability for when we need to map notifications per-vq. */
> - int notify_map_cap;
> -
> - /* Multiply queue_notify_off by this value. (non-legacy mode). */
> - u32 notify_offset_multiplier;
> -
> - int modern_bars;
> -
> - struct virtio_device_id id;
> -};
> -
> /* Our device structure */
> struct virtio_pci_device {
> struct virtio_device vdev;
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index a5e3a5e40323..fbd4ebc00eb6 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -19,158 +19,6 @@
> #define VIRTIO_RING_NO_LEGACY
> #include "virtio_pci_common.h"
>
> -/*
> - * Type-safe wrappers for io accesses.
> - * Use these to enforce at compile time the following spec requirement:
> - *
> - * The driver MUST access each field using the “natural” access
> - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
> - * for 16-bit fields and 8-bit accesses for 8-bit fields.
> - */
> -static inline u8 vp_ioread8(const u8 __iomem *addr)
> -{
> - return ioread8(addr);
> -}
> -static inline u16 vp_ioread16 (const __le16 __iomem *addr)
> -{
> - return ioread16(addr);
> -}
> -
> -static inline u32 vp_ioread32(const __le32 __iomem *addr)
> -{
> - return ioread32(addr);
> -}
> -
> -static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
> -{
> - iowrite8(value, addr);
> -}
> -
> -static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
> -{
> - iowrite16(value, addr);
> -}
> -
> -static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
> -{
> - iowrite32(value, addr);
> -}
> -
> -static void vp_iowrite64_twopart(u64 val,
> - __le32 __iomem *lo, __le32 __iomem *hi)
> -{
> - vp_iowrite32((u32)val, lo);
> - vp_iowrite32(val >> 32, hi);
> -}
> -
> -/*
> - * vp_modern_map_capability - map a part of virtio pci capability
> - * @mdev: the modern virtio-pci device
> - * @off: offset of the capability
> - * @minlen: minimal length of the capability
> - * @align: align requirement
> - * @start: start from the capability
> - * @size: map size
> - * @len: the length that is actually mapped
> - *
> - * Returns the io address of for the part of the capability
> - */
> -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> - size_t minlen,
> - u32 align,
> - u32 start, u32 size,
> - size_t *len)
> -{
> - struct pci_dev *dev = mdev->pci_dev;
> - u8 bar;
> - u32 offset, length;
> - void __iomem *p;
> -
> - pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
> - bar),
> - &bar);
> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
> - &offset);
> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
> - &length);
> -
> - if (length <= start) {
> - dev_err(&dev->dev,
> - "virtio_pci: bad capability len %u (>%u expected)\n",
> - length, start);
> - return NULL;
> - }
> -
> - if (length - start < minlen) {
> - dev_err(&dev->dev,
> - "virtio_pci: bad capability len %u (>=%zu expected)\n",
> - length, minlen);
> - return NULL;
> - }
> -
> - length -= start;
> -
> - if (start + offset < offset) {
> - dev_err(&dev->dev,
> - "virtio_pci: map wrap-around %u+%u\n",
> - start, offset);
> - return NULL;
> - }
> -
> - offset += start;
> -
> - if (offset & (align - 1)) {
> - dev_err(&dev->dev,
> - "virtio_pci: offset %u not aligned to %u\n",
> - offset, align);
> - return NULL;
> - }
> -
> - if (length > size)
> - length = size;
> -
> - if (len)
> - *len = length;
> -
> - if (minlen + offset < minlen ||
> - minlen + offset > pci_resource_len(dev, bar)) {
> - dev_err(&dev->dev,
> - "virtio_pci: map virtio %zu@%u "
> - "out of range on bar %i length %lu\n",
> - minlen, offset,
> - bar, (unsigned long)pci_resource_len(dev, bar));
> - return NULL;
> - }
> -
> - p = pci_iomap_range(dev, bar, offset, length);
> - if (!p)
> - dev_err(&dev->dev,
> - "virtio_pci: unable to map virtio %u@%u on bar %i\n",
> - length, offset, bar);
> - return p;
> -}
> -
> -/*
> - * vp_modern_get_features - get features from device
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the features read from the device
> - */
> -static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - u64 features;
> -
> - vp_iowrite32(0, &cfg->device_feature_select);
> - features = vp_ioread32(&cfg->device_feature);
> - vp_iowrite32(1, &cfg->device_feature_select);
> - features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
> -
> - return features;
> -}
> -
> -/* virtio config->get_features() implementation */
> static u64 vp_get_features(struct virtio_device *vdev)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -188,149 +36,6 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> }
>
> -/*
> - * vp_modern_set_features - set features to device
> - * @mdev: the modern virtio-pci device
> - * @features: the features set to device
> - */
> -static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> - u64 features)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite32(0, &cfg->guest_feature_select);
> - vp_iowrite32((u32)features, &cfg->guest_feature);
> - vp_iowrite32(1, &cfg->guest_feature_select);
> - vp_iowrite32(features >> 32, &cfg->guest_feature);
> -}
> -
> -/*
> - * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: queue index
> - * @vector: the config vector
> - *
> - * Returns the config vector read from the device
> - */
> -static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> - u16 index, u16 vector)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite16(index, &cfg->queue_select);
> - vp_iowrite16(vector, &cfg->queue_msix_vector);
> - /* Flush the write out to device */
> - return vp_ioread16(&cfg->queue_msix_vector);
> -}
> -
> -/*
> - * vp_modern_queue_address - set the virtqueue address
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @desc_addr: address of the descriptor area
> - * @driver_addr: address of the driver area
> - * @device_addr: address of the device area
> - */
> -static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> - u16 index, u64 desc_addr, u64 driver_addr,
> - u64 device_addr)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite16(index, &cfg->queue_select);
> -
> - vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
> - &cfg->queue_desc_hi);
> - vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
> - &cfg->queue_avail_hi);
> - vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
> - &cfg->queue_used_hi);
> -}
> -
> -/*
> - * vp_modern_set_queue_enable - enable a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @enable: whether the virtqueue is enable or not
> - */
> -static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> - u16 index, bool enable)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> - vp_iowrite16(enable, &mdev->common->queue_enable);
> -}
> -
> -/*
> - * vp_modern_get_queue_enable - enable a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns whether a virtqueue is enabled or not
> - */
> -static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_enable);
> -}
> -
> -/*
> - * vp_modern_set_queue_size - set size for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - * @size: the size of the virtqueue
> - */
> -static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> - u16 index, u16 size)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> - vp_iowrite16(size, &mdev->common->queue_size);
> -
> -}
> -
> -/*
> - * vp_modern_get_queue_size - get size for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns the size of the virtqueue
> - */
> -static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_size);
> -
> -}
> -
> -/*
> - * vp_modern_get_num_queues - get the number of virtqueues
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the number of virtqueues
> - */
> -static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
> -{
> - return vp_ioread16(&mdev->common->num_queues);
> -}
> -
> -/*
> - * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
> - * @mdev: the modern virtio-pci device
> - * @index: the queue index
> - *
> - * Returns the notification offset for a virtqueue
> - */
> -static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> - u16 index)
> -{
> - vp_iowrite16(index, &mdev->common->queue_select);
> -
> - return vp_ioread16(&mdev->common->queue_notify_off);
> -}
> -
> /* virtio config->finalize_features() implementation */
> static int vp_finalize_features(struct virtio_device *vdev)
> {
> @@ -429,19 +134,6 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
> }
> }
>
> -/*
> - * vp_modern_generation - get the device genreation
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the genreation read from device
> - */
> -static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - return vp_ioread8(&cfg->config_generation);
> -}
> -
> static u32 vp_generation(struct virtio_device *vdev)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -449,19 +141,6 @@ static u32 vp_generation(struct virtio_device *vdev)
> return vp_modern_generation(&vp_dev->mdev);
> }
>
> -/*
> - * vp_modern_get_status - get the device status
> - * @mdev: the modern virtio-pci device
> - *
> - * Returns the status read from device
> - */
> -static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - return vp_ioread8(&cfg->device_status);
> -}
> -
> /* config->{get,set}_status() implementations */
> static u8 vp_get_status(struct virtio_device *vdev)
> {
> @@ -470,19 +149,6 @@ static u8 vp_get_status(struct virtio_device *vdev)
> return vp_modern_get_status(&vp_dev->mdev);
> }
>
> -/*
> - * vp_modern_set_status - set status to device
> - * @mdev: the modern virtio-pci device
> - * @status: the status set to device
> - */
> -static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> - u8 status)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - vp_iowrite8(status, &cfg->device_status);
> -}
> -
> static void vp_set_status(struct virtio_device *vdev, u8 status)
> {
> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> @@ -510,25 +176,6 @@ static void vp_reset(struct virtio_device *vdev)
> vp_synchronize_vectors(vdev);
> }
>
> -/*
> - * vp_modern_config_vector - set the vector for config interrupt
> - * @mdev: the modern virtio-pci device
> - * @vector: the config vector
> - *
> - * Returns the config vector read from the device
> - */
> -static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> - u16 vector)
> -{
> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> -
> - /* Setup the vector used for configuration events */
> - vp_iowrite16(vector, &cfg->msix_config);
> - /* Verify we had enough resources to assign the vector */
> - /* Will also flush the write out to device */
> - return vp_ioread16(&cfg->msix_config);
> -}
> -
> static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> {
> return vp_modern_config_vector(&vp_dev->mdev, vector);
> @@ -789,253 +436,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
> .get_shm_region = vp_get_shm_region,
> };
>
> -/**
> - * virtio_pci_find_capability - walk capabilities to find device info.
> - * @dev: the pci device
> - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
> - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
> - * @bars: the bitmask of BARs
> - *
> - * Returns offset of the capability, or 0.
> - */
> -static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
> - u32 ioresource_types, int *bars)
> -{
> - int pos;
> -
> - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
> - pos > 0;
> - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
> - u8 type, bar;
> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> - cfg_type),
> - &type);
> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> - bar),
> - &bar);
> -
> - /* Ignore structures with reserved BAR values */
> - if (bar > 0x5)
> - continue;
> -
> - if (type == cfg_type) {
> - if (pci_resource_len(dev, bar) &&
> - pci_resource_flags(dev, bar) & ioresource_types) {
> - *bars |= (1 << bar);
> - return pos;
> - }
> - }
> - }
> - return 0;
> -}
> -
> -/* This is part of the ABI. Don't screw with it. */
> -static inline void check_offsets(void)
> -{
> - /* Note: disk space was harmed in compilation of this function. */
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
> - offsetof(struct virtio_pci_cap, cap_vndr));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
> - offsetof(struct virtio_pci_cap, cap_next));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
> - offsetof(struct virtio_pci_cap, cap_len));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
> - offsetof(struct virtio_pci_cap, cfg_type));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
> - offsetof(struct virtio_pci_cap, bar));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
> - offsetof(struct virtio_pci_cap, offset));
> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
> - offsetof(struct virtio_pci_cap, length));
> - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
> - offsetof(struct virtio_pci_notify_cap,
> - notify_off_multiplier));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
> - offsetof(struct virtio_pci_common_cfg,
> - device_feature_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
> - offsetof(struct virtio_pci_common_cfg, device_feature));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
> - offsetof(struct virtio_pci_common_cfg,
> - guest_feature_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
> - offsetof(struct virtio_pci_common_cfg, guest_feature));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
> - offsetof(struct virtio_pci_common_cfg, msix_config));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
> - offsetof(struct virtio_pci_common_cfg, num_queues));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
> - offsetof(struct virtio_pci_common_cfg, device_status));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
> - offsetof(struct virtio_pci_common_cfg, config_generation));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
> - offsetof(struct virtio_pci_common_cfg, queue_select));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
> - offsetof(struct virtio_pci_common_cfg, queue_size));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
> - offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
> - offsetof(struct virtio_pci_common_cfg, queue_enable));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
> - offsetof(struct virtio_pci_common_cfg, queue_notify_off));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
> - offsetof(struct virtio_pci_common_cfg, queue_used_lo));
> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
> - offsetof(struct virtio_pci_common_cfg, queue_used_hi));
> -}
> -
> -/*
> - * vp_modern_probe: probe the modern virtio pci device, note that the
> - * caller is required to enable PCI device before calling this function.
> - * @mdev: the modern virtio-pci device
> - *
> - * Return 0 on succeed otherwise fail
> - */
> -static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
> -{
> - struct pci_dev *pci_dev = mdev->pci_dev;
> - int err, common, isr, notify, device;
> - u32 notify_length;
> - u32 notify_offset;
> -
> - check_offsets();
> -
> - mdev->pci_dev = pci_dev;
> -
> - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
> - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
> - return -ENODEV;
> -
> - if (pci_dev->device < 0x1040) {
> - /* Transitional devices: use the PCI subsystem device id as
> - * virtio device id, same as legacy driver always did.
> - */
> - mdev->id.device = pci_dev->subsystem_device;
> - } else {
> - /* Modern devices: simply use PCI device id, but start from 0x1040. */
> - mdev->id.device = pci_dev->device - 0x1040;
> - }
> - mdev->id.vendor = pci_dev->subsystem_vendor;
> -
> - /* check for a common config: if not, use legacy mode (bar 0). */
> - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - if (!common) {
> - dev_info(&pci_dev->dev,
> - "virtio_pci: leaving for legacy driver\n");
> - return -ENODEV;
> - }
> -
> - /* If common is there, these should be too... */
> - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> - if (!isr || !notify) {
> - dev_err(&pci_dev->dev,
> - "virtio_pci: missing capabilities %i/%i/%i\n",
> - common, isr, notify);
> - return -EINVAL;
> - }
> -
> - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
> - if (err)
> - err = dma_set_mask_and_coherent(&pci_dev->dev,
> - DMA_BIT_MASK(32));
> - if (err)
> - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
> -
> - /* Device capability is only mandatory for devices that have
> - * device-specific configuration.
> - */
> - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
> - IORESOURCE_IO | IORESOURCE_MEM,
> - &mdev->modern_bars);
> -
> - err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
> - "virtio-pci-modern");
> - if (err)
> - return err;
> -
> - err = -EINVAL;
> - mdev->common = vp_modern_map_capability(mdev, common,
> - sizeof(struct virtio_pci_common_cfg), 4,
> - 0, sizeof(struct virtio_pci_common_cfg),
> - NULL);
> - if (!mdev->common)
> - goto err_map_common;
> - mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
> - 0, 1,
> - NULL);
> - if (!mdev->isr)
> - goto err_map_isr;
> -
> - /* Read notify_off_multiplier from config space. */
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - notify_off_multiplier),
> - &mdev->notify_offset_multiplier);
> - /* Read notify length and offset from config space. */
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - cap.length),
> - ¬ify_length);
> -
> - pci_read_config_dword(pci_dev,
> - notify + offsetof(struct virtio_pci_notify_cap,
> - cap.offset),
> - ¬ify_offset);
> -
> - /* We don't know how many VQs we'll map, ahead of the time.
> - * If notify length is small, map it all now.
> - * Otherwise, map each VQ individually later.
> - */
> - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
> - mdev->notify_base = vp_modern_map_capability(mdev, notify,
> - 2, 2,
> - 0, notify_length,
> - &mdev->notify_len);
> - if (!mdev->notify_base)
> - goto err_map_notify;
> - } else {
> - mdev->notify_map_cap = notify;
> - }
> -
> - /* Again, we don't know how much we should map, but PAGE_SIZE
> - * is more than enough for all existing devices.
> - */
> - if (device) {
> - mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
> - 0, PAGE_SIZE,
> - &mdev->device_len);
> - if (!mdev->device)
> - goto err_map_device;
> - }
> -
> - return 0;
> -
> -err_map_device:
> - if (mdev->notify_base)
> - pci_iounmap(pci_dev, mdev->notify_base);
> -err_map_notify:
> - pci_iounmap(pci_dev, mdev->isr);
> -err_map_isr:
> - pci_iounmap(pci_dev, mdev->common);
> -err_map_common:
> - return err;
> -}
> -
> /* the PCI probing function */
> int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
> {
> @@ -1063,23 +463,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
> return 0;
> }
>
> -/*
> - * vp_modern_probe: remove and cleanup the modern virtio pci device
> - * @mdev: the modern virtio-pci device
> - */
> -static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
> -{
> - struct pci_dev *pci_dev = mdev->pci_dev;
> -
> - if (mdev->device)
> - pci_iounmap(pci_dev, mdev->device);
> - if (mdev->notify_base)
> - pci_iounmap(pci_dev, mdev->notify_base);
> - pci_iounmap(pci_dev, mdev->isr);
> - pci_iounmap(pci_dev, mdev->common);
> - pci_release_selected_regions(pci_dev, mdev->modern_bars);
> -}
> -
> void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
> {
> struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
> new file mode 100644
> index 000000000000..cbd667496bb1
> --- /dev/null
> +++ b/drivers/virtio/virtio_pci_modern_dev.c
> @@ -0,0 +1,599 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +
> +#include <linux/virtio_pci_modern.h>
> +#include <linux/module.h>
> +#include <linux/pci.h>
> +
> +/*
> + * vp_modern_map_capability - map a part of virtio pci capability
> + * @mdev: the modern virtio-pci device
> + * @off: offset of the capability
> + * @minlen: minimal length of the capability
> + * @align: align requirement
> + * @start: start from the capability
> + * @size: map size
> + * @len: the length that is actually mapped
> + *
> + * Returns the io address of for the part of the capability
> + */
> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> + size_t minlen,
> + u32 align,
> + u32 start, u32 size,
> + size_t *len)
> +{
> + struct pci_dev *dev = mdev->pci_dev;
> + u8 bar;
> + u32 offset, length;
> + void __iomem *p;
> +
> + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
> + bar),
> + &bar);
> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
> + &offset);
> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
> + &length);
> +
> + if (length <= start) {
> + dev_err(&dev->dev,
> + "virtio_pci: bad capability len %u (>%u expected)\n",
> + length, start);
> + return NULL;
> + }
> +
> + if (length - start < minlen) {
> + dev_err(&dev->dev,
> + "virtio_pci: bad capability len %u (>=%zu expected)\n",
> + length, minlen);
> + return NULL;
> + }
> +
> + length -= start;
> +
> + if (start + offset < offset) {
> + dev_err(&dev->dev,
> + "virtio_pci: map wrap-around %u+%u\n",
> + start, offset);
> + return NULL;
> + }
> +
> + offset += start;
> +
> + if (offset & (align - 1)) {
> + dev_err(&dev->dev,
> + "virtio_pci: offset %u not aligned to %u\n",
> + offset, align);
> + return NULL;
> + }
> +
> + if (length > size)
> + length = size;
> +
> + if (len)
> + *len = length;
> +
> + if (minlen + offset < minlen ||
> + minlen + offset > pci_resource_len(dev, bar)) {
> + dev_err(&dev->dev,
> + "virtio_pci: map virtio %zu@%u "
> + "out of range on bar %i length %lu\n",
> + minlen, offset,
> + bar, (unsigned long)pci_resource_len(dev, bar));
> + return NULL;
> + }
> +
> + p = pci_iomap_range(dev, bar, offset, length);
> + if (!p)
> + dev_err(&dev->dev,
> + "virtio_pci: unable to map virtio %u@%u on bar %i\n",
> + length, offset, bar);
> + return p;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_map_capability);
> +
> +/**
> + * virtio_pci_find_capability - walk capabilities to find device info.
> + * @dev: the pci device
> + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
> + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
> + * @bars: the bitmask of BARs
> + *
> + * Returns offset of the capability, or 0.
> + */
> +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
> + u32 ioresource_types, int *bars)
> +{
> + int pos;
> +
> + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
> + pos > 0;
> + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
> + u8 type, bar;
> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> + cfg_type),
> + &type);
> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
> + bar),
> + &bar);
> +
> + /* Ignore structures with reserved BAR values */
> + if (bar > 0x5)
> + continue;
> +
> + if (type == cfg_type) {
> + if (pci_resource_len(dev, bar) &&
> + pci_resource_flags(dev, bar) & ioresource_types) {
> + *bars |= (1 << bar);
> + return pos;
> + }
> + }
> + }
> + return 0;
> +}
> +
> +/* This is part of the ABI. Don't screw with it. */
> +static inline void check_offsets(void)
> +{
> + /* Note: disk space was harmed in compilation of this function. */
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
> + offsetof(struct virtio_pci_cap, cap_vndr));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
> + offsetof(struct virtio_pci_cap, cap_next));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
> + offsetof(struct virtio_pci_cap, cap_len));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
> + offsetof(struct virtio_pci_cap, cfg_type));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
> + offsetof(struct virtio_pci_cap, bar));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
> + offsetof(struct virtio_pci_cap, offset));
> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
> + offsetof(struct virtio_pci_cap, length));
> + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
> + offsetof(struct virtio_pci_notify_cap,
> + notify_off_multiplier));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
> + offsetof(struct virtio_pci_common_cfg,
> + device_feature_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
> + offsetof(struct virtio_pci_common_cfg, device_feature));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
> + offsetof(struct virtio_pci_common_cfg,
> + guest_feature_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
> + offsetof(struct virtio_pci_common_cfg, guest_feature));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
> + offsetof(struct virtio_pci_common_cfg, msix_config));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
> + offsetof(struct virtio_pci_common_cfg, num_queues));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
> + offsetof(struct virtio_pci_common_cfg, device_status));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
> + offsetof(struct virtio_pci_common_cfg, config_generation));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
> + offsetof(struct virtio_pci_common_cfg, queue_select));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
> + offsetof(struct virtio_pci_common_cfg, queue_size));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
> + offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
> + offsetof(struct virtio_pci_common_cfg, queue_enable));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
> + offsetof(struct virtio_pci_common_cfg, queue_notify_off));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
> + offsetof(struct virtio_pci_common_cfg, queue_used_lo));
> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
> + offsetof(struct virtio_pci_common_cfg, queue_used_hi));
> +}
> +
> +/*
> + * vp_modern_probe - probe the modern virtio pci device. Note that the
> + * caller is required to enable the PCI device before calling this function.
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns 0 on success, a negative error code on failure
> + */
> +int vp_modern_probe(struct virtio_pci_modern_device *mdev)
> +{
> + struct pci_dev *pci_dev = mdev->pci_dev;
> + int err, common, isr, notify, device;
> + u32 notify_length;
> + u32 notify_offset;
> +
> + check_offsets();
> +
> + mdev->pci_dev = pci_dev;
> +
> + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
> + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
> + return -ENODEV;
> +
> + if (pci_dev->device < 0x1040) {
> + /* Transitional devices: use the PCI subsystem device id as
> + * virtio device id, same as legacy driver always did.
> + */
> + mdev->id.device = pci_dev->subsystem_device;
> + } else {
> + /* Modern devices: simply use PCI device id, but start from 0x1040. */
> + mdev->id.device = pci_dev->device - 0x1040;
> + }
> + mdev->id.vendor = pci_dev->subsystem_vendor;
> +
> + /* check for a common config: if not, use legacy mode (bar 0). */
> + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + if (!common) {
> + dev_info(&pci_dev->dev,
> + "virtio_pci: leaving for legacy driver\n");
> + return -ENODEV;
> + }
> +
> + /* If common is there, these should be too... */
> + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> + if (!isr || !notify) {
> + dev_err(&pci_dev->dev,
> + "virtio_pci: missing capabilities %i/%i/%i\n",
> + common, isr, notify);
> + return -EINVAL;
> + }
> +
> + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
> + if (err)
> + err = dma_set_mask_and_coherent(&pci_dev->dev,
> + DMA_BIT_MASK(32));
> + if (err)
> + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
> +
> + /* Device capability is only mandatory for devices that have
> + * device-specific configuration.
> + */
> + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
> + IORESOURCE_IO | IORESOURCE_MEM,
> + &mdev->modern_bars);
> +
> + err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
> + "virtio-pci-modern");
> + if (err)
> + return err;
> +
> + err = -EINVAL;
> + mdev->common = vp_modern_map_capability(mdev, common,
> + sizeof(struct virtio_pci_common_cfg), 4,
> + 0, sizeof(struct virtio_pci_common_cfg),
> + NULL);
> + if (!mdev->common)
> + goto err_map_common;
> + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
> + 0, 1,
> + NULL);
> + if (!mdev->isr)
> + goto err_map_isr;
> +
> + /* Read notify_off_multiplier from config space. */
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + notify_off_multiplier),
> + &mdev->notify_offset_multiplier);
> + /* Read notify length and offset from config space. */
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + cap.length),
> + ¬ify_length);
> +
> + pci_read_config_dword(pci_dev,
> + notify + offsetof(struct virtio_pci_notify_cap,
> + cap.offset),
> + ¬ify_offset);
> +
> + /* We don't know how many VQs we'll map ahead of time.
> + * If notify length is small, map it all now.
> + * Otherwise, map each VQ individually later.
> + */
> + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
> + mdev->notify_base = vp_modern_map_capability(mdev, notify,
> + 2, 2,
> + 0, notify_length,
> + &mdev->notify_len);
> + if (!mdev->notify_base)
> + goto err_map_notify;
> + } else {
> + mdev->notify_map_cap = notify;
> + }
> +
> + /* Again, we don't know how much we should map, but PAGE_SIZE
> + * is more than enough for all existing devices.
> + */
> + if (device) {
> + mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
> + 0, PAGE_SIZE,
> + &mdev->device_len);
> + if (!mdev->device)
> + goto err_map_device;
> + }
> +
> + return 0;
> +
> +err_map_device:
> + if (mdev->notify_base)
> + pci_iounmap(pci_dev, mdev->notify_base);
> +err_map_notify:
> + pci_iounmap(pci_dev, mdev->isr);
> +err_map_isr:
> + pci_iounmap(pci_dev, mdev->common);
> +err_map_common:
> + return err;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_probe);
> +
> +/*
> + * vp_modern_remove - remove and clean up the modern virtio pci device
> + * @mdev: the modern virtio-pci device
> + */
> +void vp_modern_remove(struct virtio_pci_modern_device *mdev)
> +{
> + struct pci_dev *pci_dev = mdev->pci_dev;
> +
> + if (mdev->device)
> + pci_iounmap(pci_dev, mdev->device);
> + if (mdev->notify_base)
> + pci_iounmap(pci_dev, mdev->notify_base);
> + pci_iounmap(pci_dev, mdev->isr);
> + pci_iounmap(pci_dev, mdev->common);
> + pci_release_selected_regions(pci_dev, mdev->modern_bars);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_remove);
> +
> +/*
> + * vp_modern_get_features - get features from device
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the features read from the device
> + */
> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + u64 features;
> +
> + vp_iowrite32(0, &cfg->device_feature_select);
> + features = vp_ioread32(&cfg->device_feature);
> + vp_iowrite32(1, &cfg->device_feature_select);
> + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
> +
> + return features;
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_features);
> +
> +/*
> + * vp_modern_set_features - set features to device
> + * @mdev: the modern virtio-pci device
> + * @features: the features set to device
> + */
> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> + u64 features)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite32(0, &cfg->guest_feature_select);
> + vp_iowrite32((u32)features, &cfg->guest_feature);
> + vp_iowrite32(1, &cfg->guest_feature_select);
> + vp_iowrite32(features >> 32, &cfg->guest_feature);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_features);
> +
> +/*
> + * vp_modern_generation - get the device generation
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the generation read from device
> + */
> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + return vp_ioread8(&cfg->config_generation);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_generation);
> +
> +/*
> + * vp_modern_get_status - get the device status
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the status read from device
> + */
> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + return vp_ioread8(&cfg->device_status);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_status);
> +
> +/*
> + * vp_modern_set_status - set status to device
> + * @mdev: the modern virtio-pci device
> + * @status: the status set to device
> + */
> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> + u8 status)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite8(status, &cfg->device_status);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_status);
> +
> +/*
> + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: queue index
> + * @vector: the MSIX vector to use for this virtqueue
> + *
> + * Returns the vector read back from the device
> + */
> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> + u16 index, u16 vector)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite16(index, &cfg->queue_select);
> + vp_iowrite16(vector, &cfg->queue_msix_vector);
> + /* Flush the write out to device */
> + return vp_ioread16(&cfg->queue_msix_vector);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
> +
> +/*
> + * vp_modern_config_vector - set the vector for config interrupt
> + * @mdev: the modern virtio-pci device
> + * @vector: the config vector
> + *
> + * Returns the config vector read from the device
> + */
> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> + u16 vector)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + /* Setup the vector used for configuration events */
> + vp_iowrite16(vector, &cfg->msix_config);
> + /* Verify we had enough resources to assign the vector */
> + /* Will also flush the write out to device */
> + return vp_ioread16(&cfg->msix_config);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_config_vector);
> +
> +/*
> + * vp_modern_queue_address - set the virtqueue address
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @desc_addr: address of the descriptor area
> + * @driver_addr: address of the driver area
> + * @device_addr: address of the device area
> + */
> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> + u16 index, u64 desc_addr, u64 driver_addr,
> + u64 device_addr)
> +{
> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
> +
> + vp_iowrite16(index, &cfg->queue_select);
> +
> + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
> + &cfg->queue_desc_hi);
> + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
> + &cfg->queue_avail_hi);
> + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
> + &cfg->queue_used_hi);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_queue_address);
> +
> +/*
> + * vp_modern_set_queue_enable - enable/disable a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @enable: whether the virtqueue should be enabled
> + */
> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 index, bool enable)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> + vp_iowrite16(enable, &mdev->common->queue_enable);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
> +
> +/*
> + * vp_modern_get_queue_enable - check whether a virtqueue is enabled
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns whether a virtqueue is enabled or not
> + */
> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_enable);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
> +
> +/*
> + * vp_modern_set_queue_size - set size for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + * @size: the size of the virtqueue
> + */
> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 index, u16 size)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> + vp_iowrite16(size, &mdev->common->queue_size);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
> +
> +/*
> + * vp_modern_get_queue_size - get size for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns the size of the virtqueue
> + */
> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_size);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
> +
> +/*
> + * vp_modern_get_num_queues - get the number of virtqueues
> + * @mdev: the modern virtio-pci device
> + *
> + * Returns the number of virtqueues
> + */
> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
> +{
> + return vp_ioread16(&mdev->common->num_queues);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
> +
> +/*
> + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
> + * @mdev: the modern virtio-pci device
> + * @index: the queue index
> + *
> + * Returns the notification offset for a virtqueue
> + */
> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> + u16 index)
> +{
> + vp_iowrite16(index, &mdev->common->queue_select);
> +
> + return vp_ioread16(&mdev->common->queue_notify_off);
> +}
> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
> +
> +MODULE_VERSION("0.1");
> +MODULE_DESCRIPTION("Modern Virtio PCI Device");
> +MODULE_AUTHOR("Jason Wang <[email protected]>");
> +MODULE_LICENSE("GPL");
> diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
> new file mode 100644
> index 000000000000..f26acbeec965
> --- /dev/null
> +++ b/include/linux/virtio_pci_modern.h
> @@ -0,0 +1,111 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _LINUX_VIRTIO_PCI_MODERN_H
> +#define _LINUX_VIRTIO_PCI_MODERN_H
> +
> +#include <linux/pci.h>
> +#include <linux/virtio_pci.h>
> +
> +struct virtio_pci_modern_device {
> + struct pci_dev *pci_dev;
> +
> + struct virtio_pci_common_cfg __iomem *common;
> + /* Device-specific data (non-legacy mode) */
> + void __iomem *device;
> + /* Base of vq notifications (non-legacy mode). */
> + void __iomem *notify_base;
> + /* Where to read and clear interrupt */
> + u8 __iomem *isr;
> +
> + /* So we can sanity-check accesses. */
> + size_t notify_len;
> + size_t device_len;
> +
> + /* Capability for when we need to map notifications per-vq. */
> + int notify_map_cap;
> +
> + /* Multiply queue_notify_off by this value. (non-legacy mode). */
> + u32 notify_offset_multiplier;
> +
> + int modern_bars;
> +
> + struct virtio_device_id id;
> +};
> +
> +/*
> + * Type-safe wrappers for io accesses.
> + * Use these to enforce at compile time the following spec requirement:
> + *
> + * The driver MUST access each field using the “natural” access
> + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
> + * for 16-bit fields and 8-bit accesses for 8-bit fields.
> + */
> +static inline u8 vp_ioread8(const u8 __iomem *addr)
> +{
> + return ioread8(addr);
> +}
> +
> +static inline u16 vp_ioread16(const __le16 __iomem *addr)
> +{
> + return ioread16(addr);
> +}
> +
> +static inline u32 vp_ioread32(const __le32 __iomem *addr)
> +{
> + return ioread32(addr);
> +}
> +
> +static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
> +{
> + iowrite8(value, addr);
> +}
> +
> +static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
> +{
> + iowrite16(value, addr);
> +}
> +
> +static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
> +{
> + iowrite32(value, addr);
> +}
> +
> +static inline void vp_iowrite64_twopart(u64 val,
> + __le32 __iomem *lo,
> + __le32 __iomem *hi)
> +{
> + vp_iowrite32((u32)val, lo);
> + vp_iowrite32(val >> 32, hi);
> +}
> +
> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
> + u64 features);
> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
> + u8 status);
> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
> + u16 idx, u16 vector);
> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
> + u16 vector);
> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
> + u16 index, u64 desc_addr, u64 driver_addr,
> + u64 device_addr);
> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 idx, bool enable);
> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 idx, u16 size);
> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
> + u16 idx);
> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
> + size_t minlen,
> + u32 align,
> + u32 start, u32 size,
> + size_t *len);
> +int vp_modern_probe(struct virtio_pci_modern_device *mdev);
> +void vp_modern_remove(struct virtio_pci_modern_device *mdev);
> +#endif
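
For reference, here is a minimal sketch of how a separate driver could consume the
exported vp_modern_* helpers declared above. This is illustrative only: demo_probe()
and demo_remove() are hypothetical names, and error handling beyond the basics is
omitted.

#include <linux/bits.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/virtio_config.h>
#include <linux/virtio_pci_modern.h>

/* Hypothetical consumer: bring up a modern virtio PCI device far enough
 * to negotiate VIRTIO_F_VERSION_1 using only the exported helpers.
 */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct virtio_pci_modern_device *mdev;
        u64 features;
        int ret;

        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
                return -ENOMEM;
        mdev->pci_dev = pdev;

        /* vp_modern_probe() expects the PCI device to be enabled already */
        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        ret = vp_modern_probe(mdev);
        if (ret)
                return ret;

        /* minimal status/feature handshake */
        vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        features = vp_modern_get_features(mdev);
        vp_modern_set_features(mdev, features & BIT_ULL(VIRTIO_F_VERSION_1));
        vp_modern_set_status(mdev, vp_modern_get_status(mdev) |
                                   VIRTIO_CONFIG_S_FEATURES_OK);

        dev_info(&pdev->dev, "%u virtqueues, notify multiplier %u\n",
                 vp_modern_get_num_queues(mdev),
                 mdev->notify_offset_multiplier);

        pci_set_drvdata(pdev, mdev);
        return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
        struct virtio_pci_modern_device *mdev = pci_get_drvdata(pdev);

        vp_modern_remove(mdev);
}

The vp_vdpa driver added later in the series follows essentially the same pattern
before exposing the device on the vDPA bus.
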
On 2021/2/10 8:35 PM, Michael S. Tsirkin wrote:
> On Wed, Feb 10, 2021 at 12:44:03PM +0800, Jason Wang wrote:
>> On 2021/2/9 10:20 PM, Michael S. Tsirkin wrote:
>>> On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
>>>> Signed-off-by: Jason Wang <[email protected]>
>>>> ---
>>>> drivers/virtio/Kconfig | 10 +-
>>>> drivers/virtio/Makefile | 1 +
>>>> drivers/virtio/virtio_pci_common.h | 27 +-
>>>> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
>>>> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
>>>> include/linux/virtio_pci_modern.h | 111 +++++
>>>> 6 files changed, 721 insertions(+), 644 deletions(-)
>>>> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
>>>> create mode 100644 include/linux/virtio_pci_modern.h
>>>>
>>>> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
>>>> index 7b41130d3f35..6b9b81f4b8c2 100644
>>>> --- a/drivers/virtio/Kconfig
>>>> +++ b/drivers/virtio/Kconfig
>>>> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
>>>> This option is selected if the architecture may need to enforce
>>>> VIRTIO_F_ACCESS_PLATFORM
>>>> +config VIRTIO_PCI_MODERN
>>>> + tristate "Modern Virtio PCI Device"
>>>> + depends on PCI
>>>> + help
>>>> + Modern PCI device implementation. This module implements the
>>>> + basic probe and control for devices which are based on modern
>>>> + PCI device with possible vendor specific extensions.
>>>> +
>>>> menuconfig VIRTIO_MENU
>>>> bool "Virtio drivers"
>>>> default y
>>>> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>>>> config VIRTIO_PCI
>>>> tristate "PCI driver for virtio devices"
>>>> - depends on PCI
>>>> + depends on VIRTIO_PCI_MODERN
>>>> select VIRTIO
>>>> help
>>>> This driver provides support for virtio based paravirtual device
>>> Looks like VIRTIO_PCI_MODERN is actually just a library that
>>> virtio pci uses. Is that right?
>>
>> Right.
>>
>>
>>> In that case just select it
>>> automatically, let's not make users enable it manually.
>>
>> I've considered doing this, but the problem is that the module depends on
>> PCI, so I think it can't be selected.
> Drop the dependency, document that whoever selects it must depend on PCI.
Fine with me. Will post a patch.
Thanks
>
>> Thanks
>>
>>
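
To illustrate the change agreed on above, a rough sketch of what the Kconfig
could look like (hypothetical; the actual follow-up patch may differ):

config VIRTIO_PCI_MODERN
        tristate
        # No prompt and no PCI dependency: whoever selects this symbol
        # must itself depend on PCI.

config VIRTIO_PCI
        tristate "PCI driver for virtio devices"
        depends on PCI
        select VIRTIO_PCI_MODERN
        select VIRTIO

Because select ignores the dependencies of the selected symbol, dropping the PCI
dependency from the library symbol only works if every selecting symbol carries
the PCI dependency itself, which is the documentation burden mentioned above.
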
On 2021/2/13 4:14 AM, Guenter Roeck wrote:
> On Mon, Jan 04, 2021 at 02:55:00PM +0800, Jason Wang wrote:
>> Signed-off-by: Jason Wang <[email protected]>
>> Reported-by: Naresh Kamboju <[email protected]>
> I don't really see the point of having to enable VIRTIO_PCI_MODERN
> because otherwise VIRTIO_PCI no longer works. If VIRTIO_PCI now
> requires VIRTIO_PCI_MODERN, maybe it should select it instead of
> depending on it?
Right.
I will post a patch soon to select that.
Thanks
> Alternatively, you could just drop the new
> configuration flag entirely and build virtio_pci_modern_dev.o with
> VIRTIO_PCI. One doesn't work without the other anyway, after all.
>
> Guenter
>
>> ---
>> drivers/virtio/Kconfig | 10 +-
>> drivers/virtio/Makefile | 1 +
>> drivers/virtio/virtio_pci_common.h | 27 +-
>> drivers/virtio/virtio_pci_modern.c | 617 -------------------------
>> drivers/virtio/virtio_pci_modern_dev.c | 599 ++++++++++++++++++++++++
>> include/linux/virtio_pci_modern.h | 111 +++++
>> 6 files changed, 721 insertions(+), 644 deletions(-)
>> create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
>> create mode 100644 include/linux/virtio_pci_modern.h
>>
>> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
>> index 7b41130d3f35..6b9b81f4b8c2 100644
>> --- a/drivers/virtio/Kconfig
>> +++ b/drivers/virtio/Kconfig
>> @@ -12,6 +12,14 @@ config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
>> This option is selected if the architecture may need to enforce
>> VIRTIO_F_ACCESS_PLATFORM
>>
>> +config VIRTIO_PCI_MODERN
>> + tristate "Modern Virtio PCI Device"
>> + depends on PCI
>> + help
>> + Modern PCI device implementation. This module implements the
>> + basic probe and control for devices which are based on modern
>> + PCI device with possible vendor specific extensions.
>> +
>> menuconfig VIRTIO_MENU
>> bool "Virtio drivers"
>> default y
>> @@ -20,7 +28,7 @@ if VIRTIO_MENU
>>
>> config VIRTIO_PCI
>> tristate "PCI driver for virtio devices"
>> - depends on PCI
>> + depends on VIRTIO_PCI_MODERN
>> select VIRTIO
>> help
>> This driver provides support for virtio based paravirtual device
>> diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
>> index 591e6f72aa54..f097578aaa8f 100644
>> --- a/drivers/virtio/Makefile
>> +++ b/drivers/virtio/Makefile
>> @@ -1,5 +1,6 @@
>> # SPDX-License-Identifier: GPL-2.0
>> obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
>> +obj-$(CONFIG_VIRTIO_PCI_MODERN) += virtio_pci_modern_dev.o
>> obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
>> obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
>> virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
>> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> index f35ff5b6b467..beec047a8f8d 100644
>> --- a/drivers/virtio/virtio_pci_common.h
>> +++ b/drivers/virtio/virtio_pci_common.h
>> @@ -25,6 +25,7 @@
>> #include <linux/virtio_config.h>
>> #include <linux/virtio_ring.h>
>> #include <linux/virtio_pci.h>
>> +#include <linux/virtio_pci_modern.h>
>> #include <linux/highmem.h>
>> #include <linux/spinlock.h>
>>
>> @@ -39,32 +40,6 @@ struct virtio_pci_vq_info {
>> unsigned msix_vector;
>> };
>>
>> -struct virtio_pci_modern_device {
>> - struct pci_dev *pci_dev;
>> -
>> - struct virtio_pci_common_cfg __iomem *common;
>> - /* Device-specific data (non-legacy mode) */
>> - void __iomem *device;
>> - /* Base of vq notifications (non-legacy mode). */
>> - void __iomem *notify_base;
>> - /* Where to read and clear interrupt */
>> - u8 __iomem *isr;
>> -
>> - /* So we can sanity-check accesses. */
>> - size_t notify_len;
>> - size_t device_len;
>> -
>> - /* Capability for when we need to map notifications per-vq. */
>> - int notify_map_cap;
>> -
>> - /* Multiply queue_notify_off by this value. (non-legacy mode). */
>> - u32 notify_offset_multiplier;
>> -
>> - int modern_bars;
>> -
>> - struct virtio_device_id id;
>> -};
>> -
>> /* Our device structure */
>> struct virtio_pci_device {
>> struct virtio_device vdev;
>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>> index a5e3a5e40323..fbd4ebc00eb6 100644
>> --- a/drivers/virtio/virtio_pci_modern.c
>> +++ b/drivers/virtio/virtio_pci_modern.c
>> @@ -19,158 +19,6 @@
>> #define VIRTIO_RING_NO_LEGACY
>> #include "virtio_pci_common.h"
>>
>> -/*
>> - * Type-safe wrappers for io accesses.
>> - * Use these to enforce at compile time the following spec requirement:
>> - *
>> - * The driver MUST access each field using the “natural” access
>> - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
>> - * for 16-bit fields and 8-bit accesses for 8-bit fields.
>> - */
>> -static inline u8 vp_ioread8(const u8 __iomem *addr)
>> -{
>> - return ioread8(addr);
>> -}
>> -static inline u16 vp_ioread16 (const __le16 __iomem *addr)
>> -{
>> - return ioread16(addr);
>> -}
>> -
>> -static inline u32 vp_ioread32(const __le32 __iomem *addr)
>> -{
>> - return ioread32(addr);
>> -}
>> -
>> -static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
>> -{
>> - iowrite8(value, addr);
>> -}
>> -
>> -static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
>> -{
>> - iowrite16(value, addr);
>> -}
>> -
>> -static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
>> -{
>> - iowrite32(value, addr);
>> -}
>> -
>> -static void vp_iowrite64_twopart(u64 val,
>> - __le32 __iomem *lo, __le32 __iomem *hi)
>> -{
>> - vp_iowrite32((u32)val, lo);
>> - vp_iowrite32(val >> 32, hi);
>> -}
>> -
>> -/*
>> - * vp_modern_map_capability - map a part of virtio pci capability
>> - * @mdev: the modern virtio-pci device
>> - * @off: offset of the capability
>> - * @minlen: minimal length of the capability
>> - * @align: align requirement
>> - * @start: start from the capability
>> - * @size: map size
>> - * @len: the length that is actually mapped
>> - *
>> - * Returns the io address of for the part of the capability
>> - */
>> -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
>> - size_t minlen,
>> - u32 align,
>> - u32 start, u32 size,
>> - size_t *len)
>> -{
>> - struct pci_dev *dev = mdev->pci_dev;
>> - u8 bar;
>> - u32 offset, length;
>> - void __iomem *p;
>> -
>> - pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
>> - bar),
>> - &bar);
>> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
>> - &offset);
>> - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
>> - &length);
>> -
>> - if (length <= start) {
>> - dev_err(&dev->dev,
>> - "virtio_pci: bad capability len %u (>%u expected)\n",
>> - length, start);
>> - return NULL;
>> - }
>> -
>> - if (length - start < minlen) {
>> - dev_err(&dev->dev,
>> - "virtio_pci: bad capability len %u (>=%zu expected)\n",
>> - length, minlen);
>> - return NULL;
>> - }
>> -
>> - length -= start;
>> -
>> - if (start + offset < offset) {
>> - dev_err(&dev->dev,
>> - "virtio_pci: map wrap-around %u+%u\n",
>> - start, offset);
>> - return NULL;
>> - }
>> -
>> - offset += start;
>> -
>> - if (offset & (align - 1)) {
>> - dev_err(&dev->dev,
>> - "virtio_pci: offset %u not aligned to %u\n",
>> - offset, align);
>> - return NULL;
>> - }
>> -
>> - if (length > size)
>> - length = size;
>> -
>> - if (len)
>> - *len = length;
>> -
>> - if (minlen + offset < minlen ||
>> - minlen + offset > pci_resource_len(dev, bar)) {
>> - dev_err(&dev->dev,
>> - "virtio_pci: map virtio %zu@%u "
>> - "out of range on bar %i length %lu\n",
>> - minlen, offset,
>> - bar, (unsigned long)pci_resource_len(dev, bar));
>> - return NULL;
>> - }
>> -
>> - p = pci_iomap_range(dev, bar, offset, length);
>> - if (!p)
>> - dev_err(&dev->dev,
>> - "virtio_pci: unable to map virtio %u@%u on bar %i\n",
>> - length, offset, bar);
>> - return p;
>> -}
>> -
>> -/*
>> - * vp_modern_get_features - get features from device
>> - * @mdev: the modern virtio-pci device
>> - *
>> - * Returns the features read from the device
>> - */
>> -static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - u64 features;
>> -
>> - vp_iowrite32(0, &cfg->device_feature_select);
>> - features = vp_ioread32(&cfg->device_feature);
>> - vp_iowrite32(1, &cfg->device_feature_select);
>> - features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
>> -
>> - return features;
>> -}
>> -
>> -/* virtio config->get_features() implementation */
>> static u64 vp_get_features(struct virtio_device *vdev)
>> {
>> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> @@ -188,149 +36,6 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
>> __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
>> }
>>
>> -/*
>> - * vp_modern_set_features - set features to device
>> - * @mdev: the modern virtio-pci device
>> - * @features: the features set to device
>> - */
>> -static void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
>> - u64 features)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - vp_iowrite32(0, &cfg->guest_feature_select);
>> - vp_iowrite32((u32)features, &cfg->guest_feature);
>> - vp_iowrite32(1, &cfg->guest_feature_select);
>> - vp_iowrite32(features >> 32, &cfg->guest_feature);
>> -}
>> -
>> -/*
>> - * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: queue index
>> - * @vector: the config vector
>> - *
>> - * Returns the config vector read from the device
>> - */
>> -static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
>> - u16 index, u16 vector)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - vp_iowrite16(index, &cfg->queue_select);
>> - vp_iowrite16(vector, &cfg->queue_msix_vector);
>> - /* Flush the write out to device */
>> - return vp_ioread16(&cfg->queue_msix_vector);
>> -}
>> -
>> -/*
>> - * vp_modern_queue_address - set the virtqueue address
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - * @desc_addr: address of the descriptor area
>> - * @driver_addr: address of the driver area
>> - * @device_addr: address of the device area
>> - */
>> -static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
>> - u16 index, u64 desc_addr, u64 driver_addr,
>> - u64 device_addr)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - vp_iowrite16(index, &cfg->queue_select);
>> -
>> - vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
>> - &cfg->queue_desc_hi);
>> - vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
>> - &cfg->queue_avail_hi);
>> - vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
>> - &cfg->queue_used_hi);
>> -}
>> -
>> -/*
>> - * vp_modern_set_queue_enable - enable a virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - * @enable: whether the virtqueue is enable or not
>> - */
>> -static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
>> - u16 index, bool enable)
>> -{
>> - vp_iowrite16(index, &mdev->common->queue_select);
>> - vp_iowrite16(enable, &mdev->common->queue_enable);
>> -}
>> -
>> -/*
>> - * vp_modern_get_queue_enable - enable a virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - *
>> - * Returns whether a virtqueue is enabled or not
>> - */
>> -static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
>> - u16 index)
>> -{
>> - vp_iowrite16(index, &mdev->common->queue_select);
>> -
>> - return vp_ioread16(&mdev->common->queue_enable);
>> -}
>> -
>> -/*
>> - * vp_modern_set_queue_size - set size for a virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - * @size: the size of the virtqueue
>> - */
>> -static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
>> - u16 index, u16 size)
>> -{
>> - vp_iowrite16(index, &mdev->common->queue_select);
>> - vp_iowrite16(size, &mdev->common->queue_size);
>> -
>> -}
>> -
>> -/*
>> - * vp_modern_get_queue_size - get size for a virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - *
>> - * Returns the size of the virtqueue
>> - */
>> -static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
>> - u16 index)
>> -{
>> - vp_iowrite16(index, &mdev->common->queue_select);
>> -
>> - return vp_ioread16(&mdev->common->queue_size);
>> -
>> -}
>> -
>> -/*
>> - * vp_modern_get_num_queues - get the number of virtqueues
>> - * @mdev: the modern virtio-pci device
>> - *
>> - * Returns the number of virtqueues
>> - */
>> -static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
>> -{
>> - return vp_ioread16(&mdev->common->num_queues);
>> -}
>> -
>> -/*
>> - * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
>> - * @mdev: the modern virtio-pci device
>> - * @index: the queue index
>> - *
>> - * Returns the notification offset for a virtqueue
>> - */
>> -static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
>> - u16 index)
>> -{
>> - vp_iowrite16(index, &mdev->common->queue_select);
>> -
>> - return vp_ioread16(&mdev->common->queue_notify_off);
>> -}
>> -
>> /* virtio config->finalize_features() implementation */
>> static int vp_finalize_features(struct virtio_device *vdev)
>> {
>> @@ -429,19 +134,6 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
>> }
>> }
>>
>> -/*
>> - * vp_modern_generation - get the device genreation
>> - * @mdev: the modern virtio-pci device
>> - *
>> - * Returns the genreation read from device
>> - */
>> -static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - return vp_ioread8(&cfg->config_generation);
>> -}
>> -
>> static u32 vp_generation(struct virtio_device *vdev)
>> {
>> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> @@ -449,19 +141,6 @@ static u32 vp_generation(struct virtio_device *vdev)
>> return vp_modern_generation(&vp_dev->mdev);
>> }
>>
>> -/*
>> - * vp_modern_get_status - get the device status
>> - * @mdev: the modern virtio-pci device
>> - *
>> - * Returns the status read from device
>> - */
>> -static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - return vp_ioread8(&cfg->device_status);
>> -}
>> -
>> /* config->{get,set}_status() implementations */
>> static u8 vp_get_status(struct virtio_device *vdev)
>> {
>> @@ -470,19 +149,6 @@ static u8 vp_get_status(struct virtio_device *vdev)
>> return vp_modern_get_status(&vp_dev->mdev);
>> }
>>
>> -/*
>> - * vp_modern_set_status - set status to device
>> - * @mdev: the modern virtio-pci device
>> - * @status: the status set to device
>> - */
>> -static void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
>> - u8 status)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - vp_iowrite8(status, &cfg->device_status);
>> -}
>> -
>> static void vp_set_status(struct virtio_device *vdev, u8 status)
>> {
>> struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> @@ -510,25 +176,6 @@ static void vp_reset(struct virtio_device *vdev)
>> vp_synchronize_vectors(vdev);
>> }
>>
>> -/*
>> - * vp_modern_config_vector - set the vector for config interrupt
>> - * @mdev: the modern virtio-pci device
>> - * @vector: the config vector
>> - *
>> - * Returns the config vector read from the device
>> - */
>> -static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
>> - u16 vector)
>> -{
>> - struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> -
>> - /* Setup the vector used for configuration events */
>> - vp_iowrite16(vector, &cfg->msix_config);
>> - /* Verify we had enough resources to assign the vector */
>> - /* Will also flush the write out to device */
>> - return vp_ioread16(&cfg->msix_config);
>> -}
>> -
>> static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
>> {
>> return vp_modern_config_vector(&vp_dev->mdev, vector);
>> @@ -789,253 +436,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
>> .get_shm_region = vp_get_shm_region,
>> };
>>
>> -/**
>> - * virtio_pci_find_capability - walk capabilities to find device info.
>> - * @dev: the pci device
>> - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
>> - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
>> - * @bars: the bitmask of BARs
>> - *
>> - * Returns offset of the capability, or 0.
>> - */
>> -static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
>> - u32 ioresource_types, int *bars)
>> -{
>> - int pos;
>> -
>> - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
>> - pos > 0;
>> - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
>> - u8 type, bar;
>> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
>> - cfg_type),
>> - &type);
>> - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
>> - bar),
>> - &bar);
>> -
>> - /* Ignore structures with reserved BAR values */
>> - if (bar > 0x5)
>> - continue;
>> -
>> - if (type == cfg_type) {
>> - if (pci_resource_len(dev, bar) &&
>> - pci_resource_flags(dev, bar) & ioresource_types) {
>> - *bars |= (1 << bar);
>> - return pos;
>> - }
>> - }
>> - }
>> - return 0;
>> -}
>> -
>> -/* This is part of the ABI. Don't screw with it. */
>> -static inline void check_offsets(void)
>> -{
>> - /* Note: disk space was harmed in compilation of this function. */
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
>> - offsetof(struct virtio_pci_cap, cap_vndr));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
>> - offsetof(struct virtio_pci_cap, cap_next));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
>> - offsetof(struct virtio_pci_cap, cap_len));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
>> - offsetof(struct virtio_pci_cap, cfg_type));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
>> - offsetof(struct virtio_pci_cap, bar));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
>> - offsetof(struct virtio_pci_cap, offset));
>> - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
>> - offsetof(struct virtio_pci_cap, length));
>> - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
>> - offsetof(struct virtio_pci_notify_cap,
>> - notify_off_multiplier));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
>> - offsetof(struct virtio_pci_common_cfg,
>> - device_feature_select));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
>> - offsetof(struct virtio_pci_common_cfg, device_feature));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
>> - offsetof(struct virtio_pci_common_cfg,
>> - guest_feature_select));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
>> - offsetof(struct virtio_pci_common_cfg, guest_feature));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
>> - offsetof(struct virtio_pci_common_cfg, msix_config));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
>> - offsetof(struct virtio_pci_common_cfg, num_queues));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
>> - offsetof(struct virtio_pci_common_cfg, device_status));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
>> - offsetof(struct virtio_pci_common_cfg, config_generation));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
>> - offsetof(struct virtio_pci_common_cfg, queue_select));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
>> - offsetof(struct virtio_pci_common_cfg, queue_size));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
>> - offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
>> - offsetof(struct virtio_pci_common_cfg, queue_enable));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
>> - offsetof(struct virtio_pci_common_cfg, queue_notify_off));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
>> - offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
>> - offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
>> - offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
>> - offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
>> - offsetof(struct virtio_pci_common_cfg, queue_used_lo));
>> - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
>> - offsetof(struct virtio_pci_common_cfg, queue_used_hi));
>> -}
>> -
>> -/*
>> - * vp_modern_probe: probe the modern virtio pci device, note that the
>> - * caller is required to enable PCI device before calling this function.
>> - * @mdev: the modern virtio-pci device
>> - *
>> - * Return 0 on succeed otherwise fail
>> - */
>> -static int vp_modern_probe(struct virtio_pci_modern_device *mdev)
>> -{
>> - struct pci_dev *pci_dev = mdev->pci_dev;
>> - int err, common, isr, notify, device;
>> - u32 notify_length;
>> - u32 notify_offset;
>> -
>> - check_offsets();
>> -
>> - mdev->pci_dev = pci_dev;
>> -
>> - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
>> - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
>> - return -ENODEV;
>> -
>> - if (pci_dev->device < 0x1040) {
>> - /* Transitional devices: use the PCI subsystem device id as
>> - * virtio device id, same as legacy driver always did.
>> - */
>> - mdev->id.device = pci_dev->subsystem_device;
>> - } else {
>> - /* Modern devices: simply use PCI device id, but start from 0x1040. */
>> - mdev->id.device = pci_dev->device - 0x1040;
>> - }
>> - mdev->id.vendor = pci_dev->subsystem_vendor;
>> -
>> - /* check for a common config: if not, use legacy mode (bar 0). */
>> - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
>> - IORESOURCE_IO | IORESOURCE_MEM,
>> - &mdev->modern_bars);
>> - if (!common) {
>> - dev_info(&pci_dev->dev,
>> - "virtio_pci: leaving for legacy driver\n");
>> - return -ENODEV;
>> - }
>> -
>> - /* If common is there, these should be too... */
>> - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
>> - IORESOURCE_IO | IORESOURCE_MEM,
>> - &mdev->modern_bars);
>> - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
>> - IORESOURCE_IO | IORESOURCE_MEM,
>> - &mdev->modern_bars);
>> - if (!isr || !notify) {
>> - dev_err(&pci_dev->dev,
>> - "virtio_pci: missing capabilities %i/%i/%i\n",
>> - common, isr, notify);
>> - return -EINVAL;
>> - }
>> -
>> - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
>> - if (err)
>> - err = dma_set_mask_and_coherent(&pci_dev->dev,
>> - DMA_BIT_MASK(32));
>> - if (err)
>> - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
>> -
>> - /* Device capability is only mandatory for devices that have
>> - * device-specific configuration.
>> - */
>> - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
>> - IORESOURCE_IO | IORESOURCE_MEM,
>> - &mdev->modern_bars);
>> -
>> - err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
>> - "virtio-pci-modern");
>> - if (err)
>> - return err;
>> -
>> - err = -EINVAL;
>> - mdev->common = vp_modern_map_capability(mdev, common,
>> - sizeof(struct virtio_pci_common_cfg), 4,
>> - 0, sizeof(struct virtio_pci_common_cfg),
>> - NULL);
>> - if (!mdev->common)
>> - goto err_map_common;
>> - mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
>> - 0, 1,
>> - NULL);
>> - if (!mdev->isr)
>> - goto err_map_isr;
>> -
>> - /* Read notify_off_multiplier from config space. */
>> - pci_read_config_dword(pci_dev,
>> - notify + offsetof(struct virtio_pci_notify_cap,
>> - notify_off_multiplier),
>> - &mdev->notify_offset_multiplier);
>> - /* Read notify length and offset from config space. */
>> - pci_read_config_dword(pci_dev,
>> - notify + offsetof(struct virtio_pci_notify_cap,
>> - cap.length),
>> - ¬ify_length);
>> -
>> - pci_read_config_dword(pci_dev,
>> - notify + offsetof(struct virtio_pci_notify_cap,
>> - cap.offset),
>> - ¬ify_offset);
>> -
>> - /* We don't know how many VQs we'll map, ahead of the time.
>> - * If notify length is small, map it all now.
>> - * Otherwise, map each VQ individually later.
>> - */
>> - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
>> - mdev->notify_base = vp_modern_map_capability(mdev, notify,
>> - 2, 2,
>> - 0, notify_length,
>> - &mdev->notify_len);
>> - if (!mdev->notify_base)
>> - goto err_map_notify;
>> - } else {
>> - mdev->notify_map_cap = notify;
>> - }
>> -
>> - /* Again, we don't know how much we should map, but PAGE_SIZE
>> - * is more than enough for all existing devices.
>> - */
>> - if (device) {
>> - mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
>> - 0, PAGE_SIZE,
>> - &mdev->device_len);
>> - if (!mdev->device)
>> - goto err_map_device;
>> - }
>> -
>> - return 0;
>> -
>> -err_map_device:
>> - if (mdev->notify_base)
>> - pci_iounmap(pci_dev, mdev->notify_base);
>> -err_map_notify:
>> - pci_iounmap(pci_dev, mdev->isr);
>> -err_map_isr:
>> - pci_iounmap(pci_dev, mdev->common);
>> -err_map_common:
>> - return err;
>> -}
>> -
>> /* the PCI probing function */
>> int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
>> {
>> @@ -1063,23 +463,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
>> return 0;
>> }
>>
>> -/*
>> - * vp_modern_probe: remove and cleanup the modern virtio pci device
>> - * @mdev: the modern virtio-pci device
>> - */
>> -static void vp_modern_remove(struct virtio_pci_modern_device *mdev)
>> -{
>> - struct pci_dev *pci_dev = mdev->pci_dev;
>> -
>> - if (mdev->device)
>> - pci_iounmap(pci_dev, mdev->device);
>> - if (mdev->notify_base)
>> - pci_iounmap(pci_dev, mdev->notify_base);
>> - pci_iounmap(pci_dev, mdev->isr);
>> - pci_iounmap(pci_dev, mdev->common);
>> - pci_release_selected_regions(pci_dev, mdev->modern_bars);
>> -}
>> -
>> void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
>> {
>> struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>> diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
>> new file mode 100644
>> index 000000000000..cbd667496bb1
>> --- /dev/null
>> +++ b/drivers/virtio/virtio_pci_modern_dev.c
>> @@ -0,0 +1,599 @@
>> +// SPDX-License-Identifier: GPL-2.0-or-later
>> +
>> +#include <linux/virtio_pci_modern.h>
>> +#include <linux/module.h>
>> +#include <linux/pci.h>
>> +
>> +/*
>> + * vp_modern_map_capability - map a part of virtio pci capability
>> + * @mdev: the modern virtio-pci device
>> + * @off: offset of the capability
>> + * @minlen: minimal length of the capability
>> + * @align: align requirement
>> + * @start: start from the capability
>> + * @size: map size
>> + * @len: the length that is actually mapped
>> + *
>> + * Returns the io address of for the part of the capability
>> + */
>> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
>> + size_t minlen,
>> + u32 align,
>> + u32 start, u32 size,
>> + size_t *len)
>> +{
>> + struct pci_dev *dev = mdev->pci_dev;
>> + u8 bar;
>> + u32 offset, length;
>> + void __iomem *p;
>> +
>> + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
>> + bar),
>> + &bar);
>> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
>> + &offset);
>> + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
>> + &length);
>> +
>> + if (length <= start) {
>> + dev_err(&dev->dev,
>> + "virtio_pci: bad capability len %u (>%u expected)\n",
>> + length, start);
>> + return NULL;
>> + }
>> +
>> + if (length - start < minlen) {
>> + dev_err(&dev->dev,
>> + "virtio_pci: bad capability len %u (>=%zu expected)\n",
>> + length, minlen);
>> + return NULL;
>> + }
>> +
>> + length -= start;
>> +
>> + if (start + offset < offset) {
>> + dev_err(&dev->dev,
>> + "virtio_pci: map wrap-around %u+%u\n",
>> + start, offset);
>> + return NULL;
>> + }
>> +
>> + offset += start;
>> +
>> + if (offset & (align - 1)) {
>> + dev_err(&dev->dev,
>> + "virtio_pci: offset %u not aligned to %u\n",
>> + offset, align);
>> + return NULL;
>> + }
>> +
>> + if (length > size)
>> + length = size;
>> +
>> + if (len)
>> + *len = length;
>> +
>> + if (minlen + offset < minlen ||
>> + minlen + offset > pci_resource_len(dev, bar)) {
>> + dev_err(&dev->dev,
>> + "virtio_pci: map virtio %zu@%u "
>> + "out of range on bar %i length %lu\n",
>> + minlen, offset,
>> + bar, (unsigned long)pci_resource_len(dev, bar));
>> + return NULL;
>> + }
>> +
>> + p = pci_iomap_range(dev, bar, offset, length);
>> + if (!p)
>> + dev_err(&dev->dev,
>> + "virtio_pci: unable to map virtio %u@%u on bar %i\n",
>> + length, offset, bar);
>> + return p;
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_map_capability);
>> +
>> +/**
>> + * virtio_pci_find_capability - walk capabilities to find device info.
>> + * @dev: the pci device
>> + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
>> + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
>> + * @bars: the bitmask of BARs
>> + *
>> + * Returns offset of the capability, or 0.
>> + */
>> +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
>> + u32 ioresource_types, int *bars)
>> +{
>> + int pos;
>> +
>> + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
>> + pos > 0;
>> + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
>> + u8 type, bar;
>> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
>> + cfg_type),
>> + &type);
>> + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
>> + bar),
>> + &bar);
>> +
>> + /* Ignore structures with reserved BAR values */
>> + if (bar > 0x5)
>> + continue;
>> +
>> + if (type == cfg_type) {
>> + if (pci_resource_len(dev, bar) &&
>> + pci_resource_flags(dev, bar) & ioresource_types) {
>> + *bars |= (1 << bar);
>> + return pos;
>> + }
>> + }
>> + }
>> + return 0;
>> +}
>> +
>> +/* This is part of the ABI. Don't screw with it. */
>> +static inline void check_offsets(void)
>> +{
>> + /* Note: disk space was harmed in compilation of this function. */
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
>> + offsetof(struct virtio_pci_cap, cap_vndr));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
>> + offsetof(struct virtio_pci_cap, cap_next));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
>> + offsetof(struct virtio_pci_cap, cap_len));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
>> + offsetof(struct virtio_pci_cap, cfg_type));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
>> + offsetof(struct virtio_pci_cap, bar));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
>> + offsetof(struct virtio_pci_cap, offset));
>> + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
>> + offsetof(struct virtio_pci_cap, length));
>> + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
>> + offsetof(struct virtio_pci_notify_cap,
>> + notify_off_multiplier));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
>> + offsetof(struct virtio_pci_common_cfg,
>> + device_feature_select));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
>> + offsetof(struct virtio_pci_common_cfg, device_feature));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
>> + offsetof(struct virtio_pci_common_cfg,
>> + guest_feature_select));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
>> + offsetof(struct virtio_pci_common_cfg, guest_feature));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
>> + offsetof(struct virtio_pci_common_cfg, msix_config));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
>> + offsetof(struct virtio_pci_common_cfg, num_queues));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
>> + offsetof(struct virtio_pci_common_cfg, device_status));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
>> + offsetof(struct virtio_pci_common_cfg, config_generation));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
>> + offsetof(struct virtio_pci_common_cfg, queue_select));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
>> + offsetof(struct virtio_pci_common_cfg, queue_size));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
>> + offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
>> + offsetof(struct virtio_pci_common_cfg, queue_enable));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
>> + offsetof(struct virtio_pci_common_cfg, queue_notify_off));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
>> + offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
>> + offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
>> + offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
>> + offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
>> + offsetof(struct virtio_pci_common_cfg, queue_used_lo));
>> + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
>> + offsetof(struct virtio_pci_common_cfg, queue_used_hi));
>> +}
>> +
>> +/*
>> + * vp_modern_probe: probe the modern virtio pci device, note that the
>> + * caller is required to enable PCI device before calling this function.
>> + * @mdev: the modern virtio-pci device
>> + *
>> + * Return 0 on succeed otherwise fail
>> + */
>> +int vp_modern_probe(struct virtio_pci_modern_device *mdev)
>> +{
>> + struct pci_dev *pci_dev = mdev->pci_dev;
>> + int err, common, isr, notify, device;
>> + u32 notify_length;
>> + u32 notify_offset;
>> +
>> + check_offsets();
>> +
>> + mdev->pci_dev = pci_dev;
>> +
>> + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
>> + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
>> + return -ENODEV;
>> +
>> + if (pci_dev->device < 0x1040) {
>> + /* Transitional devices: use the PCI subsystem device id as
>> + * virtio device id, same as legacy driver always did.
>> + */
>> + mdev->id.device = pci_dev->subsystem_device;
>> + } else {
>> + /* Modern devices: simply use PCI device id, but start from 0x1040. */
>> + mdev->id.device = pci_dev->device - 0x1040;
>> + }
>> + mdev->id.vendor = pci_dev->subsystem_vendor;
>> +
>> + /* check for a common config: if not, use legacy mode (bar 0). */
>> + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
>> + IORESOURCE_IO | IORESOURCE_MEM,
>> + &mdev->modern_bars);
>> + if (!common) {
>> + dev_info(&pci_dev->dev,
>> + "virtio_pci: leaving for legacy driver\n");
>> + return -ENODEV;
>> + }
>> +
>> + /* If common is there, these should be too... */
>> + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
>> + IORESOURCE_IO | IORESOURCE_MEM,
>> + &mdev->modern_bars);
>> + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
>> + IORESOURCE_IO | IORESOURCE_MEM,
>> + &mdev->modern_bars);
>> + if (!isr || !notify) {
>> + dev_err(&pci_dev->dev,
>> + "virtio_pci: missing capabilities %i/%i/%i\n",
>> + common, isr, notify);
>> + return -EINVAL;
>> + }
>> +
>> + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
>> + if (err)
>> + err = dma_set_mask_and_coherent(&pci_dev->dev,
>> + DMA_BIT_MASK(32));
>> + if (err)
>> + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
>> +
>> + /* Device capability is only mandatory for devices that have
>> + * device-specific configuration.
>> + */
>> + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
>> + IORESOURCE_IO | IORESOURCE_MEM,
>> + &mdev->modern_bars);
>> +
>> + err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
>> + "virtio-pci-modern");
>> + if (err)
>> + return err;
>> +
>> + err = -EINVAL;
>> + mdev->common = vp_modern_map_capability(mdev, common,
>> + sizeof(struct virtio_pci_common_cfg), 4,
>> + 0, sizeof(struct virtio_pci_common_cfg),
>> + NULL);
>> + if (!mdev->common)
>> + goto err_map_common;
>> + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
>> + 0, 1,
>> + NULL);
>> + if (!mdev->isr)
>> + goto err_map_isr;
>> +
>> + /* Read notify_off_multiplier from config space. */
>> + pci_read_config_dword(pci_dev,
>> + notify + offsetof(struct virtio_pci_notify_cap,
>> + notify_off_multiplier),
>> + &mdev->notify_offset_multiplier);
>> + /* Read notify length and offset from config space. */
>> + pci_read_config_dword(pci_dev,
>> + notify + offsetof(struct virtio_pci_notify_cap,
>> + cap.length),
>> + ¬ify_length);
>> +
>> + pci_read_config_dword(pci_dev,
>> + notify + offsetof(struct virtio_pci_notify_cap,
>> + cap.offset),
>> + ¬ify_offset);
>> +
>> + /* We don't know how many VQs we'll map, ahead of the time.
>> + * If notify length is small, map it all now.
>> + * Otherwise, map each VQ individually later.
>> + */
>> + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
>> + mdev->notify_base = vp_modern_map_capability(mdev, notify,
>> + 2, 2,
>> + 0, notify_length,
>> + &mdev->notify_len);
>> + if (!mdev->notify_base)
>> + goto err_map_notify;
>> + } else {
>> + mdev->notify_map_cap = notify;
>> + }
>> +
>> + /* Again, we don't know how much we should map, but PAGE_SIZE
>> + * is more than enough for all existing devices.
>> + */
>> + if (device) {
>> + mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
>> + 0, PAGE_SIZE,
>> + &mdev->device_len);
>> + if (!mdev->device)
>> + goto err_map_device;
>> + }
>> +
>> + return 0;
>> +
>> +err_map_device:
>> + if (mdev->notify_base)
>> + pci_iounmap(pci_dev, mdev->notify_base);
>> +err_map_notify:
>> + pci_iounmap(pci_dev, mdev->isr);
>> +err_map_isr:
>> + pci_iounmap(pci_dev, mdev->common);
>> +err_map_common:
>> + return err;
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_probe);
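
For context, a rough sketch of how a consumer (e.g. a bridge driver built on
this module) might drive these two entry points. Everything outside the
vp_modern_* API below (function names, error handling policy) is made up for
illustration and assumes the usual pci/slab includes:

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->pci_dev = pdev;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	/* Maps the common/isr/notify/device capabilities and claims BARs. */
	ret = vp_modern_probe(mdev);
	if (ret)
		goto err_disable;

	pci_set_drvdata(pdev, mdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
err_free:
	kfree(mdev);
	return ret;
}

static void example_pci_remove(struct pci_dev *pdev)
{
	struct virtio_pci_modern_device *mdev = pci_get_drvdata(pdev);

	vp_modern_remove(mdev);		/* unmaps and releases the BARs */
	pci_disable_device(pdev);
	kfree(mdev);
}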
>> +
>> +/*
>> + * vp_modern_remove - remove and clean up the modern virtio pci device
>> + * @mdev: the modern virtio-pci device
>> + */
>> +void vp_modern_remove(struct virtio_pci_modern_device *mdev)
>> +{
>> + struct pci_dev *pci_dev = mdev->pci_dev;
>> +
>> + if (mdev->device)
>> + pci_iounmap(pci_dev, mdev->device);
>> + if (mdev->notify_base)
>> + pci_iounmap(pci_dev, mdev->notify_base);
>> + pci_iounmap(pci_dev, mdev->isr);
>> + pci_iounmap(pci_dev, mdev->common);
>> + pci_release_selected_regions(pci_dev, mdev->modern_bars);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_remove);
>> +
>> +/*
>> + * vp_modern_get_features - get features from device
>> + * @mdev: the modern virtio-pci device
>> + *
>> + * Returns the features read from the device
>> + */
>> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + u64 features;
>> +
>> + vp_iowrite32(0, &cfg->device_feature_select);
>> + features = vp_ioread32(&cfg->device_feature);
>> + vp_iowrite32(1, &cfg->device_feature_select);
>> + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);
>> +
>> + return features;
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_features);
>> +
>> +/*
>> + * vp_modern_set_features - set features to device
>> + * @mdev: the modern virtio-pci device
>> + * @features: the features set to device
>> + */
>> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
>> + u64 features)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + vp_iowrite32(0, &cfg->guest_feature_select);
>> + vp_iowrite32((u32)features, &cfg->guest_feature);
>> + vp_iowrite32(1, &cfg->guest_feature_select);
>> + vp_iowrite32(features >> 32, &cfg->guest_feature);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_set_features);
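
A minimal negotiation sketch on top of these two helpers, assuming the caller
already knows which features it wants (VIRTIO_F_VERSION_1 and BIT_ULL() come
from the existing uapi/bits headers; the function itself is illustrative):

static int example_negotiate_features(struct virtio_pci_modern_device *mdev,
				      u64 driver_features)
{
	u64 negotiated = vp_modern_get_features(mdev) & driver_features;

	/* A modern device must offer, and the driver must accept, VERSION_1. */
	if (!(negotiated & BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EINVAL;

	vp_modern_set_features(mdev, negotiated);
	return 0;
}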
>> +
>> +/*
>> + * vp_modern_generation - get the device generation
>> + * @mdev: the modern virtio-pci device
>> + *
>> + * Returns the generation read from the device
>> + */
>> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + return vp_ioread8(&cfg->config_generation);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_generation);
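
The generation counter is typically used to detect torn reads of multi-byte
config fields. A hedged consumer sketch (the retry loop and memcpy_fromio()
are the caller's responsibility, not part of this patch):

static void example_read_config(struct virtio_pci_modern_device *mdev,
				unsigned int offset, void *buf,
				unsigned int len)
{
	u32 gen;

	do {
		gen = vp_modern_generation(mdev);
		memcpy_fromio(buf, mdev->device + offset, len);
	} while (gen != vp_modern_generation(mdev));
}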
>> +
>> +/*
>> + * vp_modern_get_status - get the device status
>> + * @mdev: the modern virtio-pci device
>> + *
>> + * Returns the status read from device
>> + */
>> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + return vp_ioread8(&cfg->device_status);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_status);
>> +
>> +/*
>> + * vp_modern_set_status - set status to device
>> + * @mdev: the modern virtio-pci device
>> + * @status: the status set to device
>> + */
>> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
>> + u8 status)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + vp_iowrite8(status, &cfg->device_status);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_set_status);
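
Taken together, the status helpers give the usual reset handshake; a sketch of
what a caller might do (msleep() is just one way to back off while waiting):

static void example_reset_device(struct virtio_pci_modern_device *mdev)
{
	/* Writing 0 requests a reset; the device acknowledges by reading back 0. */
	vp_modern_set_status(mdev, 0);

	while (vp_modern_get_status(mdev))
		msleep(1);
}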
>> +
>> +/*
>> + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
>> + * @mdev: the modern virtio-pci device
>> + * @index: queue index
>> + * @vector: the MSIX vector to use for this virtqueue
>> + *
>> + * Returns the MSIX vector read back from the device
>> + */
>> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
>> + u16 index, u16 vector)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + vp_iowrite16(index, &cfg->queue_select);
>> + vp_iowrite16(vector, &cfg->queue_msix_vector);
>> + /* Flush the write out to device */
>> + return vp_ioread16(&cfg->queue_msix_vector);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
>> +
>> +/*
>> + * vp_modern_config_vector - set the vector for config interrupt
>> + * @mdev: the modern virtio-pci device
>> + * @vector: the config vector
>> + *
>> + * Returns the config vector read from the device
>> + */
>> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
>> + u16 vector)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + /* Setup the vector used for configuration events */
>> + vp_iowrite16(vector, &cfg->msix_config);
>> + /* Verify we had enough resources to assign the vector */
>> + /* Will also flush the write out to device */
>> + return vp_ioread16(&cfg->msix_config);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_config_vector);
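
Since both vector helpers read the value back, callers are expected to compare
the result against what they wrote; the device reports VIRTIO_MSI_NO_VECTOR
(from linux/virtio_pci.h) when it cannot allocate the vector. An illustrative
caller:

static int example_request_config_irq(struct virtio_pci_modern_device *mdev,
				      u16 vector)
{
	if (vp_modern_config_vector(mdev, vector) == VIRTIO_MSI_NO_VECTOR)
		return -EBUSY;

	return 0;
}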
>> +
>> +/*
>> + * vp_modern_queue_address - set the virtqueue address
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + * @desc_addr: address of the descriptor area
>> + * @driver_addr: address of the driver area
>> + * @device_addr: address of the device area
>> + */
>> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
>> + u16 index, u64 desc_addr, u64 driver_addr,
>> + u64 device_addr)
>> +{
>> + struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
>> +
>> + vp_iowrite16(index, &cfg->queue_select);
>> +
>> + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
>> + &cfg->queue_desc_hi);
>> + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
>> + &cfg->queue_avail_hi);
>> + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
>> + &cfg->queue_used_hi);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_queue_address);
>> +
>> +/*
>> + * vp_modern_set_queue_enable - enable/disable a virtqueue
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + * @enable: whether the virtqueue is enabled or not
>> + */
>> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
>> + u16 index, bool enable)
>> +{
>> + vp_iowrite16(index, &mdev->common->queue_select);
>> + vp_iowrite16(enable, &mdev->common->queue_enable);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
>> +
>> +/*
>> + * vp_modern_get_queue_enable - check whether a virtqueue is enabled
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + *
>> + * Returns whether a virtqueue is enabled or not
>> + */
>> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
>> + u16 index)
>> +{
>> + vp_iowrite16(index, &mdev->common->queue_select);
>> +
>> + return vp_ioread16(&mdev->common->queue_enable);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);
>> +
>> +/*
>> + * vp_modern_set_queue_size - set size for a virtqueue
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + * @size: the size of the virtqueue
>> + */
>> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
>> + u16 index, u16 size)
>> +{
>> + vp_iowrite16(index, &mdev->common->queue_select);
>> + vp_iowrite16(size, &mdev->common->queue_size);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);
>> +
>> +/*
>> + * vp_modern_get_queue_size - get size for a virtqueue
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + *
>> + * Returns the size of the virtqueue
>> + */
>> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
>> + u16 index)
>> +{
>> + vp_iowrite16(index, &mdev->common->queue_select);
>> +
>> + return vp_ioread16(&mdev->common->queue_size);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);
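
Putting the per-queue helpers together, bringing up a single virtqueue would
look roughly like this; struct example_vq and its fields are assumptions for
illustration only, not part of the patch:

struct example_vq {
	u16 index;
	u16 num;
	u16 msix_vector;
	u64 desc_addr;
	u64 driver_addr;
	u64 device_addr;
};

static int example_setup_vq(struct virtio_pci_modern_device *mdev,
			    struct example_vq *vq)
{
	u16 max = vp_modern_get_queue_size(mdev, vq->index);

	if (!max || vq->num > max)
		return -EINVAL;

	vp_modern_set_queue_size(mdev, vq->index, vq->num);
	vp_modern_queue_address(mdev, vq->index, vq->desc_addr,
				vq->driver_addr, vq->device_addr);

	if (vp_modern_queue_vector(mdev, vq->index, vq->msix_vector) !=
	    vq->msix_vector)
		return -EBUSY;

	vp_modern_set_queue_enable(mdev, vq->index, true);
	return 0;
}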
>> +
>> +/*
>> + * vp_modern_get_num_queues - get the number of virtqueues
>> + * @mdev: the modern virtio-pci device
>> + *
>> + * Returns the number of virtqueues
>> + */
>> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
>> +{
>> + return vp_ioread16(&mdev->common->num_queues);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
>> +
>> +/*
>> + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
>> + * @mdev: the modern virtio-pci device
>> + * @index: the queue index
>> + *
>> + * Returns the notification offset for a virtqueue
>> + */
>> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
>> + u16 index)
>> +{
>> + vp_iowrite16(index, &mdev->common->queue_select);
>> +
>> + return vp_ioread16(&mdev->common->queue_notify_off);
>> +}
>> +EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
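
The offset returned here still needs to be scaled by notify_offset_multiplier
and added to the notify mapping to get the doorbell address. A sketch for the
common case where notify_base covers the whole notify area (the per-VQ
notify_map_cap path is omitted):

static void __iomem *example_vq_notify_addr(struct virtio_pci_modern_device *mdev,
					    u16 index)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	return mdev->notify_base + off * mdev->notify_offset_multiplier;
}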
>> +
>> +MODULE_VERSION("0.1");
>> +MODULE_DESCRIPTION("Modern Virtio PCI Device");
>> +MODULE_AUTHOR("Jason Wang <[email protected]>");
>> +MODULE_LICENSE("GPL");
>> diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
>> new file mode 100644
>> index 000000000000..f26acbeec965
>> --- /dev/null
>> +++ b/include/linux/virtio_pci_modern.h
>> @@ -0,0 +1,111 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef _LINUX_VIRTIO_PCI_MODERN_H
>> +#define _LINUX_VIRTIO_PCI_MODERN_H
>> +
>> +#include <linux/pci.h>
>> +#include <linux/virtio_pci.h>
>> +
>> +struct virtio_pci_modern_device {
>> + struct pci_dev *pci_dev;
>> +
>> + struct virtio_pci_common_cfg __iomem *common;
>> + /* Device-specific data (non-legacy mode) */
>> + void __iomem *device;
>> + /* Base of vq notifications (non-legacy mode). */
>> + void __iomem *notify_base;
>> + /* Where to read and clear interrupt */
>> + u8 __iomem *isr;
>> +
>> + /* So we can sanity-check accesses. */
>> + size_t notify_len;
>> + size_t device_len;
>> +
>> + /* Capability for when we need to map notifications per-vq. */
>> + int notify_map_cap;
>> +
>> + /* Multiply queue_notify_off by this value. (non-legacy mode). */
>> + u32 notify_offset_multiplier;
>> +
>> + int modern_bars;
>> +
>> + struct virtio_device_id id;
>> +};
>> +
>> +/*
>> + * Type-safe wrappers for io accesses.
>> + * Use these to enforce at compile time the following spec requirement:
>> + *
>> + * The driver MUST access each field using the “natural” access
>> + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
>> + * for 16-bit fields and 8-bit accesses for 8-bit fields.
>> + */
>> +static inline u8 vp_ioread8(const u8 __iomem *addr)
>> +{
>> + return ioread8(addr);
>> +}
>> +
>> +static inline u16 vp_ioread16(const __le16 __iomem *addr)
>> +{
>> + return ioread16(addr);
>> +}
>> +
>> +static inline u32 vp_ioread32(const __le32 __iomem *addr)
>> +{
>> + return ioread32(addr);
>> +}
>> +
>> +static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
>> +{
>> + iowrite8(value, addr);
>> +}
>> +
>> +static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
>> +{
>> + iowrite16(value, addr);
>> +}
>> +
>> +static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
>> +{
>> + iowrite32(value, addr);
>> +}
>> +
>> +static inline void vp_iowrite64_twopart(u64 val,
>> + __le32 __iomem *lo,
>> + __le32 __iomem *hi)
>> +{
>> + vp_iowrite32((u32)val, lo);
>> + vp_iowrite32(val >> 32, hi);
>> +}
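
For completeness, the matching two-part read is not added by this header; the
mirror of the pattern above would simply be (sketch only):

static inline u64 example_ioread64_twopart(const __le32 __iomem *lo,
					   const __le32 __iomem *hi)
{
	return (u64)vp_ioread32(lo) | ((u64)vp_ioread32(hi) << 32);
}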
>> +
>> +u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
>> +void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
>> + u64 features);
>> +u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
>> +u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
>> +void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
>> + u8 status);
>> +u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
>> + u16 idx, u16 vector);
>> +u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
>> + u16 vector);
>> +void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
>> + u16 index, u64 desc_addr, u64 driver_addr,
>> + u64 device_addr);
>> +void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
>> + u16 idx, bool enable);
>> +bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
>> + u16 idx);
>> +void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
>> + u16 idx, u16 size);
>> +u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
>> + u16 idx);
>> +u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
>> +u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
>> + u16 idx);
>> +void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
>> + size_t minlen,
>> + u32 align,
>> + u32 start, u32 size,
>> + size_t *len);
>> +int vp_modern_probe(struct virtio_pci_modern_device *mdev);
>> +void vp_modern_remove(struct virtio_pci_modern_device *mdev);
>> +#endif