In recent years, the mediated device pass-through framework (e.g. vfio-mdev)
has been used to achieve flexible device sharing across domains (e.g. VMs).
Also there are hardware assisted mediated pass-through solutions from
platform vendors. e.g. Intel VT-d scalable mode which supports Intel
Scalable I/O Virtualization technology. Such mdevs are called IOMMU-
backed mdevs as there is IOMMU-enforced DMA isolation for such mdevs.
In kernel, IOMMU-backed mdevs are exposed to IOMMU layer by aux-domain
concept, which means mdevs are protected by an iommu domain which is
auxiliary to the domain that the kernel driver primarily uses for DMA
API. Details can be found in the KVM presentation as below:
https://events19.linuxfoundation.org/wp-content/uploads/2017/12/\
Hardware-Assisted-Mediated-Pass-Through-with-VFIO-Kevin-Tian-Intel.pdf
This patch extends NESTING_IOMMU ops to IOMMU-backed mdev devices. The
main requirement is to use the auxiliary domain associated with mdev.
Cc: Kevin Tian <[email protected]>
CC: Jacob Pan <[email protected]>
CC: Jun Tian <[email protected]>
Cc: Alex Williamson <[email protected]>
Cc: Eric Auger <[email protected]>
Cc: Jean-Philippe Brucker <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Lu Baolu <[email protected]>
Signed-off-by: Liu Yi L <[email protected]>
---
v1 -> v2:
*) check the iommu_device to ensure the handling mdev is IOMMU-backed
---
drivers/vfio/vfio_iommu_type1.c | 39 +++++++++++++++++++++++++++++++++++----
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 960cc59..f1f1ae2 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2373,20 +2373,41 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
return ret;
}
+static struct device *vfio_get_iommu_device(struct vfio_group *group,
+ struct device *dev)
+{
+ if (group->mdev_group)
+ return vfio_mdev_get_iommu_device(dev);
+ else
+ return dev;
+}
+
static int vfio_dev_bind_gpasid_fn(struct device *dev, void *data)
{
struct domain_capsule *dc = (struct domain_capsule *)data;
unsigned long arg = *(unsigned long *)dc->data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_get_iommu_device(dc->group, dev);
+ if (!iommu_device)
+ return -EINVAL;
- return iommu_sva_bind_gpasid(dc->domain, dev, (void __user *)arg);
+ return iommu_sva_bind_gpasid(dc->domain, iommu_device,
+ (void __user *)arg);
}
static int vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
{
struct domain_capsule *dc = (struct domain_capsule *)data;
unsigned long arg = *(unsigned long *)dc->data;
+ struct device *iommu_device;
- iommu_sva_unbind_gpasid(dc->domain, dev, (void __user *)arg);
+ iommu_device = vfio_get_iommu_device(dc->group, dev);
+ if (!iommu_device)
+ return -EINVAL;
+
+ iommu_sva_unbind_gpasid(dc->domain, iommu_device,
+ (void __user *)arg);
return 0;
}
@@ -2395,8 +2416,13 @@ static int __vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
struct domain_capsule *dc = (struct domain_capsule *)data;
struct iommu_gpasid_bind_data *unbind_data =
(struct iommu_gpasid_bind_data *)dc->data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_get_iommu_device(dc->group, dev);
+ if (!iommu_device)
+ return -EINVAL;
- __iommu_sva_unbind_gpasid(dc->domain, dev, unbind_data);
+ __iommu_sva_unbind_gpasid(dc->domain, iommu_device, unbind_data);
return 0;
}
@@ -3077,8 +3103,13 @@ static int vfio_dev_cache_invalidate_fn(struct device *dev, void *data)
{
struct domain_capsule *dc = (struct domain_capsule *)data;
unsigned long arg = *(unsigned long *)dc->data;
+ struct device *iommu_device;
+
+ iommu_device = vfio_get_iommu_device(dc->group, dev);
+ if (!iommu_device)
+ return -EINVAL;
- iommu_cache_invalidate(dc->domain, dev, (void __user *)arg);
+ iommu_cache_invalidate(dc->domain, iommu_device, (void __user *)arg);
return 0;
}
--
2.7.4
Yi,
On 7/12/20 1:21 PM, Liu Yi L wrote:
> Recent years, mediated device pass-through framework (e.g. vfio-mdev)
> is used to achieve flexible device sharing across domains (e.g. VMs).
> Also there are hardware assisted mediated pass-through solutions from
> platform vendors. e.g. Intel VT-d scalable mode which supports Intel
> Scalable I/O Virtualization technology. Such mdevs are called IOMMU-
> backed mdevs as there are IOMMU enforced DMA isolation for such mdevs.
there is IOMMU enforced DMA isolation
> In kernel, IOMMU-backed mdevs are exposed to IOMMU layer by aux-domain
> concept, which means mdevs are protected by an iommu domain which is
> auxiliary to the domain that the kernel driver primarily uses for DMA
> API. Details can be found in the KVM presentation as below:
>
> https://events19.linuxfoundation.org/wp-content/uploads/2017/12/\
> Hardware-Assisted-Mediated-Pass-Through-with-VFIO-Kevin-Tian-Intel.pdf
>
> This patch extends NESTING_IOMMU ops to IOMMU-backed mdev devices. The
> main requirement is to use the auxiliary domain associated with mdev.
So as a result vSVM becomes functional for scalable mode mediated
devices, right?
>
> Cc: Kevin Tian <[email protected]>
> CC: Jacob Pan <[email protected]>
> CC: Jun Tian <[email protected]>
> Cc: Alex Williamson <[email protected]>
> Cc: Eric Auger <[email protected]>
> Cc: Jean-Philippe Brucker <[email protected]>
> Cc: Joerg Roedel <[email protected]>
> Cc: Lu Baolu <[email protected]>
> Signed-off-by: Liu Yi L <[email protected]>
> ---
> v1 -> v2:
> *) check the iommu_device to ensure the handling mdev is IOMMU-backed
> ---
> drivers/vfio/vfio_iommu_type1.c | 39 +++++++++++++++++++++++++++++++++++----
> 1 file changed, 35 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 960cc59..f1f1ae2 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -2373,20 +2373,41 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
> return ret;
> }
>
> +static struct device *vfio_get_iommu_device(struct vfio_group *group,
> + struct device *dev)
> +{
> + if (group->mdev_group)
> + return vfio_mdev_get_iommu_device(dev);
> + else
> + return dev;
> +}
> +
> static int vfio_dev_bind_gpasid_fn(struct device *dev, void *data)
> {
> struct domain_capsule *dc = (struct domain_capsule *)data;
> unsigned long arg = *(unsigned long *)dc->data;
> + struct device *iommu_device;
> +
> + iommu_device = vfio_get_iommu_device(dc->group, dev);
> + if (!iommu_device)
> + return -EINVAL;
>
> - return iommu_sva_bind_gpasid(dc->domain, dev, (void __user *)arg);
> + return iommu_sva_bind_gpasid(dc->domain, iommu_device,
> + (void __user *)arg);
> }
>
> static int vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
> {
> struct domain_capsule *dc = (struct domain_capsule *)data;
> unsigned long arg = *(unsigned long *)dc->data;
> + struct device *iommu_device;
>
> - iommu_sva_unbind_gpasid(dc->domain, dev, (void __user *)arg);
> + iommu_device = vfio_get_iommu_device(dc->group, dev);
> + if (!iommu_device)
> + return -EINVAL;
> +
> + iommu_sva_unbind_gpasid(dc->domain, iommu_device,
> + (void __user *)arg);
> return 0;
> }
>
> @@ -2395,8 +2416,13 @@ static int __vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
> struct domain_capsule *dc = (struct domain_capsule *)data;
> struct iommu_gpasid_bind_data *unbind_data =
> (struct iommu_gpasid_bind_data *)dc->data;
> + struct device *iommu_device;
> +
> + iommu_device = vfio_get_iommu_device(dc->group, dev);
> + if (!iommu_device)
> + return -EINVAL;
>
> - __iommu_sva_unbind_gpasid(dc->domain, dev, unbind_data);
> + __iommu_sva_unbind_gpasid(dc->domain, iommu_device, unbind_data);
> return 0;
> }
>
> @@ -3077,8 +3103,13 @@ static int vfio_dev_cache_invalidate_fn(struct device *dev, void *data)
> {
> struct domain_capsule *dc = (struct domain_capsule *)data;
> unsigned long arg = *(unsigned long *)dc->data;
> + struct device *iommu_device;
> +
> + iommu_device = vfio_get_iommu_device(dc->group, dev);
> + if (!iommu_device)
> + return -EINVAL;
>
> - iommu_cache_invalidate(dc->domain, dev, (void __user *)arg);
> + iommu_cache_invalidate(dc->domain, iommu_device, (void __user *)arg);
> return 0;
> }
>
>
Besides,
Looks good to me
Reviewed-by: Eric Auger <[email protected]>
Eric
Hi Eric,
> From: Auger Eric <[email protected]>
> Sent: Monday, July 20, 2020 8:22 PM
>
> Yi,
>
> On 7/12/20 1:21 PM, Liu Yi L wrote:
> > Recent years, mediated device pass-through framework (e.g. vfio-mdev)
> > is used to achieve flexible device sharing across domains (e.g. VMs).
> > Also there are hardware assisted mediated pass-through solutions from
> > platform vendors. e.g. Intel VT-d scalable mode which supports Intel
> > Scalable I/O Virtualization technology. Such mdevs are called IOMMU-
> > backed mdevs as there are IOMMU enforced DMA isolation for such mdevs.
> there is IOMMU enforced DMA isolation
> > In kernel, IOMMU-backed mdevs are exposed to IOMMU layer by aux-domain
> > concept, which means mdevs are protected by an iommu domain which is
> > auxiliary to the domain that the kernel driver primarily uses for DMA
> > API. Details can be found in the KVM presentation as below:
> >
> > https://events19.linuxfoundation.org/wp-content/uploads/2017/12/\
> > Hardware-Assisted-Mediated-Pass-Through-with-VFIO-Kevin-Tian-Intel.pdf
> >
> > This patch extends NESTING_IOMMU ops to IOMMU-backed mdev devices. The
> > main requirement is to use the auxiliary domain associated with mdev.
>
> So as a result vSVM becomes functional for scalable mode mediated devices, right?
yes, as long as the mediated device reports the PASID capability.
> >
> > Cc: Kevin Tian <[email protected]>
> > CC: Jacob Pan <[email protected]>
> > CC: Jun Tian <[email protected]>
> > Cc: Alex Williamson <[email protected]>
> > Cc: Eric Auger <[email protected]>
> > Cc: Jean-Philippe Brucker <[email protected]>
> > Cc: Joerg Roedel <[email protected]>
> > Cc: Lu Baolu <[email protected]>
> > Signed-off-by: Liu Yi L <[email protected]>
> > ---
> > v1 -> v2:
> > *) check the iommu_device to ensure the handling mdev is IOMMU-backed
> > ---
> > drivers/vfio/vfio_iommu_type1.c | 39
> > +++++++++++++++++++++++++++++++++++----
> > 1 file changed, 35 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/vfio/vfio_iommu_type1.c
> > b/drivers/vfio/vfio_iommu_type1.c index 960cc59..f1f1ae2 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -2373,20 +2373,41 @@ static int vfio_iommu_resv_refresh(struct
> vfio_iommu *iommu,
> > return ret;
> > }
> >
> > +static struct device *vfio_get_iommu_device(struct vfio_group *group,
> > + struct device *dev)
> > +{
> > + if (group->mdev_group)
> > + return vfio_mdev_get_iommu_device(dev);
> > + else
> > + return dev;
> > +}
> > +
> > static int vfio_dev_bind_gpasid_fn(struct device *dev, void *data) {
> > struct domain_capsule *dc = (struct domain_capsule *)data;
> > unsigned long arg = *(unsigned long *)dc->data;
> > + struct device *iommu_device;
> > +
> > + iommu_device = vfio_get_iommu_device(dc->group, dev);
> > + if (!iommu_device)
> > + return -EINVAL;
> >
> > - return iommu_sva_bind_gpasid(dc->domain, dev, (void __user *)arg);
> > + return iommu_sva_bind_gpasid(dc->domain, iommu_device,
> > + (void __user *)arg);
> > }
> >
> > static int vfio_dev_unbind_gpasid_fn(struct device *dev, void *data)
> > {
> > struct domain_capsule *dc = (struct domain_capsule *)data;
> > unsigned long arg = *(unsigned long *)dc->data;
> > + struct device *iommu_device;
> >
> > - iommu_sva_unbind_gpasid(dc->domain, dev, (void __user *)arg);
> > + iommu_device = vfio_get_iommu_device(dc->group, dev);
> > + if (!iommu_device)
> > + return -EINVAL;
> > +
> > + iommu_sva_unbind_gpasid(dc->domain, iommu_device,
> > + (void __user *)arg);
> > return 0;
> > }
> >
> > @@ -2395,8 +2416,13 @@ static int __vfio_dev_unbind_gpasid_fn(struct device
> *dev, void *data)
> > struct domain_capsule *dc = (struct domain_capsule *)data;
> > struct iommu_gpasid_bind_data *unbind_data =
> > (struct iommu_gpasid_bind_data *)dc->data;
> > + struct device *iommu_device;
> > +
> > + iommu_device = vfio_get_iommu_device(dc->group, dev);
> > + if (!iommu_device)
> > + return -EINVAL;
> >
> > - __iommu_sva_unbind_gpasid(dc->domain, dev, unbind_data);
> > + __iommu_sva_unbind_gpasid(dc->domain, iommu_device, unbind_data);
> > return 0;
> > }
> >
> > @@ -3077,8 +3103,13 @@ static int vfio_dev_cache_invalidate_fn(struct
> > device *dev, void *data) {
> > struct domain_capsule *dc = (struct domain_capsule *)data;
> > unsigned long arg = *(unsigned long *)dc->data;
> > + struct device *iommu_device;
> > +
> > + iommu_device = vfio_get_iommu_device(dc->group, dev);
> > + if (!iommu_device)
> > + return -EINVAL;
> >
> > - iommu_cache_invalidate(dc->domain, dev, (void __user *)arg);
> > + iommu_cache_invalidate(dc->domain, iommu_device, (void __user
> > +*)arg);
> > return 0;
> > }
> >
> >
> Besides,
>
> Looks grood to me
>
> Reviewed-by: Eric Auger <[email protected]>
thanks :-)
Regards,
Yi Liu
> Eric