Cc: Kevin Tian <[email protected]>
Cc: Jacob Pan <[email protected]>
Cc: Alex Williamson <[email protected]>
Cc: Eric Auger <[email protected]>
Cc: Jean-Philippe Brucker <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Lu Baolu <[email protected]>
Signed-off-by: Liu Yi L <[email protected]>
Signed-off-by: Jacob Pan <[email protected]>
---
v2 -> v3:
*) remove cap/ecap_mask in iommu_nesting_info.
---
drivers/iommu/intel/iommu.c | 81 +++++++++++++++++++++++++++++++++++++++++++--
include/linux/intel-iommu.h | 16 +++++++++
2 files changed, 95 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a9504cb..9f7ad1a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5659,12 +5659,16 @@ static inline bool iommu_pasid_support(void)
static inline bool nested_mode_support(void)
{
struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu, *prev = NULL;
bool ret = true;
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
+ if (!prev)
+ prev = iommu;
+ if (!sm_supported(iommu) || !ecap_nest(iommu->ecap) ||
+ (VTD_CAP_MASK & (iommu->cap ^ prev->cap)) ||
+ (VTD_ECAP_MASK & (iommu->ecap ^ prev->ecap))) {
ret = false;
break;
}
@@ -6079,6 +6083,78 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
+static int intel_iommu_get_nesting_info(struct iommu_domain *domain,
+ struct iommu_nesting_info *info)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ u64 cap = VTD_CAP_MASK, ecap = VTD_ECAP_MASK;
+ struct device_domain_info *domain_info;
+ struct iommu_nesting_info_vtd vtd;
+ unsigned long flags;
+ unsigned int size;
+
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED ||
+ !(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
+ return -ENODEV;
+
+ if (!info)
+ return -EINVAL;
+
+ size = sizeof(struct iommu_nesting_info) +
+ sizeof(struct iommu_nesting_info_vtd);
+	/*
+	 * If the provided buffer size is smaller than expected, return 0
+	 * and report the expected buffer size to the caller.
+	 */
+ if (info->size < size) {
+ info->size = size;
+ return 0;
+ }
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+	/*
+	 * Arbitrarily select the first domain_info, as all nesting
+	 * related capabilities should be consistent across iommu
+	 * units.
+	 */
+ domain_info = list_first_entry(&dmar_domain->devices,
+ struct device_domain_info, link);
+ cap &= domain_info->iommu->cap;
+ ecap &= domain_info->iommu->ecap;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ info->format = IOMMU_PASID_FORMAT_INTEL_VTD;
+ info->features = IOMMU_NESTING_FEAT_SYSWIDE_PASID |
+ IOMMU_NESTING_FEAT_BIND_PGTBL |
+ IOMMU_NESTING_FEAT_CACHE_INVLD;
+ info->addr_width = dmar_domain->gaw;
+ info->pasid_bits = ilog2(intel_pasid_max_id);
+ info->padding = 0;
+ vtd.flags = 0;
+ vtd.padding = 0;
+ vtd.cap_reg = cap;
+ vtd.ecap_reg = ecap;
+
+ memcpy(info->data, &vtd, sizeof(vtd));
+ return 0;
+}
+
+static int intel_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ {
+ struct iommu_nesting_info *info =
+ (struct iommu_nesting_info *)data;
+
+ return intel_iommu_get_nesting_info(domain, info);
+ }
+ default:
+ return -ENODEV;
+ }
+}
+
/*
* Check that the device does not live on an external facing PCI port that is
* marked as untrusted. Such devices should not be able to apply quirks and
@@ -6101,6 +6177,7 @@ const struct iommu_ops intel_iommu_ops = {
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
.domain_set_attr = intel_iommu_domain_set_attr,
+ .domain_get_attr = intel_iommu_domain_get_attr,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.aux_attach_dev = intel_iommu_aux_attach_device,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 18f292e..c4ed0d4 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -197,6 +197,22 @@
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
+/* Nesting Support Capability Alignment */
+#define VTD_CAP_FL1GP BIT_ULL(56)
+#define VTD_CAP_FL5LP BIT_ULL(60)
+#define VTD_ECAP_PRS BIT_ULL(29)
+#define VTD_ECAP_ERS BIT_ULL(30)
+#define VTD_ECAP_SRS BIT_ULL(31)
+#define VTD_ECAP_EAFS BIT_ULL(34)
+#define VTD_ECAP_PASID BIT_ULL(40)
+
+/* Only capabilities marked in the masks below are reported */
+#define VTD_CAP_MASK (VTD_CAP_FL1GP | VTD_CAP_FL5LP)
+
+#define VTD_ECAP_MASK (VTD_ECAP_PRS | VTD_ECAP_ERS | \
+ VTD_ECAP_SRS | VTD_ECAP_EAFS | \
+ VTD_ECAP_PASID)
+
/* Virtual command interface capability */
#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
--
2.7.4
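
For illustration, here is a minimal caller-side sketch of how the size
negotiation implemented by intel_iommu_get_nesting_info() could be consumed.
The probe_nesting_info() helper is hypothetical and not part of this series;
it assumes the iommu_nesting_info layout (size field plus trailing data[])
proposed earlier in the series and uses the existing iommu_domain_get_attr()
interface:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/slab.h>

static struct iommu_nesting_info *probe_nesting_info(struct iommu_domain *domain)
{
	struct iommu_nesting_info probe = { .size = sizeof(probe) };
	struct iommu_nesting_info *info;
	int ret;

	/* First call: buffer too small, the driver reports the expected size. */
	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_NESTING, &probe);
	if (ret)
		return ERR_PTR(ret);

	info = kzalloc(probe.size, GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* Second call: buffer is large enough, the driver fills in the data. */
	info->size = probe.size;
	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_NESTING, info);
	if (ret) {
		kfree(info);
		return ERR_PTR(ret);
	}

	return info;
}
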
Hi Yi,
Missing a proper commit message. You can comment on the fact that you only
support the case where all the physical IOMMUs have the same CAP/ECAP masks.
On 7/12/20 1:21 PM, Liu Yi L wrote:
> Cc: Kevin Tian <[email protected]>
> CC: Jacob Pan <[email protected]>
> Cc: Alex Williamson <[email protected]>
> Cc: Eric Auger <[email protected]>
> Cc: Jean-Philippe Brucker <[email protected]>
> Cc: Joerg Roedel <[email protected]>
> Cc: Lu Baolu <[email protected]>
> Signed-off-by: Liu Yi L <[email protected]>
> Signed-off-by: Jacob Pan <[email protected]>
> ---
> v2 -> v3:
> *) remove cap/ecap_mask in iommu_nesting_info.
> ---
> drivers/iommu/intel/iommu.c | 81 +++++++++++++++++++++++++++++++++++++++++++--
> include/linux/intel-iommu.h | 16 +++++++++
> 2 files changed, 95 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index a9504cb..9f7ad1a 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -5659,12 +5659,16 @@ static inline bool iommu_pasid_support(void)
> static inline bool nested_mode_support(void)
> {
> struct dmar_drhd_unit *drhd;
> - struct intel_iommu *iommu;
> + struct intel_iommu *iommu, *prev = NULL;
> bool ret = true;
>
> rcu_read_lock();
> for_each_active_iommu(iommu, drhd) {
> - if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
> + if (!prev)
> + prev = iommu;
> + if (!sm_supported(iommu) || !ecap_nest(iommu->ecap) ||
> + (VTD_CAP_MASK & (iommu->cap ^ prev->cap)) ||
> + (VTD_ECAP_MASK & (iommu->ecap ^ prev->ecap))) {
> ret = false;
> 			break;
> 		}
> @@ -6079,6 +6083,78 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
> return ret;
> }
>
> +static int intel_iommu_get_nesting_info(struct iommu_domain *domain,
> + struct iommu_nesting_info *info)
> +{
> + struct dmar_domain *dmar_domain = to_dmar_domain(domain);
> + u64 cap = VTD_CAP_MASK, ecap = VTD_ECAP_MASK;
> + struct device_domain_info *domain_info;
> + struct iommu_nesting_info_vtd vtd;
> + unsigned long flags;
> + unsigned int size;
> +
> + if (domain->type != IOMMU_DOMAIN_UNMANAGED ||
> + !(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
> + return -ENODEV;
> +
> + if (!info)
> + return -EINVAL;
> +
> + size = sizeof(struct iommu_nesting_info) +
> + sizeof(struct iommu_nesting_info_vtd);
> + /*
> + * if provided buffer size is smaller than expected, should
> + * return 0 and also the expected buffer size to caller.
> + */
> + if (info->size < size) {
> + info->size = size;
> + return 0;
> + }
> +
> + spin_lock_irqsave(&device_domain_lock, flags);
> + /*
> + * arbitrary select the first domain_info as all nesting
> + * related capabilities should be consistent across iommu
> + * units.
> + */
> + domain_info = list_first_entry(&dmar_domain->devices,
> + struct device_domain_info, link);
> + cap &= domain_info->iommu->cap;
> + ecap &= domain_info->iommu->ecap;
> + spin_unlock_irqrestore(&device_domain_lock, flags);
> +
> + info->format = IOMMU_PASID_FORMAT_INTEL_VTD;
> + info->features = IOMMU_NESTING_FEAT_SYSWIDE_PASID |
> + IOMMU_NESTING_FEAT_BIND_PGTBL |
> + IOMMU_NESTING_FEAT_CACHE_INVLD;
> + info->addr_width = dmar_domain->gaw;
> + info->pasid_bits = ilog2(intel_pasid_max_id);
> + info->padding = 0;
> + vtd.flags = 0;
> + vtd.padding = 0;
> + vtd.cap_reg = cap;
> + vtd.ecap_reg = ecap;
> +
> + memcpy(info->data, &vtd, sizeof(vtd));
> + return 0;
> +}
> +
> +static int intel_iommu_domain_get_attr(struct iommu_domain *domain,
> + enum iommu_attr attr, void *data)
> +{
> + switch (attr) {
> + case DOMAIN_ATTR_NESTING:
> + {
> + struct iommu_nesting_info *info =
> + (struct iommu_nesting_info *)data;
> +
> + return intel_iommu_get_nesting_info(domain, info);
> + }
> + default:
> + return -ENODEV;
-ENOENT?
> + }
> +}
> +
> /*
> * Check that the device does not live on an external facing PCI port that is
> * marked as untrusted. Such devices should not be able to apply quirks and
> @@ -6101,6 +6177,7 @@ const struct iommu_ops intel_iommu_ops = {
> .domain_alloc = intel_iommu_domain_alloc,
> .domain_free = intel_iommu_domain_free,
> .domain_set_attr = intel_iommu_domain_set_attr,
> + .domain_get_attr = intel_iommu_domain_get_attr,
> .attach_dev = intel_iommu_attach_device,
> .detach_dev = intel_iommu_detach_device,
> .aux_attach_dev = intel_iommu_aux_attach_device,
> diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
> index 18f292e..c4ed0d4 100644
> --- a/include/linux/intel-iommu.h
> +++ b/include/linux/intel-iommu.h
> @@ -197,6 +197,22 @@
> #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
> #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
>
> +/* Nesting Support Capability Alignment */
> +#define VTD_CAP_FL1GP BIT_ULL(56)
> +#define VTD_CAP_FL5LP BIT_ULL(60)
> +#define VTD_ECAP_PRS BIT_ULL(29)
> +#define VTD_ECAP_ERS BIT_ULL(30)
> +#define VTD_ECAP_SRS BIT_ULL(31)
> +#define VTD_ECAP_EAFS BIT_ULL(34)
> +#define VTD_ECAP_PASID BIT_ULL(40)
> +
> +/* Only capabilities marked in below MASKs are reported */
> +#define VTD_CAP_MASK (VTD_CAP_FL1GP | VTD_CAP_FL5LP)
> +
> +#define VTD_ECAP_MASK (VTD_ECAP_PRS | VTD_ECAP_ERS | \
> + VTD_ECAP_SRS | VTD_ECAP_EAFS | \
> + VTD_ECAP_PASID)
> +
> /* Virtual command interface capability */
> #define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
>
>
Thanks
Eric
Hi Eric,
> From: Auger Eric <[email protected]>
> Sent: Saturday, July 18, 2020 1:14 AM
>
> Hi Yi,
>
> Missing a proper commit message. You can comment on the fact that you only
> support the case where all the physical IOMMUs have the same CAP/ECAP masks.
Got it, will add one. The subject looked straightforward, so I left out the
commit message.
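Maybe something along these lines as a draft (wording to be refined):

    Report nesting related capability info to the caller of DOMAIN_ATTR_NESTING:
    the CAP/ECAP bits covered by VTD_CAP_MASK/VTD_ECAP_MASK, the address width,
    the PASID bits and the supported nesting features. Nested mode is only
    enabled when all the physical IOMMUs have consistent CAP/ECAP bits under
    these masks, so the capability of any IOMMU attached to the domain is
    representative.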
>
> On 7/12/20 1:21 PM, Liu Yi L wrote:
> > Cc: Kevin Tian <[email protected]>
> > CC: Jacob Pan <[email protected]>
> > Cc: Alex Williamson <[email protected]>
> > Cc: Eric Auger <[email protected]>
> > Cc: Jean-Philippe Brucker <[email protected]>
> > Cc: Joerg Roedel <[email protected]>
> > Cc: Lu Baolu <[email protected]>
> > Signed-off-by: Liu Yi L <[email protected]>
> > Signed-off-by: Jacob Pan <[email protected]>
> > ---
> > v2 -> v3:
> > *) remove cap/ecap_mask in iommu_nesting_info.
> > ---
> > drivers/iommu/intel/iommu.c | 81
> +++++++++++++++++++++++++++++++++++++++++++--
> > include/linux/intel-iommu.h | 16 +++++++++
> > 2 files changed, 95 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> > index a9504cb..9f7ad1a 100644
> > --- a/drivers/iommu/intel/iommu.c
> > +++ b/drivers/iommu/intel/iommu.c
> > @@ -5659,12 +5659,16 @@ static inline bool iommu_pasid_support(void)
> > static inline bool nested_mode_support(void)
> > {
> > struct dmar_drhd_unit *drhd;
> > - struct intel_iommu *iommu;
> > + struct intel_iommu *iommu, *prev = NULL;
> > bool ret = true;
> >
> > rcu_read_lock();
> > for_each_active_iommu(iommu, drhd) {
> > - if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
> > + if (!prev)
> > + prev = iommu;
> > + if (!sm_supported(iommu) || !ecap_nest(iommu->ecap) ||
> > + (VTD_CAP_MASK & (iommu->cap ^ prev->cap)) ||
> > + (VTD_ECAP_MASK & (iommu->ecap ^ prev->ecap))) {
> > ret = false;
> > 			break;
> > 		}
> > @@ -6079,6 +6083,78 @@ intel_iommu_domain_set_attr(struct iommu_domain
> *domain,
> > return ret;
> > }
> >
> > +static int intel_iommu_get_nesting_info(struct iommu_domain *domain,
> > + struct iommu_nesting_info *info)
> > +{
> > + struct dmar_domain *dmar_domain = to_dmar_domain(domain);
> > + u64 cap = VTD_CAP_MASK, ecap = VTD_ECAP_MASK;
> > + struct device_domain_info *domain_info;
> > + struct iommu_nesting_info_vtd vtd;
> > + unsigned long flags;
> > + unsigned int size;
> > +
> > + if (domain->type != IOMMU_DOMAIN_UNMANAGED ||
> > + !(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
> > + return -ENODEV;
> > +
> > + if (!info)
> > + return -EINVAL;
> > +
> > + size = sizeof(struct iommu_nesting_info) +
> > + sizeof(struct iommu_nesting_info_vtd);
> > + /*
> > + * if provided buffer size is smaller than expected, should
> > + * return 0 and also the expected buffer size to caller.
> > + */
> > + if (info->size < size) {
> > + info->size = size;
> > + return 0;
> > + }
> > +
> > + spin_lock_irqsave(&device_domain_lock, flags);
> > + /*
> > + * arbitrary select the first domain_info as all nesting
> > + * related capabilities should be consistent across iommu
> > + * units.
> > + */
> > + domain_info = list_first_entry(&dmar_domain->devices,
> > + struct device_domain_info, link);
> > + cap &= domain_info->iommu->cap;
> > + ecap &= domain_info->iommu->ecap;
> > + spin_unlock_irqrestore(&device_domain_lock, flags);
> > +
> > + info->format = IOMMU_PASID_FORMAT_INTEL_VTD;
> > + info->features = IOMMU_NESTING_FEAT_SYSWIDE_PASID |
> > + IOMMU_NESTING_FEAT_BIND_PGTBL |
> > + IOMMU_NESTING_FEAT_CACHE_INVLD;
> > + info->addr_width = dmar_domain->gaw;
> > + info->pasid_bits = ilog2(intel_pasid_max_id);
> > + info->padding = 0;
> > + vtd.flags = 0;
> > + vtd.padding = 0;
> > + vtd.cap_reg = cap;
> > + vtd.ecap_reg = ecap;
> > +
> > + memcpy(info->data, &vtd, sizeof(vtd));
> > + return 0;
> > +}
> > +
> > +static int intel_iommu_domain_get_attr(struct iommu_domain *domain,
> > + enum iommu_attr attr, void *data)
> > +{
> > + switch (attr) {
> > + case DOMAIN_ATTR_NESTING:
> > + {
> > + struct iommu_nesting_info *info =
> > + (struct iommu_nesting_info *)data;
> > +
> > + return intel_iommu_get_nesting_info(domain, info);
> > + }
> > + default:
> > + return -ENODEV;
> -ENOENT?
arm_smmu_domain_get_attr() uses -ENODEV, so I used the same. I can
change it if -ENOENT is preferred. :-)
Regards,
Yi Liu
> > + }
> > +}
> > +
> > /*
> > * Check that the device does not live on an external facing PCI port that is
> > * marked as untrusted. Such devices should not be able to apply quirks and
> > @@ -6101,6 +6177,7 @@ const struct iommu_ops intel_iommu_ops = {
> > .domain_alloc = intel_iommu_domain_alloc,
> > .domain_free = intel_iommu_domain_free,
> > .domain_set_attr = intel_iommu_domain_set_attr,
> > + .domain_get_attr = intel_iommu_domain_get_attr,
> > .attach_dev = intel_iommu_attach_device,
> > .detach_dev = intel_iommu_detach_device,
> > .aux_attach_dev = intel_iommu_aux_attach_device,
> > diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
> > index 18f292e..c4ed0d4 100644
> > --- a/include/linux/intel-iommu.h
> > +++ b/include/linux/intel-iommu.h
> > @@ -197,6 +197,22 @@
> > #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
> > #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
> >
> > +/* Nesting Support Capability Alignment */
> > +#define VTD_CAP_FL1GP BIT_ULL(56)
> > +#define VTD_CAP_FL5LP BIT_ULL(60)
> > +#define VTD_ECAP_PRS BIT_ULL(29)
> > +#define VTD_ECAP_ERS BIT_ULL(30)
> > +#define VTD_ECAP_SRS BIT_ULL(31)
> > +#define VTD_ECAP_EAFS BIT_ULL(34)
> > +#define VTD_ECAP_PASID BIT_ULL(40)
>
> > +
> > +/* Only capabilities marked in below MASKs are reported */
> > +#define VTD_CAP_MASK (VTD_CAP_FL1GP | VTD_CAP_FL5LP)
> > +
> > +#define VTD_ECAP_MASK (VTD_ECAP_PRS | VTD_ECAP_ERS | \
> > + VTD_ECAP_SRS | VTD_ECAP_EAFS | \
> > + VTD_ECAP_PASID)
> > +
> > /* Virtual command interface capability */
> > #define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation
> */
> >
> >
> Thanks
>
> Eric
>