2023-11-16 01:55:23

by Lu Baolu

Subject: [PATCH 0/4] iommu/vt-d: Miscellaneous cleanups

This series includes several miscellaneous cleanups that were discovered
during recent development. Please let me know if I have overlooked
anything.

Best regards,
baolu

Lu Baolu (4):
iommu/vt-d: Introduce dev_to_iommu()
iommu/vt-d: Remove unused parameter of
intel_pasid_setup_pass_through()
iommu/vt-d: Remove unused vcmd interfaces
iommu/vt-d: Move inline helpers to header files

drivers/iommu/intel/iommu.h | 184 +++++++++++++++++++++++-
drivers/iommu/intel/pasid.h | 213 +++++++++++++++++++++++++++-
drivers/iommu/intel/iommu.c | 237 +++----------------------------
drivers/iommu/intel/pasid.c | 274 +-----------------------------------
drivers/iommu/intel/svm.c | 20 +--
5 files changed, 421 insertions(+), 507 deletions(-)

--
2.34.1


2023-11-16 01:55:52

by Lu Baolu

Subject: [PATCH 1/4] iommu/vt-d: Introduce dev_to_iommu()

The device_to_iommu() helper was originally designed to look up the DMAR
ACPI table to retrieve the iommu device and the request ID for a given
device. However, it was also being used in other places where there was
no need to look up the ACPI table at all.

Introduce a new helper dev_to_iommu() which returns the iommu device saved
in the per-device iommu driver data. This function can be used after
iommu_probe_device() returns success.

Rename the original device_to_iommu() function to a more meaningful name,
device_lookup_iommu(), to avoid misusing it.
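
As an illustration (taken from the conversions in the diff below), a
typical post-probe call site changes from:

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		return -ENODEV;

to a plain read of the pointer cached in the per-device driver data:

	iommu = dev_to_iommu(dev);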

Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/iommu.h | 11 ++++++++++-
drivers/iommu/intel/iommu.c | 28 ++++++++--------------------
drivers/iommu/intel/svm.c | 20 +++-----------------
3 files changed, 21 insertions(+), 38 deletions(-)

diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 65d37a138c75..049d6af6aae8 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -851,6 +851,16 @@ static inline bool context_present(struct context_entry *context)
return (context->lo & 1);
}

+static inline struct intel_iommu *dev_to_iommu(struct device *dev)
+{
+ /*
+ * Assume that valid per-device iommu structure must be installed
+ * if iommu_probe_device() has succeeded. This helper could only
+ * be used after device is probed.
+ */
+ return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
+}
+
struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

int dmar_enable_qi(struct intel_iommu *iommu);
@@ -897,7 +907,6 @@ int dmar_ir_support(void);
void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 3531b956556c..b50ade814e94 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -703,7 +703,7 @@ static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
return false;
}

-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
@@ -2081,14 +2081,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct domain_context_mapping_data data;
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct pasid_table *table;
- struct intel_iommu *iommu;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;

table = intel_pasid_get_table(dev);

@@ -2447,15 +2444,10 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;

- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
@@ -4117,13 +4109,9 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = dev_to_iommu(dev);
int addr_width;

- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return -ENODEV;
-
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;

@@ -4399,7 +4387,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
int ret;

- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_lookup_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);

@@ -4735,8 +4723,8 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,

static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct intel_iommu *iommu = dev_to_iommu(dev);
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
unsigned long flags;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 50a481c895b8..cc138e3ed4a6 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -366,14 +366,9 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
- struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;

- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return;
-
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
return;
mm = svm->mm;
@@ -724,25 +719,16 @@ int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct iommu_fault_page_request *prm;
- struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
bool last_page;
- u8 bus, devfn;
int ret = 0;
u16 sid;

- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
- if (!msg || !evt)
- return -EINVAL;
-
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
--
2.34.1

2023-11-16 01:55:57

by Lu Baolu

Subject: [PATCH 2/4] iommu/vt-d: Remove unused parameter of intel_pasid_setup_pass_through()

The domain parameter of this helper is unused and can be deleted to avoid
dead code.

Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/pasid.h | 1 -
drivers/iommu/intel/iommu.c | 5 ++---
drivers/iommu/intel/pasid.c | 1 -
3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index dd37611175cc..16265bc1f7ec 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -111,7 +111,6 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b50ade814e94..3ccc2739236b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2460,7 +2460,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, domain,
+ ret = intel_pasid_setup_pass_through(iommu,
dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
@@ -4795,8 +4795,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
goto out_free;

if (domain_type_is_si(dmar_domain))
- ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
- dev, pasid);
+ ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 74e8e4c17e81..8a1bcabf71a9 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -767,7 +767,6 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
* Set up the scalable mode pasid entry for passthrough translation type.
*/
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid)
{
u16 did = FLPT_DEFAULT_DID;
--
2.34.1

2023-11-16 01:56:06

by Lu Baolu

Subject: [PATCH 3/4] iommu/vt-d: Remove unused vcmd interfaces

Commit 99b5726b4423 ("iommu: Remove ioasid infrastructure") has removed
ioasid allocation interfaces from the iommu subsystem. As a result, these
vcmd interfaces have become obsolete. Remove them to avoid dead code.

Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/pasid.h | 2 --
drivers/iommu/intel/pasid.c | 57 -------------------------------------
2 files changed, 59 deletions(-)

diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 16265bc1f7ec..647723c650bf 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -117,8 +117,6 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 8a1bcabf71a9..57ae716a2c70 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -26,63 +26,6 @@
*/
u32 intel_pasid_max_id = PASID_MAX;

-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
-{
- unsigned long flags;
- u8 status_code;
- int ret = 0;
- u64 res;
-
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
- IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
- !(res & VCMD_VRSP_IP), res);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- status_code = VCMD_VRSP_SC(res);
- switch (status_code) {
- case VCMD_VRSP_SC_SUCCESS:
- *pasid = VCMD_VRSP_RESULT_PASID(res);
- break;
- case VCMD_VRSP_SC_NO_PASID_AVAIL:
- pr_info("IOMMU: %s: No PASID available\n", iommu->name);
- ret = -ENOSPC;
- break;
- default:
- ret = -ENODEV;
- pr_warn("IOMMU: %s: Unexpected error code %d\n",
- iommu->name, status_code);
- }
-
- return ret;
-}
-
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
-{
- unsigned long flags;
- u8 status_code;
- u64 res;
-
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writeq(iommu->reg + DMAR_VCMD_REG,
- VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
- IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
- !(res & VCMD_VRSP_IP), res);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- status_code = VCMD_VRSP_SC(res);
- switch (status_code) {
- case VCMD_VRSP_SC_SUCCESS:
- break;
- case VCMD_VRSP_SC_INVALID_PASID:
- pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
- break;
- default:
- pr_warn("IOMMU: %s: Unexpected error code %d\n",
- iommu->name, status_code);
- }
-}
-
/*
* Per device pasid table management:
*/
--
2.34.1

2023-11-16 01:56:27

by Lu Baolu

Subject: [PATCH 4/4] iommu/vt-d: Move inline helpers to header files

Move inline helpers to header files so that other files can use them
without duplicating the code.
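
As a hedged sketch of the benefit (the caller below is hypothetical and
not part of this patch), another compilation unit in drivers/iommu/intel
can now include the header and use the helpers directly:

	#include "pasid.h"

	/* Hypothetical user: program the DID field of a pasid table entry. */
	static void example_set_did(struct pasid_entry *pe, u16 did)
	{
		pasid_set_domain_id(pe, did);
	}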

Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/iommu.h | 173 +++++++++++++++++++++++++++++
drivers/iommu/intel/pasid.h | 210 +++++++++++++++++++++++++++++++++++
drivers/iommu/intel/iommu.c | 204 ++--------------------------------
drivers/iommu/intel/pasid.c | 216 +-----------------------------------
4 files changed, 398 insertions(+), 405 deletions(-)

diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 049d6af6aae8..d747c33e4767 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -861,6 +861,179 @@ static inline struct intel_iommu *dev_to_iommu(struct device *dev)
return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
}

+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+#define MAX_AGAW_WIDTH (64)
+#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
+}
+
+static inline int width_to_agaw(int width)
+{
+ return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(u64 pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline u64 level_mask(int level)
+{
+ return -1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 level_size(int level)
+{
+ return 1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 align_to_level(u64 pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+}
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
+{
+ return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+ return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+ return mm_to_dma_pfn_start(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+ return page_to_dma_pfn(virt_to_page(p));
+}
+
+static inline void context_set_present(struct context_entry *context)
+{
+ context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
+ context->lo &= (((u64)-1) << 2) | 1;
+}
+
+static inline void context_set_translation_type(struct context_entry *context,
+ unsigned long value)
+{
+ context->lo &= (((u64)-1) << 4) | 3;
+ context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+ unsigned long value)
+{
+ context->lo &= ~VTD_PAGE_MASK;
+ context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+ unsigned long value)
+{
+ context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+ unsigned long value)
+{
+ context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_set_pasid(struct context_entry *context)
+{
+ context->lo |= CONTEXT_PASIDE;
+}
+
+static inline int context_domain_id(struct context_entry *c)
+{
+ return((c->hi >> 8) & 0xffff);
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+ context->lo = 0;
+ context->hi = 0;
+}
+
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+/*
+ * Set the RID_PASID field of a scalable mode context entry. The
+ * IOMMU hardware will use the PASID value set in this field for
+ * DMA translations of DMA requests without PASID.
+ */
+static inline void
+context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+{
+ context->hi |= pasid & ((1 << 20) - 1);
+}
+
+/*
+ * Set the DTE(Device-TLB Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_dte(struct context_entry *context)
+{
+ context->lo |= BIT_ULL(2);
+}
+
+/*
+ * Set the PRE(Page Request Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_pre(struct context_entry *context)
+{
+ context->lo |= BIT_ULL(4);
+}
+
+/* Convert value to context PASID directory size field coding. */
+#define context_pdts(pds) (((pds) & 0x7) << 9)
+
struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

int dmar_enable_qi(struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 647723c650bf..3fc8ffc5048d 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -96,6 +96,216 @@ static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}

+static inline void pasid_clear_entry(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], 0);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
+{
+ u64 old;
+
+ old = READ_ONCE(*ptr);
+ WRITE_ONCE(*ptr, (old & ~mask) | bits);
+}
+
+static inline u64 pasid_get_bits(u64 *ptr)
+{
+ return READ_ONCE(*ptr);
+}
+
+/*
+ * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_domain_id(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
+}
+
+/*
+ * Get domain ID value of a scalable mode PASID entry.
+ */
+static inline u16
+pasid_get_domain_id(struct pasid_entry *pe)
+{
+ return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
+}
+
+/*
+ * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_slptr(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
+ * entry.
+ */
+static inline void
+pasid_set_address_width(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
+}
+
+/*
+ * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_translation_type(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
+}
+
+/*
+ * Enable fault processing by clearing the FPD(Fault Processing
+ * Disable) field (Bit 1) of a scalable mode PASID entry.
+ */
+static inline void pasid_set_fault_enable(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 1, 0);
+}
+
+/*
+ * Enable second level A/D bits by setting the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_ssade(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
+}
+
+/*
+ * Disable second level A/D bits by clearing the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_clear_ssade(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 9, 0);
+}
+
+/*
+ * Checks if second level A/D bits specifically the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry is set.
+ */
+static inline bool pasid_get_ssade(struct pasid_entry *pe)
+{
+ return pasid_get_bits(&pe->val[0]) & (1 << 9);
+}
+
+/*
+ * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_sre(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 0, 1);
+}
+
+/*
+ * Setup the WPE(Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
+ * Setup the P(Present) field (Bit 0) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_present(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 0, 1);
+}
+
+/*
+ * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+{
+ pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+}
+
+/*
+ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
+ * entry. It is required when XD bit of the first level page table
+ * entry is about to be set.
+ */
+static inline void pasid_set_nxe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+}
+
+/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
+
+/*
+ * Setup the First Level Page table Pointer field (Bit 140~191)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flptr(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the First Level Paging Mode field (Bit 130~131) of a
+ * scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flpm(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
+}
+
+/*
+ * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void pasid_set_eafe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 3ccc2739236b..efd8f49b1a85 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -46,9 +46,6 @@

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

-#define MAX_AGAW_WIDTH 64
-#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
-
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

@@ -63,74 +60,6 @@

#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)

-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
-}
-
-static inline int width_to_agaw(int width)
-{
- return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(u64 pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline u64 level_mask(int level)
-{
- return -1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 level_size(int level)
-{
- return 1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 align_to_level(u64 pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
- return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
- return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

@@ -168,78 +97,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}

-static inline void context_set_present(struct context_entry *context)
-{
- context->lo |= 1;
-}
-
-static inline void context_set_fault_enable(struct context_entry *context)
-{
- context->lo &= (((u64)-1) << 2) | 1;
-}
-
-static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
-}
-
-static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= ~VTD_PAGE_MASK;
- context->lo |= value & VTD_PAGE_MASK;
-}
-
-static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= value & 7;
-}
-
-static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= (value & ((1 << 16) - 1)) << 8;
-}
-
-static inline void context_set_pasid(struct context_entry *context)
-{
- context->lo |= CONTEXT_PASIDE;
-}
-
-static inline int context_domain_id(struct context_entry *c)
-{
- return((c->hi >> 8) & 0xffff);
-}
-
-static inline void context_clear_entry(struct context_entry *context)
-{
- context->lo = 0;
- context->hi = 0;
-}
-
-static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- if (!iommu->copied_tables)
- return false;
-
- return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
-static inline void
-set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
-static inline void
-clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
/*
* This domain is a statically identity mapping domain.
* 1. This domain creats a static 1:1 mapping to all usable memory.
@@ -383,13 +240,12 @@ void free_pgtable_page(void *vaddr)
free_page((unsigned long)vaddr);
}

-static inline int domain_type_is_si(struct dmar_domain *domain)
+static int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
}

-static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
+static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

@@ -451,7 +307,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

-static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
@@ -1574,9 +1430,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
}

/* Notification for newly created mappings */
-static inline void __mapping_notify_one(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages)
+static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
{
/*
* It's a non-present to present mapping. Only flush if caching mode
@@ -1843,7 +1698,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
spin_unlock(&iommu->lock);
}

-static inline int guestwidth_to_adjustwidth(int gaw)
+static int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
int r = (gaw - 12) % 9;
@@ -1877,7 +1732,7 @@ static void domain_exit(struct dmar_domain *domain)
* Value of X in the PDTS field of a scalable mode context entry
* indicates PASID directory with 2^(X + 7) entries.
*/
-static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+static unsigned long context_get_sm_pds(struct pasid_table *table)
{
unsigned long pds, max_pde;

@@ -1889,38 +1744,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
return pds - 7;
}

-/*
- * Set the RID_PASID field of a scalable mode context entry. The
- * IOMMU hardware will use the PASID value set in this field for
- * DMA translations of DMA requests without PASID.
- */
-static inline void
-context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
-{
- context->hi |= pasid & ((1 << 20) - 1);
-}
-
-/*
- * Set the DTE(Device-TLB Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_dte(struct context_entry *context)
-{
- context->lo |= BIT_ULL(2);
-}
-
-/*
- * Set the PRE(Page Request Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_pre(struct context_entry *context)
-{
- context->lo |= BIT_ULL(4);
-}
-
-/* Convert value to context PASID directory size field coding. */
-#define context_pdts(pds) (((pds) & 0x7) << 9)
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
struct pasid_table *table,
@@ -2102,18 +1925,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
}

/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
+static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
host_addr &= ~PAGE_MASK;
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
-static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
+static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phy_pfn, unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
@@ -3604,7 +3424,7 @@ void intel_iommu_shutdown(void)
up_write(&dmar_global_lock);
}

-static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);

@@ -3683,7 +3503,7 @@ const struct attribute_group *intel_iommu_groups[] = {
NULL,
};

-static inline bool has_external_pci(void)
+static bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;

diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 57ae716a2c70..3239cefa4c33 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -173,30 +173,6 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
/*
* Interfaces for PASID table entry manipulation:
*/
-static inline void pasid_clear_entry(struct pasid_entry *pe)
-{
- WRITE_ONCE(pe->val[0], 0);
- WRITE_ONCE(pe->val[1], 0);
- WRITE_ONCE(pe->val[2], 0);
- WRITE_ONCE(pe->val[3], 0);
- WRITE_ONCE(pe->val[4], 0);
- WRITE_ONCE(pe->val[5], 0);
- WRITE_ONCE(pe->val[6], 0);
- WRITE_ONCE(pe->val[7], 0);
-}
-
-static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
-{
- WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
- WRITE_ONCE(pe->val[1], 0);
- WRITE_ONCE(pe->val[2], 0);
- WRITE_ONCE(pe->val[3], 0);
- WRITE_ONCE(pe->val[4], 0);
- WRITE_ONCE(pe->val[5], 0);
- WRITE_ONCE(pe->val[6], 0);
- WRITE_ONCE(pe->val[7], 0);
-}
-
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
@@ -212,192 +188,6 @@ intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
pasid_clear_entry(pe);
}

-static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
-{
- u64 old;
-
- old = READ_ONCE(*ptr);
- WRITE_ONCE(*ptr, (old & ~mask) | bits);
-}
-
-static inline u64 pasid_get_bits(u64 *ptr)
-{
- return READ_ONCE(*ptr);
-}
-
-/*
- * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_domain_id(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
-}
-
-/*
- * Get domain ID value of a scalable mode PASID entry.
- */
-static inline u16
-pasid_get_domain_id(struct pasid_entry *pe)
-{
- return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
-}
-
-/*
- * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_slptr(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
- * entry.
- */
-static inline void
-pasid_set_address_width(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
-}
-
-/*
- * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_translation_type(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
-}
-
-/*
- * Enable fault processing by clearing the FPD(Fault Processing
- * Disable) field (Bit 1) of a scalable mode PASID entry.
- */
-static inline void pasid_set_fault_enable(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 1, 0);
-}
-
-/*
- * Enable second level A/D bits by setting the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_ssade(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
-}
-
-/*
- * Disable second level A/D bits by clearing the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_clear_ssade(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 9, 0);
-}
-
-/*
- * Checks if second level A/D bits specifically the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry is set.
- */
-static inline bool pasid_get_ssade(struct pasid_entry *pe)
-{
- return pasid_get_bits(&pe->val[0]) & (1 << 9);
-}
-
-/*
- * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_sre(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 0, 1);
-}
-
-/*
- * Setup the WPE(Write Protect Enable) field (Bit 132) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_wpe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
-}
-
-/*
- * Setup the P(Present) field (Bit 0) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_present(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 0, 1);
-}
-
-/*
- * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
-{
- pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
-}
-
-/*
- * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
- * entry. It is required when XD bit of the first level page table
- * entry is about to be set.
- */
-static inline void pasid_set_nxe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
-}
-
-/*
- * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_pgsnp(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
-}
-
-/*
- * Setup the First Level Page table Pointer field (Bit 140~191)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_flptr(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the First Level Paging Mode field (Bit 130~131) of a
- * scalable mode PASID entry.
- */
-static inline void
-pasid_set_flpm(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
-}
-
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void pasid_set_eafe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -556,9 +346,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
* Skip top levels of page tables for iommu which has less agaw
* than default. Unnecessary for PT mode.
*/
-static inline int iommu_skip_agaw(struct dmar_domain *domain,
- struct intel_iommu *iommu,
- struct dma_pte **pgd)
+static int iommu_skip_agaw(struct dmar_domain *domain,
+ struct intel_iommu *iommu,
+ struct dma_pte **pgd)
{
int agaw;

--
2.34.1

2023-11-16 03:19:38

by Tian, Kevin

Subject: RE: [PATCH 1/4] iommu/vt-d: Introduce dev_to_iommu()

> From: Lu Baolu <[email protected]>
> Sent: Thursday, November 16, 2023 9:51 AM
>
> +static inline struct intel_iommu *dev_to_iommu(struct device *dev)
> +{
> + /*
> + * Assume that valid per-device iommu structure must be installed
> + * if iommu_probe_device() has succeeded. This helper could only
> + * be used after device is probed.
> + */
> + return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
> +}

Not sure whether this helper is useful. This is only used by 2 out of 5
post-probe users. Probably just open-coding in all 5 places is clearer.

2023-11-16 03:19:44

by Tian, Kevin

Subject: RE: [PATCH 2/4] iommu/vt-d: Remove unused parameter of intel_pasid_setup_pass_through()

> From: Lu Baolu <[email protected]>
> Sent: Thursday, November 16, 2023 9:51 AM
>
> The domain parameter of this helper is unused and can be deleted to avoid
> dead code.
>
> Signed-off-by: Lu Baolu <[email protected]>

Reviewed-by: Kevin Tian <[email protected]>

2023-11-16 03:22:26

by Tian, Kevin

Subject: RE: [PATCH 3/4] iommu/vt-d: Remove unused vcmd interfaces

> From: Lu Baolu <[email protected]>
> Sent: Thursday, November 16, 2023 9:51 AM
>
> Commit 99b5726b4423 ("iommu: Remove ioasid infrastructure") has
> removed
> ioasid allocation interfaces from the iommu subsystem. As a result, these
> vcmd interfaces have become obsolete. Remove them to avoid dead code.
>
> Signed-off-by: Lu Baolu <[email protected]>

it's a surprise to see some remaining vcmd bits not removed.

with a grep actually there are more (mostly in the header files):

drivers/iommu/intel/debugfs.c: IOMMU_REGSET_ENTRY(VCMD),
drivers/iommu/intel/pasid.h:#define VCMD_CMD_ALLOC 0x1
drivers/iommu/intel/pasid.h:#define VCMD_CMD_FREE 0x2
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_IP 0x1
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_SUCCESS 0
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_INVALID_PASID 16
drivers/iommu/intel/pasid.h:#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
drivers/iommu/intel/pasid.h:#define VCMD_CMD_OPERAND(e) ((e) << 16)
drivers/iommu/intel/iommu.h:#define DMAR_VCMD_REG 0xe00 /* Virtual command register */


2023-11-16 03:22:27

by Tian, Kevin

Subject: RE: [PATCH 4/4] iommu/vt-d: Move inline helpers to header files

> From: Lu Baolu <[email protected]>
> Sent: Thursday, November 16, 2023 9:51 AM
>
> Move inline helpers to header files so that other files can use them
> without duplicating the code.
>
> Signed-off-by: Lu Baolu <[email protected]>

Reviewed-by: Kevin Tian <[email protected]>

2023-11-16 03:28:59

by Lu Baolu

Subject: Re: [PATCH 1/4] iommu/vt-d: Introduce dev_to_iommu()

On 11/16/23 11:19 AM, Tian, Kevin wrote:
>> From: Lu Baolu <[email protected]>
>> Sent: Thursday, November 16, 2023 9:51 AM
>>
>> +static inline struct intel_iommu *dev_to_iommu(struct device *dev)
>> +{
>> + /*
>> + * Assume that valid per-device iommu structure must be installed
>> + * if iommu_probe_device() has succeeded. This helper could only
>> + * be used after device is probed.
>> + */
>> + return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
>> +}
>
> Not sure whether this helper is useful. This is only used by 2 out of 5
> post-probe users. Probably just open-coding in all 5 places is clearer.

I thought it would get more users in future development.

Best regards,
baolu

2023-11-16 03:29:58

by Lu Baolu

Subject: Re: [PATCH 3/4] iommu/vt-d: Remove unused vcmd interfaces

On 11/16/23 11:21 AM, Tian, Kevin wrote:
>> From: Lu Baolu <[email protected]>
>> Sent: Thursday, November 16, 2023 9:51 AM
>>
>> Commit 99b5726b4423 ("iommu: Remove ioasid infrastructure") has
>> removed
>> ioasid allocation interfaces from the iommu subsystem. As a result, these
>> vcmd interfaces have become obsolete. Remove them to avoid dead code.
>>
>> Signed-off-by: Lu Baolu <[email protected]>
>
> it's a surprise to see some remaining vcmd bits not removed.
>
> with a grep actually there are more (mostly in the header files):
>
> drivers/iommu/intel/debugfs.c: IOMMU_REGSET_ENTRY(VCMD),
> drivers/iommu/intel/pasid.h:#define VCMD_CMD_ALLOC 0x1
> drivers/iommu/intel/pasid.h:#define VCMD_CMD_FREE 0x2
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_IP 0x1
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_SUCCESS 0
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_SC_INVALID_PASID 16
> drivers/iommu/intel/pasid.h:#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
> drivers/iommu/intel/pasid.h:#define VCMD_CMD_OPERAND(e) ((e) << 16)
> drivers/iommu/intel/iommu.h:#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
>
>

Yeah! I will also remove these bits. Thanks!

Best regards,
baolu

2023-11-17 13:09:13

by Jason Gunthorpe

Subject: Re: [PATCH 1/4] iommu/vt-d: Introduce dev_to_iommu()

On Thu, Nov 16, 2023 at 11:23:56AM +0800, Baolu Lu wrote:
> On 11/16/23 11:19 AM, Tian, Kevin wrote:
> > > From: Lu Baolu <[email protected]>
> > > Sent: Thursday, November 16, 2023 9:51 AM
> > >
> > > +static inline struct intel_iommu *dev_to_iommu(struct device *dev)
> > > +{
> > > + /*
> > > + * Assume that valid per-device iommu structure must be installed
> > > + * if iommu_probe_device() has succeeded. This helper could only
> > > + * be used after device is probed.
> > > + */
> > > + return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
> > > +}
> >
> > Not sure whether this helper is useful. This is only used by 2 out of 5
> > post-probe users. Probably just open-coding in all 5 places is clearer.
>
> I thought it would get more users in future development.

The pattern in the SMMUv2 driver is like

struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;

Which really isn't worth the helper, unless you have lots of cases
where info isn't needed at all?

Jason

2023-11-18 02:32:16

by Lu Baolu

Subject: Re: [PATCH 1/4] iommu/vt-d: Introduce dev_to_iommu()

On 11/17/23 9:07 PM, Jason Gunthorpe wrote:
> On Thu, Nov 16, 2023 at 11:23:56AM +0800, Baolu Lu wrote:
>> On 11/16/23 11:19 AM, Tian, Kevin wrote:
>>>> From: Lu Baolu<[email protected]>
>>>> Sent: Thursday, November 16, 2023 9:51 AM
>>>>
>>>> +static inline struct intel_iommu *dev_to_iommu(struct device *dev)
>>>> +{
>>>> + /*
>>>> + * Assume that valid per-device iommu structure must be installed
>>>> + * if iommu_probe_device() has succeeded. This helper could only
>>>> + * be used after device is probed.
>>>> + */
>>>> + return ((struct device_domain_info *)dev_iommu_priv_get(dev))->iommu;
>>>> +}
>>> Not sure whether this helper is useful. This is only used by 2 out of 5
>>> post-probe users. Probably just open-coding in all 5 places is clearer.
>> I thought it would get more users in future development.
> The pattern in the SMMUv2 driver is like
>
> struct device_domain_info *info = dev_iommu_priv_get(dev);
> struct intel_iommu *iommu = info->iommu;
>
> Which really isn't worth the helper, unless you have lots of cases
> where info isn't needed at all?

No. As Kevin pointed out, there are only 2 places.

I will drop this helper.

Best regards,
baolu