2009-01-18 04:21:34

by Zhao, Yu

Subject: [PATCH v2 0/6] ATS capability support for Intel IOMMU

This patch series implements Address Translation Service (ATS) support
for the Intel IOMMU. ATS allows a PCI Endpoint to request DMA address
translations from the IOMMU and cache them locally, which reduces the
pressure on the IOMMU and improves hardware performance in I/O
virtualization environments.
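
For reference, here is a minimal sketch of how an IOMMU driver is
expected to use the PCI ATS interface added in patch 1/6. It is
illustrative only: the caller and device pointer are placeholders, and
12 is simply the minimum supported page shift (PCI_ATS_MIN_STU, i.e.
4KB pages).

	#include <linux/pci.h>

	/* Illustrative caller; 'pdev' is a placeholder device. */
	static int example_enable_ats(struct pci_dev *pdev)
	{
		int rc;

		/* enable ATS with a 4KB smallest translation unit */
		rc = pci_enable_ats(pdev, 12);
		if (rc)
			return rc;

		/* queue depth to use for device IOTLB invalidations */
		return pci_ats_qdep(pdev);
	}

Patch 6/6 follows this pattern: pci_enable_ats() is called with
VTD_PAGE_SHIFT and pci_ats_qdep() when a context entry is set up, and
pci_disable_ats() when the device leaves the domain.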

Changelog: v1 -> v2
added 'static' prefix to a local LIST_HEAD (Andrew Morton)


Yu Zhao (6):
PCI: support the ATS capability
VT-d: parse ATSR in DMA Remapping Reporting Structure
VT-d: add queue invalidation fault status support
VT-d: add device IOTLB invalidation support
VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
VT-d: support the device IOTLB

drivers/pci/dmar.c | 226 ++++++++++++++++++++++++++++++++++++++----
drivers/pci/intel-iommu.c | 137 +++++++++++++++++++++-----
drivers/pci/intr_remapping.c | 21 +++--
drivers/pci/pci.c | 68 +++++++++++++
include/linux/dmar.h | 9 ++
include/linux/intel-iommu.h | 19 +++-
include/linux/pci.h | 15 +++
include/linux/pci_regs.h | 10 ++
8 files changed, 450 insertions(+), 55 deletions(-)


2009-01-18 04:21:54

by Zhao, Yu

Subject: [PATCH v2 1/6] PCI: support the ATS capability

The ATS spec can be found at http://www.pcisig.com/specifications/iov/ats/
(it requires membership).

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/pci.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++
include/linux/pci.h | 15 ++++++++++
include/linux/pci_regs.h | 10 +++++++
3 files changed, 93 insertions(+), 0 deletions(-)

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e491fde..243e61c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1411,6 +1411,74 @@ void pci_enable_ari(struct pci_dev *dev)
}

/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or a negative value on error.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ ctrl = PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU) | PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats_enabled = 1;
+
+ return 0;
+}
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ int pos;
+ u16 ctrl;
+
+ if (!dev->ats_enabled)
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, pos + PCI_ATS_CTRL, ctrl);
+}
+
+/**
+ * pci_ats_qdep - query ATS invalidate queue depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or 0 on error.
+ */
+int pci_ats_qdep(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? : PCI_ATS_MAX_QDEP;
+}
+
+/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 80f8b8b..021a3ae 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -250,6 +250,7 @@ struct pci_dev {
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Service */
unsigned int is_managed:1;
unsigned int is_pcie:1;
pci_dev_flags_t dev_flags;
@@ -1189,5 +1190,19 @@ int pci_ext_cfg_avail(struct pci_dev *dev);

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);

+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_qdep(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats_enabled;
+}
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 027815b..3858b4f 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -498,6 +498,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
+#define PCI_EXT_CAP_ID_ATS 15

/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -615,4 +616,13 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */

+/* Address Translation Service */
+#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
+#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
+#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
+#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
+#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
+#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+
#endif /* LINUX_PCI_REGS_H */
--
1.5.6.4

2009-01-18 04:22:30

by Zhao, Yu

Subject: [PATCH v2 2/6] VT-d: parse ATSR in DMA Remapping Reporting Structure

Parse the Root Port ATS Capability Reporting Structure (ATSR) in the
DMA Remapping Reporting Structure (DMAR) ACPI table.
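
As an illustrative sketch (the helper name and device pointer are
placeholders), patch 6/6 consults the parsed ATSR units through
dmar_find_matched_atsr_unit() to decide whether a device sits below an
ATS-capable Root Port:

	#include <linux/intel-iommu.h>
	#include <linux/pci.h>

	static int example_dev_may_use_ats(struct pci_dev *pdev)
	{
		/* the Endpoint itself must expose the ATS capability... */
		if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
			return 0;

		/* ...and its Root Port must be covered by an ATSR unit */
		return dmar_find_matched_atsr_unit(pdev);
	}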

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/dmar.c | 112 ++++++++++++++++++++++++++++++++++++++++--
include/linux/dmar.h | 9 ++++
include/linux/intel-iommu.h | 1 +
3 files changed, 116 insertions(+), 6 deletions(-)

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a..bd37b3c 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -254,6 +254,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
}
return ret;
}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !bridge->is_pcie ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
#endif

static void __init
@@ -261,22 +339,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_atsr *atsr;

switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = (struct acpi_dmar_hardware_unit *)header;
+ drhd = container_of(header, struct acpi_dmar_hardware_unit,
+ header);
printk (KERN_INFO PREFIX
- "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, (unsigned long long)drhd->address);
+ "DRHD base: %#016Lx flags: %#x\n",
+ (unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+ rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+ header);
printk (KERN_INFO PREFIX
- "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ "RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
+ case ACPI_DMAR_TYPE_ATSR:
+ atsr = container_of(header, struct acpi_dmar_atsr, header);
+ printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ break;
}
}

@@ -341,6 +425,11 @@ parse_dmar_table(void)
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+ ret = dmar_parse_one_atsr(entry_header);
+#endif
+ break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
@@ -409,11 +498,19 @@ int __init dmar_dev_scope_init(void)
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
}
#endif

@@ -446,6 +543,9 @@ int __init dmar_table_init(void)
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index f284407..d3a1234 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -142,6 +142,15 @@ struct dmar_rmrr_unit {

#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
+struct dmar_atsr_unit {
+ struct list_head list; /* list of ATSR units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct pci_dev **devices; /* target devices */
+ int devices_cnt; /* target device count */
+ u8 include_all:1; /* include all ports */
+};
+
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c4f6c10..5323ad9 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -316,6 +316,7 @@ static inline void __iommu_flush_cache(
}

extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
--
1.5.6.4

2009-01-18 04:22:52

by Zhao, Yu

Subject: [PATCH v2 3/6] VT-d: add queue invalidation fault status support

Check the fault status register after submitting a queue invalidation request.
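
With this change callers can observe invalidation faults. A minimal
sketch of the new calling convention (illustrative only; the iommu
pointer is assumed to come from the caller), modelled on
qi_global_iec() below:

	#include <linux/intel-iommu.h>

	static int example_flush_iec(struct intel_iommu *iommu)
	{
		struct qi_desc desc;

		desc.low = QI_IEC_TYPE;
		desc.high = 0;

		/* returns -EINVAL if an Invalidation Queue Error occurred */
		return qi_submit_sync(&desc, iommu);
	}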

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/dmar.c | 59 +++++++++++++++++++++++++++++++----------
drivers/pci/intr_remapping.c | 21 ++++++++------
include/linux/intel-iommu.h | 4 ++-
3 files changed, 59 insertions(+), 25 deletions(-)

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index bd37b3c..0c87ebd 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -671,19 +671,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}

+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+ u32 fault;
+ int head;
+ struct q_inval *qi = iommu->qi;
+ int wait_index = (index + 1) % QI_LENGTH;
+
+ fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+ /*
+ * If IQE happens, the head points to the descriptor associated
+ * with the error. No new descriptors are fetched until the IQE
+ * is cleared.
+ */
+ if (fault & DMA_FSTS_IQE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ if ((head >> DMAR_IQ_OFFSET) == index) {
+ memcpy(&qi->desc[index], &qi->desc[wait_index],
+ sizeof(struct qi_desc));
+ __iommu_flush_cache(iommu, &qi->desc[index],
+ sizeof(struct qi_desc));
+ writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/*
* Submit the queued invalidation descriptor to the remapping
* hardware unit and wait for its completion.
*/
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
+ int rc = 0;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
unsigned long flags;

if (!qi)
- return;
+ return 0;

hw = qi->desc;

@@ -701,7 +731,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

hw[index] = *desc;

- wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+ wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+ QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

hw[wait_index] = wait_desc;
@@ -712,13 +743,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;

- spin_lock(&iommu->register_lock);
/*
* update the HW tail register indicating the presence of
* new descriptors.
*/
- writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
- spin_unlock(&iommu->register_lock);
+ writel(qi->free_head << DMAR_IQ_OFFSET, iommu->reg + DMAR_IQT_REG);

while (qi->desc_status[wait_index] != QI_DONE) {
/*
@@ -728,6 +757,10 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
* a deadlock where the interrupt context can wait indefinitely
* for free slots in the queue.
*/
+ rc = qi_check_fault(iommu, index);
+ if (rc)
+ break;
+
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
@@ -737,6 +770,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);
+
+ return rc;
}

/*
@@ -749,13 +784,13 @@ void qi_global_iec(struct intel_iommu *iommu)
desc.low = QI_IEC_TYPE;
desc.high = 0;

+ /* should never fail */
qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
u64 type, int non_present_entry_flush)
{
-
struct qi_desc desc;

if (non_present_entry_flush) {
@@ -769,10 +804,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;

- qi_submit_sync(&desc, iommu);
-
- return 0;
-
+ return qi_submit_sync(&desc, iommu);
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -802,10 +834,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);

- qi_submit_sync(&desc, iommu);
-
- return 0;
-
+ return qi_submit_sync(&desc, iommu);
}

/*
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f78371b..45effc5 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
return index;
}

-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
struct qi_desc desc;

@@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
| QI_IEC_SELECTIVE;
desc.high = 0;

- qi_submit_sync(&desc, iommu);
+ return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)

int modify_irte(int irq, struct irte *irte_modified)
{
+ int rc;
int index;
struct irte *irte;
struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
__iommu_flush_cache(iommu, irte, sizeof(*irte));

- qi_flush_iec(iommu, index, 0);
-
+ rc = qi_flush_iec(iommu, index, 0);
spin_unlock(&irq_2_ir_lock);
- return 0;
+
+ return rc;
}

int flush_irte(int irq)
{
+ int rc;
int index;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)

index = irq_iommu->irte_index + irq_iommu->sub_handle;

- qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+ rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
spin_unlock(&irq_2_ir_lock);

- return 0;
+ return rc;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)

int free_irte(int irq)
{
+ int rc = 0;
int index, i;
struct irte *irte;
struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
if (!irq_iommu->sub_handle) {
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
set_64bit((unsigned long *)irte, 0);
- qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+ rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)

spin_unlock(&irq_2_ir_lock);

- return 0;
+ return rc;
}

static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 5323ad9..0a220c9 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -53,6 +53,7 @@
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_OFFSET 4 /* Invalidation queue head/tail offset */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
@@ -194,6 +195,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
@@ -329,7 +331,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type,
int non_present_entry_flush);

-extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
--
1.5.6.4

2009-01-18 04:23:18

by Zhao, Yu

Subject: [PATCH v2 6/6] VT-d: support the device IOTLB

Support the device IOTLB (i.e. ATS) in both native and KVM
environments: ATS is enabled when the context entry is set up for a
capable device, and the device IOTLB is flushed whenever the
corresponding IOMMU IOTLB entries are invalidated.
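
For example (illustrative numbers, assuming PAGE_SHIFT ==
VTD_PAGE_SHIFT == 12): a deferred IOVA range of 8 pages in
flush_unmaps() below gives mask = (8 << 12) >> 12 = 8, then
mask = ilog2(8) = 3, so the device IOTLB invalidation covers
2^3 = 8 pages starting at iova->pfn_lo << PAGE_SHIFT.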

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/intel-iommu.c | 97 +++++++++++++++++++++++++++++++++++++++++-
include/linux/intel-iommu.h | 1 +
2 files changed, 95 insertions(+), 3 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index df92764..fb84d82 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -125,6 +125,7 @@ static inline void context_set_fault_enable(struct context_entry *context)
}

#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1

static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
@@ -240,6 +241,8 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus numer */
u8 devfn; /* PCI devfn number */
+ int qdep; /* invalidate queue depth */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -914,6 +917,75 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
return 0;
}

+static struct device_domain_info *
+iommu_support_dev_iotlb(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+ int found = 0;
+ unsigned long flags;
+ struct device_domain_info *info;
+ struct intel_iommu *iommu = device_to_iommu(bus, devfn);
+
+ if (!ecap_dev_iotlb_support(iommu->ecap))
+ return NULL;
+
+ if (!iommu->qi)
+ return NULL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->dev && info->bus == bus && info->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (!found)
+ return NULL;
+
+ if (!dmar_find_matched_atsr_unit(info->dev))
+ return NULL;
+
+ info->iommu = iommu;
+ info->qdep = pci_ats_qdep(info->dev);
+ if (!info->qdep)
+ return NULL;
+
+ return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+{
+ pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+ if (info->dev && pci_ats_enabled(info->dev))
+ pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ u64 addr, unsigned int mask)
+{
+ int rc;
+ u16 sid;
+ unsigned long flags;
+ struct device_domain_info *info;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ continue;
+
+ sid = info->bus << 8 | info->devfn;
+ rc = qi_flush_dev_iotlb(info->iommu, sid,
+ info->qdep, addr, mask);
+ if (rc)
+ printk(KERN_ERR "IOMMU: flush device IOTLB failed\n");
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
{
@@ -937,6 +1009,9 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
rc = iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH,
non_present_entry_flush);
+ if (!rc && !non_present_entry_flush)
+ iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+
return rc;
}

@@ -1461,6 +1536,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
unsigned long ndomains;
int id;
int agaw;
+ struct device_domain_info *info;

pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1526,7 +1602,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_domain_id(context, id);
context_set_address_width(context, iommu->agaw);
context_set_address_root(context, virt_to_phys(pgd));
- context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+ info = iommu_support_dev_iotlb(domain, bus, devfn);
+ if (info)
+ context_set_translation_type(context, CONTEXT_TT_DEV_IOTLB);
+ else
+ context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
@@ -1538,6 +1618,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu_flush_write_buffer(iommu);
else
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+ if (info)
+ iommu_enable_dev_iotlb(info);

spin_unlock_irqrestore(&iommu->lock, flags);

@@ -1679,6 +1761,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);

+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -2296,8 +2379,14 @@ static void flush_unmaps(void)
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH, 0);
for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
+ unsigned long mask;
+ struct iova *iova = deferred_flush[i].iova[j];
+
+ mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
+ mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ iova->pfn_lo << PAGE_SHIFT, mask);
+ __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
}
@@ -2784,6 +2873,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);

+ iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);

@@ -2832,6 +2922,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)

spin_unlock_irqrestore(&device_domain_lock, flags1);

+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d82bdac..609af82 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,6 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)


--
1.5.6.4

2009-01-18 04:23:42

by Zhao, Yu

Subject: [PATCH v2 4/6] VT-d: add device IOTLB invalidation support

Support device IOTLB invalidation to flush the translations cached in
the Endpoint.
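
A minimal sketch (illustrative only; the helper name, device pointer
and queue depth are placeholders) of how patch 6/6 drives the new
interface, where the source ID is the device's bus/devfn and the queue
depth comes from pci_ats_qdep():

	#include <linux/intel-iommu.h>
	#include <linux/pci.h>

	static int example_flush_dev_iotlb(struct intel_iommu *iommu,
					   struct pci_dev *pdev, int qdep,
					   u64 addr, unsigned int mask)
	{
		u16 sid = pdev->bus->number << 8 | pdev->devfn;

		return qi_flush_dev_iotlb(iommu, sid, qdep, addr, mask);
	}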

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/dmar.c | 63 ++++++++++++++++++++++++++++++++++++++++--
include/linux/intel-iommu.h | 13 ++++++++-
2 files changed, 72 insertions(+), 4 deletions(-)

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 0c87ebd..4fea360 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -664,7 +664,8 @@ void free_iommu(struct intel_iommu *iommu)
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE) {
+ while (qi->desc_status[qi->free_tail] == QI_DONE ||
+ qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
@@ -674,10 +675,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
- int head;
+ int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;

+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+
fault = readl(iommu->reg + DMAR_FSTS_REG);

/*
@@ -697,6 +701,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
}

+ /*
+ * If ITE happens, all pending wait_desc commands are aborted.
+ * No new descriptors are fetched until the ITE is cleared.
+ */
+ if (fault & DMA_FSTS_ITE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ head = ((head >> DMAR_IQ_OFFSET) - 1 + QI_LENGTH) % QI_LENGTH;
+ head |= 1;
+ tail = readl(iommu->reg + DMAR_IQT_REG);
+ tail = ((tail >> DMAR_IQ_OFFSET) - 1 + QI_LENGTH) % QI_LENGTH;
+
+ writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+ do {
+ if (qi->desc_status[head] == QI_IN_USE)
+ qi->desc_status[head] = QI_ABORT;
+ head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ } while (head != tail);
+
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+ }
+
+ if (fault & DMA_FSTS_ICE)
+ writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
return 0;
}

@@ -706,7 +736,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
- int rc = 0;
+ int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
@@ -717,6 +747,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)

hw = qi->desc;

+restart:
+ rc = 0;
+
spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -771,6 +804,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);

+ if (rc == -EAGAIN)
+ goto restart;
+
return rc;
}

@@ -837,6 +873,27 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
return qi_submit_sync(&desc, iommu);
}

+int qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, int qdep,
+ u64 addr, unsigned int mask)
+{
+ struct qi_desc desc;
+
+ if (mask) {
+ BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else
+ desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE;
+
+ return qi_submit_sync(&desc, iommu);
+}
+
/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 0a220c9..d82bdac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -196,6 +196,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
@@ -224,7 +226,8 @@ do { \
enum {
QI_FREE,
QI_IN_USE,
- QI_DONE
+ QI_DONE,
+ QI_ABORT
};

#define QI_CC_TYPE 0x1
@@ -253,6 +256,12 @@ enum {
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
struct qi_desc {
u64 low, high;
};
@@ -330,6 +339,8 @@ extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type,
int non_present_entry_flush);
+extern int qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, int qdep,
+ u64 addr, unsigned int mask);

extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

--
1.5.6.4

2009-01-18 04:24:16

by Zhao, Yu

Subject: [PATCH v2 5/6] VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps

Make iommu_flush_iotlb_psi() and flush_unmaps() easier to read.
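
For example (illustrative numbers): with pages = 9,
mask = ilog2(__roundup_pow_of_two(9)) = ilog2(16) = 4, so the page
selective invalidation covers 2^4 = 16 pages; if 4 exceeded
cap_max_amask_val(iommu->cap), or PSI were not supported at all, the
code falls back to a domain selective flush.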

Signed-off-by: Yu Zhao <[email protected]>
---
drivers/pci/intel-iommu.c | 46 +++++++++++++++++++++-----------------------
1 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3dfecb2..df92764 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -917,30 +917,27 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
{
- unsigned int mask;
+ int rc;
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));

BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);

- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-
/*
+ * Fallback to domain selective flush if no PSI support or the size is
+ * too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ rc = iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH,
+ non_present_entry_flush);
+ else
+ rc = iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH,
+ non_present_entry_flush);
+ return rc;
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -2293,15 +2290,16 @@ static void flush_unmaps(void)
if (!iommu)
continue;

- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ __free_iova(&deferred_flush[i].domain[j]->iovad,
+ deferred_flush[i].iova[j]);
}
+ deferred_flush[i].next = 0;
}

list_size = 0;
--
1.5.6.4