This series introduces support for AMD Secure Nested Paging (SNP) with IOMMU,
which mainly affects the use of the IOMMU Exclusion Base and Exclusion Range
Limit registers. Note that these registers are no longer used by the Linux
IOMMU driver. Patches 2 and 3 are SNP-specific, and their commit messages
discuss the implementation in detail.
In order to support SNP, the current Completion Wait Write-back logic
is modified (patch 1/3). This change also works independently of SNP.
Please see the following white paper for more info on SNP:
https://www.amd.com/system/files/TechDocs/SEV-SNP-strengthening-vm-isolation-with-integrity-protection-and-more.pdf
Changes from V1: (https://lkml.org/lkml/2020/9/16/455)
- Patch 2/3: Fix up per Joerg's comments
Thank you,
Suravee
Suravee Suthikulpanit (3):
iommu: amd: Use 4K page for completion wait write-back semaphore
iommu: amd: Add support for RMP_PAGE_FAULT and RMP_HW_ERR
iommu: amd: Re-purpose Exclusion range registers to support SNP CWWB
drivers/iommu/amd/amd_iommu_types.h | 6 +-
drivers/iommu/amd/init.c | 44 ++++++++++++++
drivers/iommu/amd/iommu.c | 90 +++++++++++++++++++++++++----
3 files changed, 127 insertions(+), 13 deletions(-)
--
2.17.1
IOMMU SNP support requires the completion wait write-back semaphore to be
implemented using a 4K-aligned page, where the page address is to be
programmed into the newly introduced MMIO base/range registers.
This new scheme uses a per-iommu atomic variable to store the current
semaphore value, which is incremented for every completion wait command.
Since this new scheme is also compatible with non-SNP mode,
generalize the driver to use a 4K page for the completion-wait semaphore in
both modes.
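For reference, here is a minimal standalone sketch of the scheme (simplified
from the diff below; queue_completion_wait() is a hypothetical stand-in for
building the COMPLETION_WAIT command and calling __iommu_queue_command_sync()):
each wait gets a fresh counter value, and the CPU polls the 4K write-back page
until the IOMMU has stored that value.

/* Sketch only, not the actual driver code. */
struct cwwb_sem {
	volatile u64 *sem;	/* 4K-aligned write-back page */
	u64 val;		/* last value handed to the IOMMU */
};

static int cwwb_sync(struct cwwb_sem *s)
{
	u64 data = ++s->val;	/* unique value for this wait */
	int i = 0;

	/* hypothetical: tells the IOMMU to write 'data' to *s->sem */
	queue_completion_wait(iommu_virt_to_phys((void *)s->sem), data);

	while (*s->sem != data && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	return (i == LOOP_TIMEOUT) ? -EIO : 0;
}

Because the value is unique per wait, a stale write-back from an earlier
command can never satisfy a later poll, which removes the need to reset the
semaphore to zero before each wait.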
Cc: Brijesh Singh <[email protected]>
Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
drivers/iommu/amd/amd_iommu_types.h | 3 ++-
drivers/iommu/amd/init.c | 18 ++++++++++++++++++
drivers/iommu/amd/iommu.c | 23 +++++++++++------------
3 files changed, 31 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 30a5d412255a..4c80483e78ec 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -595,7 +595,8 @@ struct amd_iommu {
#endif
u32 flags;
- volatile u64 __aligned(8) cmd_sem;
+ volatile u64 *cmd_sem;
+ u64 cmd_sem_val;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 445a08d23fed..febc072f2717 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -813,6 +813,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
return ret;
}
+static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+
+ return iommu->cmd_sem ? 0 : -ENOMEM;
+}
+
+static void __init free_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem)
+ free_page((unsigned long)iommu->cmd_sem);
+}
+
static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
@@ -1395,6 +1408,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
free_ppr_log(iommu);
@@ -1481,6 +1495,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
int ret;
raw_spin_lock_init(&iommu->lock);
+ iommu->cmd_sem_val = 0;
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
@@ -1558,6 +1573,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ if (alloc_cwwb_sem(iommu))
+ return -ENOMEM;
+
if (alloc_command_buffer(iommu))
return -ENOMEM;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 10e4200d3552..9e9898683537 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -792,11 +792,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
*
****************************************************************************/
-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
int i = 0;
- while (*sem == 0 && i < LOOP_TIMEOUT) {
+ while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
udelay(1);
i += 1;
}
@@ -827,16 +827,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+ struct amd_iommu *iommu,
+ u64 data)
{
- u64 paddr = iommu_virt_to_phys((void *)address);
-
- WARN_ON(address & 0x7ULL);
+ u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = 1;
+ cmd->data[2] = data;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
@@ -1045,22 +1045,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
struct iommu_cmd cmd;
unsigned long flags;
int ret;
+ u64 data;
if (!iommu->need_sync)
return 0;
-
- build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
raw_spin_lock_irqsave(&iommu->lock, flags);
- iommu->cmd_sem = 0;
+ data = ++iommu->cmd_sem_val;
+ build_completion_wait(&cmd, iommu, data);
ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
goto out_unlock;
- ret = wait_on_sem(&iommu->cmd_sem);
+ ret = wait_on_sem(iommu, data);
out_unlock:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
--
2.17.1
IOMMU SNP support introduces two new IOMMU events:
* RMP Page Fault event
* RMP Hardware Error event
Hence, add reporting functions for these events.
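For context, the event-log handler dispatches on the type field of each
16-byte entry; below is a minimal sketch of that dispatch (the EVENT_TYPE_*
values are the ones this patch adds, and EVENT_TYPE_SHIFT/EVENT_TYPE_MASK are
assumed to match the driver's existing 28/0xf definitions):

/* event points at a 16-byte event-log entry (four 32-bit words) */
static void dispatch_rmp_event(volatile u32 *event)
{
	u32 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;

	if (type == EVENT_TYPE_RMP_FAULT)	/* 0xd */
		amd_iommu_report_rmp_fault(event);
	else if (type == EVENT_TYPE_RMP_HW_ERR)	/* 0xe */
		amd_iommu_report_rmp_hw_error(event);
	/* all other types remain handled by iommu_print_event() */
}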
Cc: Brijesh Singh <[email protected]>
Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
drivers/iommu/amd/amd_iommu_types.h | 2 +
drivers/iommu/amd/iommu.c | 67 +++++++++++++++++++++++++++++
2 files changed, 69 insertions(+)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 4c80483e78ec..1e7966c73707 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -128,6 +128,8 @@
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
#define EVENT_TYPE_INV_PPR_REQ 0x9
+#define EVENT_TYPE_RMP_FAULT 0xd
+#define EVENT_TYPE_RMP_HW_ERR 0xe
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK_LO 0xffff
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9e9898683537..ea64fa8a9418 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -486,6 +486,67 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
+static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 spa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
+
+ pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data && __ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ vmg_tag, spa, flags);
+ } else {
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, spa, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
+static void amd_iommu_report_rmp_fault(volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, flags_rmp, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 gpa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ gpa = ((u64)event[3] << 32) | event[2];
+
+ pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data && __ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ vmg_tag, gpa, flags_rmp, flags);
+ } else {
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, gpa, flags_rmp, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
u64 address, int flags)
{
@@ -577,6 +638,12 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
+ case EVENT_TYPE_RMP_FAULT:
+ amd_iommu_report_rmp_fault(event);
+ break;
+ case EVENT_TYPE_RMP_HW_ERR:
+ amd_iommu_report_rmp_hw_error(event);
+ break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
--
2.17.1
When the IOMMU SNP support bit is set in the IOMMU Extended Features
register, hardware re-purposes the following registers:
1. IOMMU Exclusion Base register (MMIO offset 0020h) to
Completion Wait Write-Back (CWWB) Base register
2. IOMMU Exclusion Range Limit (MMIO offset 0028h) to
Completion Wait Write-Back (CWWB) Range Limit register
and requires the IOMMU CWWB semaphore base and range to be programmed
into the registers at offsets 0020h and 0028h accordingly.
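As a hypothetical worked example (the address is made up): if the semaphore
page sits at physical 0x12345000, both registers receive that value, and
base == limit covers exactly one 4 KB page:

u64 start = 0x12345000ULL;		/* phys addr of iommu->cmd_sem page */
u64 entry = start & PM_ADDR_MASK;	/* 4K-aligned address */

writeq(entry, mmio_base + 0x0020);	/* CWWB Base  (was Exclusion Base)  */
writeq(entry, mmio_base + 0x0028);	/* CWWB Limit (was Exclusion Limit) */

(writeq is used here only for brevity; the patch below programs the registers
with memcpy_toio.)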
Cc: Brijesh Singh <[email protected]>
Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
drivers/iommu/amd/amd_iommu_types.h | 1 +
drivers/iommu/amd/init.c | 26 ++++++++++++++++++++++++++
2 files changed, 27 insertions(+)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 1e7966c73707..f696ac7c5f89 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -93,6 +93,7 @@
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_EPHSUP (1ULL<<50)
+#define FEATURE_SNP (1ULL<<63)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index febc072f2717..c55df4347487 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -359,6 +359,29 @@ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
&entry, sizeof(entry));
}
+static void iommu_set_cwwb_range(struct amd_iommu *iommu)
+{
+ u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 entry = start & PM_ADDR_MASK;
+
+ if (!iommu_feature(iommu, FEATURE_SNP))
+ return;
+
+ /* Note:
+ * Re-purpose Exclusion base/limit registers for Completion wait
+ * write-back base/limit.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+ &entry, sizeof(entry));
+
+ /* Note:
+ * Default to 4 Kbytes, which can be specified by setting base
+ * address equal to the limit address.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+ &entry, sizeof(entry));
+}
+
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
@@ -1901,6 +1924,9 @@ static int __init amd_iommu_init_pci(void)
ret = iommu_init_pci(iommu);
if (ret)
break;
+
+ /* Need to setup range after PCI init */
+ iommu_set_cwwb_range(iommu);
}
/*
--
2.17.1
On Wed, Sep 23, 2020 at 12:13:44PM +0000, Suravee Suthikulpanit wrote:
> Suravee Suthikulpanit (3):
> iommu: amd: Use 4K page for completion wait write-back semaphore
> iommu: amd: Add support for RMP_PAGE_FAULT and RMP_HW_ERR
> iommu: amd: Re-purpose Exclusion range registers to support SNP CWWB
Applied, thanks. I am slightly concerned about the re-purposing of the
exclusion-range registers based on a feature bit being set. This makes
the hardware incompatible with older IOMMU drivers which do not check the
FEATURE_SNP bit.
It will probably work in this case, as the firmware on systems with
IOMMU-SNP support will not declare exclusion ranges at all, and
exclusion ranges in the IOMMU hardware have been a bad idea since
forever, but it would have been nicer if the hardware had actually
provided a bit to enable this behavior.
Regards,
Joerg
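To make the concern above concrete, a purely illustrative sketch (not code
from this series): an older driver programs the exclusion-range registers
without any guard of this form, so on SNP-capable hardware it would silently
clobber the CWWB base/limit.

static void set_exclusion_range(struct amd_iommu *iommu)
{
	/* Without this check, a pre-SNP driver writing an exclusion
	 * range here would overwrite the CWWB base/limit registers. */
	if (iommu_feature(iommu, FEATURE_SNP))
		return;

	/* ... program MMIO_EXCL_BASE_OFFSET / MMIO_EXCL_LIMIT_OFFSET ... */
}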