2024-02-29 09:54:31

by Baolu Lu

Subject: [PATCH 1/3] iommu/vt-d: Setup scalable mode context entry in probe path

In contrast to legacy mode, scalable mode configures the DMA translation
table in the PASID table entry rather than in the context entry. For this
reason, it is more appropriate to set up the scalable mode context entry
in the device_probe callback and point it at the appropriate PASID table.

The iommu domain attach/detach operations only affect the PASID table
entry. Therefore, there is no need to modify the context entry when
configuring the translation type and page table.

The only exception is the kdump case, where context entry setup is
postponed until the device driver invokes the first DMA interface.

Signed-off-by: Lu Baolu <[email protected]>
---
drivers/iommu/intel/pasid.h | 1 +
drivers/iommu/intel/iommu.c | 12 ++++
drivers/iommu/intel/pasid.c | 116 ++++++++++++++++++++++++++++++++++++
3 files changed, 129 insertions(+)

diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 42fda97fd851..da9978fef7ac 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -318,5 +318,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
+int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f74d42d3258f..9b96d36b9d2a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4073,6 +4073,10 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}

+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ context_copied(iommu, info->bus, info->devfn))
+ return intel_pasid_setup_sm_context(dev);
+
return 0;
}

@@ -4386,11 +4390,19 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
dev_err(dev, "PASID table allocation failed\n");
goto clear_rbtree;
}
+
+ if (!context_copied(iommu, info->bus, info->devfn)) {
+ ret = intel_pasid_setup_sm_context(dev);
+ if (ret)
+ goto free_table;
+ }
}

intel_iommu_debugfs_create_dev(info);

return &iommu->iommu;
+free_table:
+ intel_pasid_free_table(dev);
clear_rbtree:
device_rbtree_remove(info);
free:
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 52068cf52fe2..4ea8f35bd460 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -750,3 +750,119 @@ void intel_pasid_teardown_sm_context(struct device *dev)

pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
}
+
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+ unsigned long pds, max_pde;
+
+ max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
+ if (pds < 7)
+ return 0;
+
+ return pds - 7;
+}
+
+static int context_entry_set_pasid_table(struct context_entry *context,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct pasid_table *table = info->pasid_table;
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long pds;
+
+ context_clear_entry(context);
+
+ pds = context_get_sm_pds(table);
+ context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
+
+ if (info->ats_supported)
+ context_set_sm_dte(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
+ if (info->pasid_supported)
+ context_set_pasid(context);
+
+ context_set_fault_enable(context);
+ context_set_present(context);
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
+
+ return 0;
+}
+
+static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, true);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+ spin_unlock(&iommu->lock);
+ return 0;
+ }
+
+ /*
+ * For kdump case, at this point, the device is supposed to finish
+ * reset at its driver probe stage, so no in-flight DMA will exist,
+ * and we don't need to worry anymore hereafter.
+ */
+ if (context_copied(iommu, bus, devfn)) {
+ context_clear_entry(context);
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
+ sm_context_flush_caches(dev);
+ clear_context_copied(iommu, bus, devfn);
+ }
+
+ context_entry_set_pasid_table(context, dev);
+ spin_unlock(&iommu->lock);
+
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entry we don't need to flush the caches.
+ */
+ if (cap_caching_mode(iommu->cap))
+ sm_context_flush_caches(dev);
+
+ return 0;
+}
+
+static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev != &pdev->dev)
+ return 0;
+
+ return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+/*
+ * Set the device's PASID table to its context table entry.
+ *
+ * The PASID table is set to the context entries of both device itself
+ * and its alias requester ID for DMA.
+ */
+int intel_pasid_setup_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev))
+ return device_pasid_table_setup(dev, info->bus, info->devfn);
+
+ return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
+}
--
2.34.1
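
As a worked example of the PDTS encoding implemented by
context_get_sm_pds() above, here is a minimal standalone sketch. It is
not kernel code: it assumes max_pasid is a power of two (as in the
driver), uses __builtin_ctzl() in place of the kernel's
find_first_bit(), and takes PASID_PDE_SHIFT to be 6 as defined in
drivers/iommu/intel/pasid.h.

#include <stdio.h>

#define PASID_PDE_SHIFT 6  /* 64 PASID-table entries per directory entry */

/*
 * Mirrors context_get_sm_pds(): a PDTS value of X encodes a PASID
 * directory with 2^(X + 7) entries.
 */
static unsigned long sm_pds(unsigned long max_pasid)
{
        unsigned long max_pde = max_pasid >> PASID_PDE_SHIFT;
        unsigned long pds;

        if (!max_pde)
                return 0;

        /* Index of the lowest set bit, like find_first_bit(). */
        pds = (unsigned long)__builtin_ctzl(max_pde);

        return pds < 7 ? 0 : pds - 7;
}

int main(void)
{
        /* 20-bit PASID space: 2^20 >> 6 = 2^14, so PDTS = 14 - 7 = 7. */
        unsigned long pds = sm_pds(1UL << 20);

        printf("PDTS = %lu -> %lu directory entries\n",
               pds, 1UL << (pds + 7));
        return 0;
}

A 20-bit PASID space thus yields a PASID directory with 2^14 entries,
each of which covers 2^6 PASID table entries.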



2024-03-04 07:48:40

by Tian, Kevin

Subject: RE: [PATCH 1/3] iommu/vt-d: Setup scalable mode context entry in probe path

> From: Lu Baolu <[email protected]>
> Sent: Thursday, February 29, 2024 5:48 PM
>
> +static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
> +{
> + struct device_domain_info *info = dev_iommu_priv_get(dev);
> + struct intel_iommu *iommu = info->iommu;
> + struct context_entry *context;
> +
> + spin_lock(&iommu->lock);
> + context = iommu_context_addr(iommu, bus, devfn, true);
> + if (!context) {
> + spin_unlock(&iommu->lock);
> + return -ENOMEM;
> + }
> +
> + if (context_present(context) && !context_copied(iommu, bus, devfn))
> {
> + spin_unlock(&iommu->lock);
> + return 0;
> + }
> +
> + /*
> + * For kdump case, at this point, the device is supposed to finish
> + * reset at its driver probe stage, so no in-flight DMA will exist,
> + * and we don't need to worry anymore hereafter.
> + */
> + if (context_copied(iommu, bus, devfn)) {
> + context_clear_entry(context);
> + if (!ecap_coherent(iommu->ecap))
> + clflush_cache_range(context, sizeof(*context));
> + sm_context_flush_caches(dev);
> + clear_context_copied(iommu, bus, devfn);
> + }

it's unclear to me why this doesn't need to refer to the old did as done
in the existing code. If scalable mode makes any difference, could you
extend the comment to explain it, so people can avoid similar confusion
when comparing the different paths between legacy and sm?

anyway, it's kind of a semantics change; probably worth a separate patch
to special-case sm for bisectability and then doing the cleanup...


2024-03-04 09:05:52

by Tian, Kevin

Subject: RE: [PATCH 1/3] iommu/vt-d: Setup scalable mode context entry in probe path

> From: Baolu Lu <[email protected]>
> Sent: Monday, March 4, 2024 4:24 PM
>
> On 2024/3/4 15:48, Tian, Kevin wrote:
> >> From: Lu Baolu <[email protected]>
> >> Sent: Thursday, February 29, 2024 5:48 PM
> >>
> >> + /*
> >> + * For kdump case, at this point, the device is supposed to finish
> >> + * reset at its driver probe stage, so no in-flight DMA will exist,
> >> + * and we don't need to worry anymore hereafter.
> >> + */
> >> + if (context_copied(iommu, bus, devfn)) {
> >> + context_clear_entry(context);
> >> + if (!ecap_coherent(iommu->ecap))
> >> + clflush_cache_range(context, sizeof(*context));
> >> + sm_context_flush_caches(dev);
> >> + clear_context_copied(iommu, bus, devfn);
> >> + }
> >
> > it's unclear to me why this doesn't need to refer to the old did as done
> > in the existing code. If scalable mode makes any difference, could you
> > extend the comment to explain it, so people can avoid similar confusion
> > when comparing the different paths between legacy and sm?
>
> The previous code gets the domain ID from the copied context entry:
>
> u16 did_old = context_domain_id(context);
>
> This makes no sense for scalable mode, as the domain ID has been moved
> to the PASID entry in scalable mode. As a result, did_old always gets
> 0.

The point is whether the driver needs to invalidate the cache for the old
did, which is orthogonal to using legacy or sm. If yes, then we should fix
the code to find the right did instead of ignoring it. If not, then the
legacy path should be cleaned up too, to avoid the unnecessary burden.

>
> > anyway, it's kind of a semantics change; probably worth a separate patch
> > to special-case sm for bisectability and then doing the cleanup...
>
> This change doesn't impact anything as the hardware will skip the domain
> id field in the Context-cache Invalidate Descriptor in scalable mode.
>

no semantics change, but if the old code has a bug we should fix it
instead of carrying the behavior forward.

2024-03-04 09:30:07

by Baolu Lu

Subject: Re: [PATCH 1/3] iommu/vt-d: Setup scalable mode context entry in probe path

On 2024/3/4 15:48, Tian, Kevin wrote:
>> From: Lu Baolu <[email protected]>
>> Sent: Thursday, February 29, 2024 5:48 PM
>>
>> +static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
>> +{
>> + struct device_domain_info *info = dev_iommu_priv_get(dev);
>> + struct intel_iommu *iommu = info->iommu;
>> + struct context_entry *context;
>> +
>> + spin_lock(&iommu->lock);
>> + context = iommu_context_addr(iommu, bus, devfn, true);
>> + if (!context) {
>> + spin_unlock(&iommu->lock);
>> + return -ENOMEM;
>> + }
>> +
>> + if (context_present(context) && !context_copied(iommu, bus, devfn))
>> {
>> + spin_unlock(&iommu->lock);
>> + return 0;
>> + }
>> +
>> + /*
>> + * For kdump case, at this point, the device is supposed to finish
>> + * reset at its driver probe stage, so no in-flight DMA will exist,
>> + * and we don't need to worry anymore hereafter.
>> + */
>> + if (context_copied(iommu, bus, devfn)) {
>> + context_clear_entry(context);
>> + if (!ecap_coherent(iommu->ecap))
>> + clflush_cache_range(context, sizeof(*context));
>> + sm_context_flush_caches(dev);
>> + clear_context_copied(iommu, bus, devfn);
>> + }
>
> it's unclear to me why this doesn't need to refer to the old did as done
> in the existing code. If scalable mode makes any difference, could you
> extend the comment to explain it, so people can avoid similar confusion
> when comparing the different paths between legacy and sm?

The previous code gets the domain ID from the copied context entry:

u16 did_old = context_domain_id(context);

This makes no sense for scalable mode, as the domain ID has been moved
to the PASID entry in scalable mode. As a result, did_old always gets
0.

> anyway, it's kind of a semantics change; probably worth a separate patch
> to special-case sm for bisectability and then doing the cleanup...

This change doesn't impact anything as the hardware will skip the domain
id field in the Context-cache Invalidate Descriptor in scalable mode.

Spec section 6.5.2.1 Context-cache Invalidate Descriptor:

Domain-ID (DID): ... This field is ignored by hardware when operating in
scalable mode (RTADDR_REG.TTM=01b).

Best regards,
baolu
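
To make the spec language concrete: the driver builds the context-cache
invalidation descriptor roughly as below. This is a paraphrase of
qi_flush_context() in drivers/iommu/intel/dmar.c, for illustration
only; details may vary by kernel version. The DID lands in bits 31:16
of qw0 and, per the quoted spec text, hardware simply ignores those
bits when RTADDR_REG.TTM indicates scalable mode.

/* Sketch: queue one context-cache invalidation descriptor and wait. */
static void flush_context_sketch(struct intel_iommu *iommu, u16 did,
                                 u16 sid, u8 fm, u64 type)
{
        struct qi_desc desc;

        desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) |
                   QI_CC_DID(did) |  /* ignored when RTADDR_REG.TTM = 01b */
                   QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.qw1 = 0;
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(iommu, &desc, 1, 0);
}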


2024-03-04 11:13:41

by Baolu Lu

Subject: Re: [PATCH 1/3] iommu/vt-d: Setup scalable mode context entry in probe path

On 2024/3/4 17:05, Tian, Kevin wrote:
>> From: Baolu Lu <[email protected]>
>> Sent: Monday, March 4, 2024 4:24 PM
>>
>> On 2024/3/4 15:48, Tian, Kevin wrote:
>>>> From: Lu Baolu <[email protected]>
>>>> Sent: Thursday, February 29, 2024 5:48 PM
>>>>
>>>> + /*
>>>> + * For kdump case, at this point, the device is supposed to finish
>>>> + * reset at its driver probe stage, so no in-flight DMA will exist,
>>>> + * and we don't need to worry anymore hereafter.
>>>> + */
>>>> + if (context_copied(iommu, bus, devfn)) {
>>>> + context_clear_entry(context);
>>>> + if (!ecap_coherent(iommu->ecap))
>>>> + clflush_cache_range(context, sizeof(*context));
>>>> + sm_context_flush_caches(dev);
>>>> + clear_context_copied(iommu, bus, devfn);
>>>> + }
>>>
>>> it's unclear to me why this doesn't need to refer to the old did as done
>>> in the existing code. If scalable mode makes any difference, could you
>>> extend the comment to explain it, so people can avoid similar confusion
>>> when comparing the different paths between legacy and sm?
>>
>> The previous code gets the domain ID from the copied context entry:
>>
>> u16 did_old = context_domain_id(context);
>>
>> This makes no sense for scalable mode, as the domain ID has been moved
>> to the PASID entry in scalable mode. As a result, did_old always gets
>> 0.
>
> The point is whether the driver needs to invalidate the cache for the old
> did, which is orthogonal to using legacy or sm. If yes, then we should fix
> the code to find the right did instead of ignoring it. If not, then the
> legacy path should be cleaned up too, to avoid the unnecessary burden.
>
>>
>>> anyway, it's kind of a semantics change; probably worth a separate patch
>>> to special-case sm for bisectability and then doing the cleanup...
>>
>> This change doesn't impact anything as the hardware will skip the domain
>> id field in the Context-cache Invalidate Descriptor in scalable mode.
>>
>
> no semantics change, but if the old code has a bug we should fix it
> instead of carrying the behavior forward.

The driver is required to invalidate the cache for the old did.

The previous code invalidated the cache twice, once in
intel_pasid_tear_down_entry() and another time in
domain_context_clear().

The new code attempts to eliminate this duplication by invalidating the
cache for the did during blocking domain attachment and skipping it in
sm-context-entry teardown.

Best regards,
baolu
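
For reference, the legacy-path behavior discussed above reads the old
DID out of the context entry before clearing it, roughly as in the
sketch below (paraphrased from domain_context_clear_one() in
drivers/iommu/intel/iommu.c; details vary by kernel version). In
scalable mode the DID lives in the PASID table entry instead, so this
read-back would always yield 0, which is why the probe-path setup in
this patch skips it and relies on the blocking domain attachment to
flush the old DID.

/*
 * Sketch of the legacy-mode context teardown: read the old DID while
 * the entry is still present, clear the entry, then invalidate the
 * context cache and IOTLB for that DID.
 */
static void legacy_context_teardown_sketch(struct intel_iommu *iommu,
                                           u8 bus, u8 devfn)
{
        struct context_entry *context;
        u16 did_old;

        spin_lock(&iommu->lock);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (!context) {
                spin_unlock(&iommu->lock);
                return;
        }

        /* Legacy mode keeps the DID in the context entry itself. */
        did_old = context_domain_id(context);

        context_clear_entry(context);
        __iommu_flush_cache(iommu, context, sizeof(*context));
        spin_unlock(&iommu->lock);

        iommu->flush.flush_context(iommu, did_old,
                                   (((u16)bus) << 8) | devfn,
                                   DMA_CCMD_MASK_NOBIT,
                                   DMA_CCMD_DEVICE_INVL);
        iommu->flush.flush_iotlb(iommu, did_old, 0, 0, DMA_TLB_DSI_FLUSH);
}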