From: Oleksandr Tyshchenko <[email protected]>
Hello all.
Prior to V4 this was a single patch; for V4 an additional prerequisite patch was added
as a result of the discussion at:
https://lore.kernel.org/xen-devel/alpine.DEB.2.22.394.2210211258050.3873@ubuntu-linux-20-04-desktop/
Based on:
https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git/log/?h=for-linus-6.1
Oleksandr Tyshchenko (2):
xen/virtio: Optimize the setup of "xen-grant-dma" devices
xen/virtio: Handle PCI devices which Host controller is described in
DT
arch/arm/xen/enlighten.c | 2 +-
drivers/xen/grant-dma-ops.c | 105 ++++++++++++++++++++----------------
include/xen/arm/xen-ops.h | 4 +-
include/xen/xen-ops.h | 16 ------
4 files changed, 60 insertions(+), 67 deletions(-)
--
2.25.1
From: Oleksandr Tyshchenko <[email protected]>
This is needed to avoid having to parse the same device-tree
several times for a given device.
For this to work we need to install the xen_virtio_restricted_mem_acc
callback in Arm's xen_guest_init(), which is the same callback that x86's
PV and HVM modes already use, and remove the manual assignment in
xen_setup_dma_ops(). Also we need to split the code that initializes
backend_domid into a separate function.
Prior to this patch we parsed the device-tree three times:
1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()
With this patch we parse the device-tree only once, in
xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()
Other benefits are:
- Not diverging from x86 when setting up Xen grant DMA ops
- Dropping several global functions
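For illustration, a minimal device-tree fragment using the existing
"xen,grant-dma" binding that this single parse resolves could look as
follows (just a sketch, not taken from this series; node names, addresses
and the backend domain ID of 1 are made up):

    xen_grant_dma: xen-grant-dma {
            compatible = "xen,grant-dma";
            #iommu-cells = <1>;
    };

    virtio@2000000 {
            compatible = "virtio,mmio";
            reg = <0x2000000 0x200>;
            /* The single IOMMU specifier cell carries the backend domain ID */
            iommus = <&xen_grant_dma 1>;
    };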
Signed-off-by: Oleksandr Tyshchenko <[email protected]>
---
New patch
---
arch/arm/xen/enlighten.c | 2 +-
drivers/xen/grant-dma-ops.c | 77 ++++++++++++++-----------------------
include/xen/arm/xen-ops.h | 4 +-
include/xen/xen-ops.h | 16 --------
4 files changed, 30 insertions(+), 69 deletions(-)
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 93c8ccbf2982..7d59765aef22 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
return 0;
if (IS_ENABLED(CONFIG_XEN_VIRTIO))
- virtio_set_mem_acc_cb(xen_virtio_mem_acc);
+ virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
if (!acpi_disabled)
xen_acpi_guest_init();
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index daa525df7bdc..1e797a043980 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
-static bool xen_is_dt_grant_dma_device(struct device *dev)
-{
- struct device_node *iommu_np;
- bool has_iommu;
-
- iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
- has_iommu = iommu_np &&
- of_device_is_compatible(iommu_np, "xen,grant-dma");
- of_node_put(iommu_np);
-
- return has_iommu;
-}
-
-bool xen_is_grant_dma_device(struct device *dev)
-{
- /* XXX Handle only DT devices for now */
- if (dev->of_node)
- return xen_is_dt_grant_dma_device(dev);
-
- return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
- if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
- return true;
-
- return xen_is_grant_dma_device(dev->dev.parent);
-}
-
static int xen_dt_grant_init_backend_domid(struct device *dev,
- struct xen_grant_dma_data *data)
+ domid_t *backend_domid)
{
struct of_phandle_args iommu_spec;
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
0, &iommu_spec)) {
- dev_err(dev, "Cannot parse iommus property\n");
+ dev_dbg(dev, "Cannot parse iommus property\n");
return -ESRCH;
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
iommu_spec.args_count != 1) {
- dev_err(dev, "Incompatible IOMMU node\n");
+ dev_dbg(dev, "Incompatible IOMMU node\n");
of_node_put(iommu_spec.np);
return -ESRCH;
}
@@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
* The endpoint ID here means the ID of the domain where the
* corresponding backend is running
*/
- data->backend_domid = iommu_spec.args[0];
+ *backend_domid = iommu_spec.args[0];
return 0;
}
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+ domid_t *backend_domid)
+{
+ int ret = -ENODEV;
+
+ if (dev->of_node) {
+ ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+ } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+ dev_info(dev, "Using dom0 as backend\n");
+ *backend_domid = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
{
struct xen_grant_dma_data *data;
@@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
if (!data)
goto err;
- if (dev->of_node) {
- if (xen_dt_grant_init_backend_domid(dev, data))
- goto err;
- } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
- dev_info(dev, "Using dom0 as backend\n");
- data->backend_domid = 0;
- } else {
- /* XXX ACPI device unsupported for now */
- goto err;
- }
+ data->backend_domid = backend_domid;
if (store_xen_grant_dma_data(dev, data)) {
dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
- bool ret = xen_virtio_mem_acc(dev);
+ domid_t backend_domid;
- if (ret)
- xen_grant_setup_dma_ops(dev->dev.parent);
+ if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+ xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+ return true;
+ }
- return ret;
+ return false;
}
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
index b0766a660338..70073f5a2b54 100644
--- a/include/xen/arm/xen-ops.h
+++ b/include/xen/arm/xen-ops.h
@@ -8,9 +8,7 @@
static inline void xen_setup_dma_ops(struct device *dev)
{
#ifdef CONFIG_XEN
- if (xen_is_grant_dma_device(dev))
- xen_grant_setup_dma_ops(dev);
- else if (xen_swiotlb_detect())
+ if (xen_swiotlb_detect())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
}
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index a34f4271a2e9..47f11bec5e90 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
#ifdef CONFIG_XEN_GRANT_DMA_OPS
-void xen_grant_setup_dma_ops(struct device *dev);
-bool xen_is_grant_dma_device(struct device *dev);
-bool xen_virtio_mem_acc(struct virtio_device *dev);
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
#else
-static inline void xen_grant_setup_dma_ops(struct device *dev)
-{
-}
-static inline bool xen_is_grant_dma_device(struct device *dev)
-{
- return false;
-}
-
struct virtio_device;
-static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
- return false;
-}
-
static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
return false;
--
2.25.1
From: Oleksandr Tyshchenko <[email protected]>
Use the same "xen-grant-dma" device concept for the PCI devices
behind device-tree based PCI Host controller, but with one modification.
Unlike for platform devices, we cannot use generic IOMMU bindings
(iommus property), as we need to support more flexible configuration.
The problem is that PCI devices under the single PCI Host controller
may have the backends running in different Xen domains and thus have
different endpoints ID (backend domains ID).
Add ability to deal with generic PCI-IOMMU bindings (iommu-map/
iommu-map-mask properties) which allows us to describe relationship
between PCI devices and backend domains ID properly.
To avoid having to look up for the PCI Host bridge twice and reduce
the amount of checks pass an extra struct device_node *np to
xen_dt_grant_init_backend_domid().
So with current patch the code expects iommus property for the platform
devices and iommu-map/iommu-map-mask properties for PCI devices.
The example of generated by the toolstack iommu-map property
for two PCI devices 0000:00:01.0 and 0000:00:02.0 whose
backends are running in different Xen domains with IDs 1 and 2
respectively:
iommu-map = <0x08 0xfde9 0x01 0x08 0x10 0xfde9 0x02 0x08>;
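A hedged reading of this example, using the standard PCI-IOMMU iommu-map
entry layout <rid-base phandle iommu-base length>, where 0xfde9 is assumed
to be the phandle of the "xen,grant-dma" node:

    <0x08 0xfde9 0x01 0x08>  ->  RID 0x08 (devfn of 00:01.0) translates to ID 0x01, i.e. backend domid 1
    <0x10 0xfde9 0x02 0x08>  ->  RID 0x10 (devfn of 00:02.0) translates to ID 0x02, i.e. backend domid 2

of_map_id() translates the device's RID (PCI_DEVID(bus, devfn)) through these
entries, and the resulting ID is then used as the backend domid.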
Signed-off-by: Oleksandr Tyshchenko <[email protected]>
---
Slightly RFC. This is needed to support Xen grant mappings for virtio-pci devices
on Arm at some point in the future. The Xen toolstack side is not completely ready yet.
Here, for PCI devices we use a more flexible way to pass the backend domid to the guest
than for platform devices.
Changes V1 -> V2:
- update commit description
- rebase
- rework to use generic PCI-IOMMU bindings instead of generic IOMMU bindings
Changes V2 -> V3:
- update commit description, add an example
- drop xen_dt_map_id() and squash xen_dt_get_pci_host_node() with
xen_dt_get_node()
- pass struct device_node *np to xen_is_dt_grant_dma_device() and
xen_dt_grant_init_backend_domid()
- pass domid_t *backend_domid instead of struct xen_grant_dma_data *data
to xen_dt_grant_init_backend_domid()
Changes V3 -> V4:
- just rebase on new prereq patch
"xen/virtio: Optimize the setup of "xen-grant-dma" devices"
Previous discussion is at:
https://lore.kernel.org/xen-devel/[email protected]/
https://lore.kernel.org/xen-devel/[email protected]/
https://lore.kernel.org/xen-devel/[email protected]/
Based on:
https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git/log/?h=for-linus-6.1
---
---
drivers/xen/grant-dma-ops.c | 46 +++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 7 deletions(-)
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 1e797a043980..9784a77fa3c9 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
+#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
@@ -292,15 +293,43 @@ static const struct dma_map_ops xen_grant_dma_ops = {
.dma_supported = xen_grant_dma_supported,
};
+static struct device_node *xen_dt_get_node(struct device *dev)
+{
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->bus;
+
+ /* Walk up to the root bus to look for PCI Host controller */
+ while (!pci_is_root_bus(bus))
+ bus = bus->parent;
+
+ return of_node_get(bus->bridge->parent->of_node);
+ }
+
+ return of_node_get(dev->of_node);
+}
+
static int xen_dt_grant_init_backend_domid(struct device *dev,
+ struct device_node *np,
domid_t *backend_domid)
{
- struct of_phandle_args iommu_spec;
+ struct of_phandle_args iommu_spec = { .args_count = 1 };
- if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
- 0, &iommu_spec)) {
- dev_dbg(dev, "Cannot parse iommus property\n");
- return -ESRCH;
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args)) {
+ dev_dbg(dev, "Cannot translate ID\n");
+ return -ESRCH;
+ }
+ } else {
+ if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+ 0, &iommu_spec)) {
+ dev_dbg(dev, "Cannot parse iommus property\n");
+ return -ESRCH;
+ }
}
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
@@ -324,10 +353,13 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
static int xen_grant_init_backend_domid(struct device *dev,
domid_t *backend_domid)
{
+ struct device_node *np;
int ret = -ENODEV;
- if (dev->of_node) {
- ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+ np = xen_dt_get_node(dev);
+ if (np) {
+ ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
+ of_node_put(np);
} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
dev_info(dev, "Using dom0 as backend\n");
*backend_domid = 0;
--
2.25.1
On 10/25/22 19:20, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <[email protected]>
>
> This is needed to avoid having to parse the same device-tree
> several times for a given device.
>
> For this to work we need to install the xen_virtio_restricted_mem_acc
> callback in Arm's xen_guest_init() which is same callback as x86's
> PV and HVM modes already use and remove the manual assignment in
> xen_setup_dma_ops(). Also we need to split the code to initialize
> backend_domid into a separate function.
>
> Prior to current patch we parsed the device-tree three times:
> 1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
> 2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
> 3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()
>
> With current patch we parse the device-tree only once in
> xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()
>
> Other benefits are:
> - Not diverge from x86 when setting up Xen grant DMA ops
> - Drop several global functions
>
> Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Xenia Ragiadakou <[email protected]>
I have a question unrelated to the patch.
CONFIG_XEN_VIRTIO_FORCE_GRANT cannot be used to force backend dom0 in
case xen_dt_grant_init_backend_domid() fails?
> ---
> New patch
> ---
> arch/arm/xen/enlighten.c | 2 +-
> drivers/xen/grant-dma-ops.c | 77 ++++++++++++++-----------------------
> include/xen/arm/xen-ops.h | 4 +-
> include/xen/xen-ops.h | 16 --------
> 4 files changed, 30 insertions(+), 69 deletions(-)
>
> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
> index 93c8ccbf2982..7d59765aef22 100644
> --- a/arch/arm/xen/enlighten.c
> +++ b/arch/arm/xen/enlighten.c
> @@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
> return 0;
>
> if (IS_ENABLED(CONFIG_XEN_VIRTIO))
> - virtio_set_mem_acc_cb(xen_virtio_mem_acc);
> + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
>
> if (!acpi_disabled)
> xen_acpi_guest_init();
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index daa525df7bdc..1e797a043980 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
> .dma_supported = xen_grant_dma_supported,
> };
>
> -static bool xen_is_dt_grant_dma_device(struct device *dev)
> -{
> - struct device_node *iommu_np;
> - bool has_iommu;
> -
> - iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
> - has_iommu = iommu_np &&
> - of_device_is_compatible(iommu_np, "xen,grant-dma");
> - of_node_put(iommu_np);
> -
> - return has_iommu;
> -}
> -
> -bool xen_is_grant_dma_device(struct device *dev)
> -{
> - /* XXX Handle only DT devices for now */
> - if (dev->of_node)
> - return xen_is_dt_grant_dma_device(dev);
> -
> - return false;
> -}
> -
> -bool xen_virtio_mem_acc(struct virtio_device *dev)
> -{
> - if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
> - return true;
> -
> - return xen_is_grant_dma_device(dev->dev.parent);
> -}
> -
> static int xen_dt_grant_init_backend_domid(struct device *dev,
> - struct xen_grant_dma_data *data)
> + domid_t *backend_domid)
> {
> struct of_phandle_args iommu_spec;
>
> if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
> 0, &iommu_spec)) {
> - dev_err(dev, "Cannot parse iommus property\n");
> + dev_dbg(dev, "Cannot parse iommus property\n");
> return -ESRCH;
> }
>
> if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
> iommu_spec.args_count != 1) {
> - dev_err(dev, "Incompatible IOMMU node\n");
> + dev_dbg(dev, "Incompatible IOMMU node\n");
> of_node_put(iommu_spec.np);
> return -ESRCH;
> }
> @@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
> * The endpoint ID here means the ID of the domain where the
> * corresponding backend is running
> */
> - data->backend_domid = iommu_spec.args[0];
> + *backend_domid = iommu_spec.args[0];
>
> return 0;
> }
>
> -void xen_grant_setup_dma_ops(struct device *dev)
> +static int xen_grant_init_backend_domid(struct device *dev,
> + domid_t *backend_domid)
> +{
> + int ret = -ENODEV;
> +
> + if (dev->of_node) {
> + ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
> + } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
> + dev_info(dev, "Using dom0 as backend\n");
> + *backend_domid = 0;
> + ret = 0;
> + }
> +
> + return ret;
> +}
> +
> +static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
> {
> struct xen_grant_dma_data *data;
>
> @@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
> if (!data)
> goto err;
>
> - if (dev->of_node) {
> - if (xen_dt_grant_init_backend_domid(dev, data))
> - goto err;
> - } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
> - dev_info(dev, "Using dom0 as backend\n");
> - data->backend_domid = 0;
> - } else {
> - /* XXX ACPI device unsupported for now */
> - goto err;
> - }
> + data->backend_domid = backend_domid;
>
> if (store_xen_grant_dma_data(dev, data)) {
> dev_err(dev, "Cannot store Xen grant DMA data\n");
> @@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
>
> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
> {
> - bool ret = xen_virtio_mem_acc(dev);
> + domid_t backend_domid;
>
> - if (ret)
> - xen_grant_setup_dma_ops(dev->dev.parent);
> + if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
> + xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
> + return true;
> + }
>
> - return ret;
> + return false;
> }
>
> MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
> diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
> index b0766a660338..70073f5a2b54 100644
> --- a/include/xen/arm/xen-ops.h
> +++ b/include/xen/arm/xen-ops.h
> @@ -8,9 +8,7 @@
> static inline void xen_setup_dma_ops(struct device *dev)
> {
> #ifdef CONFIG_XEN
> - if (xen_is_grant_dma_device(dev))
> - xen_grant_setup_dma_ops(dev);
> - else if (xen_swiotlb_detect())
> + if (xen_swiotlb_detect())
> dev->dma_ops = &xen_swiotlb_dma_ops;
> #endif
> }
> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
> index a34f4271a2e9..47f11bec5e90 100644
> --- a/include/xen/xen-ops.h
> +++ b/include/xen/xen-ops.h
> @@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
> #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
>
> #ifdef CONFIG_XEN_GRANT_DMA_OPS
> -void xen_grant_setup_dma_ops(struct device *dev);
> -bool xen_is_grant_dma_device(struct device *dev);
> -bool xen_virtio_mem_acc(struct virtio_device *dev);
> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
> #else
> -static inline void xen_grant_setup_dma_ops(struct device *dev)
> -{
> -}
> -static inline bool xen_is_grant_dma_device(struct device *dev)
> -{
> - return false;
> -}
> -
> struct virtio_device;
>
> -static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
> -{
> - return false;
> -}
> -
> static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
> {
> return false;
--
Xenia
On 10/25/22 19:20, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <[email protected]>
>
> Use the same "xen-grant-dma" device concept for the PCI devices
> behind device-tree based PCI Host controller, but with one modification.
> Unlike for platform devices, we cannot use generic IOMMU bindings
> (iommus property), as we need to support more flexible configuration.
> The problem is that PCI devices under the single PCI Host controller
> may have the backends running in different Xen domains and thus have
> different endpoints ID (backend domains ID).
>
> Add ability to deal with generic PCI-IOMMU bindings (iommu-map/
> iommu-map-mask properties) which allows us to describe relationship
> between PCI devices and backend domains ID properly.
>
> To avoid having to look up for the PCI Host bridge twice and reduce
> the amount of checks pass an extra struct device_node *np to
> xen_dt_grant_init_backend_domid().
>
> So with current patch the code expects iommus property for the platform
> devices and iommu-map/iommu-map-mask properties for PCI devices.
>
> The example of generated by the toolstack iommu-map property
> for two PCI devices 0000:00:01.0 and 0000:00:02.0 whose
> backends are running in different Xen domains with IDs 1 and 2
> respectively:
> iommu-map = <0x08 0xfde9 0x01 0x08 0x10 0xfde9 0x02 0x08>;
>
> Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Xenia Ragiadakou <[email protected]>
> ---
> Slightly RFC. This is needed to support Xen grant mappings for virtio-pci devices
> on Arm at some point in the future. The Xen toolstack side is not completely ready yet.
> Here, for PCI devices we use more flexible way to pass backend domid to the guest
> than for platform devices.
>
> Changes V1 -> V2:
> - update commit description
> - rebase
> - rework to use generic PCI-IOMMU bindings instead of generic IOMMU bindings
>
> Changes V2 -> V3:
> - update commit description, add an example
> - drop xen_dt_map_id() and squash xen_dt_get_pci_host_node() with
> xen_dt_get_node()
> - pass struct device_node *np to xen_is_dt_grant_dma_device() and
> xen_dt_grant_init_backend_domid()
> - pass domid_t *backend_domid instead of struct xen_grant_dma_data *data
> to xen_dt_grant_init_backend_domid()
>
> Changes V3 -> V4:
> - just rebase on new prereq patch
> "xen/virtio: Optimize the setup of "xen-grant-dma" devices"
>
> Previous discussion is at:
> https://lore.kernel.org/xen-devel/[email protected]/
> https://lore.kernel.org/xen-devel/[email protected]/
> https://lore.kernel.org/xen-devel/[email protected]/
>
> Based on:
> https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git/log/?h=for-linus-6.1
> ---
> ---
> drivers/xen/grant-dma-ops.c | 46 +++++++++++++++++++++++++++++++------
> 1 file changed, 39 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index 1e797a043980..9784a77fa3c9 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -10,6 +10,7 @@
> #include <linux/module.h>
> #include <linux/dma-map-ops.h>
> #include <linux/of.h>
> +#include <linux/pci.h>
> #include <linux/pfn.h>
> #include <linux/xarray.h>
> #include <linux/virtio_anchor.h>
> @@ -292,15 +293,43 @@ static const struct dma_map_ops xen_grant_dma_ops = {
> .dma_supported = xen_grant_dma_supported,
> };
>
> +static struct device_node *xen_dt_get_node(struct device *dev)
> +{
> + if (dev_is_pci(dev)) {
> + struct pci_dev *pdev = to_pci_dev(dev);
> + struct pci_bus *bus = pdev->bus;
> +
> + /* Walk up to the root bus to look for PCI Host controller */
> + while (!pci_is_root_bus(bus))
> + bus = bus->parent;
> +
> + return of_node_get(bus->bridge->parent->of_node);
> + }
> +
> + return of_node_get(dev->of_node);
> +}
> +
> static int xen_dt_grant_init_backend_domid(struct device *dev,
> + struct device_node *np,
> domid_t *backend_domid)
> {
> - struct of_phandle_args iommu_spec;
> + struct of_phandle_args iommu_spec = { .args_count = 1 };
>
> - if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
> - 0, &iommu_spec)) {
> - dev_dbg(dev, "Cannot parse iommus property\n");
> - return -ESRCH;
> + if (dev_is_pci(dev)) {
> + struct pci_dev *pdev = to_pci_dev(dev);
> + u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
> +
> + if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
> + iommu_spec.args)) {
> + dev_dbg(dev, "Cannot translate ID\n");
> + return -ESRCH;
> + }
> + } else {
> + if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
> + 0, &iommu_spec)) {
> + dev_dbg(dev, "Cannot parse iommus property\n");
> + return -ESRCH;
> + }
> }
>
> if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
> @@ -324,10 +353,13 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
> static int xen_grant_init_backend_domid(struct device *dev,
> domid_t *backend_domid)
> {
> + struct device_node *np;
> int ret = -ENODEV;
>
> - if (dev->of_node) {
> - ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
> + np = xen_dt_get_node(dev);
> + if (np) {
> + ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
> + of_node_put(np);
> } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
> dev_info(dev, "Using dom0 as backend\n");
> *backend_domid = 0;
--
Xenia
On 25.10.22 20:27, Xenia Ragiadakou wrote:
Hello Xenia
> On 10/25/22 19:20, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <[email protected]>
>>
>> This is needed to avoid having to parse the same device-tree
>> several times for a given device.
>>
>> For this to work we need to install the xen_virtio_restricted_mem_acc
>> callback in Arm's xen_guest_init() which is same callback as x86's
>> PV and HVM modes already use and remove the manual assignment in
>> xen_setup_dma_ops(). Also we need to split the code to initialize
>> backend_domid into a separate function.
>>
>> Prior to current patch we parsed the device-tree three times:
>> 1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
>> 2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
>> 3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()
>>
>> With current patch we parse the device-tree only once in
>> xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()
>>
>> Other benefits are:
>> - Not diverge from x86 when setting up Xen grant DMA ops
>> - Drop several global functions
>>
>> Signed-off-by: Oleksandr Tyshchenko <[email protected]>
>
> Reviewed-by: Xenia Ragiadakou <[email protected]>
Thanks!
>
> I have a question unrelated to the patch.
> CONFIG_XEN_VIRTIO_FORCE_GRANT cannot be used to force backend dom0 in
> case xen_dt_grant_init_backend_domid() fails?
Good question, as always)
The current patch doesn't change the behavior with regard to
CONFIG_XEN_VIRTIO_FORCE_GRANT usage on Arm with device-tree:
this option is not applied to device-tree based devices, as for them we
have a way to communicate the backend_domid, so there is no need to guess.
Below is my understanding, which might be wrong.
A xen_dt_grant_init_backend_domid() failure means that we didn't
retrieve the backend_domid from the device node
(either the binding is wrong or it is absent altogether; the latter means
that the device is *not* required to use grants for virtio).
I don't really know whether forcing grant usage with domid = 0 would
be a good idea in that case; it just might not work,
for instance, if the backend is in a domain other than Dom0, or it is in
Dom0 but doesn't support grant mappings.
On the other hand, CONFIG_XEN_VIRTIO_FORCE_GRANT is disabled by
default; if it gets enabled then the user is likely aware of the
consequences.
If we want to always honor CONFIG_XEN_VIRTIO_FORCE_GRANT, we would
likely need to perform the "if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))"
check first (before the check for a DT device), e.g. as sketched below.
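Just to illustrate, a rough sketch on top of this patch (not something the
series currently proposes) would reorder xen_grant_init_backend_domid()
roughly like this:

    static int xen_grant_init_backend_domid(struct device *dev,
                                            domid_t *backend_domid)
    {
            int ret = -ENODEV;

            if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
                    /* Always force grant usage with dom0 as backend */
                    dev_info(dev, "Using dom0 as backend\n");
                    *backend_domid = 0;
                    ret = 0;
            } else if (dev->of_node) {
                    ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
            } else if (xen_pv_domain()) {
                    dev_info(dev, "Using dom0 as backend\n");
                    *backend_domid = 0;
                    ret = 0;
            }

            return ret;
    }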
>
>
>> ---
>> New patch
>> ---
>> arch/arm/xen/enlighten.c | 2 +-
>> drivers/xen/grant-dma-ops.c | 77 ++++++++++++++-----------------------
>> include/xen/arm/xen-ops.h | 4 +-
>> include/xen/xen-ops.h | 16 --------
>> 4 files changed, 30 insertions(+), 69 deletions(-)
>>
>> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
>> index 93c8ccbf2982..7d59765aef22 100644
>> --- a/arch/arm/xen/enlighten.c
>> +++ b/arch/arm/xen/enlighten.c
>> @@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
>> return 0;
>> if (IS_ENABLED(CONFIG_XEN_VIRTIO))
>> - virtio_set_mem_acc_cb(xen_virtio_mem_acc);
>> + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
>> if (!acpi_disabled)
>> xen_acpi_guest_init();
>> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
>> index daa525df7bdc..1e797a043980 100644
>> --- a/drivers/xen/grant-dma-ops.c
>> +++ b/drivers/xen/grant-dma-ops.c
>> @@ -292,50 +292,20 @@ static const struct dma_map_ops
>> xen_grant_dma_ops = {
>> .dma_supported = xen_grant_dma_supported,
>> };
>> -static bool xen_is_dt_grant_dma_device(struct device *dev)
>> -{
>> - struct device_node *iommu_np;
>> - bool has_iommu;
>> -
>> - iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
>> - has_iommu = iommu_np &&
>> - of_device_is_compatible(iommu_np, "xen,grant-dma");
>> - of_node_put(iommu_np);
>> -
>> - return has_iommu;
>> -}
>> -
>> -bool xen_is_grant_dma_device(struct device *dev)
>> -{
>> - /* XXX Handle only DT devices for now */
>> - if (dev->of_node)
>> - return xen_is_dt_grant_dma_device(dev);
>> -
>> - return false;
>> -}
>> -
>> -bool xen_virtio_mem_acc(struct virtio_device *dev)
>> -{
>> - if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
>> - return true;
>> -
>> - return xen_is_grant_dma_device(dev->dev.parent);
>> -}
>> -
>> static int xen_dt_grant_init_backend_domid(struct device *dev,
>> - struct xen_grant_dma_data *data)
>> + domid_t *backend_domid)
>> {
>> struct of_phandle_args iommu_spec;
>> if (of_parse_phandle_with_args(dev->of_node, "iommus",
>> "#iommu-cells",
>> 0, &iommu_spec)) {
>> - dev_err(dev, "Cannot parse iommus property\n");
>> + dev_dbg(dev, "Cannot parse iommus property\n");
>> return -ESRCH;
>> }
>> if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
>> iommu_spec.args_count != 1) {
>> - dev_err(dev, "Incompatible IOMMU node\n");
>> + dev_dbg(dev, "Incompatible IOMMU node\n");
>> of_node_put(iommu_spec.np);
>> return -ESRCH;
>> }
>> @@ -346,12 +316,28 @@ static int
>> xen_dt_grant_init_backend_domid(struct device *dev,
>> * The endpoint ID here means the ID of the domain where the
>> * corresponding backend is running
>> */
>> - data->backend_domid = iommu_spec.args[0];
>> + *backend_domid = iommu_spec.args[0];
>> return 0;
>> }
>> -void xen_grant_setup_dma_ops(struct device *dev)
>> +static int xen_grant_init_backend_domid(struct device *dev,
>> + domid_t *backend_domid)
>> +{
>> + int ret = -ENODEV;
>> +
>> + if (dev->of_node) {
>> + ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
>> + } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) ||
>> xen_pv_domain()) {
>> + dev_info(dev, "Using dom0 as backend\n");
>> + *backend_domid = 0;
>> + ret = 0;
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static void xen_grant_setup_dma_ops(struct device *dev, domid_t
>> backend_domid)
>> {
>> struct xen_grant_dma_data *data;
>> @@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
>> if (!data)
>> goto err;
>> - if (dev->of_node) {
>> - if (xen_dt_grant_init_backend_domid(dev, data))
>> - goto err;
>> - } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
>> - dev_info(dev, "Using dom0 as backend\n");
>> - data->backend_domid = 0;
>> - } else {
>> - /* XXX ACPI device unsupported for now */
>> - goto err;
>> - }
>> + data->backend_domid = backend_domid;
>> if (store_xen_grant_dma_data(dev, data)) {
>> dev_err(dev, "Cannot store Xen grant DMA data\n");
>> @@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
>> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
>> {
>> - bool ret = xen_virtio_mem_acc(dev);
>> + domid_t backend_domid;
>> - if (ret)
>> - xen_grant_setup_dma_ops(dev->dev.parent);
>> + if (!xen_grant_init_backend_domid(dev->dev.parent,
>> &backend_domid)) {
>> + xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
>> + return true;
>> + }
>> - return ret;
>> + return false;
>> }
>> MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
>> diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
>> index b0766a660338..70073f5a2b54 100644
>> --- a/include/xen/arm/xen-ops.h
>> +++ b/include/xen/arm/xen-ops.h
>> @@ -8,9 +8,7 @@
>> static inline void xen_setup_dma_ops(struct device *dev)
>> {
>> #ifdef CONFIG_XEN
>> - if (xen_is_grant_dma_device(dev))
>> - xen_grant_setup_dma_ops(dev);
>> - else if (xen_swiotlb_detect())
>> + if (xen_swiotlb_detect())
>> dev->dma_ops = &xen_swiotlb_dma_ops;
>> #endif
>> }
>> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
>> index a34f4271a2e9..47f11bec5e90 100644
>> --- a/include/xen/xen-ops.h
>> +++ b/include/xen/xen-ops.h
>> @@ -216,26 +216,10 @@ static inline void
>> xen_preemptible_hcall_end(void) { }
>> #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
>> #ifdef CONFIG_XEN_GRANT_DMA_OPS
>> -void xen_grant_setup_dma_ops(struct device *dev);
>> -bool xen_is_grant_dma_device(struct device *dev);
>> -bool xen_virtio_mem_acc(struct virtio_device *dev);
>> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
>> #else
>> -static inline void xen_grant_setup_dma_ops(struct device *dev)
>> -{
>> -}
>> -static inline bool xen_is_grant_dma_device(struct device *dev)
>> -{
>> - return false;
>> -}
>> -
>> struct virtio_device;
>> -static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
>> -{
>> - return false;
>> -}
>> -
>> static inline bool xen_virtio_restricted_mem_acc(struct
>> virtio_device *dev)
>> {
>> return false;
>
--
Regards,
Oleksandr Tyshchenko
On Tue, 25 Oct 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <[email protected]>
>
> This is needed to avoid having to parse the same device-tree
> several times for a given device.
>
> For this to work we need to install the xen_virtio_restricted_mem_acc
> callback in Arm's xen_guest_init() which is same callback as x86's
> PV and HVM modes already use and remove the manual assignment in
> xen_setup_dma_ops(). Also we need to split the code to initialize
> backend_domid into a separate function.
>
> Prior to current patch we parsed the device-tree three times:
> 1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
> 2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
> 3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()
>
> With current patch we parse the device-tree only once in
> xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()
>
> Other benefits are:
> - Not diverge from x86 when setting up Xen grant DMA ops
> - Drop several global functions
>
> Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
> ---
> New patch
> ---
> arch/arm/xen/enlighten.c | 2 +-
> drivers/xen/grant-dma-ops.c | 77 ++++++++++++++-----------------------
> include/xen/arm/xen-ops.h | 4 +-
> include/xen/xen-ops.h | 16 --------
> 4 files changed, 30 insertions(+), 69 deletions(-)
>
> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
> index 93c8ccbf2982..7d59765aef22 100644
> --- a/arch/arm/xen/enlighten.c
> +++ b/arch/arm/xen/enlighten.c
> @@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
> return 0;
>
> if (IS_ENABLED(CONFIG_XEN_VIRTIO))
> - virtio_set_mem_acc_cb(xen_virtio_mem_acc);
> + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
>
> if (!acpi_disabled)
> xen_acpi_guest_init();
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index daa525df7bdc..1e797a043980 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
> .dma_supported = xen_grant_dma_supported,
> };
>
> -static bool xen_is_dt_grant_dma_device(struct device *dev)
> -{
> - struct device_node *iommu_np;
> - bool has_iommu;
> -
> - iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
> - has_iommu = iommu_np &&
> - of_device_is_compatible(iommu_np, "xen,grant-dma");
> - of_node_put(iommu_np);
> -
> - return has_iommu;
> -}
> -
> -bool xen_is_grant_dma_device(struct device *dev)
> -{
> - /* XXX Handle only DT devices for now */
> - if (dev->of_node)
> - return xen_is_dt_grant_dma_device(dev);
> -
> - return false;
> -}
> -
> -bool xen_virtio_mem_acc(struct virtio_device *dev)
> -{
> - if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
> - return true;
> -
> - return xen_is_grant_dma_device(dev->dev.parent);
> -}
> -
> static int xen_dt_grant_init_backend_domid(struct device *dev,
> - struct xen_grant_dma_data *data)
> + domid_t *backend_domid)
> {
> struct of_phandle_args iommu_spec;
>
> if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
> 0, &iommu_spec)) {
> - dev_err(dev, "Cannot parse iommus property\n");
> + dev_dbg(dev, "Cannot parse iommus property\n");
> return -ESRCH;
> }
>
> if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
> iommu_spec.args_count != 1) {
> - dev_err(dev, "Incompatible IOMMU node\n");
> + dev_dbg(dev, "Incompatible IOMMU node\n");
> of_node_put(iommu_spec.np);
> return -ESRCH;
> }
> @@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
> * The endpoint ID here means the ID of the domain where the
> * corresponding backend is running
> */
> - data->backend_domid = iommu_spec.args[0];
> + *backend_domid = iommu_spec.args[0];
>
> return 0;
> }
>
> -void xen_grant_setup_dma_ops(struct device *dev)
> +static int xen_grant_init_backend_domid(struct device *dev,
> + domid_t *backend_domid)
> +{
> + int ret = -ENODEV;
> +
> + if (dev->of_node) {
> + ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
> + } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
> + dev_info(dev, "Using dom0 as backend\n");
> + *backend_domid = 0;
> + ret = 0;
> + }
> +
> + return ret;
> +}
> +
> +static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
> {
> struct xen_grant_dma_data *data;
>
> @@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
> if (!data)
> goto err;
>
> - if (dev->of_node) {
> - if (xen_dt_grant_init_backend_domid(dev, data))
> - goto err;
> - } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
> - dev_info(dev, "Using dom0 as backend\n");
> - data->backend_domid = 0;
> - } else {
> - /* XXX ACPI device unsupported for now */
> - goto err;
> - }
> + data->backend_domid = backend_domid;
>
> if (store_xen_grant_dma_data(dev, data)) {
> dev_err(dev, "Cannot store Xen grant DMA data\n");
> @@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
>
> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
> {
> - bool ret = xen_virtio_mem_acc(dev);
> + domid_t backend_domid;
>
> - if (ret)
> - xen_grant_setup_dma_ops(dev->dev.parent);
> + if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
> + xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
> + return true;
> + }
>
> - return ret;
> + return false;
> }
>
> MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
> diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
> index b0766a660338..70073f5a2b54 100644
> --- a/include/xen/arm/xen-ops.h
> +++ b/include/xen/arm/xen-ops.h
> @@ -8,9 +8,7 @@
> static inline void xen_setup_dma_ops(struct device *dev)
> {
> #ifdef CONFIG_XEN
> - if (xen_is_grant_dma_device(dev))
> - xen_grant_setup_dma_ops(dev);
> - else if (xen_swiotlb_detect())
> + if (xen_swiotlb_detect())
> dev->dma_ops = &xen_swiotlb_dma_ops;
> #endif
> }
> diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
> index a34f4271a2e9..47f11bec5e90 100644
> --- a/include/xen/xen-ops.h
> +++ b/include/xen/xen-ops.h
> @@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
> #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
>
> #ifdef CONFIG_XEN_GRANT_DMA_OPS
> -void xen_grant_setup_dma_ops(struct device *dev);
> -bool xen_is_grant_dma_device(struct device *dev);
> -bool xen_virtio_mem_acc(struct virtio_device *dev);
> bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
> #else
> -static inline void xen_grant_setup_dma_ops(struct device *dev)
> -{
> -}
> -static inline bool xen_is_grant_dma_device(struct device *dev)
> -{
> - return false;
> -}
> -
> struct virtio_device;
>
> -static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
> -{
> - return false;
> -}
> -
> static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
> {
> return false;
> --
> 2.25.1
>
On Tue, 25 Oct 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <[email protected]>
>
> Use the same "xen-grant-dma" device concept for the PCI devices
> behind device-tree based PCI Host controller, but with one modification.
> Unlike for platform devices, we cannot use generic IOMMU bindings
> (iommus property), as we need to support more flexible configuration.
> The problem is that PCI devices under the single PCI Host controller
> may have the backends running in different Xen domains and thus have
> different endpoints ID (backend domains ID).
>
> Add ability to deal with generic PCI-IOMMU bindings (iommu-map/
> iommu-map-mask properties) which allows us to describe relationship
> between PCI devices and backend domains ID properly.
>
> To avoid having to look up for the PCI Host bridge twice and reduce
> the amount of checks pass an extra struct device_node *np to
> xen_dt_grant_init_backend_domid().
>
> So with current patch the code expects iommus property for the platform
> devices and iommu-map/iommu-map-mask properties for PCI devices.
>
> The example of generated by the toolstack iommu-map property
> for two PCI devices 0000:00:01.0 and 0000:00:02.0 whose
> backends are running in different Xen domains with IDs 1 and 2
> respectively:
> iommu-map = <0x08 0xfde9 0x01 0x08 0x10 0xfde9 0x02 0x08>;
>
> Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
> ---
> Slightly RFC. This is needed to support Xen grant mappings for virtio-pci devices
> on Arm at some point in the future. The Xen toolstack side is not completely ready yet.
> Here, for PCI devices we use more flexible way to pass backend domid to the guest
> than for platform devices.
>
> Changes V1 -> V2:
> - update commit description
> - rebase
> - rework to use generic PCI-IOMMU bindings instead of generic IOMMU bindings
>
> Changes V2 -> V3:
> - update commit description, add an example
> - drop xen_dt_map_id() and squash xen_dt_get_pci_host_node() with
> xen_dt_get_node()
> - pass struct device_node *np to xen_is_dt_grant_dma_device() and
> xen_dt_grant_init_backend_domid()
> - pass domid_t *backend_domid instead of struct xen_grant_dma_data *data
> to xen_dt_grant_init_backend_domid()
>
> Changes V3 -> V4:
> - just rebase on new prereq patch
> "xen/virtio: Optimize the setup of "xen-grant-dma" devices"
>
> Previous discussion is at:
> https://lore.kernel.org/xen-devel/[email protected]/
> https://lore.kernel.org/xen-devel/[email protected]/
> https://lore.kernel.org/xen-devel/[email protected]/
>
> Based on:
> https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git/log/?h=for-linus-6.1
> ---
> ---
> drivers/xen/grant-dma-ops.c | 46 +++++++++++++++++++++++++++++++------
> 1 file changed, 39 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
> index 1e797a043980..9784a77fa3c9 100644
> --- a/drivers/xen/grant-dma-ops.c
> +++ b/drivers/xen/grant-dma-ops.c
> @@ -10,6 +10,7 @@
> #include <linux/module.h>
> #include <linux/dma-map-ops.h>
> #include <linux/of.h>
> +#include <linux/pci.h>
> #include <linux/pfn.h>
> #include <linux/xarray.h>
> #include <linux/virtio_anchor.h>
> @@ -292,15 +293,43 @@ static const struct dma_map_ops xen_grant_dma_ops = {
> .dma_supported = xen_grant_dma_supported,
> };
>
> +static struct device_node *xen_dt_get_node(struct device *dev)
> +{
> + if (dev_is_pci(dev)) {
> + struct pci_dev *pdev = to_pci_dev(dev);
> + struct pci_bus *bus = pdev->bus;
> +
> + /* Walk up to the root bus to look for PCI Host controller */
> + while (!pci_is_root_bus(bus))
> + bus = bus->parent;
> +
> + return of_node_get(bus->bridge->parent->of_node);
> + }
> +
> + return of_node_get(dev->of_node);
> +}
> +
> static int xen_dt_grant_init_backend_domid(struct device *dev,
> + struct device_node *np,
> domid_t *backend_domid)
> {
> - struct of_phandle_args iommu_spec;
> + struct of_phandle_args iommu_spec = { .args_count = 1 };
>
> - if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
> - 0, &iommu_spec)) {
> - dev_dbg(dev, "Cannot parse iommus property\n");
> - return -ESRCH;
> + if (dev_is_pci(dev)) {
> + struct pci_dev *pdev = to_pci_dev(dev);
> + u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
> +
> + if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
> + iommu_spec.args)) {
> + dev_dbg(dev, "Cannot translate ID\n");
> + return -ESRCH;
> + }
> + } else {
> + if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
> + 0, &iommu_spec)) {
> + dev_dbg(dev, "Cannot parse iommus property\n");
> + return -ESRCH;
> + }
> }
>
> if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
> @@ -324,10 +353,13 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
> static int xen_grant_init_backend_domid(struct device *dev,
> domid_t *backend_domid)
> {
> + struct device_node *np;
> int ret = -ENODEV;
>
> - if (dev->of_node) {
> - ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
> + np = xen_dt_get_node(dev);
> + if (np) {
> + ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
> + of_node_put(np);
> } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
> dev_info(dev, "Using dom0 as backend\n");
> *backend_domid = 0;
> --
> 2.25.1
>