Attach and detach ops are needed to connect to a remote processor that is
already running before the remoteproc driver is probed. Implement the
remoteproc framework ops that enable this use case on AMD-Xilinx platforms.
The remote processor can also use on-chip SRAM memory (OCM) for various
purposes, for example faster code execution or data access compared to
DDR memory. Such an SRAM region is made available to remoteproc nodes
via the "sram" property. Add support in the driver to parse and use OCM
memory via the sram property.
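For orientation, this is the shape of the ops table after the series,
excerpted from the patches below (the sram patch only extends the
prepare/unprepare paths):

static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare = zynqmp_r5_rproc_prepare,
	.unprepare = zynqmp_r5_rproc_unprepare,
	.start = zynqmp_r5_rproc_start,
	.stop = zynqmp_r5_rproc_stop,
	.load = rproc_elf_load_segments,
	.parse_fw = zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check = rproc_elf_sanity_check,
	.get_boot_addr = rproc_elf_get_boot_addr,
	.kick = zynqmp_r5_rproc_kick,
	.get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,  /* new */
	.attach = zynqmp_r5_attach,                              /* new */
	.detach = zynqmp_r5_detach,                              /* new */
};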
Tanmay Shah (2):
drivers: remoteproc: xlnx: add attach detach support
drivers: remoteproc: xlnx: add sram support
drivers/remoteproc/xlnx_r5_remoteproc.c | 385 +++++++++++++++++++++++-
1 file changed, 380 insertions(+), 5 deletions(-)
base-commit: 0496190c4d42965acb31b9da1b6dac3509791062
--
2.25.1
The AMD-Xilinx ZynqMP platform contains on-chip SRAM memory (OCM).
R5 cores can access OCM, and that access is faster than DDR memory but
slower than the available TCM memories. An SRAM region can optionally
have multiple power-domains. Parse such regions from the "sram" property
and register each bank as a remoteproc carveout, as illustrated below.
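Registering each OCM bank as a carveout is what lets the remoteproc ELF
loader place firmware sections at OCM link addresses. Roughly, the core
resolves a device address through the carveout list as in this simplified
sketch (an illustration of the core's lookup, not code added by this patch):

/* Simplified view of how the remoteproc core resolves a device address. */
static void *da_to_va_sketch(struct rproc *rproc, u64 da, size_t len)
{
	struct rproc_mem_entry *mem;

	list_for_each_entry(mem, &rproc->carveouts, node) {
		/* Does the requested window fall inside this carveout? */
		if (da >= mem->da && da + len <= mem->da + mem->len)
			return mem->va + (da - mem->da);
	}

	return NULL;
}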
Signed-off-by: Tanmay Shah <[email protected]>
---
drivers/remoteproc/xlnx_r5_remoteproc.c | 221 +++++++++++++++++++++++-
1 file changed, 220 insertions(+), 1 deletion(-)
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index af7aff5e9098..47c08b013152 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -56,6 +56,21 @@ struct mem_bank_data {
char *bank_name;
};
+/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_res: sram address region information
+ * @power_domains: Array of pm domain id
+ * @num_pd: total pm domain id count
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+ struct resource sram_res;
+ int *power_domains;
+ int num_pd;
+ u32 da;
+};
+
/**
* struct mbox_info
*
@@ -109,6 +124,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
* struct zynqmp_r5_core
*
* @rsc_tbl_va: resource table virtual address
+ * @sram: Array of sram memories assigned to this core
+ * @num_sram: number of sram banks available to this core
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
@@ -120,6 +137,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
*/
struct zynqmp_r5_core {
struct resource_table *rsc_tbl_va;
+ struct zynqmp_sram_bank **sram;
+ int num_sram;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
@@ -483,6 +502,69 @@ static int add_mem_regions_carveout(struct rproc *rproc)
return 0;
}
+static int add_sram_carveouts(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ struct rproc_mem_entry *rproc_mem;
+ struct zynqmp_sram_bank *sram;
+ dma_addr_t dma_addr;
+ int da, i, j, ret;
+ size_t len;
+
+ for (i = 0; i < r5_core->num_sram; i++) {
+ sram = r5_core->sram[i];
+
+ dma_addr = (dma_addr_t)sram->sram_res.start;
+ len = resource_size(&sram->sram_res);
+ da = sram->da;
+
+ for (j = 0; j < sram->num_pd; j++) {
+ ret = zynqmp_pm_request_node(sram->power_domains[j],
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(r5_core->dev,
+ "failed to request on SRAM pd 0x%x",
+ sram->power_domains[j]);
+ goto fail_sram;
+ } else {
+ pr_err("sram pd 0x%x request success\n",
+ sram->power_domains[j]);
+ }
+ }
+
+ /* Register the SRAM region as a remoteproc carveout */
+ rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+ (dma_addr_t)dma_addr,
+ len, da,
+ zynqmp_r5_mem_region_map,
+ zynqmp_r5_mem_region_unmap,
+ sram->sram_res.name);
+
+ rproc_add_carveout(rproc, rproc_mem);
+ rproc_coredump_add_segment(rproc, da, len);
+
+ dev_err(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
+ sram->sram_res.name, dma_addr, da, len);
+ }
+
+ return 0;
+
+fail_sram:
+ /* Release current sram pd. */
+ while (--j >= 0)
+ zynqmp_pm_release_node(sram->power_domains[j]);
+
+ /* Release previously requested sram pd. */
+ while (--i >= 0) {
+ sram = r5_core->sram[i];
+ for (j = 0; j < sram->num_pd; j++)
+ zynqmp_pm_release_node(sram->power_domains[j]);
+ }
+
+ return ret;
+}
+
/*
* tcm_mem_unmap()
* @rproc: single R5 core's corresponding rproc instance
@@ -659,6 +741,12 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
return ret;
}
+ ret = add_sram_carveouts(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -673,8 +761,9 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
{
struct zynqmp_r5_core *r5_core;
+ struct zynqmp_sram_bank *sram;
u32 pm_domain_id;
- int i;
+ int i, j;
r5_core = rproc->priv;
@@ -685,6 +774,13 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
"can't turn off TCM bank 0x%x", pm_domain_id);
}
+ /* Release sram power-domains. */
+ for (i = 0; i < r5_core->num_sram; i++) {
+ sram = r5_core->sram[i];
+ for (j = 0; j < sram->num_pd; j++)
+ zynqmp_pm_release_node(sram->power_domains[j]);
+ }
+
return 0;
}
@@ -887,6 +983,123 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
return ERR_PTR(ret);
}
+static int zynqmp_r5_get_sram_pd(struct device *r5_core_dev,
+ struct device_node *sram_np, int **power_domains,
+ int *num_pd)
+{
+ struct of_phandle_args out_args;
+ int pd_count, i, ret;
+ int *pd_list;
+
+ if (!of_find_property(sram_np, "power-domains", NULL)) {
+ num_pd = 0;
+ return 0;
+ }
+
+ pd_count = of_count_phandle_with_args(sram_np, "power-domains",
+ "#power-domain-cells");
+
+ pd_list = devm_kcalloc(r5_core_dev, pd_count, sizeof(int), GFP_KERNEL);
+ if (!pd_list)
+ return -ENOMEM;
+
+ for (i = 0; i < pd_count; i++) {
+ ret = of_parse_phandle_with_args(sram_np, "power-domains",
+ "#power-domain-cells",
+ i, &out_args);
+ if (ret) {
+ dev_err(r5_core_dev, "%s: power-domains idx %d parsing failed\n",
+ sram_np->name, i);
+ return ret;
+ }
+
+ of_node_put(out_args.np);
+ pd_list[i] = out_args.args[0];
+ }
+
+ *power_domains = pd_list;
+ *num_pd = pd_count;
+
+ return 0;
+}
+
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+ struct zynqmp_sram_bank **sram, *sram_data;
+ struct device_node *np = r5_core->np;
+ struct device *dev = r5_core->dev;
+ struct device_node *sram_np;
+ int num_sram, i, ret;
+ u64 abs_addr, size;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_err(dev, "Invalid sram property, ret = %d\n",
+ num_sram);
+ return -EINVAL;
+ }
+
+ sram = devm_kcalloc(dev, num_sram,
+ sizeof(struct zynqmp_sram_bank *), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_data = devm_kzalloc(dev, sizeof(struct zynqmp_sram_bank),
+ GFP_KERNEL);
+ if (!sram_data)
+ return -ENOMEM;
+
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np) {
+ dev_err(dev, "failed to get sram %d phandle\n", i);
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ dev_err(dev, "sram device not available\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &sram_data->sram_res);
+ of_node_put(sram_np);
+ if (ret) {
+ dev_err(dev, "addr to res failed\n");
+ return ret;
+ }
+
+ /* Get SRAM device address */
+ ret = of_property_read_reg(sram_np, i, &abs_addr, &size);
+ if (ret) {
+ dev_err(dev, "failed to get reg property\n");
+ return ret;
+ }
+
+ sram_data->da = (u32)abs_addr;
+
+ ret = zynqmp_r5_get_sram_pd(r5_core->dev, sram_np,
+ &sram_data->power_domains,
+ &sram_data->num_pd);
+ if (ret) {
+ dev_err(dev, "failed to get power-domains for %d sram\n", i);
+ return ret;
+ }
+
+ sram[i] = sram_data;
+
+ dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx, num_pd=%d\n",
+ i, sram[i]->sram_res.name, sram[i]->sram_res.start,
+ sram[i]->da, resource_size(&sram[i]->sram_res),
+ sram[i]->num_pd);
+ }
+
+ r5_core->sram = sram;
+ r5_core->num_sram = num_sram;
+
+ return 0;
+}
+
static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
{
int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
@@ -1101,6 +1314,12 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
}
+
+ if (of_find_property(r5_core->np, "sram", NULL)) {
+ ret = zynqmp_r5_get_sram_banks(r5_core);
+ if (ret)
+ return ret;
+ }
}
return 0;
--
2.25.1
It is possible that the remote processor is already running before Linux
boots or before the remoteproc platform driver is probed. Implement the
remoteproc framework ops required to provide the resource table address
and to connect to or disconnect from the remote processor in that case.
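To make the expected handshake concrete, here is a minimal, hypothetical
firmware-side sketch of the header the driver looks for at the start of
the first "memory-region" entry. The field layout mirrors the struct
rsc_tbl_data added below; the addresses, sizes and function name are
illustrative only and are not part of this patch:

#include <stdint.h>

#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
			    (uint32_t)'m' << 8 | (uint32_t)'p')

/* Mirrors the layout the Linux driver reads; packed, no padding. */
struct rsc_tbl_data {
	int32_t   version;
	uint32_t  magic_num;
	uint32_t  comp_magic_num;
	uint32_t  rsc_tbl_size;
	uintptr_t rsc_tbl;
} __attribute__((packed));

/*
 * Illustrative addresses: the base of the first "memory-region" carveout
 * and the firmware's own resource table location/size. Real values come
 * from the firmware's linker script.
 */
#define SHARED_MEM_BASE   0x3ed40000UL
#define FW_RSC_TABLE_ADDR 0x3ed40100UL
#define FW_RSC_TABLE_SIZE 0x100U

/* Publish the header so the Linux driver can validate it and attach. */
static void publish_rsc_table_header(void)
{
	volatile struct rsc_tbl_data *hdr =
		(volatile struct rsc_tbl_data *)SHARED_MEM_BASE;

	hdr->version        = 1;
	hdr->magic_num      = RSC_TBL_XLNX_MAGIC;
	hdr->comp_magic_num = ~RSC_TBL_XLNX_MAGIC;
	hdr->rsc_tbl_size   = FW_RSC_TABLE_SIZE;
	hdr->rsc_tbl        = FW_RSC_TABLE_ADDR;
}

On the Linux side, zynqmp_r5_get_rsc_table_va() maps this header, validates
the magic pair, and then maps rsc_tbl/rsc_tbl_size so get_loaded_rsc_table
can hand the table to the remoteproc core.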
Signed-off-by: Tanmay Shah <[email protected]>
---
drivers/remoteproc/xlnx_r5_remoteproc.c | 164 +++++++++++++++++++++++-
1 file changed, 160 insertions(+), 4 deletions(-)
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 84243d1dff9f..af7aff5e9098 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -25,6 +25,10 @@
/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
sizeof(struct zynqmp_ipi_message))
+
+#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
+ (uint32_t)'m' << 8 | (uint32_t)'p')
+
/*
* settings for RPU cluster mode which
* reflects possible values of xlnx,cluster-mode dt-property
@@ -73,6 +77,15 @@ struct mbox_info {
struct mbox_chan *rx_chan;
};
+/* Xilinx Platform specific data structure */
+struct rsc_tbl_data {
+ const int version;
+ const u32 magic_num;
+ const u32 comp_magic_num;
+ const u32 rsc_tbl_size;
+ const uintptr_t rsc_tbl;
+} __packed;
+
/*
* Hardcoded TCM bank values. This will stay in driver to maintain backward
* compatibility with device-tree that does not have TCM information.
@@ -95,20 +108,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
/**
* struct zynqmp_r5_core
*
+ * @rsc_tbl_va: resource table virtual address
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
* @tcm_banks: array of each TCM bank data
* @rproc: rproc handle
+ * @rsc_tbl_size: resource table size retrieved from remote
* @pm_domain_id: RPU CPU power domain id
* @ipi: pointer to mailbox information
*/
struct zynqmp_r5_core {
+ struct resource_table *rsc_tbl_va;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
struct mem_bank_data **tcm_banks;
struct rproc *rproc;
+ u32 rsc_tbl_size;
u32 pm_domain_id;
struct mbox_info *ipi;
};
@@ -621,10 +638,19 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
int ret;
- ret = add_tcm_banks(rproc);
- if (ret) {
- dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
- return ret;
+ /*
+ * For the attach/detach use case, the firmware is already loaded, so
+ * TCM isn't really needed at all. Also, for security, TCM can be
+ * locked in such a case and Linux may not have access at all.
+ * So avoid adding TCM banks here; TCM power-domains are requested
+ * during the attach callback instead.
+ */
+ if (rproc->state != RPROC_DETACHED) {
+ ret = add_tcm_banks(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
+ return ret;
+ }
}
ret = add_mem_regions_carveout(rproc);
@@ -662,6 +688,123 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
return 0;
}
+static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ struct zynqmp_r5_core *r5_core;
+
+ r5_core = rproc->priv;
+
+ *size = r5_core->rsc_tbl_size;
+
+ return r5_core->rsc_tbl_va;
+}
+
+static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
+{
+ struct device *dev = r5_core->dev;
+ struct rsc_tbl_data *rsc_data_va;
+ struct resource_table *rsc_addr;
+ struct resource res_mem;
+ struct device_node *np;
+ int ret;
+
+ /*
+ * The remote processor firmware is expected to provide the resource
+ * table address via a struct rsc_tbl_data data structure. The start
+ * address of the first entry in the "memory-region" property list
+ * holds that structure, which contains the resource table address,
+ * its size, and a magic number used to validate the entry.
+ */
+ np = of_parse_phandle(r5_core->np, "memory-region", 0);
+ if (!np) {
+ dev_err(dev, "failed to get memory region dev node\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res_mem);
+ if (ret) {
+ dev_err(dev, "failed to get memory-region resource addr\n");
+ return -EINVAL;
+ }
+
+ rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
+ sizeof(struct rsc_tbl_data));
+ if (!rsc_data_va) {
+ dev_err(dev, "failed to map resource table data address\n");
+ return -EIO;
+ }
+
+ /*
+ * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found,
+ * do not consider the resource table address valid and don't attach.
+ */
+ if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
+ rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
+ dev_dbg(dev, "invalid magic number, won't attach\n");
+ return -EINVAL;
+ }
+
+ rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
+ rsc_data_va->rsc_tbl_size);
+ if (!rsc_addr) {
+ dev_err(dev, "failed to get rsc_addr\n");
+ return -EINVAL;
+ }
+
+ /*
+ * As of now, resource table version 1 is expected. Don't fail the
+ * attach, but warn users about it.
+ */
+ if (rsc_addr->ver != 1)
+ dev_warn(dev, "unexpected resource table version %d\n",
+ rsc_addr->ver);
+
+ r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
+ r5_core->rsc_tbl_va = rsc_addr;
+
+ return 0;
+}
+
+static int zynqmp_r5_attach(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ int i, pm_domain_id, ret;
+
+ /*
+ * Firmware is loaded in TCM. Request TCM power domains to notify
+ * platform management controller that TCM is in use. This will be
+ * released during unprepare callback.
+ */
+ for (i = 0; i < r5_core->tcm_bank_count; i++) {
+ pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
+ ret = zynqmp_pm_request_node(pm_domain_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0)
+ pr_warn("TCM %d can't be requested\n", i);
+ }
+
+ return 0;
+}
+
+static int zynqmp_r5_detach(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+
+ /*
+ * Generate a last notification to the remote after clearing the virtio
+ * flag. If the host kicks during detach, the remote can avoid polling
+ * the virtio reset flag and instead check it from its kick interrupt.
+ */
+ zynqmp_r5_rproc_kick(rproc, 0);
+
+ iounmap(r5_core->rsc_tbl_va);
+ r5_core->rsc_tbl_va = NULL;
+
+ return 0;
+}
+
static const struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
@@ -673,6 +816,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
.kick = zynqmp_r5_rproc_kick,
+ .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
+ .attach = zynqmp_r5_attach,
+ .detach = zynqmp_r5_detach,
};
/**
@@ -723,6 +869,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
goto free_rproc;
}
+ /*
+ * Move the rproc state to DETACHED to give a one-time opportunity to
+ * attach if firmware is already available in memory. This can happen if
+ * the firmware is loaded via a debugger or by any other agent in the
+ * system. If firmware isn't available in memory and no resource table
+ * is found, the rproc state stays OFFLINE.
+ */
+ if (!zynqmp_r5_get_rsc_table_va(r5_core))
+ r5_rproc->state = RPROC_DETACHED;
+
r5_core->rproc = r5_rproc;
return r5_core;
--
2.25.1
Hi Tanmay,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 0496190c4d42965acb31b9da1b6dac3509791062]
url: https://github.com/intel-lab-lkp/linux/commits/Tanmay-Shah/drivers-remoteproc-xlnx-add-attach-detach-support/20240503-071225
base: 0496190c4d42965acb31b9da1b6dac3509791062
patch link: https://lore.kernel.org/r/20240502231021.370047-2-tanmay.shah%40amd.com
patch subject: [PATCH 1/2] drivers: remoteproc: xlnx: add attach detach support
config: arm64-randconfig-r113-20240506 (https://download.01.org/0day-ci/archive/20240506/[email protected]/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240506/[email protected]/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
sparse warnings: (new ones prefixed by >>)
drivers/remoteproc/xlnx_r5_remoteproc.c:404:20: sparse: sparse: cast removes address space '__iomem' of expression
drivers/remoteproc/xlnx_r5_remoteproc.c:522:20: sparse: sparse: cast removes address space '__iomem' of expression
>> drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct rsc_tbl_data *rsc_data_va @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: expected struct rsc_tbl_data *rsc_data_va
drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: got void [noderef] __iomem *
>> drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct resource_table *rsc_addr @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: expected struct resource_table *rsc_addr
drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: got void [noderef] __iomem *
>> drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void volatile [noderef] __iomem *addr @@ got struct resource_table *rsc_tbl_va @@
drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: expected void volatile [noderef] __iomem *addr
drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: got struct resource_table *rsc_tbl_va
vim +731 drivers/remoteproc/xlnx_r5_remoteproc.c
702
703 static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
704 {
705 struct device *dev = r5_core->dev;
706 struct rsc_tbl_data *rsc_data_va;
707 struct resource_table *rsc_addr;
708 struct resource res_mem;
709 struct device_node *np;
710 int ret;
711
712 /**
713 * It is expected from remote processor firmware to provide resource
714 * table address via struct rsc_tbl_data data structure.
715 * Start address of first entry under "memory-region" property list
716 * contains that data structure which holds resource table address, size
717 * and some magic number to validate correct resource table entry.
718 */
719 np = of_parse_phandle(r5_core->np, "memory-region", 0);
720 if (!np) {
721 dev_err(dev, "failed to get memory region dev node\n");
722 return -EINVAL;
723 }
724
725 ret = of_address_to_resource(np, 0, &res_mem);
726 if (ret) {
727 dev_err(dev, "failed to get memory-region resource addr\n");
728 return -EINVAL;
729 }
730
> 731 rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
732 sizeof(struct rsc_tbl_data));
733 if (!rsc_data_va) {
734 dev_err(dev, "failed to map resource table data address\n");
735 return -EIO;
736 }
737
738 /**
739 * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
740 * do not consider resource table address valid and don't attach
741 */
742 if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
743 rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
744 dev_dbg(dev, "invalid magic number, won't attach\n");
745 return -EINVAL;
746 }
747
> 748 rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
749 rsc_data_va->rsc_tbl_size);
750 if (!rsc_addr) {
751 dev_err(dev, "failed to get rsc_addr\n");
752 return -EINVAL;
753 }
754
755 /**
756 * As of now resource table version 1 is expected. Don't fail to attach
757 * but warn users about it.
758 */
759 if (rsc_addr->ver != 1)
760 dev_warn(dev, "unexpected resource table version %d\n",
761 rsc_addr->ver);
762
763 r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
764 r5_core->rsc_tbl_va = rsc_addr;
765
766 return 0;
767 }
768
769 static int zynqmp_r5_attach(struct rproc *rproc)
770 {
771 struct zynqmp_r5_core *r5_core = rproc->priv;
772 int i, pm_domain_id, ret;
773
774 /*
775 * Firmware is loaded in TCM. Request TCM power domains to notify
776 * platform management controller that TCM is in use. This will be
777 * released during unprepare callback.
778 */
779 for (i = 0; i < r5_core->tcm_bank_count; i++) {
780 pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
781 ret = zynqmp_pm_request_node(pm_domain_id,
782 ZYNQMP_PM_CAPABILITY_ACCESS, 0,
783 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
784 if (ret < 0)
785 pr_warn("TCM %d can't be requested\n", i);
786 }
787
788 return 0;
789 }
790
791 static int zynqmp_r5_detach(struct rproc *rproc)
792 {
793 struct zynqmp_r5_core *r5_core = rproc->priv;
794
795 /*
796 * Generate last notification to remote after clearing virtio flag.
797 * Remote can avoid polling on virtio reset flag if kick is generated
798 * during detach by host and check virtio reset flag on kick interrupt.
799 */
800 zynqmp_r5_rproc_kick(rproc, 0);
801
> 802 iounmap(r5_core->rsc_tbl_va);
803 r5_core->rsc_tbl_va = NULL;
804
805 return 0;
806 }
807
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
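The address-space warnings above come from storing the ioremap results in
plain (non-__iomem) pointers and then dereferencing them directly. Below is
a minimal sketch of one way to keep the annotations intact, assuming the
header only needs to be read once; this is a possible direction, not the
submitted fix:

	struct rsc_tbl_data rsc_data;
	void __iomem *rsc_data_va;

	rsc_data_va = devm_ioremap_wc(dev, res_mem.start, sizeof(rsc_data));
	if (!rsc_data_va)
		return -EIO;

	/* Copy the header out of the I/O mapping before inspecting it. */
	memcpy_fromio(&rsc_data, rsc_data_va, sizeof(rsc_data));

	if (rsc_data.magic_num != RSC_TBL_XLNX_MAGIC ||
	    rsc_data.comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
		dev_dbg(dev, "invalid magic number, won't attach\n");
		return -EINVAL;
	}

The later iounmap() warning would similarly need the mapping's __iomem
annotation to be handled explicitly, for example with a __force cast at a
clearly documented boundary, or by copying the table out of the mapping.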
Hi Tanmay,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 0496190c4d42965acb31b9da1b6dac3509791062]
url: https://github.com/intel-lab-lkp/linux/commits/Tanmay-Shah/drivers-remoteproc-xlnx-add-attach-detach-support/20240503-071225
base: 0496190c4d42965acb31b9da1b6dac3509791062
patch link: https://lore.kernel.org/r/20240502231021.370047-3-tanmay.shah%40amd.com
patch subject: [PATCH 2/2] drivers: remoteproc: xlnx: add sram support
config: arm64-randconfig-r113-20240506 (https://download.01.org/0day-ci/archive/20240506/[email protected]/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240506/[email protected]/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <[email protected]>
| Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
sparse warnings: (new ones prefixed by >>)
drivers/remoteproc/xlnx_r5_remoteproc.c:423:20: sparse: sparse: cast removes address space '__iomem' of expression
drivers/remoteproc/xlnx_r5_remoteproc.c:604:20: sparse: sparse: cast removes address space '__iomem' of expression
drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct rsc_tbl_data *rsc_data_va @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse: expected struct rsc_tbl_data *rsc_data_va
drivers/remoteproc/xlnx_r5_remoteproc.c:827:21: sparse: got void [noderef] __iomem *
drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct resource_table *rsc_addr @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse: expected struct resource_table *rsc_addr
drivers/remoteproc/xlnx_r5_remoteproc.c:844:18: sparse: got void [noderef] __iomem *
drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void volatile [noderef] __iomem *addr @@ got struct resource_table *rsc_tbl_va @@
drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse: expected void volatile [noderef] __iomem *addr
drivers/remoteproc/xlnx_r5_remoteproc.c:898:24: sparse: got struct resource_table *rsc_tbl_va
>> drivers/remoteproc/xlnx_r5_remoteproc.c:995:26: sparse: sparse: Using plain integer as NULL pointer
vim +995 drivers/remoteproc/xlnx_r5_remoteproc.c
798
799 static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
800 {
801 struct device *dev = r5_core->dev;
802 struct rsc_tbl_data *rsc_data_va;
803 struct resource_table *rsc_addr;
804 struct resource res_mem;
805 struct device_node *np;
806 int ret;
807
808 /**
809 * It is expected from remote processor firmware to provide resource
810 * table address via struct rsc_tbl_data data structure.
811 * Start address of first entry under "memory-region" property list
812 * contains that data structure which holds resource table address, size
813 * and some magic number to validate correct resource table entry.
814 */
815 np = of_parse_phandle(r5_core->np, "memory-region", 0);
816 if (!np) {
817 dev_err(dev, "failed to get memory region dev node\n");
818 return -EINVAL;
819 }
820
821 ret = of_address_to_resource(np, 0, &res_mem);
822 if (ret) {
823 dev_err(dev, "failed to get memory-region resource addr\n");
824 return -EINVAL;
825 }
826
> 827 rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
828 sizeof(struct rsc_tbl_data));
829 if (!rsc_data_va) {
830 dev_err(dev, "failed to map resource table data address\n");
831 return -EIO;
832 }
833
834 /**
835 * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
836 * do not consider resource table address valid and don't attach
837 */
838 if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
839 rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
840 dev_dbg(dev, "invalid magic number, won't attach\n");
841 return -EINVAL;
842 }
843
844 rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
845 rsc_data_va->rsc_tbl_size);
846 if (!rsc_addr) {
847 dev_err(dev, "failed to get rsc_addr\n");
848 return -EINVAL;
849 }
850
851 /**
852 * As of now resource table version 1 is expected. Don't fail to attach
853 * but warn users about it.
854 */
855 if (rsc_addr->ver != 1)
856 dev_warn(dev, "unexpected resource table version %d\n",
857 rsc_addr->ver);
858
859 r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
860 r5_core->rsc_tbl_va = rsc_addr;
861
862 return 0;
863 }
864
865 static int zynqmp_r5_attach(struct rproc *rproc)
866 {
867 struct zynqmp_r5_core *r5_core = rproc->priv;
868 int i, pm_domain_id, ret;
869
870 /*
871 * Firmware is loaded in TCM. Request TCM power domains to notify
872 * platform management controller that TCM is in use. This will be
873 * released during unprepare callback.
874 */
875 for (i = 0; i < r5_core->tcm_bank_count; i++) {
876 pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
877 ret = zynqmp_pm_request_node(pm_domain_id,
878 ZYNQMP_PM_CAPABILITY_ACCESS, 0,
879 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
880 if (ret < 0)
881 pr_warn("TCM %d can't be requested\n", i);
882 }
883
884 return 0;
885 }
886
887 static int zynqmp_r5_detach(struct rproc *rproc)
888 {
889 struct zynqmp_r5_core *r5_core = rproc->priv;
890
891 /*
892 * Generate last notification to remote after clearing virtio flag.
893 * Remote can avoid polling on virtio reset flag if kick is generated
894 * during detach by host and check virtio reset flag on kick interrupt.
895 */
896 zynqmp_r5_rproc_kick(rproc, 0);
897
898 iounmap(r5_core->rsc_tbl_va);
899 r5_core->rsc_tbl_va = NULL;
900
901 return 0;
902 }
903
904 static const struct rproc_ops zynqmp_r5_rproc_ops = {
905 .prepare = zynqmp_r5_rproc_prepare,
906 .unprepare = zynqmp_r5_rproc_unprepare,
907 .start = zynqmp_r5_rproc_start,
908 .stop = zynqmp_r5_rproc_stop,
909 .load = rproc_elf_load_segments,
910 .parse_fw = zynqmp_r5_parse_fw,
911 .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
912 .sanity_check = rproc_elf_sanity_check,
913 .get_boot_addr = rproc_elf_get_boot_addr,
914 .kick = zynqmp_r5_rproc_kick,
915 .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
916 .attach = zynqmp_r5_attach,
917 .detach = zynqmp_r5_detach,
918 };
919
920 /**
921 * zynqmp_r5_add_rproc_core()
922 * Allocate and add struct rproc object for each r5f core
923 * This is called for each individual r5f core
924 *
925 * @cdev: Device node of each r5 core
926 *
927 * Return: zynqmp_r5_core object for success else error code pointer
928 */
929 static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
930 {
931 struct zynqmp_r5_core *r5_core;
932 struct rproc *r5_rproc;
933 int ret;
934
935 /* Set up DMA mask */
936 ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
937 if (ret)
938 return ERR_PTR(ret);
939
940 /* Allocate remoteproc instance */
941 r5_rproc = rproc_alloc(cdev, dev_name(cdev),
942 &zynqmp_r5_rproc_ops,
943 NULL, sizeof(struct zynqmp_r5_core));
944 if (!r5_rproc) {
945 dev_err(cdev, "failed to allocate memory for rproc instance\n");
946 return ERR_PTR(-ENOMEM);
947 }
948
949 rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
950
951 r5_rproc->auto_boot = false;
952 r5_core = r5_rproc->priv;
953 r5_core->dev = cdev;
954 r5_core->np = dev_of_node(cdev);
955 if (!r5_core->np) {
956 dev_err(cdev, "can't get device node for r5 core\n");
957 ret = -EINVAL;
958 goto free_rproc;
959 }
960
961 /* Add R5 remoteproc core */
962 ret = rproc_add(r5_rproc);
963 if (ret) {
964 dev_err(cdev, "failed to add r5 remoteproc\n");
965 goto free_rproc;
966 }
967
968 /*
969 * Move rproc state to DETACHED to give one time opportunity to attach
970 * if firmware is already available in the memory. This can happen if
971 * firmware is loaded via debugger or by any other agent in the system.
972 * If firmware isn't available in the memory and resource table isn't found,
973 * then rproc state stay OFFLINE.
974 */
975 if (!zynqmp_r5_get_rsc_table_va(r5_core))
976 r5_rproc->state = RPROC_DETACHED;
977
978 r5_core->rproc = r5_rproc;
979 return r5_core;
980
981 free_rproc:
982 rproc_free(r5_rproc);
983 return ERR_PTR(ret);
984 }
985
986 static int zynqmp_r5_get_sram_pd(struct device *r5_core_dev,
987 struct device_node *sram_np, int **power_domains,
988 int *num_pd)
989 {
990 struct of_phandle_args out_args;
991 int pd_count, i, ret;
992 int *pd_list;
993
994 if (!of_find_property(sram_np, "power-domains", NULL)) {
> 995 num_pd = 0;
996 return 0;
997 }
998
999 pd_count = of_count_phandle_with_args(sram_np, "power-domains",
1000 "#power-domain-cells");
1001
1002 pd_list = devm_kcalloc(r5_core_dev, pd_count, sizeof(int), GFP_KERNEL);
1003 if (!pd_list)
1004 return -ENOMEM;
1005
1006 for (i = 0; i < pd_count; i++) {
1007 ret = of_parse_phandle_with_args(sram_np, "power-domains",
1008 "#power-domain-cells",
1009 i, &out_args);
1010 if (ret) {
1011 dev_err(r5_core_dev, "%s: power-domains idx %d parsing failed\n",
1012 sram_np->name, i);
1013 return ret;
1014 }
1015
1016 of_node_put(out_args.np);
1017 pd_list[i] = out_args.args[0];
1018 }
1019
1020 *power_domains = pd_list;
1021 *num_pd = pd_count;
1022
1023 return 0;
1024 }
1025
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
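The new warning at drivers/remoteproc/xlnx_r5_remoteproc.c:995 points at
"num_pd = 0;", which stores 0 in the pointer argument itself rather than
zeroing the caller's count. A minimal correction (a sketch, not a submitted
patch) would dereference the out-parameter:

	if (!of_find_property(sram_np, "power-domains", NULL)) {
		*num_pd = 0;	/* zero the caller's count, not the pointer */
		return 0;
	}

While in this function, the return value of of_count_phandle_with_args() is
also worth checking, since it can return a negative errno that would then be
passed to devm_kcalloc() as a count.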