2022-03-01 19:48:01

by Deepak Kumar Singh

Subject: [PATCH V5 1/2] soc: qcom: smem: map only partitions used by local HOST

The SMEM driver IO-maps the complete SMEM region, so the CPU can perform
a speculative read into a partition for which the local HOST has no
access permission, resulting in a NOC error.

Map only those partitions which are accessible to the local HOST.
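
For illustration, a minimal sketch of the approach (the helper name and the
surrounding plumbing are hypothetical, not the exact code in this patch):
walk the partition table and ioremap only the entries whose host fields
include the local HOST, instead of mapping the whole region up front.

static int map_local_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_ptable *ptable = smem->ptable;
	u32 i;

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		struct smem_ptable_entry *entry = &ptable->entry[i];
		u16 host0 = le16_to_cpu(entry->host0);
		u16 host1 = le16_to_cpu(entry->host1);
		struct smem_partition *part;

		/* Skip partitions the local HOST may not access. */
		if (host0 != local_host && host1 != local_host)
			continue;

		/* The real code also validates the remote host index. */
		part = &smem->partitions[host0 == local_host ? host1 : host0];
		part->phys_base = smem->regions[0].aux_base +
				  le32_to_cpu(entry->offset);
		part->size = le32_to_cpu(entry->size);
		part->cacheline = le32_to_cpu(entry->cacheline);
		part->virt_base = devm_ioremap_wc(smem->dev, part->phys_base,
						  part->size);
		if (!part->virt_base)
			return -ENOMEM;
	}

	return 0;
}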

Signed-off-by: Deepak Kumar Singh <[email protected]>
---
drivers/soc/qcom/smem.c | 226 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 164 insertions(+), 62 deletions(-)

diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index e2057d8..2dde9b6 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -195,6 +195,20 @@ struct smem_partition_header {
__le32 reserved[3];
};

+/**
+ * struct smem_partition - describes smem partition
+ * @virt_base: starting virtual address of partition
+ * @phys_base: starting physical address of partition
+ * @cacheline: alignment for "cached" entries
+ * @size: size of partition
+ */
+struct smem_partition {
+ void __iomem *virt_base;
+ phys_addr_t phys_base;
+ size_t cacheline;
+ size_t size;
+};
+
static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };

/**
@@ -250,11 +264,9 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
- * @global_partition: pointer to global partition when in use
- * @global_cacheline: cacheline size for global partition
- * @partitions: list of pointers to partitions affecting the current
- * processor/host
- * @cacheline: list of cacheline sizes for each host
+ * @ptable: virtual base of partition table
+ * @global_partition: describes the global partition when in use
+ * @partitions: list of partitions of current processor/host
* @item_count: max accepted item number
* @socinfo: platform device pointer
* @num_regions: number of @regions
@@ -265,12 +277,11 @@ struct qcom_smem {

struct hwspinlock *hwlock;

- struct smem_partition_header *global_partition;
- size_t global_cacheline;
- struct smem_partition_header *partitions[SMEM_HOST_COUNT];
- size_t cacheline[SMEM_HOST_COUNT];
u32 item_count;
struct platform_device *socinfo;
+ struct smem_ptable *ptable;
+ struct smem_partition global_partition;
+ struct smem_partition partitions[SMEM_HOST_COUNT];

unsigned num_regions;
struct smem_region regions[];
@@ -348,14 +359,17 @@ static struct qcom_smem *__smem;
#define HWSPINLOCK_TIMEOUT 1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
+ struct smem_partition *part,
unsigned item,
size_t size)
{
struct smem_private_entry *hdr, *end;
+ struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;

+ phdr = (struct smem_partition_header __force *)part->virt_base;
+
hdr = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);
@@ -442,7 +456,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
int ret;

@@ -464,12 +478,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
if (ret)
return ret;

- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
} else {
ret = qcom_smem_alloc_global(__smem, item, size);
}
@@ -511,12 +525,14 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
- size_t cacheline,
+ struct smem_partition *part,
unsigned item,
size_t *size)
{
struct smem_private_entry *e, *end;
+ struct smem_partition_header *phdr;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;

e = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
@@ -538,7 +554,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,

/* Item was not found in the uncached list, search the cached list */

- e = phdr_to_first_cached_entry(phdr, cacheline);
+ e = phdr_to_first_cached_entry(phdr, part->cacheline);
end = phdr_to_last_cached_entry(phdr);

while (e > end) {
@@ -553,7 +569,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
return cached_entry_to_item(e);
}

- e = cached_entry_next(e, cacheline);
+ e = cached_entry_next(e, part->cacheline);
}

return ERR_PTR(-ENOENT);
@@ -576,9 +592,8 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
- size_t cacheln;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);

@@ -594,14 +609,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (ret)
return ERR_PTR(ret);

- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- cacheln = __smem->cacheline[host];
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- cacheln = __smem->global_cacheline;
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ptr = qcom_smem_get_private(__smem, part, item, size);
} else {
ptr = qcom_smem_get_global(__smem, item, size);
}
@@ -622,6 +635,7 @@ EXPORT_SYMBOL(qcom_smem_get);
*/
int qcom_smem_get_free_space(unsigned host)
{
+ struct smem_partition *part;
struct smem_partition_header *phdr;
struct smem_header *header;
unsigned ret;
@@ -629,12 +643,14 @@ int qcom_smem_get_free_space(unsigned host)
if (!__smem)
return -EPROBE_DEFER;

- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
} else {
@@ -646,6 +662,11 @@ int qcom_smem_get_free_space(unsigned host)
}
EXPORT_SYMBOL(qcom_smem_get_free_space);

+static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+{
+ return base && (addr >= base && addr < base + size);
+}
+
/**
* qcom_smem_virt_to_phys() - return the physical address associated
* with an smem item pointer (previously returned by qcom_smem_get()
@@ -655,17 +676,36 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
*/
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
- unsigned i;
+ struct smem_partition *part;
+ struct smem_region *area;
+ u64 offset;
+ u32 i;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ part = &__smem->partitions[i];
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+ }
+
+ part = &__smem->global_partition;
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }

for (i = 0; i < __smem->num_regions; i++) {
- struct smem_region *region = &__smem->regions[i];
+ area = &__smem->regions[i];

- if (p < region->virt_base)
- continue;
- if (p < region->virt_base + region->size) {
- u64 offset = p - region->virt_base;
+ if (addr_in_range(area->virt_base, area->size, p)) {
+ offset = p - area->virt_base;

- return region->aux_base + offset;
+ return (phys_addr_t)area->aux_base + offset;
}
}

@@ -689,7 +729,7 @@ static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
struct smem_ptable *ptable;
u32 version;

- ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ ptable = smem->ptable;
if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
return ERR_PTR(-ENOENT);

@@ -728,9 +768,14 @@ qcom_smem_partition_header(struct qcom_smem *smem,
struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
struct smem_partition_header *header;
+ u32 phys_addr;
u32 size;

- header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+ header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
+
+ if (!header)
+ return NULL;

if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
@@ -772,7 +817,7 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
bool found = false;
int i;

- if (smem->global_partition) {
+ if (smem->global_partition.virt_base) {
dev_err(smem->dev, "Already found the global partition\n");
return -EINVAL;
}
@@ -807,8 +852,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
if (!header)
return -EINVAL;

- smem->global_partition = header;
- smem->global_cacheline = le32_to_cpu(entry->cacheline);
+ smem->global_partition.virt_base = (void __iomem *)header;
+ smem->global_partition.phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->global_partition.size = le32_to_cpu(entry->size);
+ smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);

return 0;
}
@@ -848,7 +896,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
return -EINVAL;
}

- if (smem->partitions[remote_host]) {
+ if (smem->partitions[remote_host].virt_base) {
dev_err(smem->dev, "duplicate host %hu\n", remote_host);
return -EINVAL;
}
@@ -857,13 +905,47 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
if (!header)
return -EINVAL;

- smem->partitions[remote_host] = header;
- smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
+ smem->partitions[remote_host].virt_base = (void __iomem *)header;
+ smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->partitions[remote_host].size = le32_to_cpu(entry->size);
+ smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
}

return 0;
}

+static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
+{
+ u32 ptable_start;
+
+ /* map starting 4K for smem header */
+ region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
+ ptable_start = region->aux_base + region->size - SZ_4K;
+ /* map last 4k for toc */
+ smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
+
+ if (!region->virt_base || !smem->ptable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
+{
+ u32 phys_addr;
+
+ phys_addr = smem->regions[0].aux_base;
+
+ smem->regions[0].size = size;
+ smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
+
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
struct smem_region *region)
{
@@ -894,10 +976,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
struct smem_header *header;
struct reserved_mem *rmem;
struct qcom_smem *smem;
+ unsigned long flags;
size_t array_size;
int num_regions;
int hwlock_id;
u32 version;
+ u32 size;
int ret;
int i;

@@ -933,7 +1017,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
return ret;
}

- for (i = 0; i < num_regions; i++) {
+
+ ret = qcom_smem_map_toc(smem, &smem->regions[0]);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < num_regions; i++) {
smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
smem->regions[i].aux_base,
smem->regions[i].size);
@@ -950,7 +1039,30 @@ static int qcom_smem_probe(struct platform_device *pdev)
return -EINVAL;
}

+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
+ if (ret)
+ return ret;
+ size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
+ hwspin_unlock_irqrestore(smem->hwlock, &flags);
+
version = qcom_smem_get_sbl_version(smem);
+ /*
+ * smem header mapping is required only in heap version scheme, so unmap
+ * it here. It will be remapped in qcom_smem_map_global() when whole
+ * partition is mapped again.
+ */
+ devm_iounmap(smem->dev, smem->regions[0].virt_base);
switch (version >> 16) {
case SMEM_GLOBAL_PART_VERSION:
ret = qcom_smem_set_global_partition(smem);
@@ -959,6 +1071,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->item_count = qcom_smem_get_item_count(smem);
break;
case SMEM_GLOBAL_HEAP_VERSION:
+ qcom_smem_map_global(smem, size);
smem->item_count = SMEM_ITEM_COUNT;
break;
default:
@@ -971,17 +1084,6 @@ static int qcom_smem_probe(struct platform_device *pdev)
if (ret < 0 && ret != -ENOENT)
return ret;

- hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
- if (hwlock_id < 0) {
- if (hwlock_id != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
- return hwlock_id;
- }
-
- smem->hwlock = hwspin_lock_request_specific(hwlock_id);
- if (!smem->hwlock)
- return -ENXIO;
-
__smem = smem;

smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
--
2.7.4


2022-03-02 16:41:09

by Deepak Kumar Singh

Subject: [PATCH V5 2/2] soc: qcom: smem: validate fields of shared structures

Structures in shared memory that can be modified by remote processors
may carry untrusted values; they should be validated before use.

Add proper validation before using fields of shared structures.
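
The pattern, condensed for illustration (hypothetical helper; the partition
base and size come from the locally computed mapping and are trusted, only
the entry fields read from shared memory are not):

static void *entry_to_item_checked(struct smem_partition *part,
				   struct smem_private_entry *e, size_t *size)
{
	void *p_end = (void __force *)part->virt_base + part->size;
	u32 e_size = le32_to_cpu(e->size);
	u32 padding = le16_to_cpu(e->padding_data);
	void *item;

	/* A remote processor may have written arbitrary values here. */
	if (WARN_ON(e_size > part->size || padding > e_size))
		return ERR_PTR(-EINVAL);

	item = uncached_entry_to_item(e);
	if (WARN_ON(item > p_end))
		return ERR_PTR(-EINVAL);

	if (size)
		*size = e_size - padding;

	return item;
}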

Signed-off-by: Deepak Kumar Singh <[email protected]>
---
drivers/soc/qcom/smem.c | 79 ++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 69 insertions(+), 10 deletions(-)

diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 2dde9b6..7733989 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -367,13 +367,18 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;
+ void *p_end;

phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;

hdr = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);

+ if (WARN_ON((void *)end > p_end || cached > p_end))
+ return -EINVAL;
+
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY)
goto bad_canary;
@@ -383,6 +388,9 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
hdr = uncached_entry_next(hdr);
}

+ if (WARN_ON((void *)hdr > p_end))
+ return -EINVAL;
+
/* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8);
if ((void *)hdr + alloc_size > cached) {
@@ -501,6 +509,8 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
struct smem_header *header;
struct smem_region *region;
struct smem_global_entry *entry;
+ u64 entry_offset;
+ u32 e_size;
u32 aux_base;
unsigned i;

@@ -515,9 +525,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
region = &smem->regions[i];

if ((u32)region->aux_base == aux_base || !aux_base) {
+ e_size = le32_to_cpu(entry->size);
+ entry_offset = le32_to_cpu(entry->offset);
+
+ if (WARN_ON(e_size + entry_offset > region->size))
+ return ERR_PTR(-EINVAL);
+
if (size != NULL)
- *size = le32_to_cpu(entry->size);
- return region->virt_base + le32_to_cpu(entry->offset);
+ *size = e_size;
+
+ return region->virt_base + entry_offset;
}
}

@@ -531,8 +548,12 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
{
struct smem_private_entry *e, *end;
struct smem_partition_header *phdr;
+ void *item_ptr, *p_end;
+ u32 padding_data;
+ u32 e_size;

phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;

e = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
@@ -542,36 +563,65 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
goto invalid_canary;

if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = uncached_entry_to_item(e);
+ if (WARN_ON(item_ptr > p_end))
+ return ERR_PTR(-EINVAL);

- return uncached_entry_to_item(e);
+ return item_ptr;
}

e = uncached_entry_next(e);
}

+ if (WARN_ON((void *)e > p_end))
+ return ERR_PTR(-EINVAL);
+
/* Item was not found in the uncached list, search the cached list */

e = phdr_to_first_cached_entry(phdr, part->cacheline);
end = phdr_to_last_cached_entry(phdr);

+ if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+ return ERR_PTR(-EINVAL);
+
while (e > end) {
if (e->canary != SMEM_PRIVATE_CANARY)
goto invalid_canary;

if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }

- return cached_entry_to_item(e);
+ item_ptr = cached_entry_to_item(e);
+ if (WARN_ON(item_ptr < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
}

e = cached_entry_next(e, part->cacheline);
}

+ if (WARN_ON((void *)e < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
return ERR_PTR(-ENOENT);

invalid_canary:
@@ -648,14 +698,23 @@ int qcom_smem_get_free_space(unsigned host)
phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
} else if (__smem->global_partition.virt_base) {
part = &__smem->global_partition;
phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
+
+ if (ret > __smem->regions[0].size)
+ return -EINVAL;
}

return ret;
--
2.7.4

2022-03-03 00:46:54

by kernel test robot

Subject: Re: [PATCH V5 1/2] soc: qcom: smem: map only partitions used by local HOST

Hi Deepak,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.17-rc6 next-20220302]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/Deepak-Kumar-Singh/soc-qcom-smem-map-only-partitions-used-by-local-HOST/20220301-231925
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 719fce7539cd3e186598e2aed36325fe892150cf
config: mips-randconfig-s032-20220302 (https://download.01.org/0day-ci/archive/20220303/[email protected]/config)
compiler: mipsel-linux-gcc (GCC) 11.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.4-dirty
# https://github.com/0day-ci/linux/commit/de1c04b67e5fb3075d0e655769a200cad55b02d9
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Deepak-Kumar-Singh/soc-qcom-smem-map-only-partitions-used-by-local-HOST/20220301-231925
git checkout de1c04b67e5fb3075d0e655769a200cad55b02d9
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=mips SHELL=/bin/bash drivers/misc/ drivers/soc/qcom/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>


sparse warnings: (new ones prefixed by >>)
command-line: note: in included file:
builtin:1:9: sparse: sparse: preprocessor token __ATOMIC_ACQUIRE redefined
builtin:0:0: sparse: this was the original definition
builtin:1:9: sparse: sparse: preprocessor token __ATOMIC_SEQ_CST redefined
builtin:0:0: sparse: this was the original definition
builtin:1:9: sparse: sparse: preprocessor token __ATOMIC_ACQ_REL redefined
builtin:0:0: sparse: this was the original definition
builtin:1:9: sparse: sparse: preprocessor token __ATOMIC_RELEASE redefined
builtin:0:0: sparse: this was the original definition
drivers/soc/qcom/smem.c:422:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_header *header @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:422:16: sparse: expected struct smem_header *header
drivers/soc/qcom/smem.c:422:16: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:507:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_header *header @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:507:16: sparse: expected struct smem_header *header
drivers/soc/qcom/smem.c:507:16: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:520:50: sparse: sparse: incorrect type in return expression (different address spaces) @@ expected void * @@ got void [noderef] __iomem * @@
drivers/soc/qcom/smem.c:520:50: sparse: expected void *
drivers/soc/qcom/smem.c:520:50: sparse: got void [noderef] __iomem *
drivers/soc/qcom/smem.c:648:22: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_partition_header *phdr @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:648:22: sparse: expected struct smem_partition_header *phdr
drivers/soc/qcom/smem.c:648:22: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:653:22: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_partition_header *phdr @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:653:22: sparse: expected struct smem_partition_header *phdr
drivers/soc/qcom/smem.c:653:22: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:657:24: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_header *header @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:657:24: sparse: expected struct smem_header *header
drivers/soc/qcom/smem.c:657:24: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:667:30: sparse: sparse: incompatible types in comparison expression (different address spaces):
drivers/soc/qcom/smem.c:667:30: sparse: void *
drivers/soc/qcom/smem.c:667:30: sparse: void [noderef] __iomem *
drivers/soc/qcom/smem.c:688:36: sparse: sparse: subtraction of different types can't work (different address spaces)
drivers/soc/qcom/smem.c:697:28: sparse: sparse: subtraction of different types can't work (different address spaces)
drivers/soc/qcom/smem.c:706:36: sparse: sparse: subtraction of different types can't work (different address spaces)
drivers/soc/qcom/smem.c:721:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_header *header @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:721:16: sparse: expected struct smem_header *header
drivers/soc/qcom/smem.c:721:16: sparse: got void [noderef] __iomem *virt_base
drivers/soc/qcom/smem.c:754:57: sparse: sparse: restricted __le32 degrades to integer
drivers/soc/qcom/smem.c:775:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_partition_header *header @@ got void [noderef] __iomem * @@
drivers/soc/qcom/smem.c:775:16: sparse: expected struct smem_partition_header *header
drivers/soc/qcom/smem.c:775:16: sparse: got void [noderef] __iomem *
drivers/soc/qcom/smem.c:926:22: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_ptable *ptable @@ got void [noderef] __iomem * @@
drivers/soc/qcom/smem.c:926:22: sparse: expected struct smem_ptable *ptable
drivers/soc/qcom/smem.c:926:22: sparse: got void [noderef] __iomem *
drivers/soc/qcom/smem.c:1035:16: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct smem_header *header @@ got void [noderef] __iomem *virt_base @@
drivers/soc/qcom/smem.c:1035:16: sparse: expected struct smem_header *header
drivers/soc/qcom/smem.c:1035:16: sparse: got void [noderef] __iomem *virt_base
>> drivers/soc/qcom/smem.c:1056:31: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void const volatile [noderef] __iomem *mem @@ got restricted __le32 * @@
drivers/soc/qcom/smem.c:1056:31: sparse: expected void const volatile [noderef] __iomem *mem
drivers/soc/qcom/smem.c:1056:31: sparse: got restricted __le32 *
drivers/soc/qcom/smem.c:1056:67: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void const volatile [noderef] __iomem *mem @@ got restricted __le32 * @@
drivers/soc/qcom/smem.c:1056:67: sparse: expected void const volatile [noderef] __iomem *mem
drivers/soc/qcom/smem.c:1056:67: sparse: got restricted __le32 *
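
For context: the new warning at line 1056 is sparse pointing out that
readl_relaxed() expects an __iomem pointer while 'header' is a plain
struct smem_header pointer. One possible way to keep the annotations
consistent (a sketch only, not necessarily how this will be fixed) is to
read through an __iomem-qualified pointer for the locked access:

	struct smem_header __iomem *header = smem->regions[0].virt_base;
	u32 size;

	size = readl_relaxed(&header->available) +
	       readl_relaxed(&header->free_offset);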

vim +1056 drivers/soc/qcom/smem.c

973
974 static int qcom_smem_probe(struct platform_device *pdev)
975 {
976 struct smem_header *header;
977 struct reserved_mem *rmem;
978 struct qcom_smem *smem;
979 unsigned long flags;
980 size_t array_size;
981 int num_regions;
982 int hwlock_id;
983 u32 version;
984 u32 size;
985 int ret;
986 int i;
987
988 num_regions = 1;
989 if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
990 num_regions++;
991
992 array_size = num_regions * sizeof(struct smem_region);
993 smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
994 if (!smem)
995 return -ENOMEM;
996
997 smem->dev = &pdev->dev;
998 smem->num_regions = num_regions;
999
1000 rmem = of_reserved_mem_lookup(pdev->dev.of_node);
1001 if (rmem) {
1002 smem->regions[0].aux_base = rmem->base;
1003 smem->regions[0].size = rmem->size;
1004 } else {
1005 /*
1006 * Fall back to the memory-region reference, if we're not a
1007 * reserved-memory node.
1008 */
1009 ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
1010 if (ret)
1011 return ret;
1012 }
1013
1014 if (num_regions > 1) {
1015 ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
1016 if (ret)
1017 return ret;
1018 }
1019
1020
1021 ret = qcom_smem_map_toc(smem, &smem->regions[0]);
1022 if (ret)
1023 return ret;
1024
1025 for (i = 1; i < num_regions; i++) {
1026 smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
1027 smem->regions[i].aux_base,
1028 smem->regions[i].size);
1029 if (!smem->regions[i].virt_base) {
1030 dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
1031 return -ENOMEM;
1032 }
1033 }
1034
1035 header = smem->regions[0].virt_base;
1036 if (le32_to_cpu(header->initialized) != 1 ||
1037 le32_to_cpu(header->reserved)) {
1038 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
1039 return -EINVAL;
1040 }
1041
1042 hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
1043 if (hwlock_id < 0) {
1044 if (hwlock_id != -EPROBE_DEFER)
1045 dev_err(&pdev->dev, "failed to retrieve hwlock\n");
1046 return hwlock_id;
1047 }
1048
1049 smem->hwlock = hwspin_lock_request_specific(hwlock_id);
1050 if (!smem->hwlock)
1051 return -ENXIO;
1052
1053 ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
1054 if (ret)
1055 return ret;
> 1056 size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
1057 hwspin_unlock_irqrestore(smem->hwlock, &flags);
1058
1059 version = qcom_smem_get_sbl_version(smem);
1060 /*
1061 * smem header mapping is required only in heap version scheme, so unmap
1062 * it here. It will be remapped in qcom_smem_map_global() when whole
1063 * partition is mapped again.
1064 */
1065 devm_iounmap(smem->dev, smem->regions[0].virt_base);
1066 switch (version >> 16) {
1067 case SMEM_GLOBAL_PART_VERSION:
1068 ret = qcom_smem_set_global_partition(smem);
1069 if (ret < 0)
1070 return ret;
1071 smem->item_count = qcom_smem_get_item_count(smem);
1072 break;
1073 case SMEM_GLOBAL_HEAP_VERSION:
1074 qcom_smem_map_global(smem, size);
1075 smem->item_count = SMEM_ITEM_COUNT;
1076 break;
1077 default:
1078 dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
1079 return -EINVAL;
1080 }
1081
1082 BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
1083 ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
1084 if (ret < 0 && ret != -ENOENT)
1085 return ret;
1086
1087 __smem = smem;
1088
1089 smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
1090 PLATFORM_DEVID_NONE, NULL,
1091 0);
1092 if (IS_ERR(smem->socinfo))
1093 dev_dbg(&pdev->dev, "failed to register socinfo device\n");
1094
1095 return 0;
1096 }
1097

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]