Date: 2023-06-21 23:57:52
From: Suravee Suthikulpanit
Subject: [RFC PATCH 01/21] iommu/amd: Declare helper functions as extern

Make these helper functions non-static and declare them in amd_iommu.h so that they can be reused from other files. There is no functional change intended.

Signed-off-by: Suravee Suthikulpanit <[email protected]>
---
 drivers/iommu/amd/amd_iommu.h  | 18 ++++++++++++++++++
 drivers/iommu/amd/init.c       |  6 +++---
 drivers/iommu/amd/io_pgtable.c | 18 +++++++++---------
 drivers/iommu/amd/iommu.c      | 14 +++++++-------
 4 files changed, 37 insertions(+), 19 deletions(-)
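
For illustration only (not part of this patch): once the formerly static helpers are declared in amd_iommu.h, another compilation unit under drivers/iommu/amd/ could call them directly after including the shared header. The wrapper below is a hypothetical example, not code from this series.

#include "amd_iommu.h"

/* Hypothetical caller in a new drivers/iommu/amd/ file (illustrative only). */
static void example_toggle_feature(struct amd_iommu *iommu, u8 bit, bool on)
{
	if (on)
		iommu_feature_enable(iommu, bit);	/* was static in init.c */
	else
		iommu_feature_disable(iommu, bit);
}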

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index e98f20a9bdd8..827d065bbe8e 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,6 +11,24 @@
 
 #include "amd_iommu_types.h"
 
+extern void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
+extern void iommu_feature_disable(struct amd_iommu *iommu, u8 bit);
+extern u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end);
+extern void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+			  struct protection_domain *domain,
+			  bool ats, bool ppr);
+extern int iommu_flush_dte(struct amd_iommu *iommu, u16 devid);
+extern struct protection_domain *to_pdomain(struct iommu_domain *dom);
+extern struct iommu_domain *amd_iommu_domain_alloc(unsigned int type);
+extern void amd_iommu_domain_free(struct iommu_domain *dom);
+extern int amd_iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+				  phys_addr_t paddr, size_t pgsize, size_t pgcount,
+				  int prot, gfp_t gfp, size_t *mapped);
+extern unsigned long amd_iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+					      unsigned long iova,
+					      size_t pgsize, size_t pgcount,
+					      struct iommu_iotlb_gather *gather);
+
 extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 329a406cc37d..886cf55e75e2 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -432,7 +432,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
 }
 
 /* Generic functions to enable/disable certain features of the IOMMU. */
-static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
 	u64 ctrl;
 
@@ -441,7 +441,7 @@ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
-static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 {
 	u64 ctrl;
 
@@ -490,7 +490,7 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
+u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 {
 	if (!request_mem_region(address, end, "amd_iommu")) {
 		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 1b67116882be..9b398673208d 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,9 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
-			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
-			      int prot, gfp_t gfp, size_t *mapped)
+int amd_iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			   phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			   int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
@@ -435,10 +435,10 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	return ret;
 }
 
-static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
-					  unsigned long iova,
-					  size_t pgsize, size_t pgcount,
-					  struct iommu_iotlb_gather *gather)
+unsigned long amd_iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+				       unsigned long iova,
+				       size_t pgsize, size_t pgcount,
+				       struct iommu_iotlb_gather *gather)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
 	unsigned long long unmapped;
@@ -524,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb = &v1_flush_ops;
 
-	pgtable->iop.ops.map_pages = iommu_v1_map_pages;
-	pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
+	pgtable->iop.ops.map_pages = amd_iommu_v1_map_pages;
+	pgtable->iop.ops.unmap_pages = amd_iommu_v1_unmap_pages;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 
 	return &pgtable->iop;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4a314647d1f7..bbd10698851f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -177,7 +177,7 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
 	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
 }
 
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+struct protection_domain *to_pdomain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct protection_domain, domain);
 }
@@ -450,7 +450,7 @@ static void amd_iommu_uninit_device(struct device *dev)
  *
  ****************************************************************************/
 
-static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
+void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
 {
 	int i;
 	struct dev_table_entry *dev_table = get_dev_table(iommu);
@@ -1192,7 +1192,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	return ret;
 }
 
-static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
+int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 {
 	struct iommu_cmd cmd;
 
@@ -1553,8 +1553,8 @@ static void free_gcr3_table(struct protection_domain *domain)
 	free_page((unsigned long)domain->gcr3_tbl);
 }
 
-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
-			  struct protection_domain *domain, bool ats, bool ppr)
+void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+		   struct protection_domain *domain, bool ats, bool ppr)
 {
 	u64 pte_root = 0;
 	u64 flags = 0;
@@ -2118,7 +2118,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 	return NULL;
 }
 
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 {
 	struct protection_domain *domain;
 
@@ -2140,7 +2140,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 	return &domain->domain;
 }
 
-static void amd_iommu_domain_free(struct iommu_domain *dom)
+void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
 
--
2.34.1