Patch series
*) Add support in the endpoint core to add virtual functions, for
   endpoint controllers that support the SR-IOV capability
*) Add support in the Cadence endpoint driver to configure virtual
   functions
*) Enable the pci_endpoint_test driver to create a pci_device for each
   virtual function
v1 of the patch series can be found at [1]
v2 of the patch series can be found at [2]
v3 of the patch series can be found at [3]
v4 of the patch series can be found at [4]
v5 of the patch series can be found at [5]
v6 of the patch series can be found at [6]
Here both physical functions and virtual functions use the same
pci_endpoint_test driver, so the existing pcitest utility can be used
to test virtual functions as well (see the sketch below for how the
test driver wires up SR-IOV).
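A minimal sketch of the pci_endpoint_test change (the actual hunk is in
patch 6 of this series and may differ): the driver's struct pci_driver
is expected to gain a sriov_configure callback using the generic
pci_sriov_configure_simple() helper; the other fields shown are the
ones already present in the driver.

  static struct pci_driver pci_endpoint_test_driver = {
          .name            = DRV_MODULE_NAME,
          .id_table        = pci_endpoint_test_tbl,
          .probe           = pci_endpoint_test_probe,
          .remove          = pci_endpoint_test_remove,
          /* assumption: the generic SR-IOV helper is enough here */
          .sriov_configure = pci_sriov_configure_simple,
  };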
Changes from v6:
*) Rebased to 5.13-rc4
Changes from v5:
*) Rebased to 5.13-rc1
Changes from v4:
*) Added a fix for the Cadence driver, which was overwriting the BAR
   configuration of the physical function.
*) Did not carry over Tom's Acked-by since the Cadence driver is
   modified in this revision.
Changes from v3:
*) Addressed Rob's comment and added his Reviewed-by as he suggested.
Changes from v2:
*) Addressed Rob's comment on the DT binding documentation
*) Fixed the error check in pci-epc-core.c
Changes from v1:
*) Rebased and reworked on top of the latest kernel (5.10.0-rc2+), which
   now has a generic DT binding for EP
[1] -> http://lore.kernel.org/r/[email protected]
[2] -> http://lore.kernel.org/r/[email protected]
[3] -> https://lore.kernel.org/r/[email protected]
[4] -> http://lore.kernel.org/r/[email protected]
[5] -> https://lore.kernel.org/r/[email protected]
[6] -> http://lore.kernel.org/r/[email protected]
Kishon Vijay Abraham I (7):
dt-bindings: PCI: pci-ep: Add binding to specify virtual function
PCI: endpoint: Add support to add virtual function in endpoint core
PCI: endpoint: Add support to link a physical function to a virtual
function
PCI: endpoint: Add virtual function number in pci_epc ops
PCI: cadence: Add support to configure virtual functions
misc: pci_endpoint_test: Populate sriov_configure ops to configure
SR-IOV device
Documentation: PCI: endpoint/pci-endpoint-cfs: Guide to use SR-IOV
.../PCI/endpoint/pci-endpoint-cfs.rst | 12 +-
.../devicetree/bindings/pci/pci-ep.yaml | 7 +
drivers/misc/pci_endpoint_test.c | 1 +
.../pci/controller/cadence/pcie-cadence-ep.c | 285 ++++++++++++++----
drivers/pci/controller/cadence/pcie-cadence.h | 7 +
.../pci/controller/dwc/pcie-designware-ep.c | 36 +--
drivers/pci/controller/pcie-rcar-ep.c | 19 +-
drivers/pci/controller/pcie-rockchip-ep.c | 18 +-
drivers/pci/endpoint/functions/pci-epf-ntb.c | 79 +++--
drivers/pci/endpoint/functions/pci-epf-test.c | 66 ++--
drivers/pci/endpoint/pci-ep-cfs.c | 24 ++
drivers/pci/endpoint/pci-epc-core.c | 130 +++++---
drivers/pci/endpoint/pci-epf-core.c | 144 ++++++++-
include/linux/pci-epc.h | 57 ++--
include/linux/pci-epf.h | 16 +-
15 files changed, 684 insertions(+), 217 deletions(-)
--
2.17.1
Add a virtual function number argument to the pci_epc ops. An EPC
controller driver can then perform virtual-function-specific
initialization based on the virtual function number.
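For illustration, a controller driver can now key its behaviour off the
new argument. A minimal, hypothetical sketch (the foo_* names are
illustrative only and not part of this patch) of a ->get_features()
callback that reports reduced capabilities for virtual functions:

  static const struct pci_epc_features foo_pf_epc_features = {
          .msi_capable  = true,
          .msix_capable = true,
  };

  static const struct pci_epc_features foo_vf_epc_features = {
          .msi_capable  = true,
          .msix_capable = false,  /* assume the VFs lack MSI-X */
  };

  static const struct pci_epc_features *
  foo_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
  {
          /* vfunc_no == 0 refers to the physical function itself */
          if (vfunc_no > 0)
                  return &foo_vf_epc_features;

          return &foo_pf_epc_features;
  }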
Signed-off-by: Kishon Vijay Abraham I <[email protected]>
---
.../pci/controller/cadence/pcie-cadence-ep.c | 44 +++---
.../pci/controller/dwc/pcie-designware-ep.c | 36 ++---
drivers/pci/controller/pcie-rcar-ep.c | 19 +--
drivers/pci/controller/pcie-rockchip-ep.c | 18 +--
drivers/pci/endpoint/functions/pci-epf-ntb.c | 79 ++++++-----
drivers/pci/endpoint/functions/pci-epf-test.c | 66 +++++----
drivers/pci/endpoint/pci-epc-core.c | 128 ++++++++++++------
drivers/pci/endpoint/pci-epf-core.c | 48 ++++++-
include/linux/pci-epc.h | 57 ++++----
9 files changed, 313 insertions(+), 182 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 897cdde02bd8..912a15be8bfd 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -16,7 +16,7 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -47,7 +47,7 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -117,7 +117,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -147,8 +147,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -169,7 +169,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -189,7 +189,7 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -209,7 +209,7 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -230,7 +230,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return mme;
}
-static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -247,8 +247,9 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -317,7 +318,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -334,7 +336,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
@@ -382,7 +384,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset)
@@ -419,7 +421,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
pci_addr &= GENMASK_ULL(63, 2);
for (i = 0; i < interrupt_num; i++) {
- ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
pci_addr & ~pci_addr_mask,
entry_size);
if (ret)
@@ -433,7 +435,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
@@ -478,7 +480,7 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -486,13 +488,13 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -531,7 +533,7 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &cdns_pcie_epc_features;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8d028a88b375..998b698f4085 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -125,7 +125,7 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -202,7 +202,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -217,7 +217,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -276,7 +276,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -292,9 +292,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -309,7 +308,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -333,7 +332,8 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -358,7 +358,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -382,8 +382,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -418,7 +418,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -450,7 +450,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -525,14 +525,14 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -593,14 +593,14 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index b4a288e24aaf..6cee4e09acca 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -159,7 +159,7 @@ static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
return 0;
}
-static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -195,7 +195,7 @@ static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
@@ -246,7 +246,7 @@ static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
return 0;
}
-static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -259,7 +259,8 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -272,7 +273,7 @@ static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
return 0;
}
-static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -285,7 +286,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
-static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr, size_t size)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -322,7 +323,7 @@ static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -403,7 +404,7 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
return 0;
}
-static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -451,7 +452,7 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
};
static const struct pci_epc_features*
-rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rcar_pcie_epc_features;
}
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 7631dc3961c1..5fb9ce6e536e 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -122,7 +122,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
-static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -159,7 +159,7 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -227,7 +227,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -256,7 +256,7 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
-static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
@@ -284,7 +284,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -308,7 +308,7 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
u8 multi_msg_cap)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -329,7 +329,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -471,7 +471,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -510,7 +510,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
};
static const struct pci_epc_features*
-rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rockchip_pcie_epc_features;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index bce274d02dcf..0059b13a613f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -87,6 +87,7 @@ struct epf_ntb {
struct epf_ntb_epc {
u8 func_no;
+ u8 vfunc_no;
bool linkup;
bool is_msix;
int msix_bar;
@@ -143,14 +144,15 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
struct pci_epc *epc;
+ u8 func_no, vfunc_no;
bool is_msix;
- u8 func_no;
int ret;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
is_msix = ntb_epc->is_msix;
ctrl = ntb_epc->reg;
if (link_up)
@@ -158,7 +160,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
else
ctrl->link_status &= ~LINK_STATUS_UP;
irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
- ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to raise Link Up IRQ\n",
@@ -238,10 +240,10 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
u64 addr, size;
int ret = 0;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -267,8 +269,9 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&epc->dev,
"%s intf: Failed to map memory window %d address\n",
@@ -296,8 +299,8 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -311,8 +314,9 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -385,8 +389,8 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
struct epf_ntb_ctrl *peer_ctrl;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -400,8 +404,9 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
db_entry_size, &db_data, &db_offset);
if (ret) {
dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
@@ -491,10 +496,10 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
u32 db_entry_size, msg_data;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
size_t align;
u64 msg_addr;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -512,12 +517,13 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_entry_size = peer_ctrl->db_entry_size;
for (i = 0; i < db_count; i++) {
msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
msg_data = msix_tbl[i].msg_data;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
db_entry_size);
if (ret) {
dev_err(&epc->dev,
@@ -586,8 +592,8 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -597,8 +603,9 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -728,14 +735,15 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -775,9 +783,9 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
struct pci_epf_bar *peer_epf_bar, *epf_bar;
enum pci_barno peer_barno, barno;
u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
dev = &ntb->epf->dev;
@@ -790,6 +798,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
peer_spad_offset = peer_ntb_epc->reg->spad_offset;
@@ -798,7 +807,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
pci_epc_interface_string(type));
@@ -842,14 +851,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -886,10 +896,10 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct epf_ntb *ntb;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
ntb = ntb_epc->epf_ntb;
@@ -897,10 +907,11 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
pci_epc_interface_string(ntb_epc->type));
@@ -1214,17 +1225,18 @@ static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
struct pci_epf_bar *epf_bar;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
}
@@ -1263,10 +1275,10 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
bool msix_capable, msi_capable;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
u32 db_count;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1282,6 +1294,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
@@ -1293,7 +1306,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
epc = ntb_epc->epc;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, func_no, db_count);
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
if (ret) {
dev_err(dev, "%s intf: MSI configuration failed\n",
pci_epc_interface_string(type));
@@ -1302,7 +1315,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, func_no, db_count,
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
ntb_epc->msix_bar,
ntb_epc->msix_table_offset);
if (ret) {
@@ -1423,11 +1436,11 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
u32 num_mws, db_count;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
size_t align;
int ret, i;
- u8 func_no;
u64 size;
ntb_epc = ntb->epc[type];
@@ -1437,6 +1450,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
num_mws = ntb->num_mws;
db_count = ntb->db_count;
@@ -1464,7 +1478,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: DoorBell BAR set failed\n",
pci_epc_interface_string(type));
@@ -1536,9 +1550,9 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
dev = &ntb->epf->dev;
@@ -1547,6 +1561,7 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
return -ENOMEM;
epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
epf_bar = epf->bar;
@@ -1558,11 +1573,12 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
ntb_epc->linkup = false;
ntb_epc->epc = epc;
ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
ntb_epc->type = type;
ntb_epc->epf_bar = epf_bar;
ntb_epc->epf_ntb = ntb;
- epc_features = pci_epc_get_features(epc, func_no);
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!epc_features)
return -EINVAL;
ntb_epc->epc_features = epc_features;
@@ -1702,10 +1718,10 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1713,6 +1729,7 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
dev = &epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
if (ret) {
@@ -1742,7 +1759,7 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
goto err_db_mw_bar_init;
}
- ret = pci_epc_write_header(epc, func_no, epf->header);
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
if (ret) {
dev_err(dev, "%s intf: Configuration header write failed\n",
pci_epc_interface_string(type));
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d2708ca4bece..aaca7134b63b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -247,8 +247,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -263,8 +263,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
- ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -291,13 +291,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -331,8 +331,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -386,7 +386,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -419,8 +419,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -479,7 +479,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -501,13 +501,16 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
switch (irq_type) {
case IRQ_TYPE_LEGACY:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -542,7 +545,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@@ -580,22 +584,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- count = pci_epc_get_msi(epc, epf->func_no);
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
- count = pci_epc_get_msix(epc, epf->func_no);
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
goto reset_handler;
}
@@ -618,7 +622,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
}
@@ -650,7 +655,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -674,13 +680,13 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
- ret = pci_epc_write_header(epc, epf->func_no, header);
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
if (ret) {
dev_err(dev, "Configuration header write failed\n");
return ret;
@@ -691,7 +697,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return ret;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
@@ -699,7 +706,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
@@ -832,7 +840,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 01c58ca84dcc..da2b6e792490 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -137,24 +137,29 @@ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
* @epc: the features supported by *this* EPC device will be returned
* @func_no: the features supported by the EPC device specific to the
* endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
*
* Invoke to get the features provided by the EPC which may be
* specific to an endpoint function. Returns pci_epc_features on success
* and NULL for any failures.
*/
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no)
+ u8 func_no, u8 vfunc_no)
{
const struct pci_epc_features *epc_features;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
if (!epc->ops->get_features)
return NULL;
mutex_lock(&epc->lock);
- epc_features = epc->ops->get_features(epc, func_no);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
return epc_features;
@@ -205,13 +210,14 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
* Invoke to raise an legacy, MSI or MSI-X interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
@@ -219,11 +225,14 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->raise_irq)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
mutex_unlock(&epc->lock);
return ret;
@@ -235,6 +244,7 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* MSI data
* @epc: the EPC device which has the MSI capability
* @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: the physical address of the outbound region
* @interrupt_num: the MSI interrupt number
* @entry_size: Size of Outbound address region for each interrupt
@@ -250,21 +260,25 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* physical address (in outbound region) of the other interface to ring
* doorbell.
*/
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
- u8 interrupt_num, u32 entry_size, u32 *msi_data,
- u32 *msi_addr_offset)
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
{
int ret;
if (IS_ERR_OR_NULL(epc))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_msi_irq)
return -EINVAL;
mutex_lock(&epc->lock);
- ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
- entry_size, msi_data, msi_addr_offset);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
mutex_unlock(&epc->lock);
return ret;
@@ -274,22 +288,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts was requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msi)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msi(epc, func_no);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -304,12 +322,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
int ret;
u8 encode_int;
@@ -318,13 +337,16 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
interrupts > 32)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msi)
return 0;
encode_int = order_base_2(interrupts);
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
mutex_unlock(&epc->lock);
return ret;
@@ -334,22 +356,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
* @epc: the EPC device to which MSI-X interrupts was requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msix)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msix(epc, func_no);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -362,15 +388,16 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
* @epc: the EPC device on which MSI-X has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
int ret;
@@ -378,11 +405,15 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
interrupts < 1 || interrupts > 2048)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
mutex_unlock(&epc->lock);
return ret;
@@ -392,22 +423,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->unmap_addr)
return;
mutex_lock(&epc->lock);
- epc->ops->unmap_addr(epc, func_no, phys_addr);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -415,14 +450,15 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map CPU address with PCI address.
*/
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
@@ -430,11 +466,15 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_addr)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
mutex_unlock(&epc->lock);
return ret;
@@ -444,12 +484,13 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -457,11 +498,14 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->clear_bar)
return;
mutex_lock(&epc->lock);
- epc->ops->clear_bar(epc, func_no, epf_bar);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -469,12 +513,13 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -489,11 +534,14 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_bar)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
return ret;
@@ -503,7 +551,8 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -511,7 +560,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *header)
{
int ret;
@@ -519,11 +568,14 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->write_header)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->write_header(epc, func_no, header);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
mutex_unlock(&epc->lock);
return ret;
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index ae7107a3196e..d9e1a8dd1ceb 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -90,11 +90,14 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct device *dev = &epf->dev;
struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
int ret;
if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ dev_WARN(dev, "epf device not bound to driver\n");
return -EINVAL;
}
@@ -103,7 +106,50 @@ int pci_epf_bind(struct pci_epf *epf)
mutex_lock(&epf->lock);
list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
epf_vf->epc = epf->epc;
epf_vf->sec_epc = epf->sec_epc;
ret = epf_vf->driver->ops->bind(epf_vf);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 50a649d33e68..a48778e1a4ee 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -62,31 +62,32 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *epc, u8 func_no,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
- int (*get_msix)(struct pci_epc *epc, u8 func_no);
- int (*raise_irq)(struct pci_epc *epc, u8 func_no,
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
- int (*map_msi_irq)(struct pci_epc *epc, u8 func_no,
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
@@ -128,6 +129,8 @@ struct pci_epc_mem {
* single window.
* @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
@@ -141,6 +144,7 @@ struct pci_epc {
struct pci_epc_mem *mem;
unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
@@ -208,31 +212,32 @@ void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
--
2.17.1
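For reference, a minimal sketch of how a function driver is expected to
thread the new vfunc_no argument through these APIs (not taken from the
series; the function name and error handling below are illustrative only,
and vfunc_no is 0 for a physical function, >= 1 for a virtual one):

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int epf_sketch_setup(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	const struct pci_epc_features *features;
	int ret;

	/* Per-VF capabilities can differ from those of the physical function */
	features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!features)
		return -EOPNOTSUPP;

	/* The EPC driver decides how the header applies to PF vs. VF space */
	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret)
		return ret;

	/* BARs, MSI and MSI-X all take the same (func_no, vfunc_no) pair */
	return pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
			       epf->msi_interrupts);
}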
Add documentation to help users create virtual functions using configfs
in the PCI endpoint framework. An endpoint function is designated as a
virtual endpoint function device when it is linked to a physical
endpoint function device (instead of an endpoint controller).
Signed-off-by: Kishon Vijay Abraham I <[email protected]>
---
Documentation/PCI/endpoint/pci-endpoint-cfs.rst | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
index db609b97ad58..fb73345cfb8a 100644
--- a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
@@ -43,6 +43,7 @@ entries corresponding to EPF driver will be created by the EPF core.
.. <EPF Driver1>/
... <EPF Device 11>/
... <EPF Device 21>/
+ ... <EPF Device 31>/
.. <EPF Driver2>/
... <EPF Device 12>/
... <EPF Device 22>/
@@ -68,6 +69,7 @@ created)
... subsys_vendor_id
... subsys_id
... interrupt_pin
+ ... <Symlink EPF Device 31>/
... primary/
... <Symlink EPC Device1>/
... secondary/
@@ -79,6 +81,13 @@ interface should be added in 'primary' directory and symlink of endpoint
controller connected to secondary interface should be added in 'secondary'
directory.
+The <EPF Device> directory can have a list of symbolic links
+(<Symlink EPF Device 31>) to other <EPF Device>. These symbolic links should
+be created by the user to represent the virtual functions that are bound to
+the physical function. In the above directory structure <EPF Device 11> is a
+physical function and <EPF Device 31> is a virtual function. Once an EPF
+device is linked to another EPF device, it cannot be linked to an EPC device.
+
EPC Device
==========
@@ -98,7 +107,8 @@ entries corresponding to EPC device will be created by the EPC core.
The <EPC Device> directory will have a list of symbolic links to
<EPF Device>. These symbolic links should be created by the user to
-represent the functions present in the endpoint device.
+represent the functions present in the endpoint device. Only an <EPF Device>
+that represents a physical function can be linked to an EPC device.
The <EPC Device> directory will also have a *start* field. Once
"1" is written to this field, the endpoint device will be ready to
--
2.17.1
Add support to add a virtual function in the endpoint core. A virtual
function can only be associated with a physical function (not directly
with an endpoint controller). Provide APIs here to associate a virtual
function with a physical function.
Signed-off-by: Kishon Vijay Abraham I <[email protected]>
---
drivers/pci/endpoint/pci-epc-core.c | 2 +-
drivers/pci/endpoint/pci-epf-core.c | 96 ++++++++++++++++++++++++++++-
include/linux/pci-epf.h | 16 ++++-
3 files changed, 111 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index adec9bee72cf..01c58ca84dcc 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -548,7 +548,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
u32 func_no;
int ret = 0;
- if (IS_ERR_OR_NULL(epc))
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
return -EINVAL;
if (type == PRIMARY_INTERFACE && epf->epc)
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index e9289d10f822..ae7107a3196e 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -62,13 +62,20 @@ EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
*/
void pci_epf_unbind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return;
}
mutex_lock(&epf->lock);
- epf->driver->ops->unbind(epf);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
@@ -83,6 +90,7 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
int ret;
if (!epf->driver) {
@@ -94,13 +102,95 @@ int pci_epf_bind(struct pci_epf *epf)
return -EAGAIN;
mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ epf_vf->func_no = epf->func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
+ mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
+/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+ if (vfunc_no >= BITS_PER_LONG) {
+ mutex_unlock(&epf_pf->lock);
+ return -EINVAL;
+ }
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
@@ -317,6 +407,10 @@ struct pci_epf *pci_epf_create(const char *name)
return ERR_PTR(-ENOMEM);
}
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
dev = &epf->dev;
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 2debc27ba95e..043b4c9c7188 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -121,8 +121,10 @@ struct pci_epf_bar {
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
* @msix_interrupts: number of MSI-X interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
@@ -133,6 +135,10 @@ struct pci_epf_bar {
* @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
* @sec_epc_func_no: unique (physical) function number within the secondary EPC
* @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
*/
struct pci_epf {
struct device dev;
@@ -142,8 +148,10 @@ struct pci_epf {
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
struct notifier_block nb;
@@ -156,6 +164,10 @@ struct pci_epf {
struct pci_epf_bar sec_epc_bar[6];
u8 sec_epc_func_no;
struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
};
/**
@@ -199,4 +211,6 @@ int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
struct config_group *group);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
--
2.17.1
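For context, a minimal sketch of how the new helpers pair a virtual EPF
device with a physical one (not from the series; the device names are only
examples and error handling is trimmed):

#include <linux/err.h>
#include <linux/pci-epf.h>

static int sketch_create_pf_with_vf(void)
{
	struct pci_epf *epf_pf, *epf_vf;
	int ret;

	epf_pf = pci_epf_create("pci_epf_test.0");
	if (IS_ERR(epf_pf))
		return PTR_ERR(epf_pf);

	epf_vf = pci_epf_create("pci_epf_test.1");
	if (IS_ERR(epf_vf)) {
		pci_epf_destroy(epf_pf);
		return PTR_ERR(epf_vf);
	}

	/* epf_vf is assigned vfunc_no 1 and queued on epf_pf->pci_vepf */
	ret = pci_epf_add_vepf(epf_pf, epf_vf);
	if (ret) {
		pci_epf_destroy(epf_vf);
		pci_epf_destroy(epf_pf);
	}

	return ret;
}

In the configfs flow described in the documentation patch, this pairing is
what creating the symlink from the physical <EPF Device> directory to the
virtual one is meant to trigger, with pci_epf_remove_vepf() undoing it when
the symlink is removed.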
Populate the sriov_configure op with pci_sriov_configure_simple() to
configure the SR-IOV device.
Signed-off-by: Kishon Vijay Abraham I <[email protected]>
---
drivers/misc/pci_endpoint_test.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 1b2868ca4f2a..c7ee34013485 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -978,6 +978,7 @@ static struct pci_driver pci_endpoint_test_driver = {
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
--
2.17.1
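pci_sriov_configure_simple() is an existing PCI core helper; a simplified
sketch of the behaviour it provides when the host writes to the device's
sriov_numvfs sysfs attribute might look like the following (the real helper
additionally rejects non-PF devices and devices with assigned VFs):

#include <linux/pci.h>

static int sketch_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
	int ret;

	if (nr_virtfn == 0) {
		/* Writing 0 tears down all previously enabled VFs */
		pci_disable_sriov(pdev);
		return 0;
	}

	/* Create nr_virtfn VFs; each is probed as a regular pci_dev */
	ret = pci_enable_sriov(pdev, nr_virtfn);
	if (ret < 0)
		return ret;

	/* .sriov_configure returns the number of VFs now enabled */
	return nr_virtfn;
}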
Now that support for SR-IOV is added in PCIe endpoint core, add support
to configure virtual functions in the Cadence PCIe EP driver.
Signed-off-by: Kishon Vijay Abraham I <[email protected]>
Acked-by: Tom Joseph <[email protected]>
---
.../pci/controller/cadence/pcie-cadence-ep.c | 241 +++++++++++++++---
drivers/pci/controller/cadence/pcie-cadence.h | 7 +
2 files changed, 217 insertions(+), 31 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 912a15be8bfd..791915054ff4 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -20,7 +20,18 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_dbg(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return 0;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -51,12 +62,14 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
dma_addr_t bar_phys = epf_bar->phys_addr;
enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u32 first_vf_offset, stride;
u64 sz;
/* BAR size is 2^(aperture + 7) */
@@ -92,26 +105,50 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1) {
+ /* All virtual functions use the same BAR config */
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ } else if (vfn == 0) {
+ /* BAR configuration for physical function */
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ }
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
+ }
+
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
- cdns_pcie_writel(pcie, reg, cfg);
-
epf->epf_bar[bar] = epf_bar;
return 0;
@@ -121,25 +158,48 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
enum pci_barno bar = epf_bar->barno;
+ u32 first_vf_offset, stride;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (vfn == 1) {
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+ } else if (vfn == 0) {
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
}
- ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
- cdns_pcie_writel(pcie, reg, cfg);
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
+ }
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
@@ -152,8 +212,18 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 r;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
r = find_first_zero_bit(&ep->ob_region_map,
sizeof(ep->ob_region_map) * BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
@@ -193,9 +263,19 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u16 flags;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
@@ -213,9 +293,19 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u16 flags, mme;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -232,11 +322,21 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 val, reg;
+ if (vfunc_no > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap
+ + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ func_no = func_no + first_vf_offset + ((vfunc_no - 1) * stride);
+ }
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
@@ -251,11 +351,21 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
u16 interrupts, enum pci_barno bir,
u32 offset)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 first_vf_offset, stride;
u32 val, reg;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
@@ -275,8 +385,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
return 0;
}
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
unsigned long flags;
@@ -339,11 +449,21 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u32 first_vf_offset, stride;
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -389,15 +509,25 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u64 pci_addr, pci_addr_mask = 0xff;
u16 flags, mme, data, data_mask;
+ u32 first_vf_offset, stride;
u8 msi_count;
int ret;
int i;
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ }
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -438,16 +568,29 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
+ u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
+ u32 first_vf_offset, stride;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
u16 flags;
u8 bir;
+ epf = &ep->epf[fn];
+
+ if (vfn > 0) {
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
+ PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+ epf = &epf->epf[vfn - 1];
+ }
+
/* Check whether the MSI-X feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
if (!(flags & PCI_MSIX_FLAGS_ENABLE))
@@ -458,7 +601,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
bir = tbl_offset & PCI_MSIX_TABLE_BIR;
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf = &ep->epf[fn];
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
@@ -485,9 +627,15 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
case PCI_EPC_IRQ_LEGACY:
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
@@ -525,6 +673,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
@@ -535,7 +690,10 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -561,9 +719,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
@@ -608,6 +768,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (!ep->epf)
return -ENOMEM;
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 30db2d68c17a..927b49e42997 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -50,6 +50,10 @@
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
@@ -114,6 +118,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
* Root Port Registers (PCI configuration space for the root port function)
@@ -308,9 +313,11 @@ struct cdns_pcie_rc {
/**
* struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
* @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
*/
struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
--
2.17.1
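As a possible follow-up (not part of this patch), the VF-to-function
remapping that is repeated in most of the ops above could be collapsed into
one helper; the name below is hypothetical:

static u8 cdns_pcie_ep_fn_for_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u16 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn,
						cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);

	/* VF n of PF fn sits at function fn + first_vf_offset + (n - 1) * stride */
	return fn + first_vf_offset + (vfn - 1) * stride;
}

Each op could then start with "fn = cdns_pcie_ep_fn_for_vfn(pcie, fn, vfn);"
instead of repeating the capability read-back.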
On Tue, Aug 03, 2021 at 10:33:08AM +0530, Kishon Vijay Abraham I wrote:
> Now that support for SR-IOV is added in PCIe endpoint core, add support
> to configure virtual functions in the Cadence PCIe EP driver.
>
> Signed-off-by: Kishon Vijay Abraham I <[email protected]>
> Acked-by: Tom Joseph <[email protected]>
> ---
> .../pci/controller/cadence/pcie-cadence-ep.c | 241 +++++++++++++++---
> drivers/pci/controller/cadence/pcie-cadence.h | 7 +
> 2 files changed, 217 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> index 912a15be8bfd..791915054ff4 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
> +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> @@ -20,7 +20,18 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
> struct pci_epf_header *hdr)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie *pcie = &ep->pcie;
> + u32 reg;
> +
> + if (vfn > 1) {
> + dev_dbg(&epc->dev, "Only Virtual Function #1 has deviceID\n");
> + return 0;
Shouldn't this return an error ?
> + } else if (vfn == 1) {
> + reg = cap + PCI_SRIOV_VF_DID;
> + cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
> + return 0;
> + }
>
> cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
> cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
> @@ -51,12 +62,14 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
> struct pci_epf_bar *epf_bar)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie_epf *epf = &ep->epf[fn];
> struct cdns_pcie *pcie = &ep->pcie;
> dma_addr_t bar_phys = epf_bar->phys_addr;
> enum pci_barno bar = epf_bar->barno;
> int flags = epf_bar->flags;
> u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
> + u32 first_vf_offset, stride;
> u64 sz;
>
> /* BAR size is 2^(aperture + 7) */
> @@ -92,26 +105,50 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
>
> addr0 = lower_32_bits(bar_phys);
> addr1 = upper_32_bits(bar_phys);
> +
> + if (vfn == 1) {
> + /* All virtual functions use the same BAR config */
> + if (bar < BAR_4) {
> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
> + b = bar;
> + } else {
> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
> + b = bar - BAR_4;
> + }
> + } else if (vfn == 0) {
> + /* BAR configuration for physical function */
> + if (bar < BAR_4) {
> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
> + b = bar;
> + } else {
> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
> + b = bar - BAR_4;
> + }
> + }
Code in both branches is almost identical except for what is
assigned to reg, it is not fundamental but maybe it can be rewritten
more concisely.
Lorenzo
> +
> + if (vfn == 0 || vfn == 1) {
> + cfg = cdns_pcie_readl(pcie, reg);
> + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
> + cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
> + cdns_pcie_writel(pcie, reg, cfg);
> + }
> +
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + epf = &epf->epf[vfn - 1];
> + }
> +
> cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
> addr0);
> cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
> addr1);
>
> - if (bar < BAR_4) {
> - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
> - b = bar;
> - } else {
> - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
> - b = bar - BAR_4;
> - }
> -
> - cfg = cdns_pcie_readl(pcie, reg);
> - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
> - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
> - cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
> - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
> - cdns_pcie_writel(pcie, reg, cfg);
> -
> epf->epf_bar[bar] = epf_bar;
>
> return 0;
> @@ -121,25 +158,48 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
> struct pci_epf_bar *epf_bar)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie_epf *epf = &ep->epf[fn];
> struct cdns_pcie *pcie = &ep->pcie;
> enum pci_barno bar = epf_bar->barno;
> + u32 first_vf_offset, stride;
> u32 reg, cfg, b, ctrl;
>
> - if (bar < BAR_4) {
> - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
> - b = bar;
> - } else {
> - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
> - b = bar - BAR_4;
> + if (vfn == 1) {
> + if (bar < BAR_4) {
> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
> + b = bar;
> + } else {
> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
> + b = bar - BAR_4;
> + }
> + } else if (vfn == 0) {
> + if (bar < BAR_4) {
> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
> + b = bar;
> + } else {
> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
> + b = bar - BAR_4;
> + }
> }
>
> - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
> - cfg = cdns_pcie_readl(pcie, reg);
> - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
> - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
> - cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
> - cdns_pcie_writel(pcie, reg, cfg);
> + if (vfn == 0 || vfn == 1) {
> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
> + cfg = cdns_pcie_readl(pcie, reg);
> + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
> + cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
> + cdns_pcie_writel(pcie, reg, cfg);
> + }
> +
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + epf = &epf->epf[vfn - 1];
> + }
>
> cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
> cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
> @@ -152,8 +212,18 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> struct cdns_pcie *pcie = &ep->pcie;
> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> + u32 first_vf_offset, stride;
> u32 r;
>
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
> +
> r = find_first_zero_bit(&ep->ob_region_map,
> sizeof(ep->ob_region_map) * BITS_PER_LONG);
> if (r >= ep->max_regions - 1) {
> @@ -193,9 +263,19 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> struct cdns_pcie *pcie = &ep->pcie;
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
> + u32 first_vf_offset, stride;
> u16 flags;
>
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
> +
> /*
> * Set the Multiple Message Capable bitfield into the Message Control
> * register.
> @@ -213,9 +293,19 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> struct cdns_pcie *pcie = &ep->pcie;
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
> + u32 first_vf_offset, stride;
> u16 flags, mme;
>
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
> +
> /* Validate that the MSI feature is actually enabled. */
> flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
> if (!(flags & PCI_MSI_FLAGS_ENABLE))
> @@ -232,11 +322,21 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
>
> static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
> {
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> struct cdns_pcie *pcie = &ep->pcie;
> u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
> + u32 first_vf_offset, stride;
> u32 val, reg;
>
> + if (vfunc_no > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap
> + + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, func_no, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + func_no = func_no + first_vf_offset + ((vfunc_no - 1) * stride);
> + }
> +
> reg = cap + PCI_MSIX_FLAGS;
> val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
> if (!(val & PCI_MSIX_FLAGS_ENABLE))
> @@ -251,11 +351,21 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
> u16 interrupts, enum pci_barno bir,
> u32 offset)
> {
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> struct cdns_pcie *pcie = &ep->pcie;
> u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
> + u32 first_vf_offset, stride;
> u32 val, reg;
>
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
> +
> reg = cap + PCI_MSIX_FLAGS;
> val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
> val &= ~PCI_MSIX_FLAGS_QSIZE;
> @@ -275,8 +385,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
> return 0;
> }
>
> -static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
> - u8 intx, bool is_asserted)
> +static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
> + bool is_asserted)
> {
> struct cdns_pcie *pcie = &ep->pcie;
> unsigned long flags;
> @@ -339,11 +449,21 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> u8 interrupt_num)
> {
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie *pcie = &ep->pcie;
> u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
> u16 flags, mme, data, data_mask;
> u8 msi_count;
> u64 pci_addr, pci_addr_mask = 0xff;
> + u32 first_vf_offset, stride;
> +
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
>
> /* Check whether the MSI feature has been enabled by the PCI host. */
> flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
> @@ -389,15 +509,25 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
> u32 entry_size, u32 *msi_data,
> u32 *msi_addr_offset)
> {
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
> struct cdns_pcie *pcie = &ep->pcie;
> u64 pci_addr, pci_addr_mask = 0xff;
> u16 flags, mme, data, data_mask;
> + u32 first_vf_offset, stride;
> u8 msi_count;
> int ret;
> int i;
>
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + }
> +
> /* Check whether the MSI feature has been enabled by the PCI host. */
> flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
> if (!(flags & PCI_MSI_FLAGS_ENABLE))
> @@ -438,16 +568,29 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
> static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> u16 interrupt_num)
> {
> + u32 sriov_cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
> u32 tbl_offset, msg_data, reg;
> struct cdns_pcie *pcie = &ep->pcie;
> struct pci_epf_msix_tbl *msix_tbl;
> + u32 first_vf_offset, stride;
> struct cdns_pcie_epf *epf;
> u64 pci_addr_mask = 0xff;
> u64 msg_addr;
> u16 flags;
> u8 bir;
>
> + epf = &ep->epf[fn];
> +
> + if (vfn > 0) {
> + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_OFFSET);
> + stride = cdns_pcie_ep_fn_readw(pcie, fn, sriov_cap +
> + PCI_SRIOV_VF_STRIDE);
> + fn = fn + first_vf_offset + ((vfn - 1) * stride);
> + epf = &epf->epf[vfn - 1];
> + }
> +
> /* Check whether the MSI-X feature has been enabled by the PCI host. */
> flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
> if (!(flags & PCI_MSIX_FLAGS_ENABLE))
> @@ -458,7 +601,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> bir = tbl_offset & PCI_MSIX_TABLE_BIR;
> tbl_offset &= PCI_MSIX_TABLE_OFFSET;
>
> - epf = &ep->epf[fn];
> msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
> msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
> msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
> @@ -485,9 +627,15 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
> u16 interrupt_num)
> {
> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> + struct cdns_pcie *pcie = &ep->pcie;
> + struct device *dev = pcie->dev;
>
> switch (type) {
> case PCI_EPC_IRQ_LEGACY:
> + if (vfn > 0) {
> + dev_err(dev, "Cannot raise legacy interrupts for VF\n");
> + return -EINVAL;
> + }
> return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
>
> case PCI_EPC_IRQ_MSI:
> @@ -525,6 +673,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
> return 0;
> }
>
> +static const struct pci_epc_features cdns_pcie_epc_vf_features = {
> + .linkup_notifier = false,
> + .msi_capable = true,
> + .msix_capable = true,
> + .align = 65536,
> +};
> +
> static const struct pci_epc_features cdns_pcie_epc_features = {
> .linkup_notifier = false,
> .msi_capable = true,
> @@ -535,7 +690,10 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
> static const struct pci_epc_features*
> cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
> {
> - return &cdns_pcie_epc_features;
> + if (!vfunc_no)
> + return &cdns_pcie_epc_features;
> +
> + return &cdns_pcie_epc_vf_features;
> }
>
> static const struct pci_epc_ops cdns_pcie_epc_ops = {
> @@ -561,9 +719,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
> struct platform_device *pdev = to_platform_device(dev);
> struct device_node *np = dev->of_node;
> struct cdns_pcie *pcie = &ep->pcie;
> + struct cdns_pcie_epf *epf;
> struct resource *res;
> struct pci_epc *epc;
> int ret;
> + int i;
>
> pcie->is_rc = false;
>
> @@ -608,6 +768,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
> if (!ep->epf)
> return -ENOMEM;
>
> + epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
> + sizeof(*epc->max_vfs), GFP_KERNEL);
> + if (!epc->max_vfs)
> + return -ENOMEM;
> +
> + ret = of_property_read_u8_array(np, "max-virtual-functions",
> + epc->max_vfs, epc->max_functions);
> + if (ret == 0) {
> + for (i = 0; i < epc->max_functions; i++) {
> + epf = &ep->epf[i];
> + if (epc->max_vfs[i] == 0)
> + continue;
> + epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
> + sizeof(*ep->epf), GFP_KERNEL);
> + if (!epf->epf)
> + return -ENOMEM;
> + }
> + }
> +
> ret = pci_epc_mem_init(epc, pcie->mem_res->start,
> resource_size(pcie->mem_res), PAGE_SIZE);
> if (ret < 0) {
> diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
> index 30db2d68c17a..927b49e42997 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence.h
> +++ b/drivers/pci/controller/cadence/pcie-cadence.h
> @@ -50,6 +50,10 @@
> (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
> #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
> (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
> +#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
> + (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
> +#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
> + (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
> #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
> (GENMASK(4, 0) << ((b) * 8))
> #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
> @@ -114,6 +118,7 @@
>
> #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
> #define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
> +#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
>
> /*
> * Root Port Registers (PCI configuration space for the root port function)
> @@ -308,9 +313,11 @@ struct cdns_pcie_rc {
>
> /**
> * struct cdns_pcie_epf - Structure to hold info about endpoint function
> + * @epf: Info about virtual functions attached to the physical function
> * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
> */
> struct cdns_pcie_epf {
> + struct cdns_pcie_epf *epf;
> struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
> };
>
> --
> 2.17.1
>
Hi Lorenzo,
On 03/08/21 5:15 pm, Lorenzo Pieralisi wrote:
> On Tue, Aug 03, 2021 at 10:33:08AM +0530, Kishon Vijay Abraham I wrote:
>> Now that support for SR-IOV is added in PCIe endpoint core, add support
>> to configure virtual functions in the Cadence PCIe EP driver.
>>
>> Signed-off-by: Kishon Vijay Abraham I <[email protected]>
>> Acked-by: Tom Joseph <[email protected]>
>> ---
>> .../pci/controller/cadence/pcie-cadence-ep.c | 241 +++++++++++++++---
>> drivers/pci/controller/cadence/pcie-cadence.h | 7 +
>> 2 files changed, 217 insertions(+), 31 deletions(-)
>>
>> diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
>> index 912a15be8bfd..791915054ff4 100644
>> --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
>> +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
>> @@ -20,7 +20,18 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
>> struct pci_epf_header *hdr)
>> {
>> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
>> struct cdns_pcie *pcie = &ep->pcie;
>> + u32 reg;
>> +
>> + if (vfn > 1) {
>> + dev_dbg(&epc->dev, "Only Virtual Function #1 has deviceID\n");
>> + return 0;
>
> Shouldn't this return an error ?
Since the same function driver could be used for both physical and
virtual functions, I tried to avoid adding any handling that is specific
to virtual functions in the function driver.
If we want to return an error here, then the function driver should be
modified to not invoke write_header for vfn > 1.
>
>> + } else if (vfn == 1) {
>> + reg = cap + PCI_SRIOV_VF_DID;
>> + cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
>> + return 0;
>> + }
>>
>> cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
>> cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
>> @@ -51,12 +62,14 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
>> struct pci_epf_bar *epf_bar)
>> {
>> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
>> struct cdns_pcie_epf *epf = &ep->epf[fn];
>> struct cdns_pcie *pcie = &ep->pcie;
>> dma_addr_t bar_phys = epf_bar->phys_addr;
>> enum pci_barno bar = epf_bar->barno;
>> int flags = epf_bar->flags;
>> u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
>> + u32 first_vf_offset, stride;
>> u64 sz;
>>
>> /* BAR size is 2^(aperture + 7) */
>> @@ -92,26 +105,50 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
>>
>> addr0 = lower_32_bits(bar_phys);
>> addr1 = upper_32_bits(bar_phys);
>> +
>> + if (vfn == 1) {
>> + /* All virtual functions use the same BAR config */
>> + if (bar < BAR_4) {
>> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
>> + b = bar;
>> + } else {
>> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
>> + b = bar - BAR_4;
>> + }
>> + } else if (vfn == 0) {
>> + /* BAR configuration for physical function */
>> + if (bar < BAR_4) {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
>> + b = bar;
>> + } else {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
>> + b = bar - BAR_4;
>> + }
>> + }
>
> Code in both branches is almost identical except for what is
> assigned to reg, it is not fundamental but maybe it can be rewritten
> more concisely.
okay.. let me think.
Thanks
Kishon
On Tue, Aug 03, 2021 at 08:26:42PM +0530, Kishon Vijay Abraham I wrote:
> Hi Lorenzo,
>
> On 03/08/21 5:15 pm, Lorenzo Pieralisi wrote:
> > On Tue, Aug 03, 2021 at 10:33:08AM +0530, Kishon Vijay Abraham I wrote:
> >> Now that support for SR-IOV is added in PCIe endpoint core, add support
> >> to configure virtual functions in the Cadence PCIe EP driver.
> >>
> >> Signed-off-by: Kishon Vijay Abraham I <[email protected]>
> >> Acked-by: Tom Joseph <[email protected]>
> >> ---
> >> .../pci/controller/cadence/pcie-cadence-ep.c | 241 +++++++++++++++---
> >> drivers/pci/controller/cadence/pcie-cadence.h | 7 +
> >> 2 files changed, 217 insertions(+), 31 deletions(-)
> >>
> >> diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> >> index 912a15be8bfd..791915054ff4 100644
> >> --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
> >> +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> >> @@ -20,7 +20,18 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
> >> struct pci_epf_header *hdr)
> >> {
> >> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> >> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> >> struct cdns_pcie *pcie = &ep->pcie;
> >> + u32 reg;
> >> +
> >> + if (vfn > 1) {
> >> + dev_dbg(&epc->dev, "Only Virtual Function #1 has deviceID\n");
> >> + return 0;
> >
> > Shouldn't this return an error ?
>
> Since the same function driver could be used for both physical and
> virtual functions, I tried to avoid adding any handling that is specific
> to virtual functions in the function driver.
>
> If we want to return an error here, then the function driver should be
> modified to not invoke write_header for vfn > 1.
Well, I see it the other way around. If writing the header for vfn > 1
is an error it must be reported as such and handled accordingly.
As it stands, it looks like we do nothing and everything is just
fine, which is weird.
Thanks,
Lorenzo
> >> + } else if (vfn == 1) {
> >> + reg = cap + PCI_SRIOV_VF_DID;
> >> + cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
> >> + return 0;
> >> + }
> >>
> >> cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
> >> cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
> >> @@ -51,12 +62,14 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
> >> struct pci_epf_bar *epf_bar)
> >> {
> >> struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
> >> + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
> >> struct cdns_pcie_epf *epf = &ep->epf[fn];
> >> struct cdns_pcie *pcie = &ep->pcie;
> >> dma_addr_t bar_phys = epf_bar->phys_addr;
> >> enum pci_barno bar = epf_bar->barno;
> >> int flags = epf_bar->flags;
> >> u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
> >> + u32 first_vf_offset, stride;
> >> u64 sz;
> >>
> >> /* BAR size is 2^(aperture + 7) */
> >> @@ -92,26 +105,50 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
> >>
> >> addr0 = lower_32_bits(bar_phys);
> >> addr1 = upper_32_bits(bar_phys);
> >> +
> >> + if (vfn == 1) {
> >> + /* All virtual functions use the same BAR config */
> >> + if (bar < BAR_4) {
> >> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn);
> >> + b = bar;
> >> + } else {
> >> + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);
> >> + b = bar - BAR_4;
> >> + }
> >> + } else if (vfn == 0) {
> >> + /* BAR configuration for physical function */
> >> + if (bar < BAR_4) {
> >> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
> >> + b = bar;
> >> + } else {
> >> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
> >> + b = bar - BAR_4;
> >> + }
> >> + }
> >
> > Code in both branches is almost identical except for what is
> > assigned to reg, it is not fundamental but maybe it can be rewritten
> > more concisely.
>
> okay.. let me think.
>
> Thanks
> Kishon
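For illustration, one way the duplicated register selection discussed above
could be folded into a single helper (name and shape are only a sketch, not
a final implementation):

static u32 cdns_pcie_ep_bar_cfg_reg(u8 fn, u8 vfn, enum pci_barno bar, u32 *b)
{
	*b = (bar < BAR_4) ? bar : bar - BAR_4;

	/* All virtual functions of a physical function share one BAR config */
	if (vfn >= 1)
		return (bar < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) :
				       CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn);

	return (bar < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) :
			       CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
}

Both cdns_pcie_ep_set_bar() and cdns_pcie_ep_clear_bar() could then call it
once to obtain reg and b before touching the configuration.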