2024-03-27 16:10:50

by Hariprasad Kelam

Subject: [net-next Patch] octeontx2-af: map management port always to first PF

The user can enable or disable any MAC block, or only a few ports of a
block. Because of this, the management port's interface name varies
with the user's setup if it is not mapped to the first PF.

This patch changes the mapping so that the management port is always
mapped to the first PF.

Signed-off-by: Hariprasad Kelam <[email protected]>
Signed-off-by: Sunil Kovvuri Goutham <[email protected]>
---
.../net/ethernet/marvell/octeontx2/af/mbox.h | 5 +-
.../ethernet/marvell/octeontx2/af/rvu_cgx.c | 60 +++++++++++++++----
2 files changed, 53 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eb2a20b5a0d0..105d2e8f25df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -638,7 +638,10 @@ struct cgx_lmac_fwdata_s {
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+ u32 lmac_type;
+ u32 portm_idx;
+ u64 mgmt_port:1;
+#define LMAC_FWDATA_RESERVED_MEM 1019
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6b61..446344801576 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -118,15 +118,40 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
pfvf->nix_blkaddr = BLKADDR_NIX1;
}

-static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+ struct cgx_lmac_fwdata_s *fwdata;
+
+ fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+ if (fwdata->mgmt_port)
+ return true;
+
+ return false;
+}
+
+static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ int numvfs, hwvfs;
+ int free_pkind;
+
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+ rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
int cgx_cnt_max = rvu->cgx_cnt_max;
int pf = PF_CGXMAP_BASE;
unsigned long lmac_bmap;
- int size, free_pkind;
int cgx, lmac, iter;
- int numvfs, hwvfs;
+ int size;

if (!cgx_cnt_max)
return 0;
@@ -155,6 +180,24 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
return -ENOMEM;

rvu->cgx_mapped_pfs = 0;
+
+ /* Map mgmt port always to first PF */
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
+ if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+ __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
+ pf++;
+ goto non_mgmtport_mapping;
+ }
+ }
+ }
+
+non_mgmtport_mapping:
+
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
@@ -162,14 +205,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
- rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
- rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
- free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
- pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
- rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
- rvu->cgx_mapped_pfs++;
- rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
- rvu->cgx_mapped_vfs += numvfs;
+ if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
+ continue;
+ __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
pf++;
}
}
--
2.17.1



2024-03-28 14:58:49

by Simon Horman

Subject: Re: [net-next Patch] octeontx2-af: map management port always to first PF

On Wed, Mar 27, 2024 at 09:33:48PM +0530, Hariprasad Kelam wrote:
> The user can enable or disable any MAC block, or only a few ports of a
> block. Because of this, the management port's interface name varies
> with the user's setup if it is not mapped to the first PF.
>
> This patch changes the mapping so that the management port is always
> mapped to the first PF.
>
> Signed-off-by: Hariprasad Kelam <[email protected]>
> Signed-off-by: Sunil Kovvuri Goutham <[email protected]>

Hi Hariprasad and Sunil,

some feedback from my side.

> ---
> .../net/ethernet/marvell/octeontx2/af/mbox.h | 5 +-
> .../ethernet/marvell/octeontx2/af/rvu_cgx.c | 60 +++++++++++++++----
> 2 files changed, 53 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> index eb2a20b5a0d0..105d2e8f25df 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> @@ -638,7 +638,10 @@ struct cgx_lmac_fwdata_s {
> /* Only applicable if SFP/QSFP slot is present */
> struct sfp_eeprom_s sfp_eeprom;
> struct phy_s phy;
> -#define LMAC_FWDATA_RESERVED_MEM 1021
> + u32 lmac_type;
> + u32 portm_idx;
> + u64 mgmt_port:1;
> +#define LMAC_FWDATA_RESERVED_MEM 1019
> u64 reserved[LMAC_FWDATA_RESERVED_MEM];
> };
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> index 72e060cf6b61..446344801576 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> @@ -118,15 +118,40 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
> pfvf->nix_blkaddr = BLKADDR_NIX1;
> }
>
> -static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> +static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
> +{
> + struct cgx_lmac_fwdata_s *fwdata;
> +
> + fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
> + if (fwdata->mgmt_port)
> + return true;
> +
> + return false;

nit: I think this could be more succinctly expressed as:

return !!fwdata->mgmt_port;

> +}
> +
> +static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
> {
> struct npc_pkind *pkind = &rvu->hw->pkind;
> + int numvfs, hwvfs;
> + int free_pkind;
> +
> + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;

This isn't strictly related to this patch, but here
it seems implied that pf is not negative and <= 63, as
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] is only 64 bits wide.

So firstly I wonder if pf should be unsigned

> + free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> + pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;

Here pf is masked off so it is not more than 63.
But that seems to conflict with the assumption above that it is <= 63.

If there is a concern about it being larger, it should be
capped in the for loop that calls __rvu_map_cgx_lmac_pf() ?
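
For illustration, the kind of cap I have in mind would live in the
caller's loop, something like the sketch below (untested; I'm assuming
rvu->hw->total_pfs is the right bound to check against):

	/* Stop mapping once we run out of PFs instead of relying on
	 * the 0x3F mask inside __rvu_map_cgx_lmac_pf().
	 */
	if (pf >= rvu->hw->total_pfs) {
		dev_warn(rvu->dev, "no free PF for cgx%d lmac%d\n",
			 cgx, lmac);
		continue;
	}
	__rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
	pf++;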

> + rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> + rvu->cgx_mapped_pfs++;
> + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> + rvu->cgx_mapped_vfs += numvfs;
> +}
> +
> +static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> +{
> int cgx_cnt_max = rvu->cgx_cnt_max;
> int pf = PF_CGXMAP_BASE;
> unsigned long lmac_bmap;
> - int size, free_pkind;
> int cgx, lmac, iter;
> - int numvfs, hwvfs;
> + int size;
>
> if (!cgx_cnt_max)
> return 0;
> @@ -155,6 +180,24 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> return -ENOMEM;
>
> rvu->cgx_mapped_pfs = 0;
> +
> + /* Map mgmt port always to first PF */
> + for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> + if (!rvu_cgx_pdata(cgx, rvu))
> + continue;
> + lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> + for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> + lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
> + if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
> + __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> + pf++;
> + goto non_mgmtport_mapping;
> + }
> + }
> + }
> +
> +non_mgmtport_mapping:
> +
> for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> if (!rvu_cgx_pdata(cgx, rvu))
> continue;
> @@ -162,14 +205,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
> iter);
> - rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> - rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> - free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> - pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
> - rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> - rvu->cgx_mapped_pfs++;
> - rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> - rvu->cgx_mapped_vfs += numvfs;
> + if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> + continue;
> + __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> pf++;
> }
> }

There seems to be a fair amount of code duplication above.
If we can assume that there is always a management port,
then perhaps the following is simpler (compile tested only!).

And if not, I'd suggest moving the outermost for loop and everything
within it into a helper with a parameter such that it can handle
the (first?) management port on one invocation, and non management
ports on the next invocation.

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
int cgx_cnt_max = rvu->cgx_cnt_max;
- int pf = PF_CGXMAP_BASE;
+ int next_pf = PF_CGXMAP_BASE + 1;
unsigned long lmac_bmap;
int size, free_pkind;
int cgx, lmac, iter;
@@ -158,10 +167,20 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
+
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ int pf;
+
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
+
+ /* Always use first PF for management port */
+ if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
+ pf = PF_CGXMAP_BASE;
+ else
+ pf = next_pf++;
+
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
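
For reference, a rough sketch of that helper-based alternative could
look like the following (untested; the helper name and the map_mgmt
parameter are only placeholders):

static void rvu_map_cgx_lmac_pfs(struct rvu *rvu, bool map_mgmt, int *pf)
{
	unsigned long lmac_bmap;
	int cgx, lmac, iter;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;

		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);

			/* Map only the requested kind of port on this pass */
			if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac) != map_mgmt)
				continue;

			__rvu_map_cgx_lmac_pf(rvu, *pf, cgx, lmac);
			(*pf)++;
		}
	}
}

rvu_map_cgx_lmac_pf() would then call it twice, first with map_mgmt set
to true and then with it set to false, so the management port (if any)
always lands on the first PF.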

2024-04-01 08:39:13

by Hariprasad Kelam

Subject: Re: [net-next Patch] octeontx2-af: map management port always to first PF


Hi Simon,

Thanks for the review, see inline

> On Wed, Mar 27, 2024 at 09:33:48PM +0530, Hariprasad Kelam wrote:
> > The user can enable or disable any MAC block, or only a few ports of a
> > block. Because of this, the management port's interface name varies
> > with the user's setup if it is not mapped to the first PF.
> >
> > This patch changes the mapping so that the management port is always
> > mapped to the first PF.
> >
> > Signed-off-by: Hariprasad Kelam <[email protected]>
> > Signed-off-by: Sunil Kovvuri Goutham <[email protected]>
>
> Hi Hariprasad and Sunil,
>
> some feedback from my side.
>
> > ---
> > .../net/ethernet/marvell/octeontx2/af/mbox.h | 5 +-
> > .../ethernet/marvell/octeontx2/af/rvu_cgx.c | 60 +++++++++++++++----
> > 2 files changed, 53 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > index eb2a20b5a0d0..105d2e8f25df 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
> > @@ -638,7 +638,10 @@ struct cgx_lmac_fwdata_s {
> > /* Only applicable if SFP/QSFP slot is present */
> > struct sfp_eeprom_s sfp_eeprom;
> > struct phy_s phy;
> > -#define LMAC_FWDATA_RESERVED_MEM 1021
> > + u32 lmac_type;
> > + u32 portm_idx;
> > + u64 mgmt_port:1;
> > +#define LMAC_FWDATA_RESERVED_MEM 1019
> > u64 reserved[LMAC_FWDATA_RESERVED_MEM];
> > };
> >
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > index 72e060cf6b61..446344801576 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
> > @@ -118,15 +118,40 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
> > pfvf->nix_blkaddr = BLKADDR_NIX1;
> > }
> >
> > -static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > +static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
> > +{
> > + struct cgx_lmac_fwdata_s *fwdata;
> > +
> > + fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
> > + if (fwdata->mgmt_port)
> > + return true;
> > +
> > + return false;
>
> nit: I think this could be more succinctly expressed as:
>
> return !!fwdata->mgmt_port;

ACK, will fix in V2.

> > +}
> > +
> > +static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, int pf, int cgx, int lmac)
> > {
> > struct npc_pkind *pkind = &rvu->hw->pkind;
> > + int numvfs, hwvfs;
> > + int free_pkind;
> > +
> > + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> > + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
>
> This isn't strictly related to this patch, but here it seems implied that pf is not
> negative and <= 63, as
> rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] is only 64 bits wide.
>
> So firstly I wonder if pf should be unsigned
>
> > + free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> > + pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
>
> Here pf is masked off so it is not more than 63.
> But that seems to conflict with the assumption above that it is <= 63.
>
> If there is a concern about it being larger, it should be capped in the for loop
> that calls __rvu_map_cgx_lmac_pf() ?
>
The maximum PF value ranges from 0 to 63; we will address the proposed change in V2.
> > + rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> > + rvu->cgx_mapped_pfs++;
> > + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> > + rvu->cgx_mapped_vfs += numvfs;
> > +}
> > +
> > +static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > +{
> > int cgx_cnt_max = rvu->cgx_cnt_max;
> > int pf = PF_CGXMAP_BASE;
> > unsigned long lmac_bmap;
> > - int size, free_pkind;
> > int cgx, lmac, iter;
> > - int numvfs, hwvfs;
> > + int size;
> >
> > if (!cgx_cnt_max)
> > return 0;
> > @@ -155,6 +180,24 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > return -ENOMEM;
> >
> > rvu->cgx_mapped_pfs = 0;
> > +
> > + /* Map mgmt port always to first PF */
> > + for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> > + if (!rvu_cgx_pdata(cgx, rvu))
> > + continue;
> > + lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> > +		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> > + lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
> > + if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
> > + __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> > + pf++;
> > + goto non_mgmtport_mapping;
> > + }
> > + }
> > + }
> > +
> > +non_mgmtport_mapping:
> > +
> > for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> > if (!rvu_cgx_pdata(cgx, rvu))
> > continue;
> > @@ -162,14 +205,9 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> > 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> > lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
> > iter);
> > -			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> > -			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> > -			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
> > -			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
> > - rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
> > - rvu->cgx_mapped_pfs++;
> > - rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
> > - rvu->cgx_mapped_vfs += numvfs;
> > + if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> > + continue;
> > + __rvu_map_cgx_lmac_pf(rvu, pf, cgx, lmac);
> > pf++;
> > }
> > }
>
> There seems to be a fair amount of code duplication above.
> If we can assume that there is always a management port, then perhaps the
> following is simpler (compile tested only!).
>
> And if not, I'd suggest moving the outermost for loop and everything within it
> into a helper with a parameter such that it can handle the (first?) management
> port on one invocation, and non management ports on the next invocation.
>

Management ports might not be available on all devices, so we will go with option 2 (the helper with a parameter).

Thanks,
Hariprasad k

> static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> {
> struct npc_pkind *pkind = &rvu->hw->pkind;
> int cgx_cnt_max = rvu->cgx_cnt_max;
> - int pf = PF_CGXMAP_BASE;
> + int next_pf = PF_CGXMAP_BASE + 1;
> unsigned long lmac_bmap;
> int size, free_pkind;
> int cgx, lmac, iter;
> @@ -158,10 +167,20 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
> for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
> if (!rvu_cgx_pdata(cgx, rvu))
> continue;
> +
> lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
> + int pf;
> +
> lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
> iter);
> +
> + /* Always use first PF for management port */
> + if (rvu_cgx_is_mgmt_port(rvu, cgx, lmac))
> + pf = PF_CGXMAP_BASE;
> + else
> + pf = next_pf++;
> +
> 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
> 			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
> free_pkind = rvu_alloc_rsrc(&pkind->rsrc);