This patch series adds outbound inline IPsec support on the Marvell
CN10K series of platforms. One crypto hardware logical function
(CPT LF) per netdev is required for the inline IPsec outbound
functionality. Software prepares and submits a crypto hardware
(CPT) instruction for outbound inline IPsec crypto-mode offload.
The CPT instruction carries the details needed for encryption and
authentication; the crypto hardware encrypts, authenticates and hands
the resulting ESP packet to the network hardware (NIX) for transmission.
The first patch makes DMA memory writable for in-place encryption,
the second patch moves code to a common file, and the third patch
disables backpressure between the crypto (CPT) and network (NIX)
hardware. Patch four onwards enables inline outbound IPsec; a
simplified sketch of the per-packet transmit decision follows below.
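In rough terms (a simplified sketch derived from the transmit patch later
in this series, not the literal driver code), the per-packet decision in
otx2_sq_append_skb() looks like:

	if (unlikely(xfrm_offload(skb)))
		/* build a CPT_INST_S carrying the SA pointer and packet
		 * offsets, then LMTST-flush it to the CPT LF
		 */
		return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, offset);

	/* otherwise the regular NIX SQE flush path is used */
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);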
v2->v3:
- Fix smatch and sparse errors (Comment from Simon Horman)
- Fix build error with W=1 (Comment from Simon Horman)
https://patchwork.kernel.org/project/netdevbpf/patch/[email protected]/
- Some other minor cleanups as per review comments
https://www.spinics.net/lists/netdev/msg997197.html
v1->v2:
- Fix compilation error when building the driver as a module
- Use dma_wmb() instead of architecture specific barrier
- Fix a couple of other compilation warnings
Bharat Bhushan (8):
octeontx2-pf: map skb data as device writeable
octeontx2-pf: Move skb fragment map/unmap to common code
octeontx2-af: Disable backpressure between CPT and NIX
cn10k-ipsec: Initialize crypto hardware for outb inline ipsec
cn10k-ipsec: Add SA add/delete support for outb inline ipsec
cn10k-ipsec: Process inline ipsec transmit offload
cn10k-ipsec: Allow inline ipsec offload for skb with SA
cn10k-ipsec: Enable outbound inline ipsec offload
.../net/ethernet/marvell/octeontx2/af/mbox.h | 4 +
.../ethernet/marvell/octeontx2/af/rvu_nix.c | 68 +-
.../ethernet/marvell/octeontx2/nic/Makefile | 1 +
.../marvell/octeontx2/nic/cn10k_ipsec.c | 1084 +++++++++++++++++
.../marvell/octeontx2/nic/cn10k_ipsec.h | 258 ++++
.../marvell/octeontx2/nic/otx2_common.c | 99 +-
.../marvell/octeontx2/nic/otx2_common.h | 25 +
.../marvell/octeontx2/nic/otx2_dcbnl.c | 3 +
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 19 +-
.../marvell/octeontx2/nic/otx2_txrx.c | 65 +-
.../marvell/octeontx2/nic/otx2_txrx.h | 3 +
.../ethernet/marvell/octeontx2/nic/otx2_vf.c | 10 +-
12 files changed, 1585 insertions(+), 54 deletions(-)
create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
--
2.34.1
Move the skb fragment map/unmap functions to a common file
so that they can be re-used by the inline IPsec transmit path.
Signed-off-by: Bharat Bhushan <[email protected]>
---
.../marvell/octeontx2/nic/otx2_common.c | 32 +++++++++++++++++++
.../marvell/octeontx2/nic/otx2_common.h | 3 ++
.../marvell/octeontx2/nic/otx2_txrx.c | 32 -------------------
3 files changed, 35 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..7ec99c8d610c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1911,3 +1911,35 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, DMA_BIDIRECTIONAL);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ int seg;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_BIDIRECTIONAL);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 24fbbef265a6..99b480e21e1c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1128,4 +1128,7 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 847052b57d9b..f368eac28fdd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -80,38 +80,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_BIDIRECTIONAL);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_BIDIRECTIONAL);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
--
2.34.1
Allow hardware offload to be used for outbound inline IPsec
when a security association (SA) is set for a given skb.
Signed-off-by: Bharat Bhushan <[email protected]>
---
.../ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 15 +++++++++++++++
1 file changed, 15 insertions(+)
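For reference, a stand-alone illustration of the IPv4 check added below
(the 'ihl' value here is made up; the real code reads it from the skb's
IP header):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ihl = 6;	/* hypothetical header length in 32-bit words */

		/* ihl > 5 means the IPv4 header carries options, which the
		 * outbound inline offload does not handle yet.
		 */
		printf("offload_ok = %d\n", ihl <= 5);
		return 0;
	}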
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 1974fda2e0d3..81f1258cd996 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -784,9 +784,24 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
mutex_unlock(&pf->ipsec.lock);
}
+static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+		/* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
+
static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = cn10k_ipsec_add_state,
.xdo_dev_state_delete = cn10k_ipsec_del_state,
+ .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
};
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
--
2.34.1
This patch adds the xfrm ops to add and delete a Security Association
(SA). Hardware maintains the SA context in memory allocated by
software; each SA context is 128-byte aligned and its size is a
multiple of 128 bytes. Add support for transport and tunnel IPsec
modes, the ESP protocol, AEAD aes-gcm-icv16, and key sizes of
128/192/256 bits with a 32-bit salt.
Signed-off-by: Bharat Bhushan <[email protected]>
---
v2->v3:
- Removed memset to zero wherever possible
(comment from Kalesh Anakkur Purayil)
- Corrected error handling when setting SA for inbound
(comment from Kalesh Anakkur Purayil)
- Move "netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;" to this patch
This fix build error with W=1
.../marvell/octeontx2/nic/cn10k_ipsec.c | 452 ++++++++++++++++++
.../marvell/octeontx2/nic/cn10k_ipsec.h | 114 +++++
2 files changed, 566 insertions(+)
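For reference, a small user-space sketch of the 128-byte round-up
described above (the raw size used here is a made-up example, it does
not stand for the real sizeof(struct cn10k_tx_sa_s)):

	#include <stdio.h>

	int main(void)
	{
		unsigned int raw = 296;		/* hypothetical SA structure size */
		unsigned int sa_size = raw % 128 ? (raw / 128 + 1) * 128 : raw;
		unsigned int ctx_size = (sa_size / 128) & 0xF;	/* 128-byte units */

		printf("raw=%u sa_size=%u ctx_size=%u\n", raw, sa_size, ctx_size);
		return 0;	/* prints: raw=296 sa_size=384 ctx_size=3 */
	}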
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index b221b67815ee..136aebe2a007 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -336,6 +336,12 @@ static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
/* Set inline ipsec disabled for this device */
pf->flags &= ~OTX2_FLAG_INLINE_IPSEC_ENABLED;
+ if (!bitmap_empty(pf->ipsec.sa_bitmap, CN10K_IPSEC_OUTB_MAX_SA)) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ mutex_unlock(&pf->ipsec.lock);
+ return -EBUSY;
+ }
+
/* Disable CPTLF Instruction Queue (IQ) */
cn10k_outb_cptlf_iq_disable(pf);
@@ -356,6 +362,430 @@ static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
return err;
}
+static int cn10k_outb_get_sa_index(struct otx2_nic *pf,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ u32 sa_size = pf->ipsec.sa_size;
+ int sa_index;
+
+ if (!sa_entry || ((void *)sa_entry < pf->ipsec.outb_sa->base))
+ return -EINVAL;
+
+ sa_index = ((void *)sa_entry - pf->ipsec.outb_sa->base) / sa_size;
+ if (sa_index >= CN10K_IPSEC_OUTB_MAX_SA)
+ return -EINVAL;
+
+ return sa_index;
+}
+
+static dma_addr_t cn10k_outb_get_sa_iova(struct otx2_nic *pf,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int sa_index = cn10k_outb_get_sa_index(pf, sa_entry);
+
+ if (sa_index < 0)
+ return 0;
+ return pf->ipsec.outb_sa->iova + sa_index * pf->ipsec.sa_size;
+}
+
+static struct cn10k_tx_sa_s *cn10k_outb_alloc_sa(struct otx2_nic *pf)
+{
+ u32 sa_size = pf->ipsec.sa_size;
+ struct cn10k_tx_sa_s *sa_entry;
+ u32 sa_index;
+
+ sa_index = find_first_zero_bit(pf->ipsec.sa_bitmap,
+ CN10K_IPSEC_OUTB_MAX_SA);
+ if (sa_index == CN10K_IPSEC_OUTB_MAX_SA)
+ return NULL;
+
+ set_bit(sa_index, pf->ipsec.sa_bitmap);
+
+ sa_entry = pf->ipsec.outb_sa->base + sa_index * sa_size;
+ return sa_entry;
+}
+
+static void cn10k_outb_free_sa(struct otx2_nic *pf,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int sa_index = cn10k_outb_get_sa_index(pf, sa_entry);
+
+ if (sa_index < 0)
+ return;
+ clear_bit(sa_index, pf->ipsec.sa_bitmap);
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10000);
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while (res->compcode == CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf,
+ struct cn10k_tx_sa_s *sa_cptr)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = cn10k_outb_get_sa_iova(pf, sa_cptr);
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_cptr;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+	ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto out;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+out:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+	/* Offset of the hardware context within the SA, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+	/* Context push size is rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+	/* Context size, in 128-byte units (rounded up) */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+	/* Outbound, ESP TRANSPORT/TUNNEL mode, AES-GCM with the configured
+	 * AES key length.
+	 */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ sa_entry->spi = (__force u32)cpu_to_be32((__force u32)x->id.spi);
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ netdev_err(netdev, "Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ netdev_err(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ netdev_err(netdev, "Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ netdev_err(netdev, "Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ netdev_err(netdev, "Only IPv4/v6 xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+		netdev_err(netdev, "Only tunnel/transport xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ netdev_err(netdev, "Only ESP xfrm state may be offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ netdev_err(netdev, "Encapsulated xfrm state may not be offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ netdev_err(netdev, "Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ netdev_err(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ netdev_err(netdev, "Cannot offload xfrm states with AEAD key length other than 128/192/256bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ netdev_err(netdev, "Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ netdev_err(netdev, "Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ netdev_err(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+
+ netdev_err(netdev, "xfrm inbound offload not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct cpt_ctx_info_s *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x);
+ if (err)
+ return err;
+
+ pf = netdev_priv(netdev);
+ if (!mutex_trylock(&pf->ipsec.lock)) {
+ netdev_err(netdev, "IPSEC device is busy\n");
+ return -EBUSY;
+ }
+
+ if (!(pf->flags & OTX2_FLAG_INLINE_IPSEC_ENABLED)) {
+ netdev_err(netdev, "IPSEC not enabled/supported on device\n");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sa_entry = cn10k_outb_alloc_sa(pf);
+ if (!sa_entry) {
+ netdev_err(netdev, "SA maximum limit %x reached\n",
+ CN10K_IPSEC_OUTB_MAX_SA);
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_entry);
+ if (err) {
+ netdev_err(netdev, "Error writing outbound SA\n");
+ cn10k_outb_free_sa(pf, sa_entry);
+ goto unlock;
+ }
+
+ sa_info = kmalloc(sizeof(*sa_info), GFP_KERNEL);
+ sa_info->sa_entry = sa_entry;
+ sa_info->sa_iova = cn10k_outb_get_sa_iova(pf, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_info;
+
+unlock:
+ mutex_unlock(&pf->ipsec.lock);
+ return err;
+}
+
+static int cn10k_ipsec_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x);
+ else
+ return cn10k_ipsec_outb_add_state(x);
+}
+
+static void cn10k_ipsec_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct cpt_ctx_info_s *sa_info;
+ struct otx2_nic *pf;
+ int sa_index;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(netdev);
+ if (!mutex_trylock(&pf->ipsec.lock)) {
+ netdev_err(netdev, "IPSEC device is busy\n");
+ return;
+ }
+
+ sa_info = (struct cpt_ctx_info_s *)x->xso.offload_handle;
+ sa_entry = sa_info->sa_entry;
+ sa_index = cn10k_outb_get_sa_index(pf, sa_entry);
+ if (sa_index < 0 || !test_bit(sa_index, pf->ipsec.sa_bitmap)) {
+ netdev_err(netdev, "Invalid SA (sa-index %d)\n", sa_index);
+ goto unlock;
+ }
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ if (cn10k_outb_write_sa(pf, sa_entry)) {
+ netdev_err(netdev, "Failed to delete sa index %d\n", sa_index);
+ goto unlock;
+ }
+ x->xso.offload_handle = 0;
+ clear_bit(sa_index, pf->ipsec.sa_bitmap);
+ kfree(sa_info);
+unlock:
+ mutex_unlock(&pf->ipsec.lock);
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+};
+
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -374,10 +804,30 @@ int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
int cn10k_ipsec_init(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+ int err;
if (!is_dev_support_inline_ipsec(pf->pdev))
return 0;
+	/* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ?
+ (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) *
+ OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s);
+ err = qmem_alloc(pf->dev, &pf->ipsec.outb_sa, CN10K_IPSEC_OUTB_MAX_SA,
+ sa_size);
+ if (err)
+ return err;
+
+ pf->ipsec.sa_size = sa_size;
+ memset(pf->ipsec.outb_sa->base, 0, sa_size * CN10K_IPSEC_OUTB_MAX_SA);
+ bitmap_zero(pf->ipsec.sa_bitmap, CN10K_IPSEC_OUTB_MAX_SA);
+
+ /* Set xfrm device ops
+ * NETIF_F_HW_ESP is not set as ipsec setup is not yet complete.
+ */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+
mutex_init(&pf->ipsec.lock);
return 0;
}
@@ -388,6 +838,8 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
if (!is_dev_support_inline_ipsec(pf->pdev))
return;
+ bitmap_zero(pf->ipsec.sa_bitmap, CN10K_IPSEC_OUTB_MAX_SA);
+ qmem_free(pf->dev, pf->ipsec.outb_sa);
cn10k_outb_cpt_clean(pf);
}
EXPORT_SYMBOL(cn10k_ipsec_clean);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index b322e19d5e23..fbfb6da8dd99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -50,6 +50,22 @@
#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+/* Outbound SA */
+#define CN10K_IPSEC_OUTB_MAX_SA 64
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06
+};
+
struct cn10k_cpt_inst_queue {
u8 *vaddr;
u8 *real_vaddr;
@@ -64,6 +80,101 @@ struct cn10k_ipsec {
/* Lock to protect SA management */
struct mutex lock;
struct cn10k_cpt_inst_queue iq;
+ /* SA info */
+ struct qmem *outb_sa;
+ u32 sa_size;
+ DECLARE_BITMAP(sa_bitmap, CN10K_IPSEC_OUTB_MAX_SA);
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_31 : 21;
+ u64 spi : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+struct cpt_ctx_info_s {
+ struct cn10k_tx_sa_s *sa_entry;
+ dma_addr_t sa_iova;
};
/* CPT LF_INPROG Register */
@@ -81,6 +192,9 @@ struct cn10k_ipsec {
/* CPT LF_Q_SIZE Register */
#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
#ifdef CONFIG_XFRM_OFFLOAD
int cn10k_ipsec_init(struct net_device *netdev);
void cn10k_ipsec_clean(struct otx2_nic *pf);
--
2.34.1
Prepare and submit a crypto hardware (CPT) instruction for
outbound inline IPsec crypto-mode offload. The CPT instruction
carries the authentication offset, IV offset and encapsulation
offset within the input packet, along with a pointer to the SA
context which holds the algorithm, keys, salt etc. The crypto
hardware encrypts, authenticates and hands the resulting ESP
packet to the networking hardware.
Signed-off-by: Bharat Bhushan <[email protected]>
---
.../marvell/octeontx2/nic/cn10k_ipsec.c | 224 ++++++++++++++++++
.../marvell/octeontx2/nic/cn10k_ipsec.h | 40 ++++
.../marvell/octeontx2/nic/otx2_common.c | 23 ++
.../marvell/octeontx2/nic/otx2_common.h | 3 +
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 +
.../marvell/octeontx2/nic/otx2_txrx.c | 33 ++-
.../marvell/octeontx2/nic/otx2_txrx.h | 3 +
7 files changed, 325 insertions(+), 3 deletions(-)
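For reference, a stand-alone sketch of the offset calculation used
below for an IPv4 ESP packet in crypto mode (header sizes are
hard-coded here; the driver uses sizeof(struct iphdr),
sizeof(struct ip_esp_hdr) and GCM_RFC4106_IV_SIZE):

	#include <stdio.h>

	int main(void)
	{
		unsigned int auth_offset  = 20;			/* IPv4 header, no options */
		unsigned int iv_offset    = auth_offset + 8;	/* after the 8-byte ESP header */
		unsigned int encap_offset = iv_offset + 8;	/* after the 8-byte rfc4106 GCM IV */

		printf("auth=%u iv=%u encap=%u\n",
		       auth_offset, iv_offset, encap_offset);
		return 0;
	}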
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 136aebe2a007..1974fda2e0d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -7,8 +7,11 @@
#include <net/xfrm.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
#include "otx2_common.h"
+#include "otx2_struct.h"
#include "cn10k_ipsec.h"
static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
@@ -843,3 +846,224 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
cn10k_outb_cpt_clean(pf);
}
EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * SG of NIX and CPT are same in size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+			/* Next subdc always starts at a 16byte boundary.
+			 * So whether sg->segs is 2 or 3, offset += 16bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE |
+ CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_ctx_info_s *sa_info;
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for Inline IPSEC enabled */
+ if (!(pf->flags & OTX2_FLAG_INLINE_IPSEC_ENABLED)) {
+		netdev_err(pf->netdev, "IPsec not enabled, dropping packet\n");
+ goto drop;
+ }
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len)) {
+ netdev_err(pf->netdev, "%s: no xfrm state len = %d\n",
+ __func__, sp->len);
+ goto drop;
+ }
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x)) {
+ netdev_err(pf->netdev, "no xfrm_input_state()\n");
+ goto drop;
+ }
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+		netdev_err(pf->netdev, "unsupported offload mode %d\n",
+ x->props.mode);
+ goto drop;
+ }
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0) {
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct cpt_ctx_info_s *)x->xso.offload_handle;
+ if (!sa_info || !sa_info->sa_iova) {
+		netdev_err(pf->netdev, "Invalid SA context\n");
+ goto drop;
+ }
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ inst.dlen = dlen + ETH_HLEN;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = num_segs / MAX_SEGS_PER_SG;
+ gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size;
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index fbfb6da8dd99..c4e0d77f6fef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -56,6 +56,7 @@
/* IPSEC Instruction opcodes */
#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x28UL
enum cn10k_cpt_comp_e {
CN10K_CPT_COMP_E_NOTDONE = 0x00,
@@ -133,6 +134,16 @@ struct cn10k_tx_sa_s {
u64 hw_ctx[6]; /* W31 - W36 */
};
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
/* CPT Instruction Structure */
struct cpt_inst_s {
u64 nixtxl : 3; /* W0 */
@@ -177,6 +188,15 @@ struct cpt_ctx_info_s {
dma_addr_t sa_iova;
};
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
+
/* CPT LF_INPROG Register */
#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
@@ -199,6 +219,11 @@ struct cpt_ctx_info_s {
int cn10k_ipsec_init(struct net_device *netdev);
void cn10k_ipsec_clean(struct otx2_nic *pf);
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
#else
static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
{
@@ -214,5 +239,20 @@ int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
{
return 0;
}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
#endif
#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 0c2c4fb440f1..87304fa6fc9f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -941,6 +941,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
+ * SG of NIX and CPT are same in size. Allocate memory for CPT SG
+ * same as NIX SQE for base address alignment.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 859bbc78e653..9471ee572625 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -54,6 +54,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a7e17d870420..bc34074454b3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1444,6 +1444,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f368eac28fdd..b0e1524ea4bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -32,6 +33,16 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (unlikely(xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
+
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
{
@@ -580,7 +591,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1116,6 +1126,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
+ int ipsec = 0;
+ bool ret;
+
+ if (unlikely(xfrm_offload(skb)))
+ ipsec = 1;
/* Check if there is enough room between producer
* and consumer index.
@@ -1132,6 +1147,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1148,6 +1164,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1160,7 +1179,12 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (unlikely(ipsec))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1169,11 +1193,14 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (unlikely(ipsec))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3f1d2655ff77..248fd78ef0e9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -101,6 +101,9 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
--
2.34.1
One crypto hardware logical function (CPT LF) per netdev is
required for inline IPsec outbound functionality. Attach,
allocate and initialize one crypto hardware function when
enabling inline IPsec crypto offload. The crypto hardware
function is detached and freed when inline IPsec is
disabled.
Signed-off-by: Bharat Bhushan <[email protected]>
---
v1->v2:
- Fix compilation error when building the driver as a module
- Fix a couple of compilation warnings
.../ethernet/marvell/octeontx2/nic/Makefile | 1 +
.../marvell/octeontx2/nic/cn10k_ipsec.c | 393 ++++++++++++++++++
.../marvell/octeontx2/nic/cn10k_ipsec.h | 104 +++++
.../marvell/octeontx2/nic/otx2_common.h | 18 +
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 14 +-
.../ethernet/marvell/octeontx2/nic/otx2_vf.c | 10 +-
6 files changed, 538 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
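For reference, a stand-alone recomputation of the instruction queue
sizing encoded by the macros added in cn10k_ipsec.h (constants copied
from that header):

	#include <stdio.h>

	int main(void)
	{
		unsigned int inst_size = 64;	/* CN10K_CPT_INST_SIZE */
		unsigned int qlen = 8200;	/* CN10K_CPT_INST_QLEN */
		unsigned int extra = 320;	/* extra free entries CPT needs */

		unsigned int size_div40  = qlen / 40;		/* 205 */
		unsigned int extra_div40 = extra / 40;		/* 8 */
		unsigned int iq_bytes    = size_div40 * 40 * inst_size + extra * inst_size;
		unsigned int grp_bytes   = (size_div40 + extra_div40) * 16;

		printf("SIZE_DIV40=%u IQ=%u bytes, group queue=%u bytes\n",
		       size_div40, iq_bytes, grp_bytes);
		return 0;
	}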
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..9695f967d416 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -14,5 +14,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..b221b67815ee
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+
+#include "otx2_common.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return 0;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+ return 0;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+error:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+error:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+	/* Enable instruction queue enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for instruction queue to become empty.
+ * CPT_LF_INPROG.INFLIGHT count is zero
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+	/* Wait until the queue's NQ and DQ pointers match (queue drained) */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+		nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+	if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret = 0;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound inline ipsec */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ mutex_lock(&pf->ipsec.lock);
+
+ /* Attach a CPT LF for outbound inline ipsec */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ goto unlock;
+
+ /* Allocate a CPT LF for outbound inline ipsec */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound inline ipsec */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set inline ipsec enabled for this device */
+ pf->flags |= OTX2_FLAG_INLINE_IPSEC_ENABLED;
+
+ goto unlock;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+unlock:
+ mutex_unlock(&pf->ipsec.lock);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int err;
+
+ mutex_lock(&pf->ipsec.lock);
+
+ /* Set inline ipsec disabled for this device */
+ pf->flags &= ~OTX2_FLAG_INLINE_IPSEC_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ err = cn10k_outb_cptlf_detach(pf);
+ if (err)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ mutex_unlock(&pf->ipsec.lock);
+ return err;
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* Inline ipsec supported on cn10k */
+ if (!is_dev_support_inline_ipsec(pf->pdev))
+ return -ENODEV;
+
+ if (!enable)
+ return cn10k_outb_cpt_clean(pf);
+
+ /* Initialize CPT for outbound inline ipsec */
+ return cn10k_outb_cpt_init(netdev);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if (!is_dev_support_inline_ipsec(pf->pdev))
+ return 0;
+
+ mutex_init(&pf->ipsec.lock);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_inline_ipsec(pf->pdev))
+ return;
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..b322e19d5e23
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for inline ipsec */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ /* Lock to protect SA management */
+ struct mutex lock;
+ struct cn10k_cpt_inst_queue iq;
+};
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+#endif
+#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 42a759a33c11..859bbc78e653 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -29,6 +29,7 @@
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -39,6 +40,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
@@ -467,6 +469,7 @@ struct otx2_nic {
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
+#define OTX2_FLAG_INLINE_IPSEC_ENABLED BIT_ULL(18)
u64 flags;
u64 *cq_op_addr;
@@ -534,6 +537,9 @@ struct otx2_nic {
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -578,6 +584,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -627,6 +642,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index cbd5050f58e8..a7e17d870420 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,7 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -2201,6 +2202,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -3065,10 +3070,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3109,6 +3118,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3286,6 +3297,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 99fcc5661674..6fc70c3cafb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -682,10 +683,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(vf);
@@ -719,6 +724,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -771,6 +778,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
--
2.34.1
Hardware is initialized and the netdev transmit flow is
hooked up for outbound inline ipsec, so finally enable
the ipsec offload.
Signed-off-by: Bharat Bhushan <[email protected]>
---
v2->v3:
- Moved "netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;" to previous patch
  This fixes a build error with W=1
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 81f1258cd996..c9a1c494be6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -841,10 +841,10 @@ int cn10k_ipsec_init(struct net_device *netdev)
memset(pf->ipsec.outb_sa->base, 0, sa_size * CN10K_IPSEC_OUTB_MAX_SA);
bitmap_zero(pf->ipsec.sa_bitmap, CN10K_IPSEC_OUTB_MAX_SA);
- /* Set xfrm device ops
- * NETIF_F_HW_ESP is not set as ipsec setup is not yet complete.
- */
+ /* Set xfrm device ops */
netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
mutex_init(&pf->ipsec.lock);
return 0;
--
2.34.1
On Tue, May 28, 2024 at 7:27 PM Bharat Bhushan <[email protected]> wrote:
>
> One crypto hardware logical function (cpt-lf) per netdev is
> required for inline ipsec outbound functionality. Allocate,
> attach and initialize one crypto hardware function when
> enabling inline ipsec crypto offload. Crypto hardware
> function will be detached and freed on disabling inline
> ipsec.
>
> Signed-off-by: Bharat Bhushan <[email protected]>
> ---
> v1->v2:
> - Fix compilation error to build driver a module
> - Fix couple of compilation warnings
>
> .../ethernet/marvell/octeontx2/nic/Makefile | 1 +
> .../marvell/octeontx2/nic/cn10k_ipsec.c | 393 ++++++++++++++++++
> .../marvell/octeontx2/nic/cn10k_ipsec.h | 104 +++++
> .../marvell/octeontx2/nic/otx2_common.h | 18 +
> .../ethernet/marvell/octeontx2/nic/otx2_pf.c | 14 +-
> .../ethernet/marvell/octeontx2/nic/otx2_vf.c | 10 +-
> 6 files changed, 538 insertions(+), 2 deletions(-)
> create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> index 5664f768cb0c..9695f967d416 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> @@ -14,5 +14,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
> rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
> rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
> rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
> +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
>
> ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> new file mode 100644
> index 000000000000..b221b67815ee
> --- /dev/null
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> @@ -0,0 +1,393 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Marvell IPSEC offload driver
> + *
> + * Copyright (C) 2024 Marvell.
> + */
> +
> +#include <net/xfrm.h>
> +#include <linux/netdevice.h>
> +#include <linux/bitfield.h>
> +
> +#include "otx2_common.h"
> +#include "cn10k_ipsec.h"
> +
> +static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
> +{
> + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
> +}
> +
> +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
> +{
> + struct rsrc_attach *attach;
> + int err;
> +
> + mutex_lock(&pf->mbox.lock);
> + /* Get memory to put this msg */
> + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
> + if (!attach) {
> + mutex_unlock(&pf->mbox.lock);
> + return -ENOMEM;
> + }
[Kalesh] To make it consistent with the other functions, you can add an
'unlock' label and rewrite it as:
if (!attach) {
err = -ENOMEM;
goto unlock;
}
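For example, the whole helper could funnel through a single 'unlock' label
(untested sketch, using only the mbox helpers already present in this patch):

static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
{
        struct rsrc_attach *attach;
        int err;

        mutex_lock(&pf->mbox.lock);
        /* Get memory to put this msg */
        attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
        if (!attach) {
                err = -ENOMEM;
                goto unlock;
        }

        attach->cptlfs = true;
        attach->modify = true;

        /* Send attach request to AF */
        err = otx2_sync_mbox_msg(&pf->mbox);
unlock:
        mutex_unlock(&pf->mbox.lock);
        return err;
}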
> +
> + attach->cptlfs = true;
> + attach->modify = true;
> +
> + /* Send attach request to AF */
> + err = otx2_sync_mbox_msg(&pf->mbox);
> + if (err) {
> + mutex_unlock(&pf->mbox.lock);
> + return err;
> + }
> +
> + mutex_unlock(&pf->mbox.lock);
> + return 0;
> +}
> +
> +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
> +{
> + struct rsrc_detach *detach;
> +
> + mutex_lock(&pf->mbox.lock);
> + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
> + if (!detach) {
> + mutex_unlock(&pf->mbox.lock);
> + return -ENOMEM;
> + }
[Kalesh] Same comment as above
> +
> + detach->partial = true;
> + detach->cptlfs = true;
> +
> + /* Send detach request to AF */
> + otx2_sync_mbox_msg(&pf->mbox);
> + mutex_unlock(&pf->mbox.lock);
> + return 0;
> +}
> +
> +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
> +{
> + struct cpt_lf_alloc_req_msg *req;
> + int ret = 0;
[Kalesh] No need to initialize ret here. You are a little inconsistent
in naming the variable, ret vs err :)
> +
> + mutex_lock(&pf->mbox.lock);
> + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
> + if (!req) {
> + ret = -ENOMEM;
> + goto error;
> + }
> +
> + /* PF function */
> + req->nix_pf_func = pf->pcifunc;
> + /* Enable SE-IE Engine Group */
> + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
> +
> + ret = otx2_sync_mbox_msg(&pf->mbox);
> +
> +error:
[Kalesh]: It would be better to name the label 'unlock'.
> + mutex_unlock(&pf->mbox.lock);
> + return ret;
> +}
> +
> +static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
> +{
> + mutex_lock(&pf->mbox.lock);
> + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
> + otx2_sync_mbox_msg(&pf->mbox);
> + mutex_unlock(&pf->mbox.lock);
> +}
> +
> +static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
> +{
> + struct cpt_inline_ipsec_cfg_msg *req;
> + int ret = 0;
[Kalesh] No need to initialize the variable here.
> +
> + mutex_lock(&pf->mbox.lock);
> + req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
> + if (!req) {
> + ret = -ENOMEM;
> + goto error;
> + }
> +
> + req->dir = CPT_INLINE_OUTBOUND;
> + req->enable = 1;
> + req->nix_pf_func = pf->pcifunc;
> + ret = otx2_sync_mbox_msg(&pf->mbox);
> +error:
> + mutex_unlock(&pf->mbox.lock);
> + return ret;
> +}
> +
> +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
> +{
> + u64 reg_val;
> +
> + /* Set Execution Enable of instruction queue */
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> + reg_val |= BIT_ULL(16);
> + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
> +
> + /* Set iqueue's enqueuing */
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
> + reg_val |= BIT_ULL(0);
> + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
> +}
> +
> +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
> +{
> + u32 inflight, grb_cnt, gwb_cnt;
> + u32 nq_ptr, dq_ptr;
> + int timeout = 20;
> + u64 reg_val;
> + int cnt;
> +
> + /* Disable instructions enqueuing */
> + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
> +
> + /* Wait for instruction queue to become empty.
> + * CPT_LF_INPROG.INFLIGHT count is zero
> + */
> + do {
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
> + if (!inflight)
> + break;
> +
> + usleep_range(10000, 20000);
> + if (timeout-- < 0) {
> + netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
> + break;
> + }
> + } while (1);
> +
> + /* Disable executions in the LF's queue,
> + * the queue should be empty at this point
> + */
> + reg_val &= ~BIT_ULL(16);
> + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
> +
> + /* Wait for instruction queue to become empty */
> + cnt = 0;
> + do {
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> + if (reg_val & BIT_ULL(31))
> + cnt = 0;
> + else
> + cnt++;
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
> + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
> + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
> + } while ((cnt < 10) && (nq_ptr != dq_ptr));
> +
> + cnt = 0;
> + do {
> + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
> + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
> + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
> + if (inflight == 0 && gwb_cnt < 40 &&
> + (grb_cnt == 0 || grb_cnt == 40))
> + cnt++;
> + else
> + cnt = 0;
> + } while (cnt < 10);
> +}
> +
> +/* Allocate memory for CPT outbound Instruction queue.
> + * Instruction queue memory format is:
> + * -----------------------------
> + * | Instruction Group memory |
> + * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
> + * | x 16 Bytes) |
> + * | |
> + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
> + * | Flow Control (128 Bytes) |
> + * | |
> + * -----------------------------
> + * | Instruction Memory |
> + * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
> + * | × 40 × 64 bytes) |
> + * | |
> + * -----------------------------
> + */
> +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
> +{
> + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
> +
> + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
> + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
> +
> + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
> + &iq->real_dma_addr, GFP_KERNEL);
> + if (!iq->real_vaddr)
> + return -ENOMEM;
> +
> + /* iq->vaddr/dma_addr points to Flow Control location */
> + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
> + iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
> +
> + /* Align pointers */
> + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
> + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
> + return 0;
> +}
> +
> +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
> +{
> + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
> +
> + if (!iq->real_vaddr)
> + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
> + iq->real_dma_addr);
> +
> + iq->real_vaddr = NULL;
> + iq->vaddr = NULL;
> +}
> +
> +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
> +{
> + u64 reg_val;
> + int ret;
> +
> + /* Allocate Memory for CPT IQ */
> + ret = cn10k_outb_cptlf_iq_alloc(pf);
> + if (ret)
> + return ret;
> +
> + /* Disable IQ */
> + cn10k_outb_cptlf_iq_disable(pf);
> +
> + /* Set IQ base address */
> + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
> +
> + /* Set IQ size */
> + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
> + CN10K_CPT_EXTRA_SIZE_DIV40);
> + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
> +
> + return 0;
> +}
> +
> +static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
> +{
> + int ret = 0;
[Kalesh] There is no need to initialize this variable.
> +
> + /* Initialize CPTLF Instruction Queue (IQ) */
> + ret = cn10k_outb_cptlf_iq_init(pf);
> + if (ret)
> + return ret;
> +
> + /* Configure CPTLF for outbound inline ipsec */
> + ret = cn10k_outb_cptlf_config(pf);
> + if (ret)
> + goto iq_clean;
> +
> + /* Enable CPTLF IQ */
> + cn10k_outb_cptlf_iq_enable(pf);
> + return 0;
> +iq_clean:
> + cn10k_outb_cptlf_iq_free(pf);
> + return ret;
> +}
> +
> +static int cn10k_outb_cpt_init(struct net_device *netdev)
> +{
> + struct otx2_nic *pf = netdev_priv(netdev);
> + int ret;
> +
> + mutex_lock(&pf->ipsec.lock);
> +
> + /* Attach a CPT LF for outbound inline ipsec */
> + ret = cn10k_outb_cptlf_attach(pf);
> + if (ret)
> + goto unlock;
> +
> + /* Allocate a CPT LF for outbound inline ipsec */
> + ret = cn10k_outb_cptlf_alloc(pf);
> + if (ret)
> + goto detach;
> +
> + /* Initialize the CPTLF for outbound inline ipsec */
> + ret = cn10k_outb_cptlf_init(pf);
> + if (ret)
> + goto lf_free;
> +
> + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
> + CN10K_CPT_LF_NQX(0));
> +
> + /* Set inline ipsec enabled for this device */
> + pf->flags |= OTX2_FLAG_INLINE_IPSEC_ENABLED;
> +
> + goto unlock;
> +
> +lf_free:
> + cn10k_outb_cptlf_free(pf);
> +detach:
> + cn10k_outb_cptlf_detach(pf);
> +unlock:
> + mutex_unlock(&pf->ipsec.lock);
> + return ret;
> +}
> +
> +static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
> +{
> + int err;
> +
> + mutex_lock(&pf->ipsec.lock);
> +
> + /* Set inline ipsec disabled for this device */
> + pf->flags &= ~OTX2_FLAG_INLINE_IPSEC_ENABLED;
> +
> + /* Disable CPTLF Instruction Queue (IQ) */
> + cn10k_outb_cptlf_iq_disable(pf);
> +
> + /* Set IQ base address and size to 0 */
> + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
> + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
> +
> + /* Free CPTLF IQ */
> + cn10k_outb_cptlf_iq_free(pf);
> +
> + /* Free and detach CPT LF */
> + cn10k_outb_cptlf_free(pf);
> + err = cn10k_outb_cptlf_detach(pf);
> + if (err)
> + netdev_err(pf->netdev, "Failed to detach CPT LF\n");
> +
> + mutex_unlock(&pf->ipsec.lock);
> + return err;
> +}
> +
> +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
> +{
> + struct otx2_nic *pf = netdev_priv(netdev);
> +
> + /* Inline ipsec supported on cn10k */
> + if (!is_dev_support_inline_ipsec(pf->pdev))
> + return -ENODEV;
[Kalesh] NODEV vs NOTSUPP ?
> +
> + if (!enable)
> + return cn10k_outb_cpt_clean(pf);
> +
> + /* Initialize CPT for outbound inline ipsec */
> + return cn10k_outb_cpt_init(netdev);
> +}
> +
> +int cn10k_ipsec_init(struct net_device *netdev)
> +{
> + struct otx2_nic *pf = netdev_priv(netdev);
> +
> + if (!is_dev_support_inline_ipsec(pf->pdev))
> + return 0;
[Kalesh] This function always returns 0; maybe you can change it to return void.
> +
> + mutex_init(&pf->ipsec.lock);
> + return 0;
> +}
> +EXPORT_SYMBOL(cn10k_ipsec_init);
> +
> +void cn10k_ipsec_clean(struct otx2_nic *pf)
> +{
> + if (!is_dev_support_inline_ipsec(pf->pdev))
> + return;
> +
> + cn10k_outb_cpt_clean(pf);
> +}
> +EXPORT_SYMBOL(cn10k_ipsec_clean);
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> new file mode 100644
> index 000000000000..b322e19d5e23
> --- /dev/null
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> @@ -0,0 +1,104 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Marvell IPSEC offload driver
> + *
> + * Copyright (C) 2024 Marvell.
> + */
> +
> +#ifndef CN10K_IPSEC_H
> +#define CN10K_IPSEC_H
> +
> +#include <linux/types.h>
> +
> +/* CPT instruction size in bytes */
> +#define CN10K_CPT_INST_SIZE 64
> +
> +/* CPT instruction (CPT_INST_S) queue length */
> +#define CN10K_CPT_INST_QLEN 8200
> +
> +/* CPT instruction queue size passed to HW is in units of
> + * 40*CPT_INST_S messages.
> + */
> +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
> +
> +/* CPT needs 320 free entries */
> +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
> +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
> +
> +/* CPT instruction queue length in bytes */
> +#define CN10K_CPT_INST_QLEN_BYTES \
> + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
> + CN10K_CPT_INST_QLEN_EXTRA_BYTES)
> +
> +/* CPT instruction group queue length in bytes */
> +#define CN10K_CPT_INST_GRP_QLEN_BYTES \
> + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
> +
> +/* CPT FC length in bytes */
> +#define CN10K_CPT_Q_FC_LEN 128
> +
> +/* Default CPT engine group for inline ipsec */
> +#define CN10K_DEF_CPT_IPSEC_EGRP 1
> +
> +/* CN10K CPT LF registers */
> +#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
> +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
> +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
> +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
> +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
> +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
> +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
> +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
> +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
> +
> +struct cn10k_cpt_inst_queue {
> + u8 *vaddr;
> + u8 *real_vaddr;
> + dma_addr_t dma_addr;
> + dma_addr_t real_dma_addr;
> + u32 size;
> +};
> +
> +struct cn10k_ipsec {
> + /* Outbound CPT */
> + u64 io_addr;
> + /* Lock to protect SA management */
> + struct mutex lock;
> + struct cn10k_cpt_inst_queue iq;
> +};
> +
> +/* CPT LF_INPROG Register */
> +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
> +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
> +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
> +
> +/* CPT LF_Q_GRP_PTR Register */
> +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
> +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
> +
> +/* CPT LF_Q_BASE Register */
> +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
> +
> +/* CPT LF_Q_SIZE Register */
> +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
> +
> +#ifdef CONFIG_XFRM_OFFLOAD
> +int cn10k_ipsec_init(struct net_device *netdev);
> +void cn10k_ipsec_clean(struct otx2_nic *pf);
> +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
> +#else
> +static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
> +{
> + return 0;
> +}
> +
> +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
> +{
> +}
> +
> +static inline __maybe_unused
> +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
> +{
> + return 0;
> +}
> +#endif
> +#endif // CN10K_IPSEC_H
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> index 42a759a33c11..859bbc78e653 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> @@ -29,6 +29,7 @@
> #include "otx2_devlink.h"
> #include <rvu_trace.h>
> #include "qos.h"
> +#include "cn10k_ipsec.h"
>
> /* IPv4 flag more fragment bit */
> #define IPV4_FLAG_MORE 0x20
> @@ -39,6 +40,7 @@
> #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
>
> #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
> +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
> #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
>
> /* PCI BAR nos */
> @@ -467,6 +469,7 @@ struct otx2_nic {
> #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
> #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
> #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
> +#define OTX2_FLAG_INLINE_IPSEC_ENABLED BIT_ULL(18)
> u64 flags;
> u64 *cq_op_addr;
>
> @@ -534,6 +537,9 @@ struct otx2_nic {
> #if IS_ENABLED(CONFIG_MACSEC)
> struct cn10k_mcs_cfg *macsec_cfg;
> #endif
> +
> + /* Inline ipsec */
> + struct cn10k_ipsec ipsec;
> };
>
> static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
> @@ -578,6 +584,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
> return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
> }
>
> +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
> +{
> + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
> + (pdev->revision & 0xFF) == 0x54)
> + return true;
> +
> + return false;
> +}
> +
> static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
> {
> struct otx2_hw *hw = &pfvf->hw;
> @@ -627,6 +642,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
> case BLKTYPE_NPA:
> blkaddr = BLKADDR_NPA;
> break;
> + case BLKTYPE_CPT:
> + blkaddr = BLKADDR_CPT0;
> + break;
> default:
> blkaddr = BLKADDR_RVUM;
> break;
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> index cbd5050f58e8..a7e17d870420 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> @@ -26,6 +26,7 @@
> #include "cn10k.h"
> #include "qos.h"
> #include <rvu_trace.h>
> +#include "cn10k_ipsec.h"
>
> #define DRV_NAME "rvu_nicpf"
> #define DRV_STRING "Marvell RVU NIC Physical Function Driver"
> @@ -2201,6 +2202,10 @@ static int otx2_set_features(struct net_device *netdev,
> return otx2_enable_rxvlan(pf,
> features & NETIF_F_HW_VLAN_CTAG_RX);
>
> + if (changed & NETIF_F_HW_ESP)
> + return cn10k_ipsec_ethtool_init(netdev,
> + features & NETIF_F_HW_ESP);
> +
> return otx2_handle_ntuple_tc_features(netdev, features);
> }
>
> @@ -3065,10 +3070,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> /* reset CGX/RPM MAC stats */
> otx2_reset_mac_stats(pf);
>
> + err = cn10k_ipsec_init(netdev);
> + if (err)
> + goto err_mcs_free;
> +
> err = register_netdev(netdev);
> if (err) {
> dev_err(dev, "Failed to register netdevice\n");
> - goto err_mcs_free;
> + goto err_ipsec_clean;
> }
>
> err = otx2_wq_init(pf);
> @@ -3109,6 +3118,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> otx2_mcam_flow_del(pf);
> err_unreg_netdev:
> unregister_netdev(netdev);
> +err_ipsec_clean:
> + cn10k_ipsec_clean(pf);
> err_mcs_free:
> cn10k_mcs_free(pf);
> err_del_mcam_entries:
> @@ -3286,6 +3297,7 @@ static void otx2_remove(struct pci_dev *pdev)
>
> otx2_unregister_dl(pf);
> unregister_netdev(netdev);
> + cn10k_ipsec_clean(pf);
> cn10k_mcs_free(pf);
> otx2_sriov_disable(pf->pdev);
> otx2_sriov_vfcfg_cleanup(pf);
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> index 99fcc5661674..6fc70c3cafb6 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> @@ -14,6 +14,7 @@
> #include "otx2_reg.h"
> #include "otx2_ptp.h"
> #include "cn10k.h"
> +#include "cn10k_ipsec.h"
>
> #define DRV_NAME "rvu_nicvf"
> #define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
> @@ -682,10 +683,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
> }
>
> + err = cn10k_ipsec_init(netdev);
> + if (err)
> + goto err_ptp_destroy;
> +
> err = register_netdev(netdev);
> if (err) {
> dev_err(dev, "Failed to register netdevice\n");
> - goto err_ptp_destroy;
> + goto err_ipsec_clean;
> }
>
> err = otx2_wq_init(vf);
> @@ -719,6 +724,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> otx2_shutdown_tc(vf);
> err_unreg_netdev:
> unregister_netdev(netdev);
> +err_ipsec_clean:
> + cn10k_ipsec_clean(vf);
> err_ptp_destroy:
> otx2_ptp_destroy(vf);
> err_detach_rsrc:
> @@ -771,6 +778,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
> unregister_netdev(netdev);
> if (vf->otx2_wq)
> destroy_workqueue(vf->otx2_wq);
> + cn10k_ipsec_clean(vf);
> otx2_ptp_destroy(vf);
> otx2_mcam_flow_del(vf);
> otx2_shutdown_tc(vf);
> --
> 2.34.1
>
>
--
Regards,
Kalesh A P
On Tue, May 28, 2024 at 07:23:46PM +0530, Bharat Bhushan wrote:
> This patch adds support for the add and delete Security Association
> (SA) xfrm ops. Hardware maintains the SA context in memory allocated
> by software. Each SA context is 128-byte aligned and its size is a
> multiple of 128 bytes. Add support for transport and tunnel ipsec
> modes, the ESP protocol, aead aes-gcm-icv16, and key sizes of
> 128/192/256 bits with a 32-bit salt.
>
> Signed-off-by: Bharat Bhushan <[email protected]>
> ---
> v2->v3:
> - Removed memset to zero wherever possible
> (comment from Kalesh Anakkur Purayil)
> - Corrected error handling when setting SA for inbound
> (comment from Kalesh Anakkur Purayil)
> - Move "netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;" to this patch
>   This fixes a build error with W=1
>
> .../marvell/octeontx2/nic/cn10k_ipsec.c | 452 ++++++++++++++++++
> .../marvell/octeontx2/nic/cn10k_ipsec.h | 114 +++++
> 2 files changed, 566 insertions(+)
<...>
> +static int cn10k_ipsec_validate_state(struct xfrm_state *x)
> +{
> + struct net_device *netdev = x->xso.dev;
> +
> + if (x->props.aalgo != SADB_AALG_NONE) {
> + netdev_err(netdev, "Cannot offload authenticated xfrm states\n");
> + return -EINVAL;
> + }
> + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
> + netdev_err(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
> + return -EINVAL;
> + }
> + if (x->props.calgo != SADB_X_CALG_NONE) {
> + netdev_err(netdev, "Cannot offload compressed xfrm states\n");
> + return -EINVAL;
> + }
> + if (x->props.flags & XFRM_STATE_ESN) {
> + netdev_err(netdev, "Cannot offload ESN xfrm states\n");
> + return -EINVAL;
> + }
I'm afraid this check will make the offload unusable in real-life
scenarios. It is hard to imagine that someone will use an offload
which requires rekeying every 2^32 packets.
> + if (x->props.family != AF_INET && x->props.family != AF_INET6) {
> + netdev_err(netdev, "Only IPv4/v6 xfrm states may be offloaded\n");
> + return -EINVAL;
> + }
> + if (x->props.mode != XFRM_MODE_TRANSPORT &&
> + x->props.mode != XFRM_MODE_TUNNEL) {
> + dev_info(&netdev->dev, "Only tunnel/transport xfrm states may be offloaded\n");
> + return -EINVAL;
> + }
> + if (x->id.proto != IPPROTO_ESP) {
> + netdev_err(netdev, "Only ESP xfrm state may be offloaded\n");
> + return -EINVAL;
> + }
> + if (x->encap) {
> + netdev_err(netdev, "Encapsulated xfrm state may not be offloaded\n");
> + return -EINVAL;
> + }
> + if (!x->aead) {
> + netdev_err(netdev, "Cannot offload xfrm states without aead\n");
> + return -EINVAL;
> + }
> +
> + if (x->aead->alg_icv_len != 128) {
> + netdev_err(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
> + return -EINVAL;
> + }
> + if (x->aead->alg_key_len != 128 + 32 &&
> + x->aead->alg_key_len != 192 + 32 &&
> + x->aead->alg_key_len != 256 + 32) {
> + netdev_err(netdev, "Cannot offload xfrm states with AEAD key length other than 128/192/256bit\n");
> + return -EINVAL;
> + }
> + if (x->tfcpad) {
> + netdev_err(netdev, "Cannot offload xfrm states with tfc padding\n");
> + return -EINVAL;
> + }
> + if (!x->geniv) {
> + netdev_err(netdev, "Cannot offload xfrm states without geniv\n");
> + return -EINVAL;
> + }
> + if (strcmp(x->geniv, "seqiv")) {
> + netdev_err(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
> + return -EINVAL;
> + }
> + return 0;
> +}
I don't see a check for the supported offload type among these checks:
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
....
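For illustration, a complete form of that check, in the same style as the
other checks in cn10k_ipsec_validate_state() (sketch only, the error message
is just an example):

        if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
                netdev_err(netdev, "Unsupported xfrm offload type\n");
                return -EINVAL;
        }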
On Tue, May 28, 2024 at 07:23:47PM +0530, Bharat Bhushan wrote:
> Prepare and submit a crypto hardware (CPT) instruction for
> outbound inline ipsec crypto mode offload. The CPT instruction
> has the authentication offset, IV offset and encapsulation offset
> in the input packet. Also provide the SA context pointer, which has
> details about the algo, keys, salt etc. The crypto hardware encrypts,
> authenticates and provides the ESP packet to the networking hardware.
>
> Signed-off-by: Bharat Bhushan <[email protected]>
Hi Bharat,
A minor nit from my side as it looks like there will be a v4 anyway.
...
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
...
> +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
> + struct otx2_snd_queue *sq, struct sk_buff *skb,
> + int num_segs, int size)
> +{
...
> + /* Check for valid SA context */
> + sa_info = (struct cpt_ctx_info_s *)x->xso.offload_handle;
> + if (!sa_info || !sa_info->sa_iova) {
> + netdev_err(pf->netdev, "Invalid SA conext\n");
nit: context
checkpatch.pl --codespell is your friend.
> + goto drop;
> + }
...
On Tue, May 28, 2024 at 07:23:47PM +0530, Bharat Bhushan wrote:
> Prepare and submit a crypto hardware (CPT) instruction for
> outbound inline ipsec crypto mode offload. The CPT instruction
> has the authentication offset, IV offset and encapsulation offset
> in the input packet. Also provide the SA context pointer, which has
> details about the algo, keys, salt etc. The crypto hardware encrypts,
> authenticates and provides the ESP packet to the networking hardware.
>
> Signed-off-by: Bharat Bhushan <[email protected]>
> ---
> .../marvell/octeontx2/nic/cn10k_ipsec.c | 224 ++++++++++++++++++
> .../marvell/octeontx2/nic/cn10k_ipsec.h | 40 ++++
> .../marvell/octeontx2/nic/otx2_common.c | 23 ++
> .../marvell/octeontx2/nic/otx2_common.h | 3 +
> .../ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 +
> .../marvell/octeontx2/nic/otx2_txrx.c | 33 ++-
> .../marvell/octeontx2/nic/otx2_txrx.h | 3 +
> 7 files changed, 325 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> index 136aebe2a007..1974fda2e0d3 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> @@ -7,8 +7,11 @@
> #include <net/xfrm.h>
> #include <linux/netdevice.h>
> #include <linux/bitfield.h>
> +#include <crypto/aead.h>
> +#include <crypto/gcm.h>
>
> #include "otx2_common.h"
> +#include "otx2_struct.h"
> #include "cn10k_ipsec.h"
>
> static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
> @@ -843,3 +846,224 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
> cn10k_outb_cpt_clean(pf);
> }
> EXPORT_SYMBOL(cn10k_ipsec_clean);
<...>
> +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
> + struct otx2_snd_queue *sq, struct sk_buff *skb,
> + int num_segs, int size)
> +{
> + struct cpt_ctx_info_s *sa_info;
> + struct cpt_inst_s inst;
> + struct cpt_res_s *res;
> + struct xfrm_state *x;
> + dma_addr_t dptr_iova;
> + struct sec_path *sp;
> + u8 encap_offset;
> + u8 auth_offset;
> + u8 gthr_size;
> + u8 iv_offset;
> + u16 dlen;
> +
> + /* Check for Inline IPSEC enabled */
> + if (!(pf->flags & OTX2_FLAG_INLINE_IPSEC_ENABLED)) {
> + netdev_err(pf->netdev, "Ipsec not enabled, drop packet\n");
<...>
> + netdev_err(pf->netdev, "%s: no xfrm state len = %d\n",
> + __func__, sp->len);
<...>
> + netdev_err(pf->netdev, "no xfrm_input_state()\n");
<...>
> + netdev_err(pf->netdev, "un supported offload mode %d\n",
> + x->props.mode);
<...>
> + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
<...>
> + netdev_err(pf->netdev, "Invalid SA conext\n");
All these prints are in the datapath and can be triggered by network
packets. These and the RX prints need to be deleted.
Thanks
>-----Original Message-----
>From: Leon Romanovsky <[email protected]>
>Sent: Sunday, June 2, 2024 12:21 PM
>To: Bharat Bhushan <[email protected]>
>Cc: [email protected]; [email protected]; Sunil Kovvuri
>Goutham <[email protected]>; Geethasowjanya Akula
><[email protected]>; Subbaraya Sundeep Bhatta <[email protected]>;
>Hariprasad Kelam <[email protected]>; [email protected];
>[email protected]; [email protected]; [email protected]; Jerin Jacob
><[email protected]>; Linu Cherian <[email protected]>;
>[email protected]
>Subject: [EXTERNAL] Re: [net-next,v3 6/8] cn10k-ipsec: Process inline ipsec
>transmit offload
>
>
>----------------------------------------------------------------------
>On Tue, May 28, 2024 at 07:23:47PM +0530, Bharat Bhushan wrote:
>> Prepare and submit a crypto hardware (CPT) instruction for outbound
>> inline ipsec crypto mode offload. The CPT instruction has the
>> authentication offset, IV offset and encapsulation offset in the input
>> packet. Also provide the SA context pointer, which has details about the
>> algo, keys, salt etc. The crypto hardware encrypts, authenticates and
>> provides the ESP packet to the networking hardware.
>>
>> Signed-off-by: Bharat Bhushan <[email protected]>
>> ---
>> .../marvell/octeontx2/nic/cn10k_ipsec.c | 224 ++++++++++++++++++
>> .../marvell/octeontx2/nic/cn10k_ipsec.h | 40 ++++
>> .../marvell/octeontx2/nic/otx2_common.c | 23 ++
>> .../marvell/octeontx2/nic/otx2_common.h | 3 +
>> .../ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 +
>> .../marvell/octeontx2/nic/otx2_txrx.c | 33 ++-
>> .../marvell/octeontx2/nic/otx2_txrx.h | 3 +
>> 7 files changed, 325 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
>> b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
>> index 136aebe2a007..1974fda2e0d3 100644
>> --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
>> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
>> @@ -7,8 +7,11 @@
>> #include <net/xfrm.h>
>> #include <linux/netdevice.h>
>> #include <linux/bitfield.h>
>> +#include <crypto/aead.h>
>> +#include <crypto/gcm.h>
>>
>> #include "otx2_common.h"
>> +#include "otx2_struct.h"
>> #include "cn10k_ipsec.h"
>>
>> static bool is_dev_support_inline_ipsec(struct pci_dev *pdev) @@
>> -843,3 +846,224 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
>> cn10k_outb_cpt_clean(pf);
>> }
>> EXPORT_SYMBOL(cn10k_ipsec_clean);
>
><...>
>
>> +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
>> + struct otx2_snd_queue *sq, struct sk_buff *skb,
>> + int num_segs, int size)
>> +{
>> + struct cpt_ctx_info_s *sa_info;
>> + struct cpt_inst_s inst;
>> + struct cpt_res_s *res;
>> + struct xfrm_state *x;
>> + dma_addr_t dptr_iova;
>> + struct sec_path *sp;
>> + u8 encap_offset;
>> + u8 auth_offset;
>> + u8 gthr_size;
>> + u8 iv_offset;
>> + u16 dlen;
>> +
>> + /* Check for Inline IPSEC enabled */
>> + if (!(pf->flags & OTX2_FLAG_INLINE_IPSEC_ENABLED)) {
>> + netdev_err(pf->netdev, "Ipsec not enabled, drop packet\n");
>
><...>
>
>> + netdev_err(pf->netdev, "%s: no xfrm state len = %d\n",
>> + __func__, sp->len);
>
><...>
>
>> + netdev_err(pf->netdev, "no xfrm_input_state()\n");
>
><...>
>
>> + netdev_err(pf->netdev, "un supported offload mode %d\n",
>> + x->props.mode);
>
><...>
>
>> + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
>
><...>
>
>> + netdev_err(pf->netdev, "Invalid SA conext\n");
>
>All these prints are in the datapath and can be triggered by network
>packets. These and the RX prints need to be deleted.
>
Yes, all these error messages in the datapath should be under netif_msg_tx_err().
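For example (sketch only; this assumes the otx2_nic private struct carries
the standard msg_enable bitmap that netif_msg_tx_err() checks):

        if (netif_msg_tx_err(pf))
                netdev_err(pf->netdev, "Ipsec not enabled, drop packet\n");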
Thanks,
Sunil.
> -----Original Message-----
> From: Kalesh Anakkur Purayil <[email protected]>
> Sent: Wednesday, May 29, 2024 11:16 AM
> To: Bharat Bhushan <[email protected]>
> Cc: [email protected]; [email protected]; Sunil Kovvuri
> Goutham <[email protected]>; Geethasowjanya Akula
> <[email protected]>; Subbaraya Sundeep Bhatta <[email protected]>;
> Hariprasad Kelam <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected]; Jerin Jacob
> <[email protected]>; Linu Cherian <[email protected]>;
> [email protected]
> Subject: [EXTERNAL] Re: [net-next,v3 4/8] cn10k-ipsec: Initialize crypto
> hardware for outb inline ipsec
>
> ----------------------------------------------------------------------
> On Tue, May 28, 2024 at 7:27 PM Bharat Bhushan
> <[email protected]> wrote:
> >
> > One crypto hardware logical function (cpt-lf) per netdev is
> > required for inline ipsec outbound functionality. Allocate,
> > attach and initialize one crypto hardware function when
> > enabling inline ipsec crypto offload. Crypto hardware
> > function will be detached and freed on disabling inline
> > ipsec.
> >
> > Signed-off-by: Bharat Bhushan <[email protected]>
> > ---
> > v1->v2:
> > - Fix compilation error to build driver a module
> > - Fix couple of compilation warnings
> >
> > .../ethernet/marvell/octeontx2/nic/Makefile | 1 +
> > .../marvell/octeontx2/nic/cn10k_ipsec.c | 393 ++++++++++++++++++
> > .../marvell/octeontx2/nic/cn10k_ipsec.h | 104 +++++
> > .../marvell/octeontx2/nic/otx2_common.h | 18 +
> > .../ethernet/marvell/octeontx2/nic/otx2_pf.c | 14 +-
> > .../ethernet/marvell/octeontx2/nic/otx2_vf.c | 10 +-
> > 6 files changed, 538 insertions(+), 2 deletions(-)
> > create mode 100644
> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> > create mode 100644
> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> >
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> > index 5664f768cb0c..9695f967d416 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
> > @@ -14,5 +14,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
> > rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
> > rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
> > rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
> > +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
> >
> > ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> > new file mode 100644
> > index 000000000000..b221b67815ee
> > --- /dev/null
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> > @@ -0,0 +1,393 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Marvell IPSEC offload driver
> > + *
> > + * Copyright (C) 2024 Marvell.
> > + */
> > +
> > +#include <net/xfrm.h>
> > +#include <linux/netdevice.h>
> > +#include <linux/bitfield.h>
> > +
> > +#include "otx2_common.h"
> > +#include "cn10k_ipsec.h"
> > +
> > +static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
> > +{
> > + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
> > +}
> > +
> > +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
> > +{
> > + struct rsrc_attach *attach;
> > + int err;
> > +
> > + mutex_lock(&pf->mbox.lock);
> > + /* Get memory to put this msg */
> > + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
> > + if (!attach) {
> > + mutex_unlock(&pf->mbox.lock);
> > + return -ENOMEM;
> > + }
> [Kalesh] To make it consistent with the other functions, you can add an
> 'unlock' label and rewrite it as:
> if (!attach) {
> err = -ENOMEM;
> goto unlock;
> }
Yes, will take care
> > +
> > + attach->cptlfs = true;
> > + attach->modify = true;
> > +
> > + /* Send attach request to AF */
> > + err = otx2_sync_mbox_msg(&pf->mbox);
> > + if (err) {
> > + mutex_unlock(&pf->mbox.lock);
> > + return err;
> > + }
> > +
> > + mutex_unlock(&pf->mbox.lock);
> > + return 0;
> > +}
> > +
> > +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
> > +{
> > + struct rsrc_detach *detach;
> > +
> > + mutex_lock(&pf->mbox.lock);
> > + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
> > + if (!detach) {
> > + mutex_unlock(&pf->mbox.lock);
> > + return -ENOMEM;
> > + }
> [Kalesh] Same comment as above
> > +
> > + detach->partial = true;
> > + detach->cptlfs = true;
> > +
> > + /* Send detach request to AF */
> > + otx2_sync_mbox_msg(&pf->mbox);
> > + mutex_unlock(&pf->mbox.lock);
> > + return 0;
> > +}
> > +
> > +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
> > +{
> > + struct cpt_lf_alloc_req_msg *req;
> > + int ret = 0;
> [Kalesh] No need to initialize ret here. You are a little inconsistent
> in naming the variable, ret vs err :)
Okay, will use ret;
> > +
> > + mutex_lock(&pf->mbox.lock);
> > + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
> > + if (!req) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > +
> > + /* PF function */
> > + req->nix_pf_func = pf->pcifunc;
> > + /* Enable SE-IE Engine Group */
> > + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
> > +
> > + ret = otx2_sync_mbox_msg(&pf->mbox);
> > +
> > +error:
> [Kalesh]: It would be better to name the label 'unlock'.
Yes, will take care
> > + mutex_unlock(&pf->mbox.lock);
> > + return ret;
> > +}
> > +
> > +static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
> > +{
> > + mutex_lock(&pf->mbox.lock);
> > + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
> > + otx2_sync_mbox_msg(&pf->mbox);
> > + mutex_unlock(&pf->mbox.lock);
> > +}
> > +
> > +static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
> > +{
> > + struct cpt_inline_ipsec_cfg_msg *req;
> > + int ret = 0;
> [Kalesh] No need to initialize the variable here.
> > +
> > + mutex_lock(&pf->mbox.lock);
> > + req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
> > + if (!req) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > +
> > + req->dir = CPT_INLINE_OUTBOUND;
> > + req->enable = 1;
> > + req->nix_pf_func = pf->pcifunc;
> > + ret = otx2_sync_mbox_msg(&pf->mbox);
> > +error:
> > + mutex_unlock(&pf->mbox.lock);
> > + return ret;
> > +}
> > +
> > +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
> > +{
> > + u64 reg_val;
> > +
> > + /* Set Execution Enable of instruction queue */
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> > + reg_val |= BIT_ULL(16);
> > + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
> > +
> > + /* Set iqueue's enqueuing */
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
> > + reg_val |= BIT_ULL(0);
> > + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
> > +}
> > +
> > +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
> > +{
> > + u32 inflight, grb_cnt, gwb_cnt;
> > + u32 nq_ptr, dq_ptr;
> > + int timeout = 20;
> > + u64 reg_val;
> > + int cnt;
> > +
> > + /* Disable instructions enqueuing */
> > + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
> > +
> > + /* Wait for instruction queue to become empty.
> > + * CPT_LF_INPROG.INFLIGHT count is zero
> > + */
> > + do {
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> > + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
> > + if (!inflight)
> > + break;
> > +
> > + usleep_range(10000, 20000);
> > + if (timeout-- < 0) {
> > + netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
> > + break;
> > + }
> > + } while (1);
> > +
> > + /* Disable executions in the LF's queue,
> > + * the queue should be empty at this point
> > + */
> > + reg_val &= ~BIT_ULL(16);
> > + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
> > +
> > + /* Wait for instruction queue to become empty */
> > + cnt = 0;
> > + do {
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> > + if (reg_val & BIT_ULL(31))
> > + cnt = 0;
> > + else
> > + cnt++;
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
> > + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
> > + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
> > + } while ((cnt < 10) && (nq_ptr != dq_ptr));
> > +
> > + cnt = 0;
> > + do {
> > + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
> > + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
> > + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
> > + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
> > + if (inflight == 0 && gwb_cnt < 40 &&
> > + (grb_cnt == 0 || grb_cnt == 40))
> > + cnt++;
> > + else
> > + cnt = 0;
> > + } while (cnt < 10);
> > +}
> > +
> > +/* Allocate memory for CPT outbound Instruction queue.
> > + * Instruction queue memory format is:
> > + * -----------------------------
> > + * | Instruction Group memory |
> > + * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
> > + * | x 16 Bytes) |
> > + * | |
> > + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
> > + * | Flow Control (128 Bytes) |
> > + * | |
> > + * -----------------------------
> > + * | Instruction Memory |
> > + * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
> > + * | × 40 × 64 bytes) |
> > + * | |
> > + * -----------------------------
> > + */
> > +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
> > +{
> > + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
> > +
> > + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
> > + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
> > +
> > + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
> > + &iq->real_dma_addr, GFP_KERNEL);
> > + if (!iq->real_vaddr)
> > + return -ENOMEM;
> > +
> > + /* iq->vaddr/dma_addr points to Flow Control location */
> > + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
> > + iq->dma_addr = iq->real_dma_addr +
> CN10K_CPT_INST_GRP_QLEN_BYTES;
> > +
> > + /* Align pointers */
> > + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
> > + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
> > + return 0;
> > +}
> > +
> > +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
> > +{
> > + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
> > +
> > + if (!iq->real_vaddr)
> > + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
> > + iq->real_dma_addr);
> > +
> > + iq->real_vaddr = NULL;
> > + iq->vaddr = NULL;
> > +}
> > +
> > +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
> > +{
> > + u64 reg_val;
> > + int ret;
> > +
> > + /* Allocate Memory for CPT IQ */
> > + ret = cn10k_outb_cptlf_iq_alloc(pf);
> > + if (ret)
> > + return ret;
> > +
> > + /* Disable IQ */
> > + cn10k_outb_cptlf_iq_disable(pf);
> > +
> > + /* Set IQ base address */
> > + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
> > +
> > + /* Set IQ size */
> > + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40
> +
> > + CN10K_CPT_EXTRA_SIZE_DIV40);
> > + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
> > +
> > + return 0;
> > +}
> > +
> > +static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
> > +{
> > + int ret = 0;
> [Kalesh] There is no need to initialize this variable.
> > +
> > + /* Initialize CPTLF Instruction Queue (IQ) */
> > + ret = cn10k_outb_cptlf_iq_init(pf);
> > + if (ret)
> > + return ret;
> > +
> > + /* Configure CPTLF for outbound inline ipsec */
> > + ret = cn10k_outb_cptlf_config(pf);
> > + if (ret)
> > + goto iq_clean;
> > +
> > + /* Enable CPTLF IQ */
> > + cn10k_outb_cptlf_iq_enable(pf);
> > + return 0;
> > +iq_clean:
> > + cn10k_outb_cptlf_iq_free(pf);
> > + return ret;
> > +}
> > +
> > +static int cn10k_outb_cpt_init(struct net_device *netdev)
> > +{
> > + struct otx2_nic *pf = netdev_priv(netdev);
> > + int ret;
> > +
> > + mutex_lock(&pf->ipsec.lock);
> > +
> > + /* Attach a CPT LF for outbound inline ipsec */
> > + ret = cn10k_outb_cptlf_attach(pf);
> > + if (ret)
> > + goto unlock;
> > +
> > + /* Allocate a CPT LF for outbound inline ipsec */
> > + ret = cn10k_outb_cptlf_alloc(pf);
> > + if (ret)
> > + goto detach;
> > +
> > + /* Initialize the CPTLF for outbound inline ipsec */
> > + ret = cn10k_outb_cptlf_init(pf);
> > + if (ret)
> > + goto lf_free;
> > +
> > + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
> > + CN10K_CPT_LF_NQX(0));
> > +
> > + /* Set inline ipsec enabled for this device */
> > + pf->flags |= OTX2_FLAG_INLINE_IPSEC_ENABLED;
> > +
> > + goto unlock;
> > +
> > +lf_free:
> > + cn10k_outb_cptlf_free(pf);
> > +detach:
> > + cn10k_outb_cptlf_detach(pf);
> > +unlock:
> > + mutex_unlock(&pf->ipsec.lock);
> > + return ret;
> > +}
> > +
> > +static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
> > +{
> > + int err;
> > +
> > + mutex_lock(&pf->ipsec.lock);
> > +
> > + /* Set inline ipsec disabled for this device */
> > + pf->flags &= ~OTX2_FLAG_INLINE_IPSEC_ENABLED;
> > +
> > + /* Disable CPTLF Instruction Queue (IQ) */
> > + cn10k_outb_cptlf_iq_disable(pf);
> > +
> > + /* Set IQ base address and size to 0 */
> > + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
> > + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
> > +
> > + /* Free CPTLF IQ */
> > + cn10k_outb_cptlf_iq_free(pf);
> > +
> > + /* Free and detach CPT LF */
> > + cn10k_outb_cptlf_free(pf);
> > + err = cn10k_outb_cptlf_detach(pf);
> > + if (err)
> > + netdev_err(pf->netdev, "Failed to detach CPT LF\n");
> > +
> > + mutex_unlock(&pf->ipsec.lock);
> > + return err;
> > +}
> > +
> > +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
> > +{
> > + struct otx2_nic *pf = netdev_priv(netdev);
> > +
> > + /* Inline ipsec supported on cn10k */
> > + if (!is_dev_support_inline_ipsec(pf->pdev))
> > + return -ENODEV;
> [Kalesh] NODEV vs NOTSUPP ?
Yes, NOTSUPP is better.
> > +
> > + if (!enable)
> > + return cn10k_outb_cpt_clean(pf);
> > +
> > + /* Initialize CPT for outbound inline ipsec */
> > + return cn10k_outb_cpt_init(netdev);
> > +}
> > +
> > +int cn10k_ipsec_init(struct net_device *netdev)
> > +{
> > + struct otx2_nic *pf = netdev_priv(netdev);
> > +
> > + if (!is_dev_support_inline_ipsec(pf->pdev))
> > + return 0;
> [Kalesh] This function always returns 0; maybe you can change it to return
> void.
Yes, this patch only ever returns zero, but follow-up patches add code that
can return non-zero as well. To keep the diff minimal I kept it like that.
Thanks
-Bharat
> > +
> > + mutex_init(&pf->ipsec.lock);
> > + return 0;
> > +}
> > +EXPORT_SYMBOL(cn10k_ipsec_init);
> > +
> > +void cn10k_ipsec_clean(struct otx2_nic *pf)
> > +{
> > + if (!is_dev_support_inline_ipsec(pf->pdev))
> > + return;
> > +
> > + cn10k_outb_cpt_clean(pf);
> > +}
> > +EXPORT_SYMBOL(cn10k_ipsec_clean);
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> > new file mode 100644
> > index 000000000000..b322e19d5e23
> > --- /dev/null
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
> > @@ -0,0 +1,104 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/* Marvell IPSEC offload driver
> > + *
> > + * Copyright (C) 2024 Marvell.
> > + */
> > +
> > +#ifndef CN10K_IPSEC_H
> > +#define CN10K_IPSEC_H
> > +
> > +#include <linux/types.h>
> > +
> > +/* CPT instruction size in bytes */
> > +#define CN10K_CPT_INST_SIZE 64
> > +
> > +/* CPT instruction (CPT_INST_S) queue length */
> > +#define CN10K_CPT_INST_QLEN 8200
> > +
> > +/* CPT instruction queue size passed to HW is in units of
> > + * 40*CPT_INST_S messages.
> > + */
> > +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
> > +
> > +/* CPT needs 320 free entries */
> > +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 *
> CN10K_CPT_INST_SIZE)
> > +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
> > +
> > +/* CPT instruction queue length in bytes */
> > +#define CN10K_CPT_INST_QLEN_BYTES \
> > + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
> > + CN10K_CPT_INST_QLEN_EXTRA_BYTES)
> > +
> > +/* CPT instruction group queue length in bytes */
> > +#define CN10K_CPT_INST_GRP_QLEN_BYTES \
> > + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
> > +
> > +/* CPT FC length in bytes */
> > +#define CN10K_CPT_Q_FC_LEN 128
> > +
> > +/* Default CPT engine group for inline ipsec */
> > +#define CN10K_DEF_CPT_IPSEC_EGRP 1
> > +
> > +/* CN10K CPT LF registers */
> > +#define CPT_LFBASE (BLKTYPE_CPT <<
> RVU_FUNC_BLKADDR_SHIFT)
> > +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
> > +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
> > +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
> > +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
> > +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
> > +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
> > +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
> > +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
> > +
> > +struct cn10k_cpt_inst_queue {
> > + u8 *vaddr;
> > + u8 *real_vaddr;
> > + dma_addr_t dma_addr;
> > + dma_addr_t real_dma_addr;
> > + u32 size;
> > +};
> > +
> > +struct cn10k_ipsec {
> > + /* Outbound CPT */
> > + u64 io_addr;
> > + /* Lock to protect SA management */
> > + struct mutex lock;
> > + struct cn10k_cpt_inst_queue iq;
> > +};
> > +
> > +/* CPT LF_INPROG Register */
> > +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
> > +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
> > +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
> > +
> > +/* CPT LF_Q_GRP_PTR Register */
> > +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
> > +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
> > +
> > +/* CPT LF_Q_BASE Register */
> > +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
> > +
> > +/* CPT LF_Q_SIZE Register */
> > +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
> > +
> > +#ifdef CONFIG_XFRM_OFFLOAD
> > +int cn10k_ipsec_init(struct net_device *netdev);
> > +void cn10k_ipsec_clean(struct otx2_nic *pf);
> > +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
> > +#else
> > +static inline __maybe_unused int cn10k_ipsec_init(struct net_device
> *netdev)
> > +{
> > + return 0;
> > +}
> > +
> > +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
> > +{
> > +}
> > +
> > +static inline __maybe_unused
> > +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
> > +{
> > + return 0;
> > +}
> > +#endif
> > +#endif // CN10K_IPSEC_H
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> > index 42a759a33c11..859bbc78e653 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
> > @@ -29,6 +29,7 @@
> > #include "otx2_devlink.h"
> > #include <rvu_trace.h>
> > #include "qos.h"
> > +#include "cn10k_ipsec.h"
> >
> > /* IPv4 flag more fragment bit */
> > #define IPV4_FLAG_MORE 0x20
> > @@ -39,6 +40,7 @@
> > #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
> >
> > #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
> > +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
> > #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
> >
> > /* PCI BAR nos */
> > @@ -467,6 +469,7 @@ struct otx2_nic {
> > #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
> > #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
> > #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
> > +#define OTX2_FLAG_INLINE_IPSEC_ENABLED BIT_ULL(18)
> > u64 flags;
> > u64 *cq_op_addr;
> >
> > @@ -534,6 +537,9 @@ struct otx2_nic {
> > #if IS_ENABLED(CONFIG_MACSEC)
> > struct cn10k_mcs_cfg *macsec_cfg;
> > #endif
> > +
> > + /* Inline ipsec */
> > + struct cn10k_ipsec ipsec;
> > };
> >
> > static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
> > @@ -578,6 +584,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
> > return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
> > }
> >
> > +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
> > +{
> > + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
> > + (pdev->revision & 0xFF) == 0x54)
> > + return true;
> > +
> > + return false;
> > +}
> > +
> > static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
> > {
> > struct otx2_hw *hw = &pfvf->hw;
> > @@ -627,6 +642,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
> > case BLKTYPE_NPA:
> > blkaddr = BLKADDR_NPA;
> > break;
> > + case BLKTYPE_CPT:
> > + blkaddr = BLKADDR_CPT0;
> > + break;
> > default:
> > blkaddr = BLKADDR_RVUM;
> > break;
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > index cbd5050f58e8..a7e17d870420 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > @@ -26,6 +26,7 @@
> > #include "cn10k.h"
> > #include "qos.h"
> > #include <rvu_trace.h>
> > +#include "cn10k_ipsec.h"
> >
> > #define DRV_NAME "rvu_nicpf"
> > #define DRV_STRING "Marvell RVU NIC Physical Function Driver"
> > @@ -2201,6 +2202,10 @@ static int otx2_set_features(struct net_device *netdev,
> > return otx2_enable_rxvlan(pf,
> > features & NETIF_F_HW_VLAN_CTAG_RX);
> >
> > + if (changed & NETIF_F_HW_ESP)
> > + return cn10k_ipsec_ethtool_init(netdev,
> > + features & NETIF_F_HW_ESP);
> > +
> > return otx2_handle_ntuple_tc_features(netdev, features);
> > }
> >
> > @@ -3065,10 +3070,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> > /* reset CGX/RPM MAC stats */
> > otx2_reset_mac_stats(pf);
> >
> > + err = cn10k_ipsec_init(netdev);
> > + if (err)
> > + goto err_mcs_free;
> > +
> > err = register_netdev(netdev);
> > if (err) {
> > dev_err(dev, "Failed to register netdevice\n");
> > - goto err_mcs_free;
> > + goto err_ipsec_clean;
> > }
> >
> > err = otx2_wq_init(pf);
> > @@ -3109,6 +3118,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> > otx2_mcam_flow_del(pf);
> > err_unreg_netdev:
> > unregister_netdev(netdev);
> > +err_ipsec_clean:
> > + cn10k_ipsec_clean(pf);
> > err_mcs_free:
> > cn10k_mcs_free(pf);
> > err_del_mcam_entries:
> > @@ -3286,6 +3297,7 @@ static void otx2_remove(struct pci_dev *pdev)
> >
> > otx2_unregister_dl(pf);
> > unregister_netdev(netdev);
> > + cn10k_ipsec_clean(pf);
> > cn10k_mcs_free(pf);
> > otx2_sriov_disable(pf->pdev);
> > otx2_sriov_vfcfg_cleanup(pf);
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> > index 99fcc5661674..6fc70c3cafb6 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
> > @@ -14,6 +14,7 @@
> > #include "otx2_reg.h"
> > #include "otx2_ptp.h"
> > #include "cn10k.h"
> > +#include "cn10k_ipsec.h"
> >
> > #define DRV_NAME "rvu_nicvf"
> > #define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
> > @@ -682,10 +683,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> > snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
> > }
> >
> > + err = cn10k_ipsec_init(netdev);
> > + if (err)
> > + goto err_ptp_destroy;
> > +
> > err = register_netdev(netdev);
> > if (err) {
> > dev_err(dev, "Failed to register netdevice\n");
> > - goto err_ptp_destroy;
> > + goto err_ipsec_clean;
> > }
> >
> > err = otx2_wq_init(vf);
> > @@ -719,6 +724,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> > otx2_shutdown_tc(vf);
> > err_unreg_netdev:
> > unregister_netdev(netdev);
> > +err_ipsec_clean:
> > + cn10k_ipsec_clean(vf);
> > err_ptp_destroy:
> > otx2_ptp_destroy(vf);
> > err_detach_rsrc:
> > @@ -771,6 +778,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
> > unregister_netdev(netdev);
> > if (vf->otx2_wq)
> > destroy_workqueue(vf->otx2_wq);
> > + cn10k_ipsec_clean(vf);
> > otx2_ptp_destroy(vf);
> > otx2_mcam_flow_del(vf);
> > otx2_shutdown_tc(vf);
> > --
> > 2.34.1
> >
> >
>
>
> --
> Regards,
> Kalesh A P
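
[Aside on the CPT LF register and mask definitions quoted above: a hedged sketch
of how the Q_BASE/Q_SIZE fields might be programmed. FIELD_PREP()/GENMASK_ULL()
usage is standard kernel practice and otx2_write64() is the driver's existing
register-write helper, but whether the driver programs the queue exactly this
way, and the reading of DIV40 as "queue size in units of 40 instructions", are
assumptions here, not statements about the patch.]

/*
 * Sketch only: program the CPT LF queue base and size using the masks
 * defined in cn10k_ipsec.h.  The DIV40 interpretation and this exact
 * sequence are assumptions.
 */
static void example_cpt_lf_q_setup(struct otx2_nic *pfvf,
				   dma_addr_t q_base, u32 q_entries)
{
	u64 val;

	/* Q_BASE keeps address bits [52:7], i.e. a 128-byte aligned IOVA */
	val = q_base & CPT_LF_Q_BASE_ADDR;
	otx2_write64(pfvf, CN10K_CPT_LF_Q_BASE, val);

	/* Q_SIZE is assumed to be expressed in units of 40 instructions */
	val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, DIV_ROUND_UP(q_entries, 40));
	otx2_write64(pfvf, CN10K_CPT_LF_Q_SIZE, val);
}
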
> -----Original Message-----
> From: Simon Horman <[email protected]>
> Sent: Saturday, June 1, 2024 3:50 PM
> To: Bharat Bhushan <[email protected]>
> Cc: [email protected]; [email protected]; Sunil Kovvuri
> Goutham <[email protected]>; Geethasowjanya Akula
> <[email protected]>; Subbaraya Sundeep Bhatta <[email protected]>;
> Hariprasad Kelam <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected]; Jerin Jacob
> <[email protected]>; Linu Cherian <[email protected]>;
> [email protected]
> Subject: [EXTERNAL] Re: [net-next,v3 6/8] cn10k-ipsec: Process inline ipsec
> transmit offload
>
>
> ----------------------------------------------------------------------
> On Tue, May 28, 2024 at 07:23:47PM +0530, Bharat Bhushan wrote:
> > Prepare and submit crypto hardware (CPT) instructions for outbound
> > inline ipsec crypto mode offload. The CPT instruction has the
> > authentication offset, IV offset and encapsulation offset within the
> > input packet. It also provides the SA context pointer, which has details
> > about the algo, keys, salt etc. The crypto hardware encrypts,
> > authenticates and provides the ESP packet to the networking hardware.
> >
> > Signed-off-by: Bharat Bhushan <[email protected]>
>
> Hi Bharat,
>
> A minor nit from my side as it looks like there will be a v4 anyway.
>
> ...
>
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
>
> ...
>
> > +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
> > + struct otx2_snd_queue *sq, struct sk_buff *skb,
> > + int num_segs, int size)
> > +{
>
> ...
>
> > + /* Check for valid SA context */
> > + sa_info = (struct cpt_ctx_info_s *)x->xso.offload_handle;
> > + if (!sa_info || !sa_info->sa_iova) {
> > + netdev_err(pf->netdev, "Invalid SA conext\n");
>
> nit: context
>
> checkpatch.pl --codespell is your friend.
Will fix here and run this on all patches.
Thanks
-Bharat
>
> > + goto drop;
> > + }
>
> ...
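
[For orientation on the patch 6 commit message above: the CPT instruction
essentially bundles the packet address, the per-packet offsets and the SA
context pointer. The struct below only names those pieces; it is NOT the real
struct cpt_inst_s layout, whose field names are not shown in this thread.]

/*
 * Illustrative layout only -- not the real struct cpt_inst_s.  It simply
 * names the information the commit message says the CPT instruction
 * carries for outbound inline ipsec.
 */
struct example_cpt_inst {
	u64 dptr;	  /* IOVA of the packet or gather list to process */
	u64 sa_ctx_ptr;	  /* IOVA of the SA context: algo, keys, salt, ... */
	u64 res_addr;	  /* IOVA where hardware writes the completion result */
	u16 dlen;	  /* total input data length */
	u8  auth_offset;  /* where authenticated data starts in the packet */
	u8  iv_offset;	  /* where the IV sits in the packet */
	u8  encap_offset; /* where ESP encapsulation begins */
};

In the quoted cn10k_ipsec_transmit() these roughly correspond to dptr_iova,
sa_info->sa_iova, res, dlen and the three offset variables.
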
> -----Original Message-----
> From: Leon Romanovsky <[email protected]>
> Sent: Thursday, May 30, 2024 8:20 PM
> To: Bharat Bhushan <[email protected]>
> Cc: [email protected]; [email protected]; Sunil Kovvuri
> Goutham <[email protected]>; Geethasowjanya Akula
> <[email protected]>; Subbaraya Sundeep Bhatta <[email protected]>;
> Hariprasad Kelam <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected]; Jerin Jacob
> <[email protected]>; Linu Cherian <[email protected]>;
> [email protected]
> Subject: [EXTERNAL] Re: [net-next,v3 5/8] cn10k-ipsec: Add SA add/delete
> support for outb inline ipsec
>
> ----------------------------------------------------------------------
> On Tue, May 28, 2024 at 07:23:46PM +0530, Bharat Bhushan wrote:
> > This patch adds support for the add and delete Security Association
> > (SA) xfrm ops. Hardware maintains the SA context in memory allocated by
> > software. Each SA context is 128-byte aligned and the size of each
> > context is a multiple of 128 bytes. Add support for transport and tunnel
> > ipsec modes, ESP protocol, aead aes-gcm-icv16, and key sizes of
> > 128/192/256 bits with a 32-bit salt.
> >
> > Signed-off-by: Bharat Bhushan <[email protected]>
> > ---
> > v2->v3:
> > - Removed memset to zero wherever possible
> > (comment from Kalesh Anakkur Purayil)
> > - Corrected error handling when setting SA for inbound
> > (comment from Kalesh Anakkur Purayil)
> > - Move "netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;" to this patch
> >	This fixes a build error with W=1
> >
> > .../marvell/octeontx2/nic/cn10k_ipsec.c | 452 ++++++++++++++++++
> > .../marvell/octeontx2/nic/cn10k_ipsec.h | 114 +++++
> > 2 files changed, 566 insertions(+)
>
> <...>
>
> > +static int cn10k_ipsec_validate_state(struct xfrm_state *x)
> > +{
> > + struct net_device *netdev = x->xso.dev;
> > +
> > + if (x->props.aalgo != SADB_AALG_NONE) {
> > + netdev_err(netdev, "Cannot offload authenticated xfrm states\n");
> > + return -EINVAL;
> > + }
> > + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
> > + netdev_err(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
> > + return -EINVAL;
> > + }
> > + if (x->props.calgo != SADB_X_CALG_NONE) {
> > + netdev_err(netdev, "Cannot offload compressed xfrm states\n");
> > + return -EINVAL;
> > + }
> > + if (x->props.flags & XFRM_STATE_ESN) {
> > + netdev_err(netdev, "Cannot offload ESN xfrm states\n");
> > + return -EINVAL;
> > + }
>
> I'm afraid this check will make the offload unusable in real-life
> scenarios. It is hard to imagine that someone will use an offload which
> requires rekeying every 2^32 packets.
I agree. Currently ESN offload is not enabled; enabling ESN is on our list.
>
> > + if (x->props.family != AF_INET && x->props.family != AF_INET6) {
> > + netdev_err(netdev, "Only IPv4/v6 xfrm states may be offloaded\n");
> > + return -EINVAL;
> > + }
> > + if (x->props.mode != XFRM_MODE_TRANSPORT &&
> > + x->props.mode != XFRM_MODE_TUNNEL) {
> > + dev_info(&netdev->dev, "Only tunnel/transport xfrm states may be offloaded\n");
> > + return -EINVAL;
> > + }
> > + if (x->id.proto != IPPROTO_ESP) {
> > + netdev_err(netdev, "Only ESP xfrm state may be offloaded\n");
> > + return -EINVAL;
> > + }
> > + if (x->encap) {
> > + netdev_err(netdev, "Encapsulated xfrm state may not be offloaded\n");
> > + return -EINVAL;
> > + }
> > + if (!x->aead) {
> > + netdev_err(netdev, "Cannot offload xfrm states without aead\n");
> > + return -EINVAL;
> > + }
> > +
> > + if (x->aead->alg_icv_len != 128) {
> > + netdev_err(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
> > + return -EINVAL;
> > + }
> > + if (x->aead->alg_key_len != 128 + 32 &&
> > + x->aead->alg_key_len != 192 + 32 &&
> > + x->aead->alg_key_len != 256 + 32) {
> > + netdev_err(netdev, "Cannot offload xfrm states with AEAD key length other than 128/192/256bit\n");
> > + return -EINVAL;
> > + }
> > + if (x->tfcpad) {
> > + netdev_err(netdev, "Cannot offload xfrm states with tfc padding\n");
> > + return -EINVAL;
> > + }
> > + if (!x->geniv) {
> > + netdev_err(netdev, "Cannot offload xfrm states without geniv\n");
> > + return -EINVAL;
> > + }
> > + if (strcmp(x->geniv, "seqiv")) {
> > + netdev_err(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
> > + return -EINVAL;
> > + }
> > + return 0;
> > +}
>
> I don't see a check for the supported offload type among these checks:
> if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { ....
Will add the check.
Thanks
-Bharat
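
[For reference, the check Leon asks for could look like the sketch below,
placed alongside the other checks in cn10k_ipsec_validate_state(); the
error-message wording is an assumption.]

	/*
	 * Only crypto offload is covered by this series, so reject any
	 * other offload type, as suggested in the review.
	 */
	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		netdev_err(netdev, "Unsupported xfrm offload type %d\n",
			   x->xso.type);
		return -EINVAL;
	}
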
> -----Original Message-----
> From: Sunil Kovvuri Goutham <[email protected]>
> Sent: Monday, June 3, 2024 10:04 AM
> To: Leon Romanovsky <[email protected]>; Bharat Bhushan
> <[email protected]>
> Cc: [email protected]; [email protected]; Geethasowjanya
> Akula <[email protected]>; Subbaraya Sundeep Bhatta
> <[email protected]>; Hariprasad Kelam <[email protected]>;
> [email protected]; [email protected]; [email protected];
> [email protected]; Jerin Jacob <[email protected]>; Linu Cherian
> <[email protected]>; [email protected]
> Subject: RE: [EXTERNAL] Re: [net-next,v3 6/8] cn10k-ipsec: Process inline
> ipsec transmit offload
>
>
>
> >-----Original Message-----
> >From: Leon Romanovsky <[email protected]>
> >Sent: Sunday, June 2, 2024 12:21 PM
> >To: Bharat Bhushan <[email protected]>
> >Cc: [email protected]; [email protected]; Sunil Kovvuri
> >Goutham <[email protected]>; Geethasowjanya Akula
> ><[email protected]>; Subbaraya Sundeep Bhatta <[email protected]>;
> >Hariprasad Kelam <[email protected]>; [email protected];
> >[email protected]; [email protected]; [email protected]; Jerin Jacob
> ><[email protected]>; Linu Cherian <[email protected]>;
> >[email protected]
> >Subject: [EXTERNAL] Re: [net-next,v3 6/8] cn10k-ipsec: Process inline
> >ipsec transmit offload
> >
> >
> >----------------------------------------------------------------------
> >On Tue, May 28, 2024 at 07:23:47PM +0530, Bharat Bhushan wrote:
> >> Prepare and submit crypto hardware (CPT) instructions for outbound
> >> inline ipsec crypto mode offload. The CPT instruction has the
> >> authentication offset, IV offset and encapsulation offset within the
> >> input packet. It also provides the SA context pointer, which has
> >> details about the algo, keys, salt etc. The crypto hardware encrypts,
> >> authenticates and provides the ESP packet to the networking hardware.
> >>
> >> Signed-off-by: Bharat Bhushan <[email protected]>
> >> ---
> >> .../marvell/octeontx2/nic/cn10k_ipsec.c | 224 ++++++++++++++++++
> >> .../marvell/octeontx2/nic/cn10k_ipsec.h | 40 ++++
> >> .../marvell/octeontx2/nic/otx2_common.c | 23 ++
> >> .../marvell/octeontx2/nic/otx2_common.h | 3 +
> >> .../ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 +
> >> .../marvell/octeontx2/nic/otx2_txrx.c | 33 ++-
> >> .../marvell/octeontx2/nic/otx2_txrx.h | 3 +
> >> 7 files changed, 325 insertions(+), 3 deletions(-)
> >>
> >> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> >> index 136aebe2a007..1974fda2e0d3 100644
> >> --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> >> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> >> @@ -7,8 +7,11 @@
> >> #include <net/xfrm.h>
> >> #include <linux/netdevice.h>
> >> #include <linux/bitfield.h>
> >> +#include <crypto/aead.h>
> >> +#include <crypto/gcm.h>
> >>
> >> #include "otx2_common.h"
> >> +#include "otx2_struct.h"
> >> #include "cn10k_ipsec.h"
> >>
> >> static bool is_dev_support_inline_ipsec(struct pci_dev *pdev)
> >> @@ -843,3 +846,224 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
> >> cn10k_outb_cpt_clean(pf);
> >> }
> >> EXPORT_SYMBOL(cn10k_ipsec_clean);
> >
> ><...>
> >
> >> +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
> >> + struct otx2_snd_queue *sq, struct sk_buff *skb,
> >> + int num_segs, int size)
> >> +{
> >> + struct cpt_ctx_info_s *sa_info;
> >> + struct cpt_inst_s inst;
> >> + struct cpt_res_s *res;
> >> + struct xfrm_state *x;
> >> + dma_addr_t dptr_iova;
> >> + struct sec_path *sp;
> >> + u8 encap_offset;
> >> + u8 auth_offset;
> >> + u8 gthr_size;
> >> + u8 iv_offset;
> >> + u16 dlen;
> >> +
> >> + /* Check for Inline IPSEC enabled */
> >> + if (!(pf->flags & OTX2_FLAG_INLINE_IPSEC_ENABLED)) {
> >> + netdev_err(pf->netdev, "Ipsec not enabled, drop packet\n");
> >
> ><...>
> >
> >> + netdev_err(pf->netdev, "%s: no xfrm state len = %d\n",
> >> + __func__, sp->len);
> >
> ><...>
> >
> >> + netdev_err(pf->netdev, "no xfrm_input_state()\n");
> >
> ><...>
> >
> >> + netdev_err(pf->netdev, "un supported offload mode %d\n",
> >> + x->props.mode);
> >
> ><...>
> >
> >> + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
> >
> ><...>
> >
> >> + netdev_err(pf->netdev, "Invalid SA conext\n");
> >
> >All these prints are in the datapath and can be triggered by network
> >packets. These and the RX prints need to be deleted.
> >
>
> Yes, all these error messages in datapath should be under netif_msg_tx_err().
Will delete a few of these prints and the rest will be moved under netif_msg_tx_err().
Thanks
-Bharat
>
> Thanks,
> Sunil.
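
[A sketch of the change Sunil suggests, assuming the driver's private structure
carries the standard msg_enable field that netif_msg_tx_err() tests; whether
otx2 already exposes msglvl through ethtool is not confirmed in this thread.]

	/* Before: unconditional error print in the transmit datapath */
	netdev_err(pf->netdev, "Invalid SA context\n");

	/*
	 * After: only print when the user enabled TX error messages
	 * (msg_enable / ethtool msglvl); the packet is dropped either way.
	 */
	if (netif_msg_tx_err(pf))
		netdev_err(pf->netdev, "Invalid SA context\n");
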