Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S940920AbcKXLxv (ORCPT );
	Thu, 24 Nov 2016 06:53:51 -0500
Received: from mail-pg0-f67.google.com ([74.125.83.67]:34025 "EHLO
	mail-pg0-f67.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S936137AbcKXLxF (ORCPT );
	Thu, 24 Nov 2016 06:53:05 -0500
From: sunil.kovvuri@gmail.com
To: netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	Sunil Goutham
Subject: [PATCH 3/4] net: thunderx: Configure RED and backpressure levels
Date: Thu, 24 Nov 2016 14:48:02 +0530
Message-Id: <1479979083-15963-4-git-send-email-sunil.kovvuri@gmail.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1479979083-15963-1-git-send-email-sunil.kovvuri@gmail.com>
References: <1479979083-15963-1-git-send-email-sunil.kovvuri@gmail.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 4900
Lines: 131

From: Sunil Goutham

This patch enables moving average calculation of Rx queue (CQ and RBDR)
resource levels and configures RED and backpressure levels for both.
Also initialize SQ's CQ_LIMIT properly.

Signed-off-by: Sunil Goutham
---
 drivers/net/ethernet/cavium/thunder/nic_main.c     |  9 ++++++++
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c |  9 ++++++--
 drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 24 +++++++++++++++++-----
 drivers/net/ethernet/cavium/thunder/q_struct.h     |  8 ++++++--
 4 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index b87d416..17490ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -809,6 +809,15 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 
 	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
 
+	/* Enable moving average calculation.
+	 * Keep the LVL/AVG delay to the HW enforced minimum so that not
+	 * too many packets sneak in between average calculations.
+	 */
+	nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
+		      (BIT_ULL(20) | 0x2ull << 14 | 0x1));
+	nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
+		      (BIT_ULL(20) | 0x3ull << 14 | 0x1));
+
 	return 0;
 }
 
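For reference, the two magic constants written above pack an enable bit
and the two delays named in the comment. Below is an illustrative
decomposition; the AVG_CFG_* macro names and the exact meaning of the
fields at bits 14 and 0 are my own reading of the comment, not
definitions from the driver or the hardware manual:

/* Illustrative only: bit 20 enables averaging; per the comment, the
 * other two fields are the LVL and AVG delays, kept at the minimum
 * the hardware enforces. Relies on the driver's nic.h for struct nicpf,
 * nic_reg_write() and the register offsets.
 */
#define AVG_CFG_ENA		BIT_ULL(20)
#define AVG_CFG_LVL_DLY(x)	((u64)(x) << 14)
#define AVG_CFG_AVG_DLY(x)	((u64)(x) << 0)

static void nic_enable_avg_calc(struct nicpf *nic)
{
	/* Same two writes as the hunk above, with the fields spelled out */
	nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
		      AVG_CFG_ENA | AVG_CFG_LVL_DLY(0x2) | AVG_CFG_AVG_DLY(0x1));
	nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
		      AVG_CFG_ENA | AVG_CFG_LVL_DLY(0x3) | AVG_CFG_AVG_DLY(0x1));
}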
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 747ef08..7b336cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -544,14 +544,18 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
-	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
+		     (qs->vnic_id << 0);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	/* RQ drop config
 	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
 	 */
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
-	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
+		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	if (!nic->sqs_mode && (qidx == 0)) {
@@ -650,6 +654,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
 	sq_cfg.ldwb = 0;
 	sq_cfg.qsize = SND_QSIZE;
 	sq_cfg.tstmp_bgx_intf = 0;
+	sq_cfg.cq_limit = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
 
 	/* Set threshold value for interrupt generation */
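The shifts above fully determine the layout of the RQ_DROP_CFG mailbox
word: the two enable bits in bits 63/62, RBDR pass/drop levels at bits
40/32, and CQ pass/drop levels at bits 16/8 (RQ_BP_CFG likewise carries
the pass levels at bits 16/8 next to the vnic_id). A small helper making
that packing explicit; the function name is mine, and it assumes the
kernel's u64/BIT_ULL as used in the driver:

/* Pack RED/backpressure levels the way nicvf_rcv_queue_config() does
 * for NIC_MBOX_MSG_RQ_DROP_CFG (bit positions taken from the hunk above).
 */
static inline u64 rq_drop_cfg(u64 rbdr_pass, u64 rbdr_drop,
			      u64 cq_pass, u64 cq_drop)
{
	return BIT_ULL(63) | BIT_ULL(62) |	/* enable bits */
	       (rbdr_pass << 40) | (rbdr_drop << 32) |
	       (cq_pass << 16) | (cq_drop << 8);
}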
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 2e3c940..20511f2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -85,12 +85,26 @@
 
 #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
 				 MAX_CQE_PER_PKT_XMIT)
-/* Calculate number of CQEs to reserve for all SQEs.
- * Its 1/256th level of CQ size.
- * '+ 1' to account for pipelining
+
+/* RED and Backpressure levels of CQ for pkt reception
+ * For CQ, level is a measure of emptiness i.e 0x0 means full
+ * eg: For CQ of size 4K, and for pass/drop levels of 128/96
+ * HW accepts pkt if unused CQE >= 2048
+ * RED accepts pkt if unused CQE < 2048 & >= 1536
+ * DROPs pkts if unused CQE < 1536
+ */
+#define RQ_PASS_CQ_LVL		128ULL
+#define RQ_DROP_CQ_LVL		96ULL
+
+/* RED and Backpressure levels of RBDR for pkt reception
+ * For RBDR, level is a measure of fullness i.e 0x0 means empty
+ * eg: For RBDR of size 8K, and for pass/drop levels of 8/0
+ * HW accepts pkt if unused RBs >= 256
+ * RED accepts pkt if unused RBs < 256 & >= 0
+ * DROPs pkts if unused RBs < 0
  */
-#define RQ_CQ_DROP		((256 / (CMP_QUEUE_LEN / \
-				 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
+#define RQ_PASS_RBDR_LVL	8ULL
+#define RQ_DROP_RBDR_LVL	0ULL
 
 /* Descriptor size in bytes */
 #define SND_QUEUE_DESC_SIZE	16
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
index 9e6d987..f363472 100644
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -624,7 +624,9 @@ struct cq_cfg {
 
 struct sq_cfg {
 #if defined(__BIG_ENDIAN_BITFIELD)
-	u64 reserved_20_63:44;
+	u64 reserved_32_63:32;
+	u64 cq_limit:8;
+	u64 reserved_20_23:4;
 	u64 ena:1;
 	u64 reserved_18_18:1;
 	u64 reset:1;
@@ -642,7 +644,9 @@
 	u64 reset:1;
 	u64 reserved_18_18:1;
 	u64 ena:1;
-	u64 reserved_20_63:44;
+	u64 reserved_20_23:4;
+	u64 cq_limit:8;
+	u64 reserved_32_63:32;
 #endif
 };
 
-- 
2.7.4
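A closing note on the arithmetic in the two comments above: the pass and
drop levels appear to be expressed in 1/256ths of the queue size (the
removed RQ_CQ_DROP comment said as much for the CQ), which reproduces
every threshold quoted. A standalone sketch, with a helper name of my
own, that checks the numbers:

#include <stdio.h>

/* Threshold implied by a RED/backpressure level, assuming levels are
 * in units of 1/256th of the queue size.
 */
static unsigned int level_to_threshold(unsigned int qsize, unsigned int level)
{
	return qsize * level / 256;
}

int main(void)
{
	/* CQ of size 4K, pass/drop levels 128/96 -> 2048/1536 unused CQEs */
	printf("CQ   pass/drop: %u/%u\n",
	       level_to_threshold(4096, 128), level_to_threshold(4096, 96));
	/* RBDR of size 8K, pass/drop levels 8/0 -> 256/0 unused RBs */
	printf("RBDR pass/drop: %u/%u\n",
	       level_to_threshold(8192, 8), level_to_threshold(8192, 0));
	return 0;
}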