From: Aviad Krawczyk
Subject: [PATCH V2 net-next 14/21] net-next/hinic: Initialize cmdq
Date: Wed, 19 Jul 2017 17:19:12 +0800

Create the work queues for cmdq and update the nic about the work queue
contexts. cmdq commands are used for updating the nic about the qp
contexts.

Signed-off-by: Aviad Krawczyk
Signed-off-by: Zhao Chen
---
 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 284 +++++++++++++++++++++-
 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h |  53 ++++
 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h  |   2 +
 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h |   5 +
 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c   | 157 ++++++++++++
 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h   |   8 +
 6 files changed, 503 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 2fd3924..f3a6e24 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -13,11 +13,49 @@
  *
  */
 
+#include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 
 #include "hinic_hw_if.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_wq.h"
 #include "hinic_hw_cmdq.h"
+#include "hinic_hw_io.h"
+#include "hinic_hw_dev.h"
+
+#define CMDQ_DB_OFF                     SZ_2K
+
+#define CMDQ_WQEBB_SIZE                 64
+#define CMDQ_DEPTH                      SZ_4K
+
+#define CMDQ_WQ_PAGE_SIZE               SZ_4K
+
+#define WQE_LCMD_SIZE                   64
+#define WQE_SCMD_SIZE                   64
+
+#define CMDQ_PFN(addr, page_size)       ((addr) >> (ilog2(page_size)))
+
+#define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
+                                             struct hinic_cmdqs, cmdq[0])
+
+#define cmdqs_to_func_to_io(cmdqs)      container_of(cmdqs, \
+                                                     struct hinic_func_to_io, \
+                                                     cmdqs)
+
+enum cmdq_wqe_type {
+        WQE_LCMD_TYPE,
+        WQE_SCMD_TYPE,
+};
 
 /**
  * hinic_alloc_cmdq_buf - alloc buffer for sending command
@@ -29,8 +67,17 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
                          struct hinic_cmdq_buf *cmdq_buf)
 {
-        /* should be implemented */
-        return -ENOMEM;
+        struct hinic_hwif *hwif = cmdqs->hwif;
+        struct pci_dev *pdev = hwif->pdev;
+
+        cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
+                                       &cmdq_buf->dma_addr);
+        if (!cmdq_buf->buf) {
+                dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
+                return -ENOMEM;
+        }
+
+        return 0;
 }
 
 /**
@@ -41,7 +88,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
 void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
                          struct hinic_cmdq_buf *cmdq_buf)
 {
-        /* should be implemented */
+        pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
 }
 
 /**
@@ -63,6 +110,172 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
 }
 
 /**
+ * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
+ * @cmdq: the cmdq
+ * @cmdq_pages: the memory of the queue
+ * @cmdq_ctxt: returned cmdq ctxt
+ **/
+static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
+                                 struct hinic_cmdq_pages *cmdq_pages,
+                                 struct hinic_cmdq_ctxt *cmdq_ctxt)
+{
+        struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
+        struct hinic_hwif *hwif = cmdqs->hwif;
+        struct hinic_wq *wq = cmdq->wq;
+        struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
+        u16 start_ci = atomic_read(&wq->cons_idx);
+        u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
+
+        /* The data in the HW is in Big Endian Format */
+        wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
+
+        pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
+
+        ctxt_info->curr_wqe_page_pfn =
+                HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
+                HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
+                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+                HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+                HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
+
+        /* block PFN - Read Modify Write */
+        cmdq_first_block_paddr = cmdq_pages->page_paddr;
+
+        pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+
+        ctxt_info->wq_block_pfn =
+                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
+                HINIC_CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI);
+
+        cmdq_ctxt->func_idx = HINIC_HWIF_GLOB_IDX(hwif);
+        cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
+}
+
+/**
+ * init_cmdq - initialize cmdq
+ * @cmdq: the cmdq
+ * @wq: the wq attached to the cmdq
+ * @q_type: the cmdq type of the cmdq
+ * @db_area: doorbell area for the cmdq
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
+                     enum hinic_cmdq_type q_type, void __iomem *db_area)
+{
+        int err;
+
+        cmdq->wq = wq;
+        cmdq->cmdq_type = q_type;
+        cmdq->wrapped = 1;
+
+        spin_lock_init(&cmdq->cmdq_lock);
+
+        cmdq->done = vzalloc(wq->q_depth * sizeof(*cmdq->done));
+        if (!cmdq->done)
+                return -ENOMEM;
+
+        cmdq->errcode = vzalloc(wq->q_depth * sizeof(*cmdq->errcode));
+        if (!cmdq->errcode) {
+                err = -ENOMEM;
+                goto errcode_err;
+        }
+
+        cmdq->db_base = db_area + CMDQ_DB_OFF;
+        return 0;
+
+errcode_err:
+        vfree(cmdq->done);
+        return err;
+}
+
+/**
+ * free_cmdq - Free cmdq
+ * @cmdq: the cmdq to free
+ **/
+static void free_cmdq(struct hinic_cmdq *cmdq)
+{
+        vfree(cmdq->errcode);
+        vfree(cmdq->done);
+}
+
+/**
+ * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
+ * @hwdev: the NIC HW device
+ * @cmdqs: cmdqs to write the ctxts for
+ * @db_area: db_area for all the cmdqs
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
+                           struct hinic_cmdqs *cmdqs, void __iomem **db_area)
+{
+        struct hinic_hwif *hwif = hwdev->hwif;
+        struct pci_dev *pdev = hwif->pdev;
+        struct hinic_pfhwdev *pfhwdev;
+        struct hinic_cmdq_ctxt *cmdq_ctxts;
+        enum hinic_cmdq_type type, cmdq_type;
+        size_t cmdq_ctxts_size;
+        int err;
+
+        if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
+                dev_err(&pdev->dev, "Unsupported PCI function type\n");
+                return -EINVAL;
+        }
+
+        cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
+        cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+        if (!cmdq_ctxts)
+                return -ENOMEM;
+
+        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+        cmdq_type = HINIC_CMDQ_SYNC;
+        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+                err = init_cmdq(&cmdqs->cmdq[cmdq_type],
+                                &cmdqs->saved_wqs[cmdq_type], cmdq_type,
+                                db_area[cmdq_type]);
+                if (err) {
+                        dev_err(&pdev->dev, "Failed to initialize cmdq\n");
+                        devm_kfree(&pdev->dev, cmdq_ctxts);
+                        goto init_cmdq_err;
+                }
+
+                cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type],
+                                     &cmdqs->cmdq_pages,
+                                     &cmdq_ctxts[cmdq_type]);
+        }
+
+        /* Write the CMDQ ctxts */
+        cmdq_type = HINIC_CMDQ_SYNC;
+        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+                err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+                                        HINIC_COMM_CMD_CMDQ_CTXT_SET,
+                                        &cmdq_ctxts[cmdq_type],
+                                        sizeof(cmdq_ctxts[cmdq_type]),
+                                        NULL, NULL, HINIC_MGMT_MSG_SYNC);
+                if (err) {
+                        dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
+                                cmdq_type);
+                        devm_kfree(&pdev->dev, cmdq_ctxts);
+                        goto write_cmdq_ctxt_err;
+                }
+        }
+
+        devm_kfree(&pdev->dev, cmdq_ctxts);
+        return 0;
+
+write_cmdq_ctxt_err:
+        cmdq_type = HINIC_MAX_CMDQ_TYPES;
+
+init_cmdq_err:
+        for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
+                free_cmdq(&cmdqs->cmdq[type]);
+
+        return err;
+}
+
+/**
  * hinic_init_cmdqs - init all cmdqs
  * @cmdqs: cmdqs to init
  * @hwif: HW interface for accessing cmdqs
@@ -73,8 +286,54 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
 int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
                      void __iomem **db_area)
 {
-        /* should be implemented */
-        return -EINVAL;
+        struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
+        struct hinic_hwdev *hwdev = container_of(func_to_io, struct hinic_hwdev,
+                                                 func_to_io);
+        struct pci_dev *pdev = hwif->pdev;
+        size_t saved_wqs_size, max_wqe_size;
+        int err;
+
+        cmdqs->hwif = hwif;
+        cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev,
+                                               HINIC_CMDQ_BUF_SIZE,
+                                               HINIC_CMDQ_BUF_SIZE, 0);
+        if (!cmdqs->cmdq_buf_pool)
+                return -ENOMEM;
+
+        saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
+        cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+        if (!cmdqs->saved_wqs) {
+                err = -ENOMEM;
+                goto saved_wqs_err;
+        }
+
+        max_wqe_size = WQE_LCMD_SIZE;
+        err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
+                                   HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
+                                   CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
+        if (err) {
+                dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
+                goto cmdq_wqs_error;
+        }
+
+        err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
+        if (err) {
+                dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
+                goto cmdq_ctxt_err;
+        }
+
+        return 0;
+
+cmdq_ctxt_err:
+        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+                            HINIC_MAX_CMDQ_TYPES);
+
+cmdq_wqs_error:
+        devm_kfree(&pdev->dev, cmdqs->saved_wqs);
+
+saved_wqs_err:
+        pci_pool_destroy(cmdqs->cmdq_buf_pool);
+        return err;
 }
 
 /**
@@ -83,5 +342,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
  **/
 void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
 {
-        /* should be implemented */
+        struct hinic_hwif *hwif = cmdqs->hwif;
+        struct pci_dev *pdev = hwif->pdev;
+        enum hinic_cmdq_type cmdq_type;
+
+        cmdq_type = HINIC_CMDQ_SYNC;
+        for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
+                free_cmdq(&cmdqs->cmdq[cmdq_type]);
+
+        hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
+                            HINIC_MAX_CMDQ_TYPES);
+
+        devm_kfree(&pdev->dev, cmdqs->saved_wqs);
+
+        pci_pool_destroy(cmdqs->cmdq_buf_pool);
 }
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index c9e97ca..5ec59f1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -24,6 +24,40 @@
 #include "hinic_hw_if.h"
 #include "hinic_hw_wq.h"
 
+#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT         0
+#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT                     56
+#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT                   61
+#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT                    62
+#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT                   63
+
+#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK          0xFFFFFFFFFFFFF
+#define HINIC_CMDQ_CTXT_EQ_ID_MASK                      0x1F
+#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK                    0x1
+#define HINIC_CMDQ_CTXT_CEQ_EN_MASK                     0x1
+#define HINIC_CMDQ_CTXT_WRAPPED_MASK                    0x1
+
+#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member)      \
+                (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
+                 << HINIC_CMDQ_CTXT_##member##_SHIFT)
+
+#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member)    \
+                ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
+                 << HINIC_CMDQ_CTXT_##member##_SHIFT)))
+
+#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT              0
+#define HINIC_CMDQ_CTXT_CI_SHIFT                        52
+
+#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK               0xFFFFFFFFFFFFF
+#define HINIC_CMDQ_CTXT_CI_MASK                         0xFFF
+
+#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member)     \
+                (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
+                 << HINIC_CMDQ_CTXT_##member##_SHIFT)
+
+#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member)   \
+                ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
+                 << HINIC_CMDQ_CTXT_##member##_SHIFT)))
+
 #define HINIC_CMDQ_BUF_SIZE             2048
 
 enum hinic_cmdq_type {
@@ -38,6 +72,25 @@ struct hinic_cmdq_buf {
         size_t size;
 };
 
+struct hinic_cmdq_ctxt_info {
+        u64     curr_wqe_page_pfn;
+        u64     wq_block_pfn;
+};
+
+struct hinic_cmdq_ctxt {
+        u8      status;
+        u8      version;
+        u8      rsvd0[6];
+
+        u16     func_idx;
+        u8      cmdq_type;
+        u8      rsvd1[1];
+
+        u8      rsvd2[4];
+
+        struct hinic_cmdq_ctxt_info ctxt_info;
+};
+
 struct hinic_cmdq {
         struct hinic_wq *wq;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index b406360..f8f06d2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -106,6 +106,8 @@
 
 #define HINIC_EQ_PAGE_SIZE              SZ_4K
 
+#define HINIC_CEQ_ID_CMDQ               0
+
 enum hinic_eq_type {
         HINIC_AEQ,
 };
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 4f678c8..4a849f1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -68,6 +68,11 @@ enum hinic_cfg_cmd {
         HINIC_CFG_NIC_CAP = 0,
 };
 
+enum hinic_comm_cmd {
+        HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
+        HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
+};
+
 enum hinic_mgmt_cb_state {
         HINIC_MGMT_CB_ENABLED = BIT(0),
         HINIC_MGMT_CB_RUNNING = BIT(1),
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 8e13e66..1e12946 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -27,6 +27,7 @@
 
 #include "hinic_hw_if.h"
 #include "hinic_hw_wq.h"
+#include "hinic_hw_cmdq.h"
 
 #define WQS_BLOCKS_PER_PAGE             4
 
@@ -42,6 +43,12 @@
 #define WQ_PAGE_ADDR_SIZE               sizeof(u64)
 #define WQ_MAX_PAGES                    (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
 
+#define CMDQ_BLOCKS_PER_PAGE            HINIC_MAX_CMDQ_TYPES
+#define CMDQ_BLOCK_SIZE                 512
+#define CMDQ_PAGE_SIZE                  4096
+
+#define CMDQ_WQ_MAX_PAGES               (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
+
 #define WQ_BASE_VADDR(wqs, wq)          \
                 (void *)(((unsigned long)((wqs)->page_vaddr[(wq)->page_idx])) \
                 + (wq)->block_idx * WQ_BLOCK_SIZE)
@@ -54,6 +61,18 @@
                 (((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
                 + (wq)->block_idx * WQ_BLOCK_SIZE)
 
+#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
+                (void *)(((unsigned long)((cmdq_pages)->page_vaddr)) \
+                + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
+                ((cmdq_pages)->page_paddr \
+                + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
+#define CMDQ_BASE_ADDR(cmdq_pages, wq)  \
+                (((void *)((cmdq_pages)->shadow_page_vaddr)) \
+                + (wq)->block_idx * CMDQ_BLOCK_SIZE)
+
 /**
  * queue_alloc_page - allocate page for Queue
  * @hwif: HW interface for allocating DMA
@@ -122,6 +141,37 @@ static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
         vfree(wqs->shadow_page_vaddr[page_idx]);
 }
 
+/**
+ * cmdq_allocate_page - allocate page for cmdq
+ * @cmdq_pages: the pages of the cmdq queue struct to hold the page
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+        return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
+                                &cmdq_pages->page_paddr,
+                                &cmdq_pages->shadow_page_vaddr,
+                                CMDQ_PAGE_SIZE);
+}
+
+/**
+ * cmdq_free_page - free page from cmdq
+ * @cmdq_pages: the pages of the cmdq queue struct that hold the page
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
+{
+        struct hinic_hwif *hwif = cmdq_pages->hwif;
+        struct pci_dev *pdev = hwif->pdev;
+
+        dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
+                          cmdq_pages->page_vaddr,
+                          (dma_addr_t)cmdq_pages->page_paddr);
+        vfree(cmdq_pages->shadow_page_vaddr);
+}
+
 static int alloc_page_arrays(struct hinic_wqs *wqs)
 {
         struct hinic_hwif *hwif = wqs->hwif;
@@ -521,3 +571,110 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
 
         wqs_return_block(wqs, wq->page_idx, wq->block_idx);
 }
+
+/**
+ * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
+ * @cmdq_pages: will hold the pages of the cmdq
+ * @wq: returned wqs
+ * @hwif: HW interface
+ * @cmdq_blocks: number of cmdq blocks/wq to allocate
+ * @wqebb_size: Work Queue Block Byte Size
+ * @wq_page_size: the page size in the Work Queue
+ * @q_depth: number of wqebbs in WQ
+ * @max_wqe_size: maximum WQE size that will be used in the WQ
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+                         struct hinic_wq *wq, struct hinic_hwif *hwif,
+                         int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+                         u16 q_depth, u16 max_wqe_size)
+{
+        struct pci_dev *pdev = hwif->pdev;
+        u16 num_wqebbs_per_page;
+        int i, j, err = -ENOMEM;
+
+        if (wqebb_size == 0) {
+                dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+                return -EINVAL;
+        }
+
+        if (wq_page_size == 0) {
+                dev_err(&pdev->dev, "wq_page_size must be > 0\n");
+                return -EINVAL;
+        }
+
+        if (q_depth & (q_depth - 1)) {
+                dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
+                return -EINVAL;
+        }
+
+        num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+
+        if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
+                return -EINVAL;
+        }
+
+        cmdq_pages->hwif = hwif;
+
+        err = cmdq_allocate_page(cmdq_pages);
+        if (err) {
+                dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
+                return err;
+        }
+
+        for (i = 0; i < cmdq_blocks; i++) {
+                wq[i].hwif = hwif;
+                wq[i].page_idx = 0;
+                wq[i].block_idx = i;
+
+                wq[i].wqebb_size = wqebb_size;
+                wq[i].wq_page_size = wq_page_size;
+                wq[i].q_depth = q_depth;
+                wq[i].max_wqe_size = max_wqe_size;
+                wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
+
+                wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
+                wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
+                wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
+
+                err = alloc_wq_pages(cmdq_pages->hwif, &wq[i],
+                                     CMDQ_WQ_MAX_PAGES);
+                if (err) {
+                        dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
+                        goto cmdq_block_err;
+                }
+
+                atomic_set(&wq[i].cons_idx, 0);
+                atomic_set(&wq[i].prod_idx, 0);
+                atomic_set(&wq[i].delta, q_depth);
+                wq[i].mask = q_depth - 1;
+        }
+
+        return 0;
+
+cmdq_block_err:
+        for (j = 0; j < i; j++)
+                free_wq_pages(cmdq_pages->hwif, &wq[j], wq[j].num_q_pages);
+
+        cmdq_free_page(cmdq_pages);
+        return err;
+}
+
+/**
+ * hinic_wqs_cmdq_free - Free wqs from cmdqs
+ * @cmdq_pages: hold the pages of the cmdq
+ * @wq: wqs to free
+ * @cmdq_blocks: number of wqs to free
+ **/
+void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+                         struct hinic_wq *wq, int cmdq_blocks)
+{
+        int i;
+
+        for (i = 0; i < cmdq_blocks; i++)
+                free_wq_pages(cmdq_pages->hwif, &wq[i], wq[i].num_q_pages);
+
+        cmdq_free_page(cmdq_pages);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 8ce259a..a3c4469 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -81,6 +81,14 @@ struct hinic_cmdq_pages {
         struct hinic_hwif *hwif;
 };
 
+int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
+                         struct hinic_wq *wq, struct hinic_hwif *hwif,
+                         int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+                         u16 q_depth, u16 max_wqe_size);
+
+void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
+                         struct hinic_wq *wq, int cmdq_blocks);
+
 int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
                     struct hinic_hwif *hwif);
-- 
1.9.1
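
[Editor's note, not part of the patch] For readers unfamiliar with the cmdq
context layout, the sketch below is a small standalone userspace program that
mirrors the bit packing cmdq_init_queue_ctxt() performs with the
HINIC_CMDQ_CTXT_*_SET macros added in hinic_hw_cmdq.h. The shift/mask values
are copied from the patch; the example page address, the local CTXT_SET helper
and the main() wrapper are made up purely for illustration.

/* Illustrative sketch only -- approximates HINIC_CMDQ_CTXT_PAGE_INFO_SET() */
#include <stdint.h>
#include <stdio.h>

#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT             56
#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT           61
#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT            62
#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT           63

#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK  0xFFFFFFFFFFFFFULL
#define HINIC_CMDQ_CTXT_EQ_ID_MASK              0x1FULL
#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK            0x1ULL
#define HINIC_CMDQ_CTXT_CEQ_EN_MASK             0x1ULL
#define HINIC_CMDQ_CTXT_WRAPPED_MASK            0x1ULL

/* local stand-in for the patch's HINIC_CMDQ_CTXT_PAGE_INFO_SET() macro */
#define CTXT_SET(val, member)                                   \
        (((uint64_t)(val) & HINIC_CMDQ_CTXT_##member##_MASK)    \
         << HINIC_CMDQ_CTXT_##member##_SHIFT)

int main(void)
{
        uint64_t wq_page_paddr = 0x12345000ULL; /* example WQ page address */
        uint64_t pfn = wq_page_paddr >> 12;     /* CMDQ_PFN() with a 4K page */
        uint64_t curr_wqe_page_pfn;

        /* same field layout as ctxt_info->curr_wqe_page_pfn in the patch */
        curr_wqe_page_pfn = CTXT_SET(pfn, CURR_WQE_PAGE_PFN) |
                            CTXT_SET(0, EQ_ID) |    /* HINIC_CEQ_ID_CMDQ */
                            CTXT_SET(1, CEQ_ARM) |
                            CTXT_SET(1, CEQ_EN) |
                            CTXT_SET(1, WRAPPED);   /* cmdq->wrapped starts at 1 */

        printf("curr_wqe_page_pfn = 0x%016llx\n",
               (unsigned long long)curr_wqe_page_pfn);
        return 0;
}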