From: Zhou Wang <wangzhou1@hisilicon.com>
To: Herbert Xu, "David S. Miller"
Miller" CC: , , Zhou Wang , Shukun Tan Subject: [PATCH 3/4] crypto: hisilicon - fix large sgl memory allocation problem when disable smmu Date: Mon, 30 Sep 2019 15:08:54 +0800 Message-ID: <1569827335-21822-4-git-send-email-wangzhou1@hisilicon.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1569827335-21822-1-git-send-email-wangzhou1@hisilicon.com> References: <1569827335-21822-1-git-send-email-wangzhou1@hisilicon.com> MIME-Version: 1.0 Content-Type: text/plain X-Originating-IP: [10.67.212.75] X-CFilter-Loop: Reflected Sender: linux-crypto-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-crypto@vger.kernel.org When disabling SMMU, it may fail to allocate large continuous memory. This patch fixes this by allocating memory as blocks. Signed-off-by: Zhou Wang Signed-off-by: Shukun Tan --- drivers/crypto/hisilicon/sgl.c | 83 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index f71de0d..f017361 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -8,6 +8,7 @@ #define HISI_ACC_SGL_SGE_NR_MIN 1 #define HISI_ACC_SGL_NR_MAX 256 #define HISI_ACC_SGL_ALIGN_SIZE 64 +#define HISI_ACC_MEM_BLOCK_NR 5 struct acc_hw_sge { dma_addr_t buf; @@ -31,9 +32,13 @@ struct hisi_acc_hw_sgl { } __aligned(1); struct hisi_acc_sgl_pool { - struct hisi_acc_hw_sgl *sgl; - dma_addr_t sgl_dma; - size_t size; + struct mem_block { + struct hisi_acc_hw_sgl *sgl; + dma_addr_t sgl_dma; + size_t size; + } mem_block[HISI_ACC_MEM_BLOCK_NR]; + u32 sgl_num_per_block; + u32 block_num; u32 count; u32 sge_nr; size_t sgl_size; @@ -51,33 +56,66 @@ struct hisi_acc_sgl_pool { struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr) { + u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0; struct hisi_acc_sgl_pool *pool; - u32 sgl_size; - u32 size; + struct mem_block *block; + u32 i, j; if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) return ERR_PTR(-EINVAL); sgl_size = sizeof(struct acc_hw_sge) * sge_nr + sizeof(struct hisi_acc_hw_sgl); - size = sgl_size * count; + block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1)); + sgl_num_per_block = block_size / sgl_size; + block_num = count / sgl_num_per_block; + remain_sgl = count % sgl_num_per_block; + + if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) || + (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1)) + return ERR_PTR(-EINVAL); pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return ERR_PTR(-ENOMEM); + block = pool->mem_block; - pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL); - if (!pool->sgl) { - kfree(pool); - return ERR_PTR(-ENOMEM); + for (i = 0; i < block_num; i++) { + block[i].sgl = dma_alloc_coherent(dev, block_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) + goto err_free_mem; + + block[i].size = block_size; } - pool->size = size; + if (remain_sgl > 0) { + block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) + goto err_free_mem; + + block[i].size = remain_sgl * sgl_size; + } + + pool->sgl_num_per_block = sgl_num_per_block; + pool->block_num = remain_sgl ? 
 	pool->count = count;
 	pool->sgl_size = sgl_size;
 	pool->sge_nr = sge_nr;
 
 	return pool;
+
+err_free_mem:
+	for (j = 0; j < i; j++) {
+		dma_free_coherent(dev, block_size, block[j].sgl,
+				  block[j].sgl_dma);
+		memset(block + j, 0, sizeof(*block));
+	}
+	kfree(pool);
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
 
@@ -90,10 +128,18 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
  */
 void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
 {
+	struct mem_block *block;
+	int i;
+
 	if (!dev || !pool)
 		return;
 
-	dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
+	block = pool->mem_block;
+
+	for (i = 0; i < pool->block_num; i++)
+		dma_free_coherent(dev, block[i].size, block[i].sgl,
+				  block[i].sgl_dma);
+
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
@@ -101,11 +147,18 @@ EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
 struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
 				    dma_addr_t *hw_sgl_dma)
 {
-	if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
+	struct mem_block *block;
+	u32 block_index, offset;
+
+	if (!pool || !hw_sgl_dma || index >= pool->count)
 		return ERR_PTR(-EINVAL);
 
-	*hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
-	return (void *)pool->sgl + pool->sgl_size * index;
+	block = pool->mem_block;
+	block_index = index / pool->sgl_num_per_block;
+	offset = index % pool->sgl_num_per_block;
+
+	*hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
+	return (void *)block[block_index].sgl + pool->sgl_size * offset;
 }
 
 void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
-- 
2.8.1
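
For anyone who wants to check the block-splitting arithmetic above in
isolation, the small standalone C sketch below mirrors the size and index
calculations from hisi_acc_create_sgl_pool() and acc_get_sgl(). It is only
an illustration: PAGE_SIZE = 4096 and MAX_ORDER = 11 are assumed example
values, and the acc_hw_sge/hisi_acc_hw_sgl sizes are placeholder numbers,
so the printed figures will differ from a real system.

/* Standalone sketch of the block-splitting and index-lookup arithmetic.
 * Assumptions (not taken from the patch): PAGE_SIZE = 4096, MAX_ORDER = 11,
 * and simplified stand-in sizes for the hardware SGE/SGL structures.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_MAX_ORDER	11u
#define MEM_BLOCK_NR		5u

int main(void)
{
	unsigned int count = 1024;	/* SGLs requested for the pool */
	unsigned int sge_nr = 255;	/* SGEs per SGL */
	unsigned int sge_size = 16;	/* stand-in for sizeof(struct acc_hw_sge) */
	unsigned int hdr_size = 32;	/* stand-in for sizeof(struct hisi_acc_hw_sgl) */

	unsigned int sgl_size = sge_size * sge_nr + hdr_size;
	unsigned int block_size = EXAMPLE_PAGE_SIZE * (1u << (EXAMPLE_MAX_ORDER - 1));
	unsigned int sgl_num_per_block = block_size / sgl_size;
	unsigned int block_num = count / sgl_num_per_block;
	unsigned int remain_sgl = count % sgl_num_per_block;

	/* Same capacity check as the patch: the pool must fit in MEM_BLOCK_NR blocks. */
	if ((!remain_sgl && block_num > MEM_BLOCK_NR) ||
	    (remain_sgl > 0 && block_num > MEM_BLOCK_NR - 1)) {
		fprintf(stderr, "pool does not fit in %u blocks\n", MEM_BLOCK_NR);
		return 1;
	}

	printf("sgl_size=%u block_size=%u sgl_num_per_block=%u full_blocks=%u remainder=%u\n",
	       sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl);

	/* Index lookup as in acc_get_sgl(): pick the block, then the offset inside it. */
	unsigned int index = 700;
	unsigned int block_index = index / sgl_num_per_block;
	unsigned int offset = index % sgl_num_per_block;

	printf("index %u -> block %u, offset %u (byte offset %u)\n",
	       index, block_index, offset, offset * sgl_size);

	return 0;
}

With the assumed numbers this splits a 1024-entry pool into one full 4 MiB
block plus a small remainder block, which is the shape of allocation the
patch switches to instead of a single large dma_alloc_coherent() call.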