From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org, Ard Biesheuvel, Dave Martin,
	Mark Brown, Herbert Xu, Eric Biggers, Will Deacon, Catalin Marinas,
	Thomas Gleixner, Peter Zijlstra, Sebastian Andrzej Siewior,
	Ingo Molnar, Andy Lutomirski
Subject: [PATCH v2 7/9] crypto: arm64/aes-ccm - remove non-SIMD fallback path
Date: Tue, 2 Mar 2021 10:01:16 +0100
Message-Id: <20210302090118.30666-8-ardb@kernel.org>
X-Mailer: git-send-email 2.30.1
In-Reply-To: <20210302090118.30666-1-ardb@kernel.org>
References: <20210302090118.30666-1-ardb@kernel.org>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-Mailing-List: linux-crypto@vger.kernel.org

AES/CCM on arm64 is implemented as a synchronous AEAD, and so the API
guarantees that it is only ever invoked in task or softirq context.
Since softirqs are now only handled when the SIMD unit is not being
used by the task context that was interrupted to service the softirq,
we no longer need a non-SIMD fallback path. Let's remove it.
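As an illustration of the end state (mirroring the first hunk of the diff
below), ccm_update_mac() collapses to an unconditional SIMD section; the
comment is added here for exposition only and is not part of the patch:

static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                           u32 abytes, u32 *macp)
{
        /* NEON is always usable here: callers run in task or softirq context */
        kernel_neon_begin();
        ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
                             num_rounds(key));
        kernel_neon_end();
}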
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/crypto/aes-ce-ccm-glue.c | 151 ++++----------------
 1 file changed, 30 insertions(+), 121 deletions(-)

diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index f6d19b0dc893..e6a7243825a2 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -99,36 +99,10 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
                            u32 abytes, u32 *macp)
 {
-        if (crypto_simd_usable()) {
-                kernel_neon_begin();
-                ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
-                                     num_rounds(key));
-                kernel_neon_end();
-        } else {
-                if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
-                        int added = min(abytes, AES_BLOCK_SIZE - *macp);
-
-                        crypto_xor(&mac[*macp], in, added);
-
-                        *macp += added;
-                        in += added;
-                        abytes -= added;
-                }
-
-                while (abytes >= AES_BLOCK_SIZE) {
-                        aes_encrypt(key, mac, mac);
-                        crypto_xor(mac, in, AES_BLOCK_SIZE);
-
-                        in += AES_BLOCK_SIZE;
-                        abytes -= AES_BLOCK_SIZE;
-                }
-
-                if (abytes > 0) {
-                        aes_encrypt(key, mac, mac);
-                        crypto_xor(mac, in, abytes);
-                        *macp = abytes;
-                }
-        }
+        kernel_neon_begin();
+        ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
+                             num_rounds(key));
+        kernel_neon_end();
 }
 
 static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
@@ -171,54 +145,6 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
         } while (len);
 }
 
-static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
-                              struct crypto_aes_ctx *ctx, bool enc)
-{
-        u8 buf[AES_BLOCK_SIZE];
-        int err = 0;
-
-        while (walk->nbytes) {
-                int blocks = walk->nbytes / AES_BLOCK_SIZE;
-                u32 tail = walk->nbytes % AES_BLOCK_SIZE;
-                u8 *dst = walk->dst.virt.addr;
-                u8 *src = walk->src.virt.addr;
-                u32 nbytes = walk->nbytes;
-
-                if (nbytes == walk->total && tail > 0) {
-                        blocks++;
-                        tail = 0;
-                }
-
-                do {
-                        u32 bsize = AES_BLOCK_SIZE;
-
-                        if (nbytes < AES_BLOCK_SIZE)
-                                bsize = nbytes;
-
-                        crypto_inc(walk->iv, AES_BLOCK_SIZE);
-                        aes_encrypt(ctx, buf, walk->iv);
-                        aes_encrypt(ctx, mac, mac);
-                        if (enc)
-                                crypto_xor(mac, src, bsize);
-                        crypto_xor_cpy(dst, src, buf, bsize);
-                        if (!enc)
-                                crypto_xor(mac, dst, bsize);
-                        dst += bsize;
-                        src += bsize;
-                        nbytes -= bsize;
-                } while (--blocks);
-
-                err = skcipher_walk_done(walk, tail);
-        }
-
-        if (!err) {
-                aes_encrypt(ctx, buf, iv0);
-                aes_encrypt(ctx, mac, mac);
-                crypto_xor(mac, buf, AES_BLOCK_SIZE);
-        }
-        return err;
-}
-
 static int ccm_encrypt(struct aead_request *req)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -241,30 +167,22 @@ static int ccm_encrypt(struct aead_request *req)
 
         err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-        if (crypto_simd_usable()) {
-                while (walk.nbytes) {
-                        u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+        while (walk.nbytes) {
+                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
-                        if (walk.nbytes == walk.total)
-                                tail = 0;
+                if (walk.nbytes == walk.total)
+                        tail = 0;
 
-                        kernel_neon_begin();
-                        ce_aes_ccm_encrypt(walk.dst.virt.addr,
-                                           walk.src.virt.addr,
-                                           walk.nbytes - tail, ctx->key_enc,
-                                           num_rounds(ctx), mac, walk.iv);
-                        kernel_neon_end();
+                kernel_neon_begin();
+                ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                   walk.nbytes - tail, ctx->key_enc,
+                                   num_rounds(ctx), mac, walk.iv);
 
-                        err = skcipher_walk_done(&walk, tail);
-                }
-                if (!err) {
-                        kernel_neon_begin();
-                        ce_aes_ccm_final(mac, buf, ctx->key_enc,
-                                         num_rounds(ctx));
-                        kernel_neon_end();
-                }
-        } else {
-                err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
+                if (walk.nbytes == walk.total)
+                        ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+                kernel_neon_end();
+
+                err = skcipher_walk_done(&walk, tail);
         }
         if (err)
                 return err;
@@ -299,32 +217,23 @@ static int ccm_decrypt(struct aead_request *req)
 
         err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-        if (crypto_simd_usable()) {
-                while (walk.nbytes) {
-                        u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+        while (walk.nbytes) {
+                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
-                        if (walk.nbytes == walk.total)
-                                tail = 0;
+                if (walk.nbytes == walk.total)
+                        tail = 0;
 
-                        kernel_neon_begin();
-                        ce_aes_ccm_decrypt(walk.dst.virt.addr,
-                                           walk.src.virt.addr,
-                                           walk.nbytes - tail, ctx->key_enc,
-                                           num_rounds(ctx), mac, walk.iv);
-                        kernel_neon_end();
+                kernel_neon_begin();
+                ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                   walk.nbytes - tail, ctx->key_enc,
+                                   num_rounds(ctx), mac, walk.iv);
 
-                        err = skcipher_walk_done(&walk, tail);
-                }
-                if (!err) {
-                        kernel_neon_begin();
-                        ce_aes_ccm_final(mac, buf, ctx->key_enc,
-                                         num_rounds(ctx));
-                        kernel_neon_end();
-                }
-        } else {
-                err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
-        }
+                if (walk.nbytes == walk.total)
+                        ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+                kernel_neon_end();
+                err = skcipher_walk_done(&walk, tail);
+        }
 
         if (err)
                 return err;
-- 
2.30.1
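For context only (not part of the patch), a task-context user of the
synchronous "ccm(aes)" AEAD might look roughly like the sketch below.
The function name, buffer layout, and tag size are hypothetical; the
point it illustrates is that a synchronous AEAD request completes
inline, so the driver above is only ever entered from task or softirq
context.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* buf holds len bytes of plaintext with room for a 16-byte tag appended;
 * iv is a 16-byte CCM IV already formatted by the caller (iv[0] = L - 1,
 * followed by the nonce).
 */
static int ccm_encrypt_example(const u8 *key, unsigned int keylen,
                               u8 *buf, unsigned int len, u8 iv[16])
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg;
        int err;

        /* masking out CRYPTO_ALG_ASYNC requests a synchronous implementation */
        tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;
        err = crypto_aead_setauthsize(tfm, 16);
        if (err)
                goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len + 16);
        aead_request_set_callback(req, 0, NULL, NULL);
        aead_request_set_ad(req, 0);
        aead_request_set_crypt(req, &sg, &sg, len, iv);

        /* synchronous: returns only once encryption and tag generation are done */
        err = crypto_aead_encrypt(req);

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}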