2023-01-10 13:55:45

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 00/12] crypto: axis - make tests pass

This series fixes some problems in the ARTPEC-6 crypto driver. After this
series, both the self-tests and several dozen rounds of the random tests enabled
with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS pass. There are also a couple of fixes
for problems seen when using this driver along with CIFS.

Cc: [email protected]
Cc: [email protected]

Lars Persson (1):
crypto: axis - do not DMA to ahash_request.result

Vincent Whitchurch (11):
crypto: axis - do not DMA to IV
crypto: axis - fix CTR output IV
crypto: axis - fix in-place CBC output IV
crypto: axis - validate AEAD authsize
crypto: axis - reject invalid sizes
crypto: axis - fix XTS blocksize
crypto: axis - add skcipher fallback
crypto: axis - add fallback for AEAD
crypto: axis - fix XTS unaligned block size handling
crypto: axis - handle zero cryptlen
crypto: axis - allow small size for AEAD

drivers/crypto/Kconfig | 4 +
drivers/crypto/axis/artpec6_crypto.c | 288 ++++++++++++++++++++++-----
2 files changed, 239 insertions(+), 53 deletions(-)

--
2.34.1


2023-01-10 13:55:47

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 04/12] crypto: axis - fix in-place CBC output IV

When CBC is done in-place, the ->src is overwritten by the time the
operation is done, so the output IV must be based on a backup of the
ciphertext.

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/axis/artpec6_crypto.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 67f510c497f2..87f82c314e48 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -321,6 +321,7 @@ struct artpec6_crypto_request_context {
u32 cipher_md;
bool decrypt;
struct artpec6_crypto_req_common common;
+ unsigned char last_ciphertext[AES_BLOCK_SIZE];
unsigned char iv_bounce[AES_BLOCK_SIZE] CRYPTO_MINALIGN_ATTR;
};

@@ -1158,6 +1159,10 @@ static int artpec6_crypto_decrypt(struct skcipher_request *req)

switch (ctx->crypto_type) {
case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ scatterwalk_map_and_copy(req_ctx->last_ciphertext, req->src,
+ req->cryptlen - sizeof(req_ctx->last_ciphertext),
+ sizeof(req_ctx->last_ciphertext), 0);
+
complete = artpec6_crypto_complete_cbc_decrypt;
break;
case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
@@ -2185,10 +2190,10 @@ artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
struct skcipher_request *cipher_req = container_of(req,
struct skcipher_request, base);
+ struct artpec6_crypto_request_context *req_ctx = skcipher_request_ctx(cipher_req);
+
+ memcpy(cipher_req->iv, req_ctx->last_ciphertext, sizeof(req_ctx->last_ciphertext));

- scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
- cipher_req->cryptlen - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, 0);
req->complete(req, 0);
}

--
2.34.1

2023-01-10 13:55:47

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 01/12] crypto: axis - do not DMA to ahash_request.result

From: Lars Persson <[email protected]>

The crypto API does not promise that the result pointer is suitable
for DMA. Use an intermediate result buffer and let the CPU copy the
digest to the ahash_request.

Signed-off-by: Lars Persson <[email protected]>
Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/axis/artpec6_crypto.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 51c66afbe677..87af44ac3e64 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -276,6 +276,7 @@ enum artpec6_crypto_hash_flags {
HASH_FLAG_FINALIZE = 8,
HASH_FLAG_HMAC = 16,
HASH_FLAG_UPDATE_KEY = 32,
+ HASH_FLAG_FINALIZED = 64,
};

struct artpec6_crypto_req_common {
@@ -1493,12 +1494,15 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
return error;

/* Descriptor for the final result */
- error = artpec6_crypto_setup_in_descr(common, areq->result,
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->digeststate,
digestsize,
true);
if (error)
return error;

+ req_ctx->hash_flags |= HASH_FLAG_FINALIZED;
+
} else { /* This is not the final operation for this request */
if (!run_hw)
return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
@@ -2216,6 +2220,14 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)

static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
+ struct ahash_request *areq = container_of(req, struct ahash_request, base);
+ struct artpec6_hash_request_context *ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ size_t digestsize = crypto_ahash_digestsize(ahash);
+
+ if (ctx->hash_flags & HASH_FLAG_FINALIZED)
+ memcpy(areq->result, ctx->digeststate, digestsize);
+
req->complete(req, 0);
}

--
2.34.1

2023-01-10 13:56:00

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 05/12] crypto: axis - validate AEAD authsize

Validate the AEAD authsize to fix errors like this with
CRYPTO_MANAGER_EXTRA_TESTS:

alg: aead: artpec-gcm-aes setauthsize unexpectedly succeeded on test
vector "random: alen=0 plen=60 authsize=6 klen=17 novrfy=0";
expected_error=-22

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/axis/artpec6_crypto.c | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 87f82c314e48..0ffe6e0045aa 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1274,6 +1274,12 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
return 0;
}

+static int artpec6_crypto_aead_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
int ret;
@@ -2829,6 +2835,7 @@ static struct aead_alg aead_algos[] = {
{
.init = artpec6_crypto_aead_init,
.setkey = artpec6_crypto_aead_set_key,
+ .setauthsize = artpec6_crypto_aead_setauthsize,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
.ivsize = GCM_AES_IV_SIZE,
--
2.34.1

2023-01-10 13:56:03

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 09/12] crypto: axis - add fallback for AEAD

The hardware has a limit of 64 DMA descriptors. If we hit the limit we
currently just fail the crypto operation, but this could result in
failures higher up in the stack such as in CIFS.

Add a fallback for the gcm(aes) AEAD algorithm. The fallback handling
is based on drivers/crypto/amcc/crypto4xx_{algo,core}.c.

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/Kconfig | 1 +
drivers/crypto/axis/artpec6_crypto.c | 58 +++++++++++++++++++++++++++-
2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5aa4bfb648ec..5615c9f2641e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -773,6 +773,7 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_CBC
select CRYPTO_CTR
select CRYPTO_ECB
+ select CRYPTO_GCM
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index a05f0927f753..3b47faa06606 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -331,6 +331,7 @@ struct artpec6_cryptotfm_context {
u32 key_md;
int crypto_type;
struct crypto_sync_skcipher *fallback;
+ struct crypto_aead *aead_fallback;
};

struct artpec6_crypto_aead_hw_ctx {
@@ -1317,20 +1318,54 @@ static int artpec6_crypto_aead_init(struct crypto_aead *tfm)

memset(tfm_ctx, 0, sizeof(*tfm_ctx));

+ tfm_ctx->aead_fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
+ 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tfm_ctx->aead_fallback))
+ return PTR_ERR(tfm_ctx->aead_fallback);
+
crypto_aead_set_reqsize(tfm,
- sizeof(struct artpec6_crypto_aead_req_ctx));
+ max(sizeof(struct aead_request) + 32 +
+ crypto_aead_reqsize(tfm_ctx->aead_fallback),
+ sizeof(struct artpec6_crypto_aead_req_ctx)));

return 0;
}

+static void artpec6_crypto_aead_exit(struct crypto_aead *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->aead_fallback);
+}
+
+static int artpec6_crypto_aead_fallback_set_key(struct artpec6_cryptotfm_context *ctx,
+ struct crypto_aead *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_aead *fallback = ctx->aead_fallback;
+
+ crypto_aead_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(fallback,
+ crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+ return crypto_aead_setkey(fallback, key, keylen);
+}
+
static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
unsigned int len)
{
struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
+ int ret;

if (len != 16 && len != 24 && len != 32)
return -EINVAL;

+ ret = artpec6_crypto_aead_fallback_set_key(ctx, tfm, key, len);
+ if (ret < 0)
+ return ret;
+
ctx->key_length = len;

memcpy(ctx->aes_key, key, len);
@@ -1343,6 +1378,21 @@ static int artpec6_crypto_aead_setauthsize(struct crypto_aead *tfm,
return crypto_gcm_check_authsize(authsize);
}

+static int artpec6_crypto_aead_fallback(struct aead_request *req, bool encrypt)
+{
+ struct artpec6_cryptotfm_context *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, tfm_ctx->aead_fallback);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return encrypt ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
int ret;
@@ -1358,6 +1408,8 @@ static int artpec6_crypto_aead_encrypt(struct aead_request *req)
ret = artpec6_crypto_prepare_aead(req);
if (ret) {
artpec6_crypto_common_destroy(&req_ctx->common);
+ if (ret == -ENOSPC)
+ return artpec6_crypto_aead_fallback(req, true);
return ret;
}

@@ -1383,6 +1435,8 @@ static int artpec6_crypto_aead_decrypt(struct aead_request *req)
ret = artpec6_crypto_prepare_aead(req);
if (ret) {
artpec6_crypto_common_destroy(&req_ctx->common);
+ if (ret == -ENOSPC)
+ return artpec6_crypto_aead_fallback(req, false);
return ret;
}

@@ -2884,6 +2938,7 @@ static struct skcipher_alg crypto_algos[] = {
static struct aead_alg aead_algos[] = {
{
.init = artpec6_crypto_aead_init,
+ .exit = artpec6_crypto_aead_exit,
.setkey = artpec6_crypto_aead_set_key,
.setauthsize = artpec6_crypto_aead_setauthsize,
.encrypt = artpec6_crypto_aead_encrypt,
@@ -2897,6 +2952,7 @@ static struct aead_alg aead_algos[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
--
2.34.1

2023-01-10 13:56:06

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 06/12] crypto: axis - reject invalid sizes

Reject invalid sizes in block ciphers to fix hangs in
CRYPTO_MANAGER_EXTRA_TESTS like this:

artpec6-ecb-aes "random: len=55 klen=32" decryption random:
inplace_one_sglist use_final nosimd src_divs=[<reimport>87.4%@+1524,
<flush>12.96%@+3553] key_offset=84

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/axis/artpec6_crypto.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0ffe6e0045aa..78d067ce4138 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1188,6 +1188,22 @@ static int artpec6_crypto_decrypt(struct skcipher_request *req)
return artpec6_crypto_submit(&req_ctx->common);
}

+static int artpec6_crypto_block_encrypt(struct skcipher_request *req)
+{
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
+ return artpec6_crypto_encrypt(req);
+}
+
+static int artpec6_crypto_block_decrypt(struct skcipher_request *req)
+{
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
+ return artpec6_crypto_decrypt(req);
+}
+
static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
@@ -2757,8 +2773,8 @@ static struct skcipher_alg crypto_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = artpec6_crypto_cipher_set_key,
- .encrypt = artpec6_crypto_encrypt,
- .decrypt = artpec6_crypto_decrypt,
+ .encrypt = artpec6_crypto_block_encrypt,
+ .decrypt = artpec6_crypto_block_decrypt,
.init = artpec6_crypto_aes_ecb_init,
.exit = artpec6_crypto_aes_exit,
},
@@ -2802,8 +2818,8 @@ static struct skcipher_alg crypto_algos[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = artpec6_crypto_cipher_set_key,
- .encrypt = artpec6_crypto_encrypt,
- .decrypt = artpec6_crypto_decrypt,
+ .encrypt = artpec6_crypto_block_encrypt,
+ .decrypt = artpec6_crypto_block_decrypt,
.init = artpec6_crypto_aes_cbc_init,
.exit = artpec6_crypto_aes_exit
},
--
2.34.1

2023-01-10 13:56:21

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 08/12] crypto: axis - add skcipher fallback

The hardware has a limit of 64 DMA descriptors. If we hit the limit we
currently just fail the crypto operation, but this could result in
failures higher up in the stack such as in CIFS. Use software fallbacks
for all skcipher algos to handle this case.

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/Kconfig | 3 +
drivers/crypto/axis/artpec6_crypto.c | 110 ++++++++++++++++++---------
2 files changed, 75 insertions(+), 38 deletions(-)

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dfb103f81a64..5aa4bfb648ec 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -770,11 +770,14 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
+ select CRYPTO_CBC
select CRYPTO_CTR
+ select CRYPTO_ECB
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_XTS
help
Enables the driver for the on-chip crypto accelerator
of Axis ARTPEC SoCs.
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 5f30f3d0315f..a05f0927f753 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1088,7 +1088,7 @@ artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
/*
* Ciphering functions.
*/
-static int artpec6_crypto_encrypt(struct skcipher_request *req)
+static int __artpec6_crypto_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
@@ -1136,7 +1136,7 @@ static int artpec6_crypto_encrypt(struct skcipher_request *req)
return artpec6_crypto_submit(&req_ctx->common);
}

-static int artpec6_crypto_decrypt(struct skcipher_request *req)
+static int __artpec6_crypto_decrypt(struct skcipher_request *req)
{
int ret;
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
@@ -1188,6 +1188,53 @@ static int artpec6_crypto_decrypt(struct skcipher_request *req)
return artpec6_crypto_submit(&req_ctx->common);
}

+static int artpec6_crypto_crypt_fallback(struct skcipher_request *req,
+ bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ int ret;
+
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
+ if (ret)
+ return ret;
+
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ return ret;
+}
+
+static int artpec6_crypto_encrypt(struct skcipher_request *req)
+{
+ int ret;
+
+ ret = __artpec6_crypto_encrypt(req);
+ if (ret != -ENOSPC)
+ return ret;
+
+ return artpec6_crypto_crypt_fallback(req, true);
+}
+
+static int artpec6_crypto_decrypt(struct skcipher_request *req)
+{
+ int ret;
+
+ ret = __artpec6_crypto_decrypt(req);
+ if (ret != -ENOSPC)
+ return ret;
+
+ return artpec6_crypto_crypt_fallback(req, false);
+}
+
static int artpec6_crypto_block_encrypt(struct skcipher_request *req)
{
if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
@@ -1570,18 +1617,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

-
-static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
-{
- struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
-
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
- ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
-
- return 0;
-}
-
-static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+static int artpec6_crypto_aes_init(struct crypto_skcipher *tfm, int crypto_type)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

@@ -1592,44 +1628,39 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
return PTR_ERR(ctx->fallback);

tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
- ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
+ ctx->crypto_type = crypto_type;

return 0;
}

-static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
- struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+ return artpec6_crypto_aes_init(tfm, ARTPEC6_CRYPTO_CIPHER_AES_ECB);
+}

- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
- ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
+static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ return artpec6_crypto_aes_init(tfm, ARTPEC6_CRYPTO_CIPHER_AES_CTR);
+}

- return 0;
+static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ return artpec6_crypto_aes_init(tfm, ARTPEC6_CRYPTO_CIPHER_AES_CBC);
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
- struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
-
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
- ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
-
- return 0;
+ return artpec6_crypto_aes_init(tfm, ARTPEC6_CRYPTO_CIPHER_AES_XTS);
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

- memset(ctx, 0, sizeof(*ctx));
-}
+ if (ctx->fallback)
+ crypto_free_sync_skcipher(ctx->fallback);

-static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
-{
- struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_sync_skcipher(ctx->fallback);
- artpec6_crypto_aes_exit(tfm);
+ memset(ctx, 0, sizeof(*ctx));
}

static int
@@ -2764,7 +2795,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2799,7 +2831,7 @@ static struct skcipher_alg crypto_algos[] = {
.encrypt = artpec6_crypto_ctr_encrypt,
.decrypt = artpec6_crypto_ctr_decrypt,
.init = artpec6_crypto_aes_ctr_init,
- .exit = artpec6_crypto_aes_ctr_exit,
+ .exit = artpec6_crypto_aes_exit,
},
/* AES - CBC */
{
@@ -2808,7 +2840,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2830,7 +2863,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
--
2.34.1

2023-01-10 13:57:12

by Vincent Whitchurch

[permalink] [raw]
Subject: [PATCH 12/12] crypto: axis - allow small size for AEAD

Allow sizes smaller than the AES block size to fix this failure with
CRYPTO_MANAGER_EXTRA_TESTS:

alg: aead: artpec-gcm-aes decryption failed on test vector "random:
alen=0 plen=1 authsize=4 klen=32 novrfy=0"; expected_error=0,
actual_error=-22, cfg="random: inplace_one_sglist may_sleep use_final
src_divs=[<reimport>9.71%@+778, <flush>23.43%@+2818, 52.69%@+6,
<flush>11.98%@+1030, 2.19%@+3986] iv_offset=40 key_offset=32"

Signed-off-by: Vincent Whitchurch <[email protected]>
---
drivers/crypto/axis/artpec6_crypto.c | 2 --
1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 938faf3afa69..b6fa2af42cd0 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1452,8 +1452,6 @@ static int artpec6_crypto_aead_decrypt(struct aead_request *req)
struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

req_ctx->decrypt = true;
- if (req->cryptlen < AES_BLOCK_SIZE)
- return -EINVAL;

ret = artpec6_crypto_common_init(&req_ctx->common,
&req->base,
--
2.34.1

2023-01-20 09:12:54

by Herbert Xu

[permalink] [raw]
Subject: Re: [PATCH 01/12] crypto: axis - do not DMA to ahash_request.result

On Tue, Jan 10, 2023 at 02:50:31PM +0100, Vincent Whitchurch wrote:
>
> @@ -2216,6 +2220,14 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
>
> static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
> {
> + struct ahash_request *areq = container_of(req, struct ahash_request, base);
> + struct artpec6_hash_request_context *ctx = ahash_request_ctx(areq);
> + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
> + size_t digestsize = crypto_ahash_digestsize(ahash);
> +
> + if (ctx->hash_flags & HASH_FLAG_FINALIZED)
> + memcpy(areq->result, ctx->digeststate, digestsize);
> +

I was just looking through the driver and digeststate does not
appear to be aligned to the DMA cacheline, should it be?

Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2023-01-27 15:35:18

by Vincent Whitchurch

[permalink] [raw]
Subject: Re: [PATCH 01/12] crypto: axis - do not DMA to ahash_request.result

On Fri, Jan 20, 2023 at 10:09:18AM +0100, Herbert Xu wrote:
> On Tue, Jan 10, 2023 at 02:50:31PM +0100, Vincent Whitchurch wrote:
> >
> > @@ -2216,6 +2220,14 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
> >
> > static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
> > {
> > + struct ahash_request *areq = container_of(req, struct ahash_request, base);
> > + struct artpec6_hash_request_context *ctx = ahash_request_ctx(areq);
> > + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
> > + size_t digestsize = crypto_ahash_digestsize(ahash);
> > +
> > + if (ctx->hash_flags & HASH_FLAG_FINALIZED)
> > + memcpy(areq->result, ctx->digeststate, digestsize);
> > +
>
> I was just looking through the driver and digeststate does not
> appear to be aligned to the DMA cacheline, should it be?

Yes, you're right, thanks, that buffer and a few others are missing
alignment annotations. I'll add a patch to fix that when I respin the
series.