2023-02-01 12:31:42

by Tianjia Zhang

Subject: [PATCH v3] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption

When the total cryption length is zero, GCM cryption calls
skcipher_walk_done() and causes an unexpected crash, so skip calling
this function to avoid a possible crash when the GCM cryption length
is equal to zero.

This patch also rewrites the skcipher walker loop and separates the
cryption of the last chunk from the walker loop. Besides following the
usual convention of checking walk->nbytes, this makes the execution
logic of the loop clearer and easier to understand.

Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
Signed-off-by: Tianjia Zhang <[email protected]>
---
arch/arm64/crypto/sm4-ce-gcm-glue.c | 43 ++++++++++++++---------------
1 file changed, 20 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
index c450a2025ca9..80ac4e94a90d 100644
--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
@@ -143,7 +143,7 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
{
u8 __aligned(8) iv[SM4_BLOCK_SIZE];
be128 __aligned(8) lengths;
- int err;
+ int err = 0;

memset(ghash, 0, SM4_BLOCK_SIZE);

@@ -158,34 +158,31 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
if (req->assoclen)
gcm_calculate_auth_mac(req, ghash);

- do {
+ while (walk->nbytes && walk->nbytes != walk->total) {
unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
- const u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
-
- if (walk->nbytes == walk->total) {
- tail = 0;
-
- sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
- walk->nbytes, ghash,
- ctx->ghash_table,
- (const u8 *)&lengths);
- } else if (walk->nbytes - tail) {
- sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
- walk->nbytes - tail, ghash,
- ctx->ghash_table, NULL);
- }
+
+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
+ walk->src.virt.addr, iv,
+ walk->nbytes - tail, ghash,
+ ctx->ghash_table, NULL);

kernel_neon_end();

err = skcipher_walk_done(walk, tail);
- if (err)
- return err;
- if (walk->nbytes)
- kernel_neon_begin();
- } while (walk->nbytes > 0);

- return 0;
+ kernel_neon_begin();
+ }
+
+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
+ walk->src.virt.addr, iv, walk->nbytes, ghash,
+ ctx->ghash_table, (const u8 *)&lengths);
+
+ kernel_neon_end();
+
+ if (walk->nbytes)
+ err = skcipher_walk_done(walk, 0);
+
+ return err;
}

static int gcm_encrypt(struct aead_request *req)
--
2.24.3 (Apple Git-128)



2023-02-02 08:35:13

by Herbert Xu

Subject: [v4 PATCH] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption

On Wed, Feb 01, 2023 at 08:31:33PM +0800, Tianjia Zhang wrote:
>
> + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
> + walk->src.virt.addr, iv, walk->nbytes, ghash,
> + ctx->ghash_table, (const u8 *)&lengths);

I still think this is error-prone. When walk->nbytes == 0,
walk->src and walk->dst are undefined. Sure you could argue
that the underlying assembly code won't touch the values, but
accessing uninitialised memory even if just to throw them away
is still a bit icky.
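
To make the difference concrete, a reduced sketch (taken from the hunk
quoted above and from my version below):

	/* after the loop walk->nbytes may already be zero, in which case
	 * the virt.addr fields below were never set up by the walker */
	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
			       walk->src.virt.addr, iv, walk->nbytes, ghash,
			       ctx->ghash_table, (const u8 *)&lengths);

	/* passing NULL explicitly avoids reading those undefined fields */
	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
			       walk->nbytes, ghash, ctx->ghash_table,
			       (const u8 *)&lengths);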

Anyway, here's my attempt at rewriting the gcm loop:

---8<---
An often overlooked aspect of the skcipher walker API is that an
error is not just indicated by a non-zero return value, but by the
fact that walk->nbytes is zero.

Thus it is an error to call skcipher_walk_done after getting back
walk->nbytes == 0 from the previous interaction with the walker.

This is because when walk->nbytes is zero the walker is left in
an undefined state and any further calls to it may try to free
uninitialised stack memory.

The sm4 arm64 gcm code gets this wrong and ends up calling
skcipher_walk_done even when walk->nbytes is zero.

This patch rewrites the loop in a form that resembles other callers.
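
Condensed, the resulting shape of gcm_crypt() is as follows (the crypt
calls and the NEON bracketing are elided):

	/* err from skcipher_walk_aead_encrypt()/_decrypt() is passed in;
	 * on a walker error walk->nbytes is already zero */
	while (walk->nbytes) {
		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;

		if (walk->nbytes == walk->total) {
			/* last chunk: crypt it together with the lengths block */
			return skcipher_walk_done(walk, 0);
		}

		/* crypt the full blocks of this chunk */
		err = skcipher_walk_done(walk, tail);
	}

	/* reached only for empty requests or after a walker error:
	 * lengths-only call with nbytes == 0, then return the saved err */
	return err;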

Reported-by: Tianjia Zhang <[email protected]>
Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
Signed-off-by: Herbert Xu <[email protected]>

diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
index c450a2025ca9..73bfb6972d3a 100644
--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
@@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
}

static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
- struct sm4_gcm_ctx *ctx, u8 ghash[],
+ u8 ghash[], int err,
void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
u8 *dst, const u8 *src, u8 *iv,
unsigned int nbytes, u8 *ghash,
const u8 *ghash_table, const u8 *lengths))
{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 __aligned(8) iv[SM4_BLOCK_SIZE];
be128 __aligned(8) lengths;
- int err;

memset(ghash, 0, SM4_BLOCK_SIZE);

lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(walk->total * 8);

- memcpy(iv, walk->iv, GCM_IV_SIZE);
+ memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(2, iv + GCM_IV_SIZE);

kernel_neon_begin();
@@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
if (req->assoclen)
gcm_calculate_auth_mac(req, ghash);

- do {
+ while (walk->nbytes) {
unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;

if (walk->nbytes == walk->total) {
- tail = 0;
-
sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
walk->nbytes, ghash,
ctx->ghash_table,
(const u8 *)&lengths);
- } else if (walk->nbytes - tail) {
- sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
- walk->nbytes - tail, ghash,
- ctx->ghash_table, NULL);
+
+ kernel_neon_end();
+
+ return skcipher_walk_done(walk, 0);
}

+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+ walk->nbytes - tail, ghash,
+ ctx->ghash_table, NULL);
+
kernel_neon_end();

err = skcipher_walk_done(walk, tail);
- if (err)
- return err;
- if (walk->nbytes)
- kernel_neon_begin();
- } while (walk->nbytes > 0);

- return 0;
+ kernel_neon_begin();
+ }
+
+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
+ walk->nbytes, ghash, ctx->ghash_table,
+ (const u8 *)&lengths);
+
+ kernel_neon_end();
+
+ return err;
}

static int gcm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
struct skcipher_walk walk;
int err;

err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (err)
- return err;
-
- err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
+ err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
if (err)
return err;

@@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
- struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
u8 authtag[SM4_BLOCK_SIZE];
struct skcipher_walk walk;
int err;

err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (err)
- return err;
-
- err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
+ err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
if (err)
return err;

--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2023-02-03 02:45:02

by Tianjia Zhang

Subject: Re: [v4 PATCH] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption

Hi Herbert,

On 2/2/23 4:33 PM, Herbert Xu wrote:
> On Wed, Feb 01, 2023 at 08:31:33PM +0800, Tianjia Zhang wrote:
>>
>> + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
>> + walk->src.virt.addr, iv, walk->nbytes, ghash,
>> + ctx->ghash_table, (const u8 *)&lengths);
>
> I still think this is error-prone. When walk->nbytes == 0,
> walk->src and walk->dst are undefined. Sure you could argue
> that the underlying assembly code won't touch the values, but
> accessing uninitialised memory even if just to throw them away
> is still a bit icky.

You're right, whether it is used or not, accessing an undefined pointer
is always ugly. I learned a lot from this.

>
> Anyway, here's my attempt at rewriting the gcm loop:
>
> ---8<---
> An often overlooked aspect of the skcipher walker API is that an
> error is not just indicated by a non-zero return value, but by the
> fact that walk->nbytes is zero.
>
> Thus it is an error to call skcipher_walk_done after getting back
> walk->nbytes == 0 from the previous interaction with the walker.
>
> This is because when walk->nbytes is zero the walker is left in
> an undefined state and any further calls to it may try to free
> uninitialised stack memory.
>
> The sm4 arm64 gcm code gets this wrong and ends up calling
> skcipher_walk_done even when walk->nbytes is zero.
>
> This patch rewrites the loop in a form that resembles other callers.
>
> Reported-by: Tianjia Zhang <[email protected]>
> Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
> Signed-off-by: Herbert Xu <[email protected]>

Thanks for the fix, this patch works fine for me, so

Tested-by: Tianjia Zhang <[email protected]>

>
> diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
> index c450a2025ca9..73bfb6972d3a 100644
> --- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
> +++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
> @@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
> }
>
> static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
> - struct sm4_gcm_ctx *ctx, u8 ghash[],
> + u8 ghash[], int err,
> void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
> u8 *dst, const u8 *src, u8 *iv,
> unsigned int nbytes, u8 *ghash,
> const u8 *ghash_table, const u8 *lengths))
> {
> + struct crypto_aead *aead = crypto_aead_reqtfm(req);
> + struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
> u8 __aligned(8) iv[SM4_BLOCK_SIZE];
> be128 __aligned(8) lengths;
> - int err;
>
> memset(ghash, 0, SM4_BLOCK_SIZE);
>
> lengths.a = cpu_to_be64(req->assoclen * 8);
> lengths.b = cpu_to_be64(walk->total * 8);
>
> - memcpy(iv, walk->iv, GCM_IV_SIZE);
> + memcpy(iv, req->iv, GCM_IV_SIZE);
> put_unaligned_be32(2, iv + GCM_IV_SIZE);
>
> kernel_neon_begin();
> @@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
> if (req->assoclen)
> gcm_calculate_auth_mac(req, ghash);
>
> - do {
> + while (walk->nbytes) {
> unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
> const u8 *src = walk->src.virt.addr;
> u8 *dst = walk->dst.virt.addr;
>
> if (walk->nbytes == walk->total) {
> - tail = 0;
> -
> sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
> walk->nbytes, ghash,
> ctx->ghash_table,
> (const u8 *)&lengths);
> - } else if (walk->nbytes - tail) {
> - sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
> - walk->nbytes - tail, ghash,
> - ctx->ghash_table, NULL);
> +
> + kernel_neon_end();
> +
> + return skcipher_walk_done(walk, 0);
> }
>
> + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
> + walk->nbytes - tail, ghash,
> + ctx->ghash_table, NULL);
> +
> kernel_neon_end();
>
> err = skcipher_walk_done(walk, tail);
> - if (err)
> - return err;
> - if (walk->nbytes)
> - kernel_neon_begin();
> - } while (walk->nbytes > 0);
>
> - return 0;
> + kernel_neon_begin();
> + }
> +
> + sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
> + walk->nbytes, ghash, ctx->ghash_table,
> + (const u8 *)&lengths);
> +
> + kernel_neon_end();
> +
> + return err;
> }
>
> static int gcm_encrypt(struct aead_request *req)
> {
> struct crypto_aead *aead = crypto_aead_reqtfm(req);
> - struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
> u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
> struct skcipher_walk walk;
> int err;
>
> err = skcipher_walk_aead_encrypt(&walk, req, false);
> - if (err)
> - return err;
> -
> - err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
> + err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
> if (err)
> return err;
>
> @@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
> {
> struct crypto_aead *aead = crypto_aead_reqtfm(req);
> unsigned int authsize = crypto_aead_authsize(aead);
> - struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
> u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
> u8 authtag[SM4_BLOCK_SIZE];
> struct skcipher_walk walk;
> int err;
>
> err = skcipher_walk_aead_decrypt(&walk, req, false);
> - if (err)
> - return err;
> -
> - err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
> + err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
> if (err)
> return err;
>