2019-11-12 22:32:12

by Sami Tolvanen

Subject: [PATCH] crypto: arm64/sha: fix function types

Declare assembly functions with the expected function type
instead of casting pointers in C to avoid type mismatch failures
with Control-Flow Integrity (CFI) checking.

Signed-off-by: Sami Tolvanen <[email protected]>
---
arch/arm64/crypto/sha1-ce-glue.c | 12 +++++-------
arch/arm64/crypto/sha2-ce-glue.c | 26 +++++++++++---------------
arch/arm64/crypto/sha256-glue.c | 30 ++++++++++++------------------
arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
arch/arm64/crypto/sha512-glue.c | 13 +++++--------
5 files changed, 43 insertions(+), 61 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..3153a9bbb683 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -25,7 +25,7 @@ struct sha1_ce_state {
u32 finalize;
};

-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
int blocks);

const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
@@ -41,8 +41,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
kernel_neon_end();

return 0;
@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -81,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a4dacedfe4d4 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -25,7 +25,7 @@ struct sha256_ce_state {
u32 finalize;
};

-asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks);

const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
@@ -33,7 +33,8 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
finalize);

-asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks);

static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -42,12 +43,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_block_data_order);

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, sha2_ce_transform);
kernel_neon_end();

return 0;
@@ -62,9 +62,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
return sha256_base_finish(desc, out);
}

@@ -75,11 +74,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, sha2_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -89,14 +86,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);

if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
return sha256_base_finish(desc, out);
}

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..dac3157937ba 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -23,28 +23,25 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks);
EXPORT_SYMBOL(sha256_block_data_order);

-asmlinkage void sha256_block_neon(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_neon(struct sha256_state *sst, u8 const *src,
+ int blocks);

static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);

return sha256_base_finish(desc, out);
}
@@ -87,7 +84,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_block_data_order);

while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +100,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;

kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +114,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..0f964235d753 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -27,18 +27,18 @@ MODULE_LICENSE("GPL v2");
asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks);

-asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks);

static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_block_data_order);

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
kernel_neon_end();

return 0;
@@ -50,16 +50,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
@@ -67,13 +65,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..0f6b610a7954 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,25 +20,22 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha512_block_data_order(struct sha512_state *sst,
+ u8 const *src, int blocks);
EXPORT_SYMBOL(sha512_block_data_order);

static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
}

static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (len)
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);

return sha512_base_finish(desc, out);
}

base-commit: 100d46bd72ec689a5582c2f5f4deadc5bcb92d60
--
2.24.0.rc1.363.gb1bccd3e3d-goog


2019-11-13 18:28:22

by Kees Cook

Subject: Re: [PATCH] crypto: arm64/sha: fix function types

On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
>
> Signed-off-by: Sami Tolvanen <[email protected]>

Looks good, yes. This looks very similar to what I needed to do for
x86's SHA routines.

Reviewed-by: Kees Cook <[email protected]>

--
Kees Cook

2019-11-13 20:04:53

by Eric Biggers

Subject: Re: [PATCH] crypto: arm64/sha: fix function types

On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> Declare assembly functions with the expected function type
> instead of casting pointers in C to avoid type mismatch failures
> with Control-Flow Integrity (CFI) checking.
>
> Signed-off-by: Sami Tolvanen <[email protected]>
> ---
> arch/arm64/crypto/sha1-ce-glue.c | 12 +++++-------
> arch/arm64/crypto/sha2-ce-glue.c | 26 +++++++++++---------------
> arch/arm64/crypto/sha256-glue.c | 30 ++++++++++++------------------
> arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> arch/arm64/crypto/sha512-glue.c | 13 +++++--------
> 5 files changed, 43 insertions(+), 61 deletions(-)
>
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index bdc1b6d7aff7..3153a9bbb683 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -25,7 +25,7 @@ struct sha1_ce_state {
> u32 finalize;
> };
>
> -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> int blocks);

Please update the comments in the corresponding assembly files too.

Also, this change doesn't really make sense because the assembly functions still
expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
not present in struct sha1_state. There should either be wrapper functions that
explicitly do the cast from sha1_state to sha1_ce_state, or there should be
comments in the assembly files that very clearly explain that although the
function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Likewise for SHA-256 and SHA-512.

- Eric
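
As background for Eric's point, here is a minimal user-space sketch, not taken from the thread and using simplified stand-in names, of the pattern CFI rejects: an indirect call through a function pointer whose type does not match the callee's real prototype.

#include <stdio.h>

struct sha1_state { unsigned int count; };
struct sha1_ce_state { struct sha1_state sst; unsigned int finalize; };

/* Callback type the generic helpers expect. */
typedef void block_fn(struct sha1_state *sst, const unsigned char *src,
                      int blocks);

/* Stand-in for the assembly routine, which wants the wrapping ce_state. */
static void ce_transform(struct sha1_ce_state *sst, const unsigned char *src,
                         int blocks)
{
        printf("finalize=%u blocks=%d\n", sst->finalize, blocks);
}

static void do_update(struct sha1_state *sst, block_fn *fn)
{
        fn(sst, (const unsigned char *)"", 1); /* indirect call checked by CFI */
}

int main(void)
{
        struct sha1_ce_state ctx = { .finalize = 0 };

        /*
         * The v1 pattern: cast away the type mismatch.  Built with
         * clang -flto -fvisibility=hidden -fsanitize=cfi-icall, the
         * indirect call in do_update() traps, because ce_transform's
         * real type is not block_fn.  A wrapper with the expected
         * prototype, as the later v2 patch adds, avoids the cast.
         */
        do_update(&ctx.sst, (block_fn *)ce_transform);
        return 0;
}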

2019-11-13 22:29:34

by Sami Tolvanen

Subject: Re: [PATCH] crypto: arm64/sha: fix function types

On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <[email protected]> wrote:
>
> On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > Declare assembly functions with the expected function type
> > instead of casting pointers in C to avoid type mismatch failures
> > with Control-Flow Integrity (CFI) checking.
> >
> > Signed-off-by: Sami Tolvanen <[email protected]>
> > ---
> > arch/arm64/crypto/sha1-ce-glue.c | 12 +++++-------
> > arch/arm64/crypto/sha2-ce-glue.c | 26 +++++++++++---------------
> > arch/arm64/crypto/sha256-glue.c | 30 ++++++++++++------------------
> > arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> > arch/arm64/crypto/sha512-glue.c | 13 +++++--------
> > 5 files changed, 43 insertions(+), 61 deletions(-)
> >
> > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > index bdc1b6d7aff7..3153a9bbb683 100644
> > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > u32 finalize;
> > };
> >
> > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > int blocks);
>
> Please update the comments in the corresponding assembly files too.
>
> Also, this change doesn't really make sense because the assembly functions still
> expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> not present in struct sha1_state. There should either be wrapper functions that
> explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> comments in the assembly files that very clearly explain that although the
> function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.

Agreed, this needs a comment explaining the type mismatch. I'm also
fine with using wrapper functions and explicitly casting the
parameters instead of changing function declarations. Herbert, Ard,
any preferences?

Sami

2019-11-14 09:46:30

by Ard Biesheuvel

Subject: Re: [PATCH] crypto: arm64/sha: fix function types

On Wed, 13 Nov 2019 at 22:28, Sami Tolvanen <[email protected]> wrote:
>
> On Wed, Nov 13, 2019 at 12:04 PM Eric Biggers <[email protected]> wrote:
> >
> > On Tue, Nov 12, 2019 at 02:30:46PM -0800, Sami Tolvanen wrote:
> > > Declare assembly functions with the expected function type
> > > instead of casting pointers in C to avoid type mismatch failures
> > > with Control-Flow Integrity (CFI) checking.
> > >
> > > Signed-off-by: Sami Tolvanen <[email protected]>
> > > ---
> > > arch/arm64/crypto/sha1-ce-glue.c | 12 +++++-------
> > > arch/arm64/crypto/sha2-ce-glue.c | 26 +++++++++++---------------
> > > arch/arm64/crypto/sha256-glue.c | 30 ++++++++++++------------------
> > > arch/arm64/crypto/sha512-ce-glue.c | 23 ++++++++++-------------
> > > arch/arm64/crypto/sha512-glue.c | 13 +++++--------
> > > 5 files changed, 43 insertions(+), 61 deletions(-)
> > >
> > > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > > index bdc1b6d7aff7..3153a9bbb683 100644
> > > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > > @@ -25,7 +25,7 @@ struct sha1_ce_state {
> > > u32 finalize;
> > > };
> > >
> > > -asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > > +asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > > int blocks);
> >
> > Please update the comments in the corresponding assembly files too.
> >
> > Also, this change doesn't really make sense because the assembly functions still
> > expect struct sha1_ce_state, and they access sha1_ce_state::finalize which is
> > not present in struct sha1_state. There should either be wrapper functions that
> > explicitly do the cast from sha1_state to sha1_ce_state, or there should be
> > comments in the assembly files that very clearly explain that although the
> > function prototype takes sha1_state, it's really assumed to be a sha1_ce_state.
>
> Agreed, this needs a comment explaining the type mismatch. I'm also
> fine with using wrapper functions and explicitly casting the
> parameters instead of changing function declarations. Herbert, Ard,
> any preferences?
>

I guess the former would be cleaner, using container_of() rather than
a blind cast to make the code more self-documenting. The extra branch
shouldn't really matter.
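
To make the container_of() suggestion concrete, a rough stand-alone sketch with simplified names and a hand-expanded macro (the real macro in <linux/kernel.h> also type-checks the pointer); this mirrors what the v2 patch below ends up doing:

#include <stddef.h>
#include <stdio.h>

struct sha1_state { unsigned int count; };
struct sha1_ce_state { struct sha1_state sst; unsigned int finalize; };

/* Simplified expansion for illustration only. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void sha1_ce_transform(struct sha1_ce_state *sst,
                              const unsigned char *src, int blocks)
{
        printf("finalize=%u blocks=%d\n", sst->finalize, blocks);
}

/* Wrapper with the prototype the generic helpers expect; no cast needed. */
static void __sha1_ce_transform(struct sha1_state *sst,
                                const unsigned char *src, int blocks)
{
        /*
         * sst points at the sst member embedded in a sha1_ce_state, so
         * subtracting the member offset recovers the enclosing struct.
         */
        sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
                          src, blocks);
}

int main(void)
{
        struct sha1_ce_state ctx = { .finalize = 1 };

        __sha1_ce_transform(&ctx.sst, (const unsigned char *)"", 1);
        return 0;
}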

2019-11-14 22:52:00

by Sami Tolvanen

Subject: [PATCH v2] crypto: arm64/sha: fix function types

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.

Signed-off-by: Sami Tolvanen <[email protected]>
---
arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..76a951ce2a7b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);

+static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
+ src, blocks);
+}
+
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);

@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
kernel_neon_end();

return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..6042555517b0 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);

+static inline void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
+ src, blocks);
+}
+
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,

asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

+static inline void __sha256_block_data_order(struct sha256_state *sst,
+ u8 const *src, int blocks)
+{
+ return sha256_block_data_order(sst->state, src, blocks);
+}
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
kernel_neon_end();

return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);

if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..bb0f89382d70 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

+static inline void __sha256_block_data_order(struct sha256_state *sst,
+ u8 const *src, int blocks)
+{
+ return sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);

+static inline void __sha256_block_neon(struct sha256_state *sst,
+ u8 const *src, int blocks)
+{
+ return sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);

return sha256_base_finish(desc, out);
}
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;

kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, __sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..076fcae454e0 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,

asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);

+static inline void __sha512_block_data_order(struct sha512_state *sst,
+ u8 const *src, int blocks)
+{
+ return sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
kernel_neon_end();

return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..125aac89c3c4 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha512_block_data_order);

+static inline void __sha512_block_data_order(struct sha512_state *sst,
+ u8 const *src, int blocks)
+{
+ return sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
}

static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);

return sha512_base_finish(desc, out);
}

base-commit: 96b95eff4a591dbac582c2590d067e356a18aacb
--
2.24.0.432.g9d3f5f5b63-goog

2019-11-19 20:04:11

by Sami Tolvanen

Subject: Re: [PATCH v2] crypto: arm64/sha: fix function types

On Fri, Nov 15, 2019 at 3:32 AM Ard Biesheuvel
<[email protected]> wrote:
>
> On Thu, 14 Nov 2019 at 22:51, Sami Tolvanen <[email protected]> wrote:
> >
> > Instead of casting pointers to callback functions, add C wrappers
> > to avoid type mismatch failures with Control-Flow Integrity (CFI)
> > checking.
> >
> > Signed-off-by: Sami Tolvanen <[email protected]>
> > ---
> > arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
> > arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
> > arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
> > arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
> > arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
> > 5 files changed, 76 insertions(+), 48 deletions(-)
> >
> > diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> > index bdc1b6d7aff7..76a951ce2a7b 100644
> > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > @@ -28,6 +28,13 @@ struct sha1_ce_state {
> > asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> > int blocks);
> >
> > +static inline void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > + int blocks)
>
> Nit: making a function inline when all we ever do is take its address
> is rather pointless, so please drop that (below as well)

Ack, I'll send v3 that removes the extra inlines shortly.

> With that fixed (and assuming that the crypto selftests still pass -
> please confirm that you've tried that)

I don't have a test device that supports sha512-ce, but self-tests for
everything else pass with these changes.

Sami

2019-11-19 20:14:45

by Sami Tolvanen

Subject: [PATCH v3] crypto: arm64/sha: fix function types

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.

Signed-off-by: Sami Tolvanen <[email protected]>
Reviewed-by: Ard Biesheuvel <[email protected]>
---
Changes in v3:
- Removed unnecessary inline attributes.

Changes in v2:
- Added wrapper functions instead of changing parameter types
for the assembly functions.

---
arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..ee8d9f0edb1a 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);

+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
+ src, blocks);
+}
+
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);

@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
kernel_neon_end();

return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..2b05e95ab2b5 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);

+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst),
+ src, blocks);
+}
+
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,

asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha256_block_data_order(sst->state, src, blocks);
+}
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
kernel_neon_end();

return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);

if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..03ed0358c6eb 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);

+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);

return sha256_base_finish(desc, out);
}
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;

kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, __sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..659c913cd056 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,

asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);

+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
kernel_neon_end();

return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..86045ae596b1 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha512_block_data_order);

+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ return sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
}

static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);

return sha512_base_finish(desc, out);
}

base-commit: fd8f64df95204951c3edd4c4a7817c909d55a100
--
2.24.0.432.g9d3f5f5b63-goog


2019-11-22 11:09:43

by Herbert Xu

Subject: Re: [PATCH v3] crypto: arm64/sha: fix function types

On Fri, Nov 22, 2019 at 07:04:46PM +0800, Herbert Xu wrote:
> On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> > Instead of casting pointers to callback functions, add C wrappers
> > to avoid type mismatch failures with Control-Flow Integrity (CFI)
> > checking.
> >
> > Signed-off-by: Sami Tolvanen <[email protected]>
> > Reviewed-by: Ard Biesheuvel <[email protected]>
> > ---
> > Changes in v3:
> > - Removed unnecessary inline attributes.
> >
> > Changes in v2:
> > - Added wrapper functions instead of changing parameter types
> > for the assembly functions.
> >
> > ---
> > arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
> > arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
> > arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
> > arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
> > arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
> > 5 files changed, 76 insertions(+), 48 deletions(-)
>
> Patch applied. Thanks.

Scratch that. This patch is still in the queue.
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2019-11-27 18:21:05

by Eric Biggers

Subject: Re: [PATCH v3] crypto: arm64/sha: fix function types

On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> +static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> + int blocks)
> +{
> + return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> + src, blocks);
> +}
> +

'return' isn't needed in functions that return void.

Likewise everywhere else in this patch.

Otherwise this patch looks fine.

- Eric
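
A minimal illustration of the point above, with hypothetical names rather than
the patch's own. Both forms compile, since returning a void expression from a
void function is legal C, but the bare call is what v4 switches to:

static void helper(void) { }

/* v3 style: legal because helper() also returns void, but the 'return' is redundant */
static void wrapper_v3(void)
{
	return helper();
}

/* v4 style: a bare call, as Eric suggests */
static void wrapper_v4(void)
{
	helper();
}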

2019-11-27 23:43:38

by Sami Tolvanen

Subject: Re: [PATCH v3] crypto: arm64/sha: fix function types

On Wed, Nov 27, 2019 at 10:19 AM Eric Biggers <[email protected]> wrote:
>
> On Tue, Nov 19, 2019 at 12:13:53PM -0800, Sami Tolvanen wrote:
> > +static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
> > + int blocks)
> > +{
> > + return sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst),
> > + src, blocks);
> > +}
> > +
>
> 'return' isn't needed in functions that return void.
>
> Likewise everywhere else in this patch.
>
> Otherwise this patch looks fine.

Thanks for taking a look, Eric. I'll send out v4 with this fixed.

Sami

2019-11-27 23:55:47

by Sami Tolvanen

Subject: [PATCH v4] crypto: arm64/sha: fix function types

Instead of casting pointers to callback functions, add C wrappers
to avoid type mismatch failures with Control-Flow Integrity (CFI)
checking.

Signed-off-by: Sami Tolvanen <[email protected]>
Reviewed-by: Ard Biesheuvel <[email protected]>
---
Changes in v4:
- Removed unnecessary returns.

Changes in v3:
- Removed unnecessary inline attributes.

Changes in v2:
- Added wrapper functions instead of changing parameter types
for the assembly functions.

---
arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
5 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index bdc1b6d7aff7..63c875d3314b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -28,6 +28,13 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);

+static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks)
+{
+ sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
+ blocks);
+}
+
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);

@@ -41,8 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
kernel_neon_end();

return 0;
@@ -64,10 +70,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_update(desc, data, len, __sha1_ce_transform);
if (!finalize)
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -81,7 +86,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)

sctx->finalize = 0;
kernel_neon_begin();
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
+ sha1_base_do_finalize(desc, __sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 604a01a4ede6..a8e67bafba3d 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -28,6 +28,13 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);

+static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src,
+ blocks);
+}
+
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
sst.count);
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
@@ -35,6 +42,12 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,

asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -42,12 +55,11 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
kernel_neon_end();

return 0;
@@ -62,9 +74,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

@@ -75,11 +86,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
sctx->finalize = finalize;

kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize)
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -89,14 +98,13 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
struct sha256_ce_state *sctx = shash_desc_ctx(desc);

if (!crypto_simd_usable()) {
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}

sctx->finalize = 0;
kernel_neon_begin();
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
+ sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e273faca924f..01e0ab36d135 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);

+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);

return sha256_base_finish(desc, out);
}
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,

if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);

while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;

kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, __sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index 2369540040aa..dc890a719f54 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -29,16 +29,21 @@ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,

asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);

+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
kernel_neon_end();

return 0;
@@ -50,16 +55,14 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_ce_transform);
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_update(desc, data, len, sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
@@ -67,13 +70,12 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);
return sha512_base_finish(desc, out);
}

kernel_neon_begin();
- sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, sha512_ce_transform);
kernel_neon_end();
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index d915c656e5fe..78d3083de6b7 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -20,15 +20,21 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

-asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+asmlinkage void sha512_block_data_order(u64 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha512_block_data_order);

+static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ sha512_block_data_order(sst->state, src, blocks);
+}
+
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
}

static int sha512_finup(struct shash_desc *desc, const u8 *data,
@@ -36,9 +42,8 @@ static int sha512_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ __sha512_block_data_order);
+ sha512_base_do_finalize(desc, __sha512_block_data_order);

return sha512_base_finish(desc, out);
}

base-commit: 95f1fa9e3418d50ce099e67280b5497b9c93843b
--
2.24.0.432.g9d3f5f5b63-goog
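
For context, the wrapper pattern used throughout the patch can be reduced to a
small, self-contained user-space sketch (hypothetical names and struct layouts,
not the kernel code). CFI's indirect-call check passes only when the function
pointer's type matches the callee's actual prototype; the old cast produced a
mismatch, and the thin wrapper removes it:

#include <stdio.h>

struct state { unsigned long long st[8]; };
struct ce_state { struct state base; unsigned int finalize; };

/* The prototype the generic helpers expect for their callback. */
typedef void block_fn(struct state *sst, const unsigned char *src, int blocks);

/* Stands in for the assembly routine, which takes the derived state type. */
static void ce_transform(struct ce_state *sst, const unsigned char *src, int blocks)
{
	(void)src;
	sst->base.st[0] += (unsigned long long)blocks;
}

/* Thin wrapper with the exact block_fn prototype, as in the v2+ patches.
 * (base is the first member here, so the cast mirrors container_of.) */
static void ce_transform_wrapper(struct state *sst, const unsigned char *src, int blocks)
{
	ce_transform((struct ce_state *)sst, src, blocks);
}

int main(void)
{
	struct ce_state s = { .finalize = 0 };
	unsigned char buf[64] = { 0 };

	/* block_fn *fp = (block_fn *)ce_transform;   old style: type mismatch */
	block_fn *fp = ce_transform_wrapper;       /* matching type: CFI-safe */

	fp(&s.base, buf, 1);
	printf("%llu\n", s.base.st[0]);
	return 0;
}

Built with Clang's -fsanitize=cfi-icall (which requires -flto), the
commented-out cast variant would trip the check at the indirect call, while
the wrapper variant runs normally.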

2019-12-11 09:40:52

by Herbert Xu

Subject: Re: [PATCH v4] crypto: arm64/sha: fix function types

On Wed, Nov 27, 2019 at 03:55:03PM -0800, Sami Tolvanen wrote:
> Instead of casting pointers to callback functions, add C wrappers
> to avoid type mismatch failures with Control-Flow Integrity (CFI)
> checking.
>
> Signed-off-by: Sami Tolvanen <[email protected]>
> Reviewed-by: Ard Biesheuvel <[email protected]>
> ---
> Changes in v4:
> - Removed unnecessary returns.
>
> Changes in v3:
> - Removed unnecessary inline attributes.
>
> Changes in v2:
> - Added wrapper functions instead of changing parameter types
> for the assembly functions.
>
> ---
> arch/arm64/crypto/sha1-ce-glue.c | 17 +++++++++------
> arch/arm64/crypto/sha2-ce-glue.c | 34 ++++++++++++++++++------------
> arch/arm64/crypto/sha256-glue.c | 32 +++++++++++++++++-----------
> arch/arm64/crypto/sha512-ce-glue.c | 26 ++++++++++++-----------
> arch/arm64/crypto/sha512-glue.c | 15 ++++++++-----
> 5 files changed, 76 insertions(+), 48 deletions(-)

Patch applied. Thanks.
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt