2022-11-21 00:53:21

by Taehee Yoo

Subject: [PATCH v6 2/4] crypto: aria: do not use magic number offsets of aria_ctx

The aria-avx assembly code accesses the members of struct aria_ctx with
magic-number offsets. If the layout of struct aria_ctx is changed
carelessly, aria-avx will not work. So the assembly needs to access the
members of aria_ctx through correct offset values, not magic numbers.

Add ARIA_CTX_enc_key, ARIA_CTX_dec_key, and ARIA_CTX_rounds to
asm-offsets.c so that the correct offset definitions are generated at
build time. The aria-avx assembly code can then access the members of
aria_ctx safely through these definitions.
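
For reference, asm-offsets.c turns each OFFSET() entry into a "#define"
in the generated <asm/asm-offsets.h>. A simplified sketch of the kbuild
helpers involved (based on include/linux/kbuild.h; treat the details as
illustrative rather than exact):

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define BLANK() asm volatile("\n.ascii \"->\"" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/*
 * Each OFFSET() emits a marker string into the compiler's assembly
 * output; Kbuild's sed script converts those markers into lines such as
 * "#define ARIA_CTX_rounds 544" in asm/asm-offsets.h, which the .S file
 * can then use after #include <asm/asm-offsets.h>.
 */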

Signed-off-by: Taehee Yoo <[email protected]>
---

v6:
- Rebase for "CFI fixes" patchset.
https://lore.kernel.org/linux-crypto/[email protected]/T/#t

v5:
- No changes.

v4:
- Add BUILD_BUG_ON() to check size of fields of aria_ctx.

v3:
- Patch introduced.

 arch/x86/crypto/aria-aesni-avx-asm_64.S | 26 +++++++++++--------------
 arch/x86/kernel/asm-offsets.c           | 11 +++++++++++
 crypto/aria_generic.c                   |  4 ++++
 3 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
index 03ae4cd1d976..be6adc6e7458 100644
--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
@@ -8,13 +8,9 @@

#include <linux/linkage.h>
#include <linux/cfi_types.h>
+#include <asm/asm-offsets.h>
#include <asm/frame.h>

-/* struct aria_ctx: */
-#define enc_key 0
-#define dec_key 272
-#define rounds 544
-
/* register macros */
#define CTX %rdi

@@ -874,7 +870,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%rax, %r9, 10);
- cmpl $12, rounds(CTX);
+ cmpl $12, ARIA_CTX_rounds(CTX);
jne .Laria_192;
aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -887,7 +883,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%rax, %r9, 12);
- cmpl $14, rounds(CTX);
+ cmpl $14, ARIA_CTX_rounds(CTX);
jne .Laria_256;
aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -923,7 +919,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way)

FRAME_BEGIN

- leaq enc_key(CTX), %r9;
+ leaq ARIA_CTX_enc_key(CTX), %r9;

inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -948,7 +944,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way)

FRAME_BEGIN

- leaq dec_key(CTX), %r9;
+ leaq ARIA_CTX_dec_key(CTX), %r9;

inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1056,7 +1052,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
leaq (%rdx), %r11;
leaq (%rcx), %rsi;
leaq (%rcx), %rdx;
- leaq enc_key(CTX), %r9;
+ leaq ARIA_CTX_enc_key(CTX), %r9;

call __aria_aesni_avx_crypt_16way;

@@ -1157,7 +1153,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
%xmm0, %xmm1, %xmm2, %xmm3,
%xmm4, %xmm5, %xmm6, %xmm7,
%rax, %r9, 10);
- cmpl $12, rounds(CTX);
+ cmpl $12, ARIA_CTX_rounds(CTX);
jne .Laria_gfni_192;
aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1174,7 +1170,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
%xmm0, %xmm1, %xmm2, %xmm3,
%xmm4, %xmm5, %xmm6, %xmm7,
%rax, %r9, 12);
- cmpl $14, rounds(CTX);
+ cmpl $14, ARIA_CTX_rounds(CTX);
jne .Laria_gfni_256;
aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
%xmm4, %xmm5, %xmm6, %xmm7,
@@ -1218,7 +1214,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)

FRAME_BEGIN

- leaq enc_key(CTX), %r9;
+ leaq ARIA_CTX_enc_key(CTX), %r9;

inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1243,7 +1239,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)

FRAME_BEGIN

- leaq dec_key(CTX), %r9;
+ leaq ARIA_CTX_dec_key(CTX), %r9;

inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -1275,7 +1271,7 @@ SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
leaq (%rdx), %r11;
leaq (%rcx), %rsi;
leaq (%rcx), %rdx;
- leaq enc_key(CTX), %r9;
+ leaq ARIA_CTX_enc_key(CTX), %r9;

call __aria_aesni_avx_gfni_crypt_16way;

diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index cb50589a7102..32192a91c65b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
#define COMPILE_OFFSETS

#include <linux/crypto.h>
+#include <crypto/aria.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/hardirq.h>
@@ -109,6 +110,16 @@ static void __used common(void)
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);

+#if defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) || \
+ defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64_MODULE)
+
+ /* Offset for fields in aria_ctx */
+ BLANK();
+ OFFSET(ARIA_CTX_enc_key, aria_ctx, enc_key);
+ OFFSET(ARIA_CTX_dec_key, aria_ctx, dec_key);
+ OFFSET(ARIA_CTX_rounds, aria_ctx, rounds);
+#endif
+
if (IS_ENABLED(CONFIG_KVM_INTEL)) {
BLANK();
OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index 4cc29b82b99d..d96dfc4fdde6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -178,6 +178,10 @@ int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;

+ BUILD_BUG_ON(sizeof(ctx->enc_key) != 272);
+ BUILD_BUG_ON(sizeof(ctx->dec_key) != 272);
+ BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds));
+
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;

--
2.34.1
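
A note on the BUILD_BUG_ON() values in the aria_generic.c hunk: the
272-byte size matches the key-array layout that the old magic offsets
(enc_key at 0, dec_key at 272, rounds at 544) assumed. A rough sketch of
the layout this corresponds to (based on include/crypto/aria.h; the
constant names below are quoted from memory and should be treated as
illustrative):

struct aria_ctx {
	/* 17 round keys of 16 bytes each: 17 * 4 * 4 bytes = 272 bytes */
	u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
	u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
	/* (key_len + 32) / 4 gives 12, 14, or 16 rounds for 16/24/32-byte keys */
	int rounds;
	int key_length;
};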



2022-12-02 10:18:49

by Herbert Xu

Subject: Re: [PATCH v6 2/4] crypto: aria: do not use magic number offsets of aria_ctx

On Mon, Nov 21, 2022 at 12:39:53AM +0000, Taehee Yoo wrote:
>
> +#if defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) || \
> + defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64_MODULE)

Why isn't this IS_ENABLED like the hunk below?

> +
> + /* Offset for fields in aria_ctx */
> + BLANK();
> + OFFSET(ARIA_CTX_enc_key, aria_ctx, enc_key);
> + OFFSET(ARIA_CTX_dec_key, aria_ctx, dec_key);
> + OFFSET(ARIA_CTX_rounds, aria_ctx, rounds);
> +#endif
> +
> if (IS_ENABLED(CONFIG_KVM_INTEL)) {
> BLANK();
> OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);

Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2022-12-03 05:07:05

by Taehee Yoo

Subject: Re: [PATCH v6 2/4] crypto: aria: do not use magic number offsets of aria_ctx

Hi Herbert,
Thank you so much for your review!

On 12/2/22 19:11, Herbert Xu wrote:
> On Mon, Nov 21, 2022 at 12:39:53AM +0000, Taehee Yoo wrote:
>>
>> +#if defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) || \
>> + defined(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64_MODULE)
>
> Why isn't this IS_ENABLED like the hunk below?
>
>> +
>> + /* Offset for fields in aria_ctx */
>> + BLANK();
>> + OFFSET(ARIA_CTX_enc_key, aria_ctx, enc_key);
>> + OFFSET(ARIA_CTX_dec_key, aria_ctx, dec_key);
>> + OFFSET(ARIA_CTX_rounds, aria_ctx, rounds);
>> +#endif
>> +
>> if (IS_ENABLED(CONFIG_KVM_INTEL)) {
>> BLANK();
>> OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
>
> Thanks,

I missed cleaning it up.
I will use IS_ENABLED() instead of defined() in the v7.
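Something like the following, matching the IS_ENABLED(CONFIG_KVM_INTEL)
hunk below it (a sketch only; the final v7 hunk may differ slightly):

	if (IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)) {
		/* Offset for fields in aria_ctx */
		BLANK();
		OFFSET(ARIA_CTX_enc_key, aria_ctx, enc_key);
		OFFSET(ARIA_CTX_dec_key, aria_ctx, dec_key);
		OFFSET(ARIA_CTX_rounds, aria_ctx, rounds);
	}

IS_ENABLED() is true for both =y and =m, so the explicit _MODULE check
goes away, and <crypto/aria.h> stays included unconditionally so the
block still compiles when the option is off.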

Thanks a lot!
Taehee Yoo