Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1751630AbeACXKZ (ORCPT + 1 other);
	Wed, 3 Jan 2018 18:10:25 -0500
Received: from mga05.intel.com ([192.55.52.43]:58822 "EHLO mga05.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1751053AbeACXJr (ORCPT );
	Wed, 3 Jan 2018 18:09:47 -0500
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.45,504,1508828400"; d="scan'208";a="17284139"
From: Andi Kleen
To: tglx@linuxtronix.de
Cc: torvalds@linux-foundation.org, gregkh@linux-foundation.org,
	dwmw@amazon.co.uk, tim.c.chen@linux.intel.com,
	linux-kernel@vger.kernel.org, dave.hansen@intel.com,
	Andi Kleen
Subject: [PATCH 02/11] x86/retpoline/crypto: Convert crypto assembler indirect jumps
Date: Wed, 3 Jan 2018 15:09:25 -0800
Message-Id: <20180103230934.15788-3-andi@firstfloor.org>
X-Mailer: git-send-email 2.14.3
In-Reply-To: <20180103230934.15788-1-andi@firstfloor.org>
References: <20180103230934.15788-1-andi@firstfloor.org>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org
Return-Path:

From: Andi Kleen

Convert all indirect jumps in crypto assembler code to use
non-speculative sequences.

Based on code from David Woodhouse and Tim Chen.

Signed-off-by: Andi Kleen
---
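Note for reviewers: NOSPEC_CALL and NOSPEC_JMP hide the indirect branch
from the branch target predictor by bouncing it through a return
trampoline (a "retpoline"). The macro definitions are introduced earlier
in this series; the sketch below shows the standard retpoline
construction they are assumed to follow. The label names are
illustrative only, and the in-tree macros additionally use ALTERNATIVE
so the sequence can be patched back to a plain indirect branch on
configurations that do not need retpolines.

.macro NOSPEC_JMP reg:req
	call	.Ldo_rop_\@	# pushes &.Lspec_trap_\@ as return address
.Lspec_trap_\@:
	pause			# the ret below is predicted (via the RSB)
	lfence			# to return here; speculation spins in this
	jmp	.Lspec_trap_\@	# loop instead of at a guessed target
.Ldo_rop_\@:
	mov	\reg, (%rsp)	# overwrite the return address with the
	ret			# real target and "return" to it
.endm

.macro NOSPEC_CALL reg:req
	jmp	.Ldo_call_\@
.Lretpoline_\@:
	NOSPEC_JMP \reg
.Ldo_call_\@:
	call	.Lretpoline_\@	# push the real return address first
.endm

An indirect call must leave its own return address on the stack, which
is why NOSPEC_CALL wraps the NOSPEC_JMP trampoline in one more call.
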
 arch/x86/crypto/aesni-intel_asm.S            | 5 +++--
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 3 ++-
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 2 +-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S    | 3 ++-
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fec80b2..f6c964f30ede 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include

 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)

-	call *%r11
+	NOSPEC_CALL %r11

 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)

-	call *%r11
+	NOSPEC_CALL %r11

 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..9006543227b2 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@

 #include
 #include
+#include

 #define CAMELLIA_TABLE_BYTE_LEN 272

@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;

-	call *%r9;
+	NOSPEC_CALL %r9;

 	addq $(16 * 16), %rsp;

diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..1743e6850e00 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -1343,7 +1343,7 @@ camellia_xts_crypt_32way:
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;

-	call *%r9;
+	NOSPEC_CALL %r9;

 	addq $(16 * 32), %rsp;

diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27c6f41..969000c64456 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@

 #include
 #include
+#include

 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction

@@ -172,7 +173,7 @@ continue_block:
 	movzxw	(bufp, %rax, 2), len
 	lea	crc_array(%rip), bufp
 	lea	(bufp, len, 1), bufp
-	jmp	*bufp
+	NOSPEC_JMP	bufp

 ################################################################
 ## 2a) PROCESS FULL BLOCKS:
-- 
2.14.3