2015-06-17 03:21:55

by Michael Ellerman

Subject: linux-next: manual merge of the tip tree with the crypto tree

Hi all,

Today's linux-next merge of the tip tree got conflicts in:

drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/ghash.c

between commit:

4beb10604597 "crypto: vmx - Reindent to kernel style"

from the crypto tree and commit:

5f76eea88dcb "sched/preempt, powerpc: Disable preemption in enable_kernel_altivec() explicitly"

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary (no action
is required).
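
The resolution keeps the crypto tree's reindented function bodies and
re-applies the tip tree's explicit preempt_disable()/preempt_enable()
pairs (the lines marked "++" in the diff below). Distilled, every
AltiVec section in the three files ends up with the following shape --
a sketch of the shared pattern, not a complete driver:

	preempt_disable();	/* made explicit by 5f76eea88dcb */
	pagefault_disable();
	enable_kernel_altivec();
	/* ... AltiVec-accelerated work, e.g. aes_p8_encrypt() ... */
	pagefault_enable();
	preempt_enable();	/* made explicit by 5f76eea88dcb */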

cheers


diff --cc drivers/crypto/vmx/aes.c
index 023e5f014783,a9064e36e7b5..000000000000
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@@ -76,47 -73,53 +76,53 @@@ static void p8_aes_exit(struct crypto_t
}

static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
- int ret;
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
- pagefault_enable();
- preempt_enable();
-
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ int ret;
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
++ preempt_enable();
+
+ ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+ return ret;
}

static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (in_interrupt()) {
- crypto_cipher_encrypt_one(ctx->fallback, dst, src);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- aes_p8_encrypt(src, dst, &ctx->enc_key);
- pagefault_enable();
- preempt_enable();
- }
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (in_interrupt()) {
+ crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+ } else {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_encrypt(src, dst, &ctx->enc_key);
+ pagefault_enable();
++ preempt_enable();
+ }
}

static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
- struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (in_interrupt()) {
- crypto_cipher_decrypt_one(ctx->fallback, dst, src);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- aes_p8_decrypt(src, dst, &ctx->dec_key);
- pagefault_enable();
- preempt_enable();
- }
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (in_interrupt()) {
+ crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+ } else {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ aes_p8_decrypt(src, dst, &ctx->dec_key);
+ pagefault_enable();
++ preempt_enable();
+ }
}

struct crypto_alg p8_aes_alg = {
diff --cc drivers/crypto/vmx/aes_cbc.c
index 7120ab24d8c6,477284abdd11..000000000000
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@@ -77,95 -74,95 +77,101 @@@ static void p8_aes_cbc_exit(struct cryp
}

static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
- int ret;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
- pagefault_enable();
- preempt_enable();
-
- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ int ret;
+ struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ pagefault_enable();
++ preempt_enable();
+
+ ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
}

static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
{
- int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
- crypto_blkcipher_tfm(desc->tfm));
- struct blkcipher_desc fallback_desc = {
- .tfm = ctx->fallback,
- .info = desc->info,
- .flags = desc->flags
- };
-
- if (in_interrupt()) {
- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1);
+ int ret;
+ struct blkcipher_walk walk;
+ struct p8_aes_cbc_ctx *ctx =
+ crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+ nbytes);
+ } else {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK,
+ &ctx->enc_key, walk.iv, 1);
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ }

- pagefault_enable();
- preempt_enable();
- }
+ pagefault_enable();
++ preempt_enable();
+ }

- return ret;
+ return ret;
}

static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
{
- int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
- crypto_blkcipher_tfm(desc->tfm));
- struct blkcipher_desc fallback_desc = {
- .tfm = ctx->fallback,
- .info = desc->info,
- .flags = desc->flags
- };
-
- if (in_interrupt()) {
- ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
- } else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0);
+ int ret;
+ struct blkcipher_walk walk;
+ struct p8_aes_cbc_ctx *ctx =
+ crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
+ nbytes);
+ } else {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ ret = blkcipher_walk_virt(desc, &walk);
+ while ((nbytes = walk.nbytes)) {
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK,
+ &ctx->dec_key, walk.iv, 0);
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}

- pagefault_enable();
- preempt_enable();
- }
+ pagefault_enable();
++ preempt_enable();
+ }

- return ret;
+ return ret;
}


diff --cc drivers/crypto/vmx/ghash.c
index 4c3a8f7e5059,f255ec4a04d4..000000000000
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@@ -109,92 -107,98 +109,100 @@@ static int p8_ghash_init(struct shash_d
}

static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+ unsigned int keylen)
{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
-
- if (keylen != GHASH_KEY_LEN)
- return -EINVAL;
-
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_fp();
- gcm_init_p8(ctx->htable, (const u64 *) key);
- pagefault_enable();
- preempt_enable();
- return crypto_shash_setkey(ctx->fallback, key, keylen);
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+ if (keylen != GHASH_KEY_LEN)
+ return -EINVAL;
+
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_init_p8(ctx->htable, (const u64 *) key);
+ pagefault_enable();
++ preempt_enable();
+ return crypto_shash_setkey(ctx->fallback, key, keylen);
}

static int p8_ghash_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
+ const u8 *src, unsigned int srclen)
{
- unsigned int len;
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
- struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (IN_INTERRUPT) {
- return crypto_shash_update(&dctx->fallback_desc, src, srclen);
- } else {
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src, srclen);
- dctx->bytes += srclen;
- return 0;
- }
- memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_fp();
- gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
- GHASH_DIGEST_SIZE);
- pagefault_enable();
- preempt_enable();
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_fp();
- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
- pagefault_enable();
- preempt_enable();
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
- }
- return 0;
- }
+ unsigned int len;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (IN_INTERRUPT) {
+ return crypto_shash_update(&dctx->fallback_desc, src,
+ srclen);
+ } else {
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src,
+ srclen);
+ dctx->bytes += srclen;
+ return 0;
+ }
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ pagefault_enable();
++ preempt_enable();
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+ pagefault_enable();
++ preempt_enable();
+ src += len;
+ srclen -= len;
+ }
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
+ }
}

static int p8_ghash_final(struct shash_desc *desc, u8 *out)
{
- int i;
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
- struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (IN_INTERRUPT) {
- return crypto_shash_final(&dctx->fallback_desc, out);
- } else {
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_fp();
- gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
- GHASH_DIGEST_SIZE);
- pagefault_enable();
- preempt_enable();
- dctx->bytes = 0;
- }
- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
- return 0;
- }
+ int i;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (IN_INTERRUPT) {
+ return crypto_shash_final(&dctx->fallback_desc, out);
+ } else {
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ enable_kernel_fp();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ pagefault_enable();
++ preempt_enable();
+ dctx->bytes = 0;
+ }
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
+ }
}

struct shash_alg p8_ghash_alg = {



2020-07-17 06:30:21

by Uros Bizjak

Subject: Re: linux-next: manual merge of the tip tree with the crypto tree

On Fri, Jul 17, 2020 at 6:47 AM Stephen Rothwell <[email protected]> wrote:
>
> Hi all,
>
> Today's linux-next merge of the tip tree got a conflict in:
>
> arch/x86/include/asm/inst.h
>
> between commit:
>
> d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h")
>
> from the crypto tree and commit:
>
> eaad981291ee ("x86/entry/64: Introduce the FIND_PERCPU_BASE macro")
>
> from the tip tree.
>
> I fixed it up (I brought the file back but removed what the crypto tree
> no longer needed - see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> I think if the crypto tree brought back this file as well (even without
> the RDPID macro), it would make this conflict much more manageable.

I will prepare a v2 that leaves the needed parts of inst.h.

Uros.

> /* SPDX-License-Identifier: GPL-2.0 */
> /*
> * Generate .byte code for some instructions not supported by old
> * binutils.
> */
> #ifndef X86_ASM_INST_H
> #define X86_ASM_INST_H
>
> #ifdef __ASSEMBLY__
>
> #define REG_NUM_INVALID 100
>
> #define REG_TYPE_R32 0
> #define REG_TYPE_R64 1
> #define REG_TYPE_XMM 2
> #define REG_TYPE_INVALID 100
>
> .macro R32_NUM opd r32
> \opd = REG_NUM_INVALID
> .ifc \r32,%eax
> \opd = 0
> .endif
> .ifc \r32,%ecx
> \opd = 1
> .endif
> .ifc \r32,%edx
> \opd = 2
> .endif
> .ifc \r32,%ebx
> \opd = 3
> .endif
> .ifc \r32,%esp
> \opd = 4
> .endif
> .ifc \r32,%ebp
> \opd = 5
> .endif
> .ifc \r32,%esi
> \opd = 6
> .endif
> .ifc \r32,%edi
> \opd = 7
> .endif
> #ifdef CONFIG_X86_64
> .ifc \r32,%r8d
> \opd = 8
> .endif
> .ifc \r32,%r9d
> \opd = 9
> .endif
> .ifc \r32,%r10d
> \opd = 10
> .endif
> .ifc \r32,%r11d
> \opd = 11
> .endif
> .ifc \r32,%r12d
> \opd = 12
> .endif
> .ifc \r32,%r13d
> \opd = 13
> .endif
> .ifc \r32,%r14d
> \opd = 14
> .endif
> .ifc \r32,%r15d
> \opd = 15
> .endif
> #endif
> .endm
>
> .macro R64_NUM opd r64
> \opd = REG_NUM_INVALID
> #ifdef CONFIG_X86_64
> .ifc \r64,%rax
> \opd = 0
> .endif
> .ifc \r64,%rcx
> \opd = 1
> .endif
> .ifc \r64,%rdx
> \opd = 2
> .endif
> .ifc \r64,%rbx
> \opd = 3
> .endif
> .ifc \r64,%rsp
> \opd = 4
> .endif
> .ifc \r64,%rbp
> \opd = 5
> .endif
> .ifc \r64,%rsi
> \opd = 6
> .endif
> .ifc \r64,%rdi
> \opd = 7
> .endif
> .ifc \r64,%r8
> \opd = 8
> .endif
> .ifc \r64,%r9
> \opd = 9
> .endif
> .ifc \r64,%r10
> \opd = 10
> .endif
> .ifc \r64,%r11
> \opd = 11
> .endif
> .ifc \r64,%r12
> \opd = 12
> .endif
> .ifc \r64,%r13
> \opd = 13
> .endif
> .ifc \r64,%r14
> \opd = 14
> .endif
> .ifc \r64,%r15
> \opd = 15
> .endif
> #endif
> .endm
>
> .macro XMM_NUM opd xmm
> \opd = REG_NUM_INVALID
> .ifc \xmm,%xmm0
> \opd = 0
> .endif
> .ifc \xmm,%xmm1
> \opd = 1
> .endif
> .ifc \xmm,%xmm2
> \opd = 2
> .endif
> .ifc \xmm,%xmm3
> \opd = 3
> .endif
> .ifc \xmm,%xmm4
> \opd = 4
> .endif
> .ifc \xmm,%xmm5
> \opd = 5
> .endif
> .ifc \xmm,%xmm6
> \opd = 6
> .endif
> .ifc \xmm,%xmm7
> \opd = 7
> .endif
> .ifc \xmm,%xmm8
> \opd = 8
> .endif
> .ifc \xmm,%xmm9
> \opd = 9
> .endif
> .ifc \xmm,%xmm10
> \opd = 10
> .endif
> .ifc \xmm,%xmm11
> \opd = 11
> .endif
> .ifc \xmm,%xmm12
> \opd = 12
> .endif
> .ifc \xmm,%xmm13
> \opd = 13
> .endif
> .ifc \xmm,%xmm14
> \opd = 14
> .endif
> .ifc \xmm,%xmm15
> \opd = 15
> .endif
> .endm
>
> .macro REG_TYPE type reg
> R32_NUM reg_type_r32 \reg
> R64_NUM reg_type_r64 \reg
> XMM_NUM reg_type_xmm \reg
> .if reg_type_r64 <> REG_NUM_INVALID
> \type = REG_TYPE_R64
> .elseif reg_type_r32 <> REG_NUM_INVALID
> \type = REG_TYPE_R32
> .elseif reg_type_xmm <> REG_NUM_INVALID
> \type = REG_TYPE_XMM
> .else
> \type = REG_TYPE_INVALID
> .endif
> .endm
>
> .macro PFX_OPD_SIZE
> .byte 0x66
> .endm
>
> .macro PFX_REX opd1 opd2 W=0
> .if ((\opd1 | \opd2) & 8) || \W
> .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
> .endif
> .endm
>
> .macro MODRM mod opd1 opd2
> .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
> .endm
>
> .macro RDPID opd
> REG_TYPE rdpid_opd_type \opd
> .if rdpid_opd_type == REG_TYPE_R64
> R64_NUM rdpid_opd \opd
> .else
> R32_NUM rdpid_opd \opd
> .endif
> .byte 0xf3
> .if rdpid_opd > 7
> PFX_REX rdpid_opd 0
> .endif
> .byte 0x0f, 0xc7
> MODRM 0xc0 rdpid_opd 0x7
> .endm
> #endif
>
> #endif
>
> --
> Cheers,
> Stephen Rothwell
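
For context, after the crypto removal the only remaining user of this
header is the RDPID macro (via the tip tree's FIND_PERCPU_BASE). A
minimal sketch of a call site, with a hypothetical macro name and an
illustrative operand (neither is taken from either tree):

/* sketch.S: RDPID expands to a raw .byte encoding because old
 * binutils cannot assemble the mnemonic directly. */
#include <asm/inst.h>

.macro GET_CPU_ID_SKETCH		/* hypothetical helper */
	RDPID	%rax			/* expands to: .byte 0xf3, 0x0f, 0xc7, 0xf8 */
.endm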

2020-07-17 06:45:45

by Herbert Xu

Subject: Re: linux-next: manual merge of the tip tree with the crypto tree

On Fri, Jul 17, 2020 at 08:27:27AM +0200, Uros Bizjak wrote:
>
> I will prepare a v2 that leaves the needed parts of inst.h.

Your patch has already been applied. So please make it an
incremental patch.

Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2020-07-17 07:32:32

by Uros Bizjak

Subject: Re: linux-next: manual merge of the tip tree with the crypto tree

Please find attached the incremental patch that puts back the integer
parts of inst.h. This resolves the conflict with the tip tree.

Uros.

On Fri, Jul 17, 2020 at 8:45 AM Herbert Xu <[email protected]> wrote:
>
> On Fri, Jul 17, 2020 at 08:27:27AM +0200, Uros Bizjak wrote:
> >
> > I will prepare a v2 that leaves the needed parts of inst.h.
>
> Your patch has already been applied. So please make it an
> incremental patch.
>
> Thanks,
> --
> Email: Herbert Xu <[email protected]>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Attachments:
0001-crypto-x86-Put-back-integer-parts-of-include-asm-ins.patch (2.82 kB)

2020-07-20 06:17:05

by Uros Bizjak

Subject: Re: linux-next: manual merge of the tip tree with the crypto tree

On Mon, Jul 20, 2020 at 6:03 AM Stephen Rothwell <[email protected]> wrote:

> > Please find attached the incremental patch that puts back the integer
> > parts of inst.h. This resolves the conflict with the tip tree.
>
> The tip tree change needs the XMM parts kept as well, sorry.

Strange, because I did test my patch with the tip tree from
'origin/master' at commit a282cddefe90c4b21ef2c22a76a7c3ebd3ec6b86, and
the compilation produced the same lonely rdpid %eax in the
.altinstr_replacement section. AFAICS, the header is included only for
the RDPID macro, which does not use XMM registers.

> So I ended up just removing the now-unused crypto instruction
> macros.

To avoid any further trouble, this is also OK with me.

Uros.

2020-07-20 06:30:07

by Stephen Rothwell

Subject: Re: linux-next: manual merge of the tip tree with the crypto tree

Hi Uros,

On Mon, 20 Jul 2020 08:13:51 +0200 Uros Bizjak <[email protected]> wrote:
>
> On Mon, Jul 20, 2020 at 6:03 AM Stephen Rothwell <[email protected]> wrote:
>
> > > Please find attached the incremental patch that puts back the integer
> > > parts of inst.h. This resolves the conflict with the tip tree.
> >
> > The tip tree change needs the XMM parts kept as well, sorry.
>
> Strange, because I did test my patch with the tip tree from
> 'origin/master' at commit a282cddefe90c4b21ef2c22a76a7c3ebd3ec6b86, and
> the compilation produced the same lonely rdpid %eax in the
> .altinstr_replacement section. AFAICS, the header is included only for
> the RDPID macro, which does not use XMM registers.
>
> > So I ended up just removing the now-unused crypto instruction
> > macros.
>
> To avoid any further trouble, this is also OK with me.

Sorry, I see what happened now. Since your patch was not in the crypto
tree yet, I did a fixup to the tip tree merge based on your patch, but
did it by hand and didn't remove the XMM bits from the REG_TYPE
macro ...

So your original patch is probably all good (especially since you
actually tested it :-))
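
For reference, with the XMM bits dropped the REG_TYPE macro would
presumably reduce to its integer arms, along these lines (a sketch
inferred from the header quoted earlier, not taken from the actual
patch):

.macro REG_TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
.elseif reg_type_r32 <> REG_NUM_INVALID
\type = REG_TYPE_R32
.else
\type = REG_TYPE_INVALID
.endif
.endm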

--
Cheers,
Stephen Rothwell

