Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1756451AbaBUOBW (ORCPT );
	Fri, 21 Feb 2014 09:01:22 -0500
Received: from youngberry.canonical.com ([91.189.89.112]:46719 "EHLO
	youngberry.canonical.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1755436AbaBUMtT (ORCPT );
	Fri, 21 Feb 2014 07:49:19 -0500
From: Luis Henriques <luis.henriques@canonical.com>
To: linux-kernel@vger.kernel.org, stable@vger.kernel.org,
	kernel-team@lists.ubuntu.com
Cc: Harald Freudenberger, Herbert Xu, Luis Henriques
Subject: [PATCH 3.11 004/121] crypto: s390 - fix concurrency issue in aes-ctr mode
Date: Fri, 21 Feb 2014 12:47:08 +0000
Message-Id: <1392986945-9693-5-git-send-email-luis.henriques@canonical.com>
X-Mailer: git-send-email 1.9.0
In-Reply-To: <1392986945-9693-1-git-send-email-luis.henriques@canonical.com>
References: <1392986945-9693-1-git-send-email-luis.henriques@canonical.com>
X-Extended-Stable: 3.11
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

3.11.10.5 -stable review patch.  If anyone has any objections, please let me know.

------------------

From: Harald Freudenberger

commit 0519e9ad89e5cd6e6b08398f57c6a71d9580564c upstream.

The aes-ctr mode uses one preallocated page without any concurrency
protection. When multiple threads run aes-ctr encryption or decryption,
this can lead to data corruption. The patch introduces locking for the
page and a fallback solution with slower en/decryption performance in
concurrency situations.

Signed-off-by: Harald Freudenberger
Signed-off-by: Herbert Xu
Signed-off-by: Luis Henriques
---
 arch/s390/crypto/aes_s390.c | 65 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 46 insertions(+), 19 deletions(-)
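The fix is a trylock-with-fallback pattern: the shared preallocated page
is used only when its lock can be taken without blocking, so callers never
sleep or spin; a contended caller instead falls back to a small private
buffer on the stack and proceeds one block at a time at lower throughput.
The following is a minimal user-space sketch of the same pattern, using
pthreads rather than the kernel spinlock API; buffer names and sizes are
hypothetical and it is illustrative only, not the kernel code itself
(build with: cc -pthread sketch.c):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE	16	/* one cipher block */
	#define SHARED_SIZE	4096	/* stands in for the preallocated page */

	static unsigned char shared_buf[SHARED_SIZE];
	static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Illustrative only: prefer the large shared buffer when its lock
	 * is immediately available, otherwise fall back to a private
	 * one-block buffer so concurrent callers can never corrupt each
	 * other's counter blocks -- they just run with less batching.
	 */
	static void process(const unsigned char *iv)
	{
		unsigned char local_buf[BLOCK_SIZE];	/* fallback, always safe */
		unsigned char *buf = local_buf;
		size_t buf_size = BLOCK_SIZE;
		int have_lock = (pthread_mutex_trylock(&shared_lock) == 0);

		if (have_lock) {	/* uncontended: batch into the big buffer */
			buf = shared_buf;
			buf_size = SHARED_SIZE;
		}

		memcpy(buf, iv, BLOCK_SIZE);
		/* ... batched (or single-block) counter-mode work on buf ... */
		printf("worker used the %s buffer (%zu bytes)\n",
		       have_lock ? "shared" : "private", buf_size);

		if (have_lock)
			pthread_mutex_unlock(&shared_lock);
	}

	static void *worker(void *arg)
	{
		unsigned char iv[BLOCK_SIZE] = { 0 };

		(void)arg;
		process(iv);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, worker, NULL);
		pthread_create(&t2, NULL, worker, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}

As in the patch, the lock must be released on every exit path; this is why
the -EIO error return in the diff below gains a conditional spin_unlock().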
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 94e20dd..2a245b5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128	1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256	4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* only use complete blocks, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[AES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
 	if (!walk->nbytes)
 		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		while (nbytes >= AES_BLOCK_SIZE) {
-			/* only use complete blocks, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						 nbytes & ~(AES_BLOCK_SIZE - 1);
-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = AES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, sctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
 				return -EIO;
+			}
 			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrptr, AES_BLOCK_SIZE);
 			out += n;
 			in += n;
 			nbytes -= n;
 		}
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
@@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
+				       AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
 	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
 	return ret;
 }
-- 
1.9.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/