2018-01-31 20:27:26

by Junaid Shahid

Subject: [PATCH v3 0/4] crypto: aesni - Use zero-copy for gcm(aes) buffers that are partially contiguous

Changes in v3:
- Rebased on top of the latest linux-next
Changes in v2:
- Integrated https://patchwork.kernel.org/patch/10173981

Currently, the AESNI gcm(aes) implementation uses zero-copy only when the
entire src and dst request buffers, including the AAD, the data, and the
Auth Tag, are contiguous. This series enables the use of zero-copy even if
the AAD and/or Auth Tag are in different buffers than the actual data, as
long as each of them individually satisfies the zero-copy conditions (i.e.
the entire buffer is either in low-mem or within a single high-mem page).
It also enables zero-copy when only one of src and dst satisfies these
conditions, rather than only when both of them do.
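
The per-buffer zero-copy condition is captured by the is_mappable() helper
introduced in patch 2/4; for reference:

static bool is_mappable(struct scatterlist *sgl, unsigned long len)
{
        /* The buffer must fit in a single sglist entry and either be in
         * low memory or lie entirely within one high-mem page, so that
         * a single atomic kmap can cover all of it. */
        return sgl->length > 0 && len <= sgl->length &&
               (!PageHighMem(sg_page(sgl)) || sgl->offset + len <= PAGE_SIZE);
}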

Junaid Shahid (4):
crypto: aesni - Fix out-of-bounds access of the AAD buffer in AVX
gcm-aesni
crypto: aesni - Enable one-sided zero copy for gcm(aes) request
buffers
crypto: aesni - Directly use kmap_atomic instead of scatter_walk
object in gcm(aes)
crypto: aesni - Use zero-copy for gcm(aes) even if the
AAD/Data/AuthTag are separate

arch/x86/crypto/aesni-intel_avx-x86_64.S | 154 +++++-----------
arch/x86/crypto/aesni-intel_glue.c | 307 +++++++++++++++++++------------
2 files changed, 227 insertions(+), 234 deletions(-)

--
2.16.0.rc1.238.g530d649a79-goog


2018-01-31 20:27:30

by Junaid Shahid

Subject: [PATCH v3 4/4] crypto: aesni - Use zero-copy for gcm(aes) even if the AAD/Data/AuthTag are separate

Enable the use of zero-copy even if the AAD and/or Auth Tag are in different
buffers than the actual data, as long as each of them individually satisfies
the zero-copy conditions (i.e. the entire buffer is either in low-mem or
within a single high-mem page).
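
Conceptually, the new get_request_buffers() fast-forwards into the request
sglist to locate each region and then tests the regions independently. A
minimal sketch of just the check (the regions_mappable() name is
illustrative; the bounce-buffer fallback and the shared-mapping shortcuts
of the actual patch are omitted):

static bool regions_mappable(struct scatterlist *sgl,
                             unsigned long assoc_len,
                             unsigned long data_len,
                             unsigned long auth_tag_len)
{
        struct scatterlist data_chain[2], tag_chain[2];
        struct scatterlist *sgl_data, *sgl_tag;

        /* scatterwalk_ffwd() returns an sglist view that starts the
         * given number of bytes into the original list. */
        sgl_data = scatterwalk_ffwd(data_chain, sgl, assoc_len);
        sgl_tag = scatterwalk_ffwd(tag_chain, sgl, assoc_len + data_len);

        /* Zero-copy requires the AAD, the data and the Auth Tag to
         * each be individually mappable. */
        return is_mappable(sgl, assoc_len) &&
               is_mappable(sgl_data, data_len) &&
               is_mappable(sgl_tag, auth_tag_len);
}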

Signed-off-by: Junaid Shahid <[email protected]>
---
arch/x86/crypto/aesni-intel_glue.c | 121 +++++++++++++++++++++++++++----------
1 file changed, 89 insertions(+), 32 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 9e69e02076d2..7cebc99a0405 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -756,42 +756,91 @@ static u8 *map_buffer(struct scatterlist *sgl)
}

/*
- * Maps the sglist buffer and returns a pointer to the mapped buffer in
- * data_buf.
+ * Maps the sglist buffer and returns pointers to the mapped buffers in assoc,
+ * data and (optionally) auth_tag.
*
* If direct mapping is not feasible, then allocates a bounce buffer if one
- * isn't already available in bounce_buf, and returns a pointer to the bounce
- * buffer in data_buf.
+ * isn't already available in bounce_buf, and returns pointers within the bounce
+ * buffer in assoc, data and auth_tag.
*
- * When the buffer is no longer needed, put_request_buffer() should be called on
- * the data_buf and the bounce_buf should be freed using kfree().
+ * When the buffers are no longer needed, put_request_buffers() should be called
+ * and the bounce_buf should be freed using kfree().
*/
-static int get_request_buffer(struct scatterlist *sgl,
- unsigned long bounce_buf_size,
- u8 **data_buf, u8 **bounce_buf, bool *mapped)
+static int get_request_buffers(struct scatterlist *sgl,
+ unsigned long assoc_len, unsigned long data_len,
+ unsigned long auth_tag_len,
+ u8 **assoc, u8 **data, u8 **auth_tag,
+ u8 **bounce_buf, bool *mapped)
{
- if (sg_is_last(sgl) && is_mappable(sgl, sgl->length)) {
+ struct scatterlist sgl_data_chain[2], sgl_auth_tag_chain[2];
+ struct scatterlist *sgl_data, *sgl_auth_tag;
+
+ sgl_data = scatterwalk_ffwd(sgl_data_chain, sgl, assoc_len);
+ sgl_auth_tag = scatterwalk_ffwd(sgl_auth_tag_chain, sgl,
+ assoc_len + data_len);
+
+ if (is_mappable(sgl, assoc_len) && is_mappable(sgl_data, data_len) &&
+ (auth_tag == NULL || is_mappable(sgl_auth_tag, auth_tag_len))) {
*mapped = true;
- *data_buf = map_buffer(sgl);
+
+ *assoc = map_buffer(sgl);
+
+ if (sgl->length >= assoc_len + data_len)
+ *data = *assoc + assoc_len;
+ else
+ *data = map_buffer(sgl_data);
+
+ if (auth_tag != NULL) {
+ if (sgl_data->length >= data_len + auth_tag_len)
+ *auth_tag = *data + data_len;
+ else
+ *auth_tag = map_buffer(sgl_auth_tag);
+ }
+
return 0;
}

*mapped = false;

if (*bounce_buf == NULL) {
- *bounce_buf = kmalloc(bounce_buf_size, GFP_ATOMIC);
+ *bounce_buf = kmalloc(assoc_len + data_len + auth_tag_len,
+ GFP_ATOMIC);
if (unlikely(*bounce_buf == NULL))
return -ENOMEM;
}

- *data_buf = *bounce_buf;
+ *assoc = *bounce_buf;
+ *data = *assoc + assoc_len;
+
+ if (auth_tag != NULL)
+ *auth_tag = *data + data_len;
+
return 0;
}

-static void put_request_buffer(u8 *data_buf, bool mapped)
+static void put_request_buffers(struct scatterlist *sgl, bool mapped,
+ u8 *assoc, u8 *data, u8 *auth_tag,
+ unsigned long assoc_len,
+ unsigned long data_len,
+ unsigned long auth_tag_len)
{
- if (mapped)
- kunmap_atomic(data_buf);
+ struct scatterlist sgl_data_chain[2];
+ struct scatterlist *sgl_data;
+
+ if (!mapped)
+ return;
+
+ sgl_data = scatterwalk_ffwd(sgl_data_chain, sgl, assoc_len);
+
+ /* The unmaps need to be done in reverse order of the maps. */
+
+ if (auth_tag != NULL && sgl_data->length < data_len + auth_tag_len)
+ kunmap_atomic(auth_tag);
+
+ if (sgl->length < assoc_len + data_len)
+ kunmap_atomic(data);
+
+ kunmap_atomic(assoc);
}

/*
@@ -803,34 +852,38 @@ static void put_request_buffer(u8 *data_buf, bool mapped)
static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx, bool decrypt)
{
- u8 *src, *dst, *assoc, *bounce_buf = NULL;
+ u8 *src, *src_assoc;
+ u8 *dst, *dst_assoc;
+ u8 *auth_tag;
+ u8 *bounce_buf = NULL;
bool src_mapped = false, dst_mapped = false;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
unsigned long data_len = req->cryptlen - (decrypt ? auth_tag_len : 0);
int retval = 0;
- unsigned long bounce_buf_size = data_len + auth_tag_len + req->assoclen;

if (auth_tag_len > 16)
return -EINVAL;

- retval = get_request_buffer(req->src, bounce_buf_size, &assoc,
- &bounce_buf, &src_mapped);
+ retval = get_request_buffers(req->src, req->assoclen, data_len,
+ auth_tag_len, &src_assoc, &src,
+ (decrypt || req->src == req->dst)
+ ? &auth_tag : NULL,
+ &bounce_buf, &src_mapped);
if (retval)
goto exit;

- src = assoc + req->assoclen;
-
if (req->src == req->dst) {
+ dst_assoc = src_assoc;
dst = src;
dst_mapped = src_mapped;
} else {
- retval = get_request_buffer(req->dst, bounce_buf_size, &dst,
- &bounce_buf, &dst_mapped);
+ retval = get_request_buffers(req->dst, req->assoclen, data_len,
+ auth_tag_len, &dst_assoc, &dst,
+ decrypt ? NULL : &auth_tag,
+ &bounce_buf, &dst_mapped);
if (retval)
goto exit;
-
- dst += req->assoclen;
}

if (!src_mapped)
@@ -843,16 +896,16 @@ static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
u8 gen_auth_tag[16];

aesni_gcm_dec_tfm(aes_ctx, dst, src, data_len, iv,
- hash_subkey, assoc, assoclen,
+ hash_subkey, src_assoc, assoclen,
gen_auth_tag, auth_tag_len);
/* Compare generated tag with passed in tag. */
- if (crypto_memneq(src + data_len, gen_auth_tag, auth_tag_len))
+ if (crypto_memneq(auth_tag, gen_auth_tag, auth_tag_len))
retval = -EBADMSG;

} else
aesni_gcm_enc_tfm(aes_ctx, dst, src, data_len, iv,
- hash_subkey, assoc, assoclen,
- dst + data_len, auth_tag_len);
+ hash_subkey, src_assoc, assoclen,
+ auth_tag, auth_tag_len);

kernel_fpu_end();

@@ -862,9 +915,13 @@ static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
1);
exit:
if (req->dst != req->src)
- put_request_buffer(dst - req->assoclen, dst_mapped);
+ put_request_buffers(req->dst, dst_mapped, dst_assoc, dst,
+ decrypt ? NULL : auth_tag,
+ req->assoclen, data_len, auth_tag_len);

- put_request_buffer(assoc, src_mapped);
+ put_request_buffers(req->src, src_mapped, src_assoc, src,
+ (decrypt || req->src == req->dst) ? auth_tag : NULL,
+ req->assoclen, data_len, auth_tag_len);

kfree(bounce_buf);
return retval;
--
2.16.0.rc1.238.g530d649a79-goog

2018-01-31 20:27:29

by Junaid Shahid

Subject: [PATCH v3 2/4] crypto: aesni - Enable one-sided zero copy for gcm(aes) request buffers

gcmaes_encrypt/decrypt perform zero-copy crypto if both the source and
destination satisfy certain conditions (a single sglist entry located in
low-mem or within a single high-mem page). Otherwise, two copies are done,
even if one of the source or destination still satisfies the zero-copy
conditions. This patch extends the optimization to avoid the copy on
whichever side does satisfy the zero-copy conditions.
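
In outline, the reworked gcmaes_crypt() resolves each side independently
and copies only across the side that had to be bounced. A condensed sketch
(the function name is illustrative, the helper signature is the
scatter_walk-free one that patch 3/4 later introduces, and the AAD/auth-tag
handling, kernel_fpu_begin/end section and unmap path are elided; see the
diff below for the real code):

static int gcmaes_crypt_sketch(struct aead_request *req,
                               unsigned long total_len)
{
        u8 *src, *dst, *bounce_buf = NULL;
        bool src_mapped = false, dst_mapped = false;
        int ret;

        /* Each side independently gets either a direct mapping or a
         * pointer into a shared, lazily allocated bounce buffer. */
        ret = get_request_buffer(req->src, total_len, &src, &bounce_buf,
                                 &src_mapped);
        if (ret)
                return ret;

        if (req->src == req->dst) {
                dst = src;
                dst_mapped = src_mapped;
        } else {
                ret = get_request_buffer(req->dst, total_len, &dst,
                                         &bounce_buf, &dst_mapped);
                if (ret)
                        goto out;
        }

        /* Copy in only if the source was bounced ... */
        if (!src_mapped)
                scatterwalk_map_and_copy(bounce_buf, req->src, 0,
                                         total_len, 0);

        /* <AES-GCM runs here on src/dst> */

        /* ... and copy out only if the destination was bounced. */
        if (!dst_mapped)
                scatterwalk_map_and_copy(dst, req->dst, 0, total_len, 1);
out:
        kfree(bounce_buf);
        return ret;
}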

Signed-off-by: Junaid Shahid <[email protected]>
---
arch/x86/crypto/aesni-intel_glue.c | 256 +++++++++++++++++++------------------
1 file changed, 134 insertions(+), 122 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 34cf1c1f8c98..c11e531d21dd 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -744,136 +744,148 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
return 0;
}

+static bool is_mappable(struct scatterlist *sgl, unsigned long len)
+{
+ return sgl->length > 0 && len <= sgl->length &&
+ (!PageHighMem(sg_page(sgl)) || sgl->offset + len <= PAGE_SIZE);
+}
+
+/*
+ * Maps the sglist buffer and returns a pointer to the mapped buffer in
+ * data_buf.
+ *
+ * If direct mapping is not feasible, then allocates a bounce buffer if one
+ * isn't already available in bounce_buf, and returns a pointer to the bounce
+ * buffer in data_buf.
+ *
+ * When the buffer is no longer needed, put_request_buffer() should be called on
+ * the data_buf and the bounce_buf should be freed using kfree().
+ */
+static int get_request_buffer(struct scatterlist *sgl,
+ struct scatter_walk *sg_walk,
+ unsigned long bounce_buf_size,
+ u8 **data_buf, u8 **bounce_buf, bool *mapped)
+{
+ if (sg_is_last(sgl) && is_mappable(sgl, sgl->length)) {
+ *mapped = true;
+ scatterwalk_start(sg_walk, sgl);
+ *data_buf = scatterwalk_map(sg_walk);
+ return 0;
+ }
+
+ *mapped = false;
+
+ if (*bounce_buf == NULL) {
+ *bounce_buf = kmalloc(bounce_buf_size, GFP_ATOMIC);
+ if (unlikely(*bounce_buf == NULL))
+ return -ENOMEM;
+ }
+
+ *data_buf = *bounce_buf;
+ return 0;
+}
+
+static void put_request_buffer(u8 *data_buf, unsigned long len, bool mapped,
+ struct scatter_walk *sg_walk, bool output)
+{
+ if (mapped) {
+ scatterwalk_unmap(data_buf);
+ scatterwalk_advance(sg_walk, len);
+ scatterwalk_done(sg_walk, output, 0);
+ }
+}
+
+/*
+ * Performs the encryption/decryption operation for the given request. The src
+ * and dst sglists in the request are directly mapped if possible. Otherwise, a
+ * bounce buffer is allocated and used to copy the data from the src or to the
+ * dst, or both.
+ */
+static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
+ u8 *hash_subkey, u8 *iv, void *aes_ctx, bool decrypt)
+{
+ u8 *src, *dst, *assoc, *bounce_buf = NULL;
+ bool src_mapped = false, dst_mapped = false;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ unsigned long data_len = req->cryptlen - (decrypt ? auth_tag_len : 0);
+ struct scatter_walk src_sg_walk;
+ struct scatter_walk dst_sg_walk = {};
+ int retval = 0;
+ unsigned long bounce_buf_size = data_len + auth_tag_len + req->assoclen;
+
+ if (auth_tag_len > 16)
+ return -EINVAL;
+
+ retval = get_request_buffer(req->src, &src_sg_walk, bounce_buf_size,
+ &assoc, &bounce_buf, &src_mapped);
+ if (retval)
+ goto exit;
+
+ src = assoc + req->assoclen;
+
+ if (req->src == req->dst) {
+ dst = src;
+ dst_mapped = src_mapped;
+ } else {
+ retval = get_request_buffer(req->dst, &dst_sg_walk,
+ bounce_buf_size, &dst, &bounce_buf,
+ &dst_mapped);
+ if (retval)
+ goto exit;
+
+ dst += req->assoclen;
+ }
+
+ if (!src_mapped)
+ scatterwalk_map_and_copy(bounce_buf, req->src, 0,
+ req->assoclen + req->cryptlen, 0);
+
+ kernel_fpu_begin();
+
+ if (decrypt) {
+ u8 gen_auth_tag[16];
+
+ aesni_gcm_dec_tfm(aes_ctx, dst, src, data_len, iv,
+ hash_subkey, assoc, assoclen,
+ gen_auth_tag, auth_tag_len);
+ /* Compare generated tag with passed in tag. */
+ if (crypto_memneq(src + data_len, gen_auth_tag, auth_tag_len))
+ retval = -EBADMSG;
+
+ } else
+ aesni_gcm_enc_tfm(aes_ctx, dst, src, data_len, iv,
+ hash_subkey, assoc, assoclen,
+ dst + data_len, auth_tag_len);
+
+ kernel_fpu_end();
+
+ if (!dst_mapped)
+ scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
+ data_len + (decrypt ? 0 : auth_tag_len),
+ 1);
+exit:
+ if (req->dst != req->src)
+ put_request_buffer(dst - req->assoclen, req->dst->length,
+ dst_mapped, &dst_sg_walk, true);
+
+ put_request_buffer(assoc, req->src->length, src_mapped, &src_sg_walk,
+ false);
+
+ kfree(bounce_buf);
+ return retval;
+}
+
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
-
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
- GFP_ATOMIC);
- if (unlikely(!assoc))
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
- kernel_fpu_begin();
- aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
- hash_subkey, assoc, assoclen,
- dst + req->cryptlen, auth_tag_len);
- kernel_fpu_end();
-
- /* The authTag (aka the Integrity Check Value) needs to be written
- * back to the packet. */
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- req->cryptlen + auth_tag_len, 1);
- kfree(assoc);
- }
- return 0;
+ return gcmaes_crypt(req, assoclen, hash_subkey, iv, aes_ctx, false);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- unsigned long tempCipherLen = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 authTag[16];
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
- int retval = 0;
-
- tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
-
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) && req->dst->length &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
- if (!assoc)
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
-
- kernel_fpu_begin();
- aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
- hash_subkey, assoc, assoclen,
- authTag, auth_tag_len);
- kernel_fpu_end();
-
- /* Compare generated tag with passed in tag. */
- retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
- -EBADMSG : 0;
-
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- tempCipherLen, 1);
- kfree(assoc);
- }
- return retval;
-
+ return gcmaes_crypt(req, assoclen, hash_subkey, iv, aes_ctx, true);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
--
2.16.0.rc1.238.g530d649a79-goog

2018-01-31 20:27:30

by Junaid Shahid

Subject: [PATCH v3 3/4] crypto: aesni - Directly use kmap_atomic instead of scatter_walk object in gcm(aes)

gcmaes_crypt uses a scatter_walk object to map and unmap the crypto
request sglists. But the only purpose the scatter_walk appears to serve
here is to allow the D-cache to be flushed at the end for pages that were
used as output. However, that flush is not applicable on x86, so we can
avoid the scatter_walk object for simplicity.
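
On x86 the teardown therefore reduces to a plain kunmap_atomic(), and the
mapping side, as introduced by this patch, is just:

static u8 *map_buffer(struct scatterlist *sgl)
{
        /* kmap_atomic() is a simple address computation for low-mem
         * pages and a temporary fixmap mapping for high-mem pages. No
         * D-cache flush is needed on unmap, since x86 caches are
         * coherent (flush_dcache_page() is a no-op there). */
        return kmap_atomic(sg_page(sgl)) + sgl->offset;
}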

Signed-off-by: Junaid Shahid <[email protected]>
---
arch/x86/crypto/aesni-intel_glue.c | 36 +++++++++++++++---------------------
1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c11e531d21dd..9e69e02076d2 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -750,6 +750,11 @@ static bool is_mappable(struct scatterlist *sgl, unsigned long len)
(!PageHighMem(sg_page(sgl)) || sgl->offset + len <= PAGE_SIZE);
}

+static u8 *map_buffer(struct scatterlist *sgl)
+{
+ return kmap_atomic(sg_page(sgl)) + sgl->offset;
+}
+
/*
* Maps the sglist buffer and returns a pointer to the mapped buffer in
* data_buf.
@@ -762,14 +767,12 @@ static bool is_mappable(struct scatterlist *sgl, unsigned long len)
* the data_buf and the bounce_buf should be freed using kfree().
*/
static int get_request_buffer(struct scatterlist *sgl,
- struct scatter_walk *sg_walk,
unsigned long bounce_buf_size,
u8 **data_buf, u8 **bounce_buf, bool *mapped)
{
if (sg_is_last(sgl) && is_mappable(sgl, sgl->length)) {
*mapped = true;
- scatterwalk_start(sg_walk, sgl);
- *data_buf = scatterwalk_map(sg_walk);
+ *data_buf = map_buffer(sgl);
return 0;
}

@@ -785,14 +788,10 @@ static int get_request_buffer(struct scatterlist *sgl,
return 0;
}

-static void put_request_buffer(u8 *data_buf, unsigned long len, bool mapped,
- struct scatter_walk *sg_walk, bool output)
+static void put_request_buffer(u8 *data_buf, bool mapped)
{
- if (mapped) {
- scatterwalk_unmap(data_buf);
- scatterwalk_advance(sg_walk, len);
- scatterwalk_done(sg_walk, output, 0);
- }
+ if (mapped)
+ kunmap_atomic(data_buf);
}

/*
@@ -809,16 +808,14 @@ static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
unsigned long data_len = req->cryptlen - (decrypt ? auth_tag_len : 0);
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
int retval = 0;
unsigned long bounce_buf_size = data_len + auth_tag_len + req->assoclen;

if (auth_tag_len > 16)
return -EINVAL;

- retval = get_request_buffer(req->src, &src_sg_walk, bounce_buf_size,
- &assoc, &bounce_buf, &src_mapped);
+ retval = get_request_buffer(req->src, bounce_buf_size, &assoc,
+ &bounce_buf, &src_mapped);
if (retval)
goto exit;

@@ -828,9 +825,8 @@ static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
dst = src;
dst_mapped = src_mapped;
} else {
- retval = get_request_buffer(req->dst, &dst_sg_walk,
- bounce_buf_size, &dst, &bounce_buf,
- &dst_mapped);
+ retval = get_request_buffer(req->dst, bounce_buf_size, &dst,
+ &bounce_buf, &dst_mapped);
if (retval)
goto exit;

@@ -866,11 +862,9 @@ static int gcmaes_crypt(struct aead_request *req, unsigned int assoclen,
1);
exit:
if (req->dst != req->src)
- put_request_buffer(dst - req->assoclen, req->dst->length,
- dst_mapped, &dst_sg_walk, true);
+ put_request_buffer(dst - req->assoclen, dst_mapped);

- put_request_buffer(assoc, req->src->length, src_mapped, &src_sg_walk,
- false);
+ put_request_buffer(assoc, src_mapped);

kfree(bounce_buf);
return retval;
--
2.16.0.rc1.238.g530d649a79-goog

2018-01-31 20:27:28

by Junaid Shahid

Subject: [PATCH v3 1/4] crypto: aesni - Fix out-of-bounds access of the AAD buffer in AVX gcm-aesni

The AVX/AVX2 versions of the gcm-aes encryption/decryption functions can
access memory after the end of the AAD buffer if the AAD length is not a
multiple of 4 bytes. This didn't matter as long as the AAD and data
buffers were always contiguous, since the AVX versions are not used for
small data sizes and hence enough data bytes were always present to cover
the over-run. However, now that we have support for non-contiguous AAD and
data buffers, that is no longer the case. The over-run can touch a page
that is not mapped and thus crash the machine. This patch fixes that by
reading the last <16-byte block of the AAD byte by byte, preceded by an
8-byte load when the block is at least 8 bytes.
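
For reference, the new READ_PARTIAL_BLOCK assembly macro implements logic
equivalent to the following C sketch (the function name is illustrative,
and the byte placement assumes a little-endian target, which holds on x86):

/* Read 0 < dlen < 16 bytes from src into the 16-byte block dst without
 * ever touching memory past src + dlen - 1. */
static void read_partial_block(const u8 *src, unsigned long dlen, u8 *dst)
{
        u64 lo = 0, hi = 0;
        unsigned long i;

        if (dlen >= 8) {
                /* One 8-byte load is safe ... */
                memcpy(&lo, src, 8);
                /* ... then pick up the rest one byte at a time. */
                for (i = dlen; i > 8; i--)
                        hi = (hi << 8) | src[i - 1];
        } else {
                for (i = dlen; i > 0; i--)
                        lo = (lo << 8) | src[i - 1];
        }

        /* Little-endian stores put each byte at its natural offset and
         * zero-pad the remainder of the block. */
        memcpy(dst, &lo, 8);
        memcpy(dst + 8, &hi, 8);
}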

Signed-off-by: Junaid Shahid <[email protected]>
---
arch/x86/crypto/aesni-intel_avx-x86_64.S | 154 +++++++++----------------------
1 file changed, 42 insertions(+), 112 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index faecb1518bf8..97029059dc1a 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -106,14 +106,6 @@
##
## AAD Format with 64-bit Extended Sequence Number
##
-##
-## aadLen:
-## from the definition of the spec, aadLen can only be 8 or 12 bytes.
-## The code additionally supports aadLen of length 16 bytes.
-##
-## TLen:
-## from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-##
## poly = x^128 + x^127 + x^126 + x^121 + 1
## throughout the code, one tab and two tab indentations are used. one tab is
## for GHASH part, two tabs is for AES part.
@@ -155,30 +147,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000

-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
- .octa 0xffffffffffffffffffffffffffffffff
- .octa 0xffffffffffffffffffffffffffffff0C
- .octa 0xffffffffffffffffffffffffffff0D0C
- .octa 0xffffffffffffffffffffffffff0E0D0C
- .octa 0xffffffffffffffffffffffff0F0E0D0C
- .octa 0xffffffffffffffffffffff0C0B0A0908
- .octa 0xffffffffffffffffffff0D0C0B0A0908
- .octa 0xffffffffffffffffff0E0D0C0B0A0908
- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
- .octa 0xffffffffffffff0C0B0A090807060504
- .octa 0xffffffffffff0D0C0B0A090807060504
- .octa 0xffffffffff0E0D0C0B0A090807060504
- .octa 0xffffffff0F0E0D0C0B0A090807060504
- .octa 0xffffff0C0B0A09080706050403020100
- .octa 0xffff0D0C0B0A09080706050403020100
- .octa 0xff0E0D0C0B0A09080706050403020100
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-
.text


@@ -280,6 +248,36 @@ VARIABLE_OFFSET = 16*8
vaesenclast 16*10(arg1), \XMM0, \XMM0
.endm

+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN and XMM1
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
+ cmp $8, \DLEN
+ jl _read_lt8_\@
+ movq (\DPTR), \XMMDst
+ sub $8, \DLEN
+ jz _done_read_partial_block_\@
+ xor %eax, %eax
+_read_next_byte_\@:
+ shl $8, %rax
+ mov 7(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_\@
+ movq %rax, \XMM1
+ pslldq $8, \XMM1
+ por \XMM1, \XMMDst
+ jmp _done_read_partial_block_\@
+_read_lt8_\@:
+ xor %eax, %eax
+_read_next_byte_lt8_\@:
+ shl $8, %rax
+ mov -1(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_lt8_\@
+ movq %rax, \XMMDst
+_done_read_partial_block_\@:
+.endm
+
#ifdef CONFIG_AS_AVX
###############################################################################
# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
@@ -400,63 +398,29 @@ VARIABLE_OFFSET = 16*8
setreg

mov arg6, %r10 # r10 = AAD
- mov arg7, %r12 # r12 = aadLen
-
-
- mov %r12, %r11
+ mov arg7, %r11 # r11 = aadLen

vpxor reg_j, reg_j, reg_j
vpxor reg_i, reg_i, reg_i
cmp $16, %r11
- jl _get_AAD_rest8\@
+ jl _get_AAD_rest\@
_get_AAD_blocks\@:
vmovdqu (%r10), reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_i, reg_j, reg_j
GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu reg_j, reg_i
+
+ /* read the last <16B of AAD. */
+_get_AAD_rest\@:
cmp $0, %r11
je _get_AAD_done\@

- vpxor reg_i, reg_i, reg_i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
- cmp $4, %r11
- jle _get_AAD_rest4\@
- movq (%r10), \T1
- add $8, %r10
- sub $8, %r11
- vpslldq $8, \T1, \T1
- vpsrldq $8, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
- jmp _get_AAD_rest8\@
-_get_AAD_rest4\@:
- cmp $0, %r11
- jle _get_AAD_rest0\@
- mov (%r10), %eax
- movq %rax, \T1
- add $4, %r10
- sub $4, %r11
- vpslldq $12, \T1, \T1
- vpsrldq $4, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
-_get_AAD_rest0\@:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \T1
- vpshufb \T1, reg_i, reg_i
-_get_AAD_rest_final\@:
+ READ_PARTIAL_BLOCK %r10, %r11, \T1, reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_j, reg_i, reg_i
GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6
@@ -1706,64 +1670,30 @@ ENDPROC(aesni_gcm_dec_avx_gen2)
setreg

mov arg6, %r10 # r10 = AAD
- mov arg7, %r12 # r12 = aadLen
-
-
- mov %r12, %r11
+ mov arg7, %r11 # r11 = aadLen

vpxor reg_j, reg_j, reg_j
vpxor reg_i, reg_i, reg_i

cmp $16, %r11
- jl _get_AAD_rest8\@
+ jl _get_AAD_rest\@
_get_AAD_blocks\@:
vmovdqu (%r10), reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_i, reg_j, reg_j
GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu reg_j, reg_i
+
+ /* read the last <16B of AAD. */
+_get_AAD_rest\@:
cmp $0, %r11
je _get_AAD_done\@

- vpxor reg_i, reg_i, reg_i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
- cmp $4, %r11
- jle _get_AAD_rest4\@
- movq (%r10), \T1
- add $8, %r10
- sub $8, %r11
- vpslldq $8, \T1, \T1
- vpsrldq $8, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
- jmp _get_AAD_rest8\@
-_get_AAD_rest4\@:
- cmp $0, %r11
- jle _get_AAD_rest0\@
- mov (%r10), %eax
- movq %rax, \T1
- add $4, %r10
- sub $4, %r11
- vpslldq $12, \T1, \T1
- vpsrldq $4, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
-_get_AAD_rest0\@:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \T1
- vpshufb \T1, reg_i, reg_i
-_get_AAD_rest_final\@:
+ READ_PARTIAL_BLOCK %r10, %r11, \T1, reg_i
vpshufb SHUF_MASK(%rip), reg_i, reg_i
vpxor reg_j, reg_i, reg_i
GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6
--
2.16.0.rc1.238.g530d649a79-goog