2015-11-11 00:59:05

by Andrew Zaborowski

Subject: [PATCH 1/4] lib/mpi: only require buffers as big as needed for the integer

Since mpi_write_to_sgl and mpi_read_buffer explicitly left-align the
integers being written, it makes no sense to require a buffer big enough
for the number plus the leading zero bytes, which are not written. The
error returned also doesn't convey any information. So instead require
only the size actually needed and return -EOVERFLOW to signal when the
buffer is too short.
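
To illustrate the new calling convention (a minimal sketch, not part of
this patch; buf and buf_len stand in for whatever the caller has), a
caller that doesn't know the number of significant bytes up front can
size its buffer on demand:

	unsigned int nbytes;
	int ret;

	/* First attempt with whatever buffer is at hand. */
	ret = mpi_read_buffer(a, buf, buf_len, &nbytes, NULL);
	if (ret == -EOVERFLOW) {
		/* nbytes now holds the size actually needed. */
		buf = krealloc(buf, nbytes, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = mpi_read_buffer(a, buf, nbytes, &nbytes, NULL);
	}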

Signed-off-by: Andrew Zaborowski <[email protected]>
---
lib/mpi/mpicoder.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index c7e0a70..074d2df 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
* @buf: bufer to which the output will be written to. Needs to be at
* leaset mpi_get_size(a) long.
* @buf_len: size of the buf.
- * @nbytes: receives the actual length of the data written.
+ * @nbytes: receives the actual length of the data written on success, or
+ * the length of the data that would be written on -EOVERFLOW when
+ * buf_len is too small.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
@@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
unsigned int n = mpi_get_size(a);
int i, lzeros = 0;

- if (buf_len < n || !buf || !nbytes)
+ if (!buf || !nbytes)
return -EINVAL;

if (sign)
@@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
break;
}

+ if (buf_len < n - lzeros) {
+ *nbytes = n - lzeros;
+ return -EOVERFLOW;
+ }
+
p = buf;
*nbytes = n - lzeros;

@@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
* @nbytes: in/out param - it has the be set to the maximum number of
* bytes that can be written to sgl. This has to be at least
* the size of the integer a. On return it receives the actual
- * length of the data written.
+ * length of the data written on success, or the length of the
+ * data that would be written if the buffer was too small.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
@@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
unsigned int n = mpi_get_size(a);
int i, x, y = 0, lzeros = 0, buf_len;

- if (!nbytes || *nbytes < n)
+ if (!nbytes)
return -EINVAL;

if (sign)
@@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
break;
}

+ if (*nbytes < n - lzeros) {
+ *nbytes = n - lzeros;
+ return -EOVERFLOW;
+ }
+
*nbytes = n - lzeros;
buf_len = sgl->length;
p2 = sg_virt(sgl);
--
2.1.4


2015-11-11 00:59:05

by Andrew Zaborowski

Subject: [PATCH 2/4] crypto: rsa: only require output buffers as big as needed.

The RSA operations explicitly left-align the integers being written,
skipping any leading zero bytes, but still require the output buffers to
have enough space for the integer plus the leading zero bytes. Since the
size of the integer plus the leading zero bytes (i.e. the key modulus
size) can now be obtained more easily through crypto_akcipher_maxsize,
change the operations to require only as big a buffer as actually needed
when the caller has that information. The semantics of request->dst_len
don't change.
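
For illustration, a caller can now size the destination buffer from the
key rather than over-allocating; a minimal sketch, assuming a tfm with
the key already set:

	int len = crypto_akcipher_maxsize(tfm); /* key modulus size */
	u8 *dst;

	if (len < 0)
		return len;	/* no key set */

	dst = kmalloc(len, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;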

Signed-off-by: Andrew Zaborowski <[email protected]>
---
crypto/rsa.c | 24 ------------------------
1 file changed, 24 deletions(-)

diff --git a/crypto/rsa.c b/crypto/rsa.c
index 1093e04..58aad69 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -91,12 +91,6 @@ static int rsa_enc(struct akcipher_request *req)
goto err_free_c;
}

- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_c;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -136,12 +130,6 @@ static int rsa_dec(struct akcipher_request *req)
goto err_free_m;
}

- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
@@ -180,12 +168,6 @@ static int rsa_sign(struct akcipher_request *req)
goto err_free_s;
}

- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_s;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -225,12 +207,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}

- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
--
2.1.4

2015-11-11 00:59:07

by Andrew Zaborowski

Subject: [PATCH 3/4] crypto: akcipher: add crypto_akcipher_type methods needed by templates.

Add two dummy methods, .ctxsize and .init, that are required by the
crypto API internals simply because the framework calls them without
checking whether they were provided. They're only needed on the more
involved code path used to instantiate a template algorithm. Also expose
crypto_akcipher_type, as the other crypto types are exposed, so it can
be used from outside modules.
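
With the symbol exported, a template implementation can reference the
type when initializing its spawn, as the next patch does:

	err = crypto_init_spawn2(crypto_instance_ctx(inst), alg, inst,
				 &crypto_akcipher_type);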

Signed-off-by: Andrew Zaborowski <[email protected]>
---
crypto/akcipher.c | 16 +++++++++++++++-
include/crypto/algapi.h | 1 +
2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec04..6ef7f99 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -53,6 +53,11 @@ static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : akcipher\n");
}

+static int crypto_akcipher_init(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+ return 0;
+}
+
static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
@@ -75,8 +80,16 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}

-static const struct crypto_type crypto_akcipher_type = {
+static unsigned int crypto_akcipher_ctxsize(struct crypto_alg *alg, u32 type,
+ u32 mask)
+{
+ return alg->cra_ctxsize;
+}
+
+const struct crypto_type crypto_akcipher_type = {
+ .ctxsize = crypto_akcipher_ctxsize,
.extsize = crypto_alg_extsize,
+ .init = crypto_akcipher_init,
.init_tfm = crypto_akcipher_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
@@ -87,6 +100,7 @@ static const struct crypto_type crypto_akcipher_type = {
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
};
+EXPORT_SYMBOL_GPL(crypto_akcipher_type);

struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index c9fe145..1089f20 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -130,6 +130,7 @@ struct ablkcipher_walk {

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;
+extern const struct crypto_type crypto_akcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

--
2.1.4

2015-11-11 00:59:08

by Andrew Zaborowski

Subject: [PATCH 4/4] crypto: RSA padding algorithm

This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
This way an RSA cipher with padding can be obtained by instantiating
"pkcs1pad(rsa)". The reason for adding this is that RSA is almost never
used without this padding (or OAEP), so it will be needed for certificate
work in either the kernel or userspace. I also hear that hardware RSA
implementations are likely to include the padding, in which case an
implementation of the whole "pkcs1pad(rsa)" can be provided.
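
For encryption the template builds the standard block 0x00 || 0x02 || PS
|| 0x00 || M (with 0x01 and 0xff padding for signing); the leading 0x00
stays implicit because the child RSA operation is handed key_size - 1
input bytes. Once the template is registered, a user could allocate the
combined transform as with any other akcipher:

	struct crypto_akcipher *tfm;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);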

Signed-off-by: Andrew Zaborowski <[email protected]>
---
crypto/Makefile | 1 +
crypto/rsa-padding.c | 586 ++++++++++++++++++++++++++++++++++++++++++
crypto/rsa.c | 16 +-
include/crypto/internal/rsa.h | 2 +
4 files changed, 604 insertions(+), 1 deletion(-)
create mode 100644 crypto/rsa-padding.c

diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba92..46fe0b4 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
rsa_generic-y += rsaprivkey-asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-padding.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o

cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/rsa-padding.c b/crypto/rsa-padding.c
new file mode 100644
index 0000000..b9f9f31
--- /dev/null
+++ b/crypto/rsa-padding.c
@@ -0,0 +1,586 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+ struct crypto_akcipher *child;
+
+ unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+ struct akcipher_request child_req;
+
+ struct scatterlist in_sg[3], out_sg[2];
+ uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /*
+ * The maximum destination buffer size for the encrypt/sign operations
+ * will be the same as for RSA, even though it's smaller for
+ * decrypt/verify.
+ */
+
+ return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+ struct scatterlist *next)
+{
+ int nsegs = next ? 1 : 0;
+
+ if (offset_in_page(buf) + len <= PAGE_SIZE) {
+ nsegs += 1;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg, buf, len);
+ } else {
+ nsegs += 2;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+ sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+ offset_in_page(buf) + len - PAGE_SIZE);
+ }
+
+ if (next)
+ sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len];
+
+ if (!err) {
+ if (req_ctx->child_req.dst_len < ctx->key_size) {
+ memset(zeros, 0, sizeof(zeros));
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst,
+ sizeof(zeros)),
+ zeros, sizeof(zeros));
+ }
+
+ sg_pcopy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, ctx->key_size),
+ req_ctx->out_buf, req_ctx->child_req.dst_len,
+ sizeof(zeros));
+ }
+ req->dst_len = ctx->key_size;
+
+ kfree(req_ctx->in_buf);
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req,
+ pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int i, ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+ * Replace both input and output to add the padding in the input and
+ * the potential missing leading zeros in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x02;
+ for (i = 1; i < ps_end; i++)
+ req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS && err != -EBUSY)
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x02) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] == 0x00)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size || req->src_len != ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_decrypt_complete_cb, req);
+
+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS && err != -EBUSY)
+ return pkcs1pad_decrypt_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int i, ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+ * Replace both input and output to add the padding in the input and
+ * the potential missing leading zeros in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x01;
+ for (i = 1; i < ps_end; i++)
+ req_ctx->in_buf[i] = 0xff;
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_sign(&req_ctx->child_req);
+ if (err != -EINPROGRESS && err != -EBUSY)
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x01) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] != 0xff)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+ req_ctx->out_buf[pos] != 0x00) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_verify_complete_cb, req);
+
+ err = crypto_akcipher_verify(&req_ctx->child_req);
+ if (err != -EINPROGRESS && err != -EBUSY)
+ return pkcs1pad_verify_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = (void *) tfm->__crt_alg;
+ struct pkcs1pad_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_tfm *new_tfm;
+
+ new_tfm = crypto_spawn_tfm(crypto_instance_ctx(inst),
+ CRYPTO_ALG_TYPE_AKCIPHER, CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(new_tfm))
+ return PTR_ERR(new_tfm);
+
+ ctx->child = __crypto_akcipher_tfm(new_tfm);
+
+ return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct pkcs1pad_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_akcipher(ctx->child);
+}
+
+static struct crypto_instance *pkcs1pad_alloc(struct rtattr **tb)
+{
+ struct crypto_instance *inst = NULL;
+ struct crypto_alg *alg;
+ struct akcipher_alg *akalg;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER);
+ if (err)
+ return ERR_PTR(err);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AKCIPHER,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ akalg = crypto_alloc_instance2("pkcs1pad", alg,
+ offsetof(struct akcipher_alg, base));
+ if (IS_ERR(akalg)) {
+ inst = ERR_CAST(akalg);
+ goto out_put_alg;
+ }
+
+ inst = container_of(&akalg->base, struct crypto_instance, alg);
+
+ err = crypto_init_spawn2(crypto_instance_ctx(inst), alg, inst,
+ &crypto_akcipher_type);
+ if (err) {
+ inst = ERR_PTR(err);
+ kfree(akalg);
+
+ goto out_put_alg;
+ }
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER;
+ inst->alg.cra_priority = alg->cra_priority;
+ inst->alg.cra_type = alg->cra_type;
+
+ inst->alg.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+ inst->alg.cra_init = pkcs1pad_init_tfm;
+ inst->alg.cra_exit = pkcs1pad_exit_tfm;
+
+ akalg->encrypt = pkcs1pad_encrypt;
+ akalg->decrypt = pkcs1pad_decrypt;
+ akalg->sign = pkcs1pad_sign;
+ akalg->verify = pkcs1pad_verify;
+ akalg->set_pub_key = pkcs1pad_set_pub_key;
+ akalg->set_priv_key = pkcs1pad_set_priv_key;
+ akalg->max_size = pkcs1pad_get_max_size;
+ akalg->reqsize = sizeof(struct pkcs1pad_request) +
+ __crypto_akcipher_alg(alg)->reqsize;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return inst;
+}
+
+static void pkcs1pad_free(struct crypto_instance *inst)
+{
+ struct akcipher_alg *akalg = __crypto_akcipher_alg(&inst->alg);
+
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(akalg);
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+ .name = "pkcs1pad",
+ .alloc = pkcs1pad_alloc,
+ .free = pkcs1pad_free,
+ .module = THIS_MODULE,
+};
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 58aad69..77d737f 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/algapi.h>

/*
* RSAEP function [RFC3447 sec 5.1.1]
@@ -315,11 +316,24 @@ static struct akcipher_alg rsa = {

static int rsa_init(void)
{
- return crypto_register_akcipher(&rsa);
+ int err;
+
+ err = crypto_register_akcipher(&rsa);
+ if (err)
+ return err;
+
+ err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+ if (err) {
+ crypto_unregister_akcipher(&rsa);
+ return err;
+ }
+
+ return 0;
}

static void rsa_exit(void)
{
+ crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}

diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index f997e2d..c7585bd 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);

void rsa_free_key(struct rsa_key *rsa_key);
+
+extern struct crypto_template rsa_pkcs1pad_tmpl;
#endif
--
2.1.4

2015-11-11 13:19:45

by Stephan Müller

Subject: Re: [PATCH 4/4] crypto: RSA padding algorithm

On Wednesday, 11 November 2015 at 01:58:45, Andrew Zaborowski wrote:

Hi Andrew,

>This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
>This way an RSA cipher with padding can be obtained by instantiating
>"pkcs1pad(rsa)". The reason for adding this is that RSA is almost never
>used without this padding (or OAEP), so it will be needed for certificate
>work in either the kernel or userspace. I also hear that hardware RSA
>implementations are likely to include the padding, in which case an
>implementation of the whole "pkcs1pad(rsa)" can be provided.

In general, I think that there is a PKCS 1 implementation in the kernel in
crypto/asymmetric_keys/rsa.c

Shouldn't that all somehow be synchronized?

Maybe this patch should go in, but then crypto/asymmetric_keys/rsa.c should
either be removed or point to the kernel crypto API?
>
>Signed-off-by: Andrew Zaborowski <[email protected]>
>---
> crypto/Makefile | 1 +
> crypto/rsa-padding.c | 586 ++++++++++++++++++++++++++++++++++++++++++
> crypto/rsa.c | 16 +-
> include/crypto/internal/rsa.h | 2 +
> 4 files changed, 604 insertions(+), 1 deletion(-)
> create mode 100644 crypto/rsa-padding.c
>
>diff --git a/crypto/Makefile b/crypto/Makefile
>index f7aba92..46fe0b4 100644
>--- a/crypto/Makefile
>+++ b/crypto/Makefile
>@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
> rsa_generic-y += rsaprivkey-asn1.o
> rsa_generic-y += rsa.o
> rsa_generic-y += rsa_helper.o
>+rsa_generic-y += rsa-padding.o
> obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
>
> cryptomgr-y := algboss.o testmgr.o
>diff --git a/crypto/rsa-padding.c b/crypto/rsa-padding.c
>new file mode 100644
>index 0000000..b9f9f31
>--- /dev/null
>+++ b/crypto/rsa-padding.c
>@@ -0,0 +1,586 @@
>+/*
>+ * RSA padding templates.
>+ *
>+ * Copyright (c) 2015 Intel Corporation
>+ *
>+ * This program is free software; you can redistribute it and/or modify it
>+ * under the terms of the GNU General Public License as published by the Free
>+ * Software Foundation; either version 2 of the License, or (at your option)
>+ * any later version.
>+ */
>+
>+#include <crypto/algapi.h>
>+#include <crypto/akcipher.h>
>+#include <crypto/internal/akcipher.h>
>+#include <linux/err.h>
>+#include <linux/init.h>
>+#include <linux/kernel.h>
>+#include <linux/module.h>
>+#include <linux/random.h>
>+
>+struct pkcs1pad_ctx {
>+ struct crypto_akcipher *child;
>+
>+ unsigned int key_size;
>+};
>+
>+struct pkcs1pad_request {
>+ struct akcipher_request child_req;
>+
>+ struct scatterlist in_sg[3], out_sg[2];
>+ uint8_t *in_buf, *out_buf;
>+};
>+
>+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
>+ unsigned int keylen)
>+{
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ int err, size;
>+
>+ err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
>+
>+ if (!err) {
>+ /* Find out new modulus size from rsa implementation */
>+ size = crypto_akcipher_maxsize(ctx->child);
>+
>+ ctx->key_size = size > 0 ? size : 0;
>+ if (size <= 0)
>+ err = size;
>+ }
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
>+ unsigned int keylen)
>+{
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ int err, size;
>+
>+ err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
>+
>+ if (!err) {
>+ /* Find out new modulus size from rsa implementation */
>+ size = crypto_akcipher_maxsize(ctx->child);
>+
>+ ctx->key_size = size > 0 ? size : 0;
>+ if (size <= 0)
>+ err = size;
>+ }
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
>+{
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+
>+ /*
>+ * The maximum destination buffer size for the encrypt/sign operations
>+ * will be the same as for RSA, even though it's smaller for
>+ * decrypt/verify.
>+ */
>+
>+ return ctx->key_size ?: -EINVAL;
>+}
>+
>+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
>+ struct scatterlist *next)
>+{
>+ int nsegs = next ? 1 : 0;
>+
>+ if (offset_in_page(buf) + len <= PAGE_SIZE) {
>+ nsegs += 1;
>+ sg_init_table(sg, nsegs);
>+ sg_set_buf(sg, buf, len);
>+ } else {
>+ nsegs += 2;
>+ sg_init_table(sg, nsegs);
>+ sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
>+ sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
>+ offset_in_page(buf) + len - PAGE_SIZE);
>+ }
>+
>+ if (next)
>+ sg_chain(sg, nsegs, next);
>+}
>+
>+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len];
>+
>+ if (!err) {
>+ if (req_ctx->child_req.dst_len < ctx->key_size) {
>+ memset(zeros, 0, sizeof(zeros));
>+ sg_copy_from_buffer(req->dst,
>+ sg_nents_for_len(req->dst,
>+ sizeof(zeros)),
>+ zeros, sizeof(zeros));
>+ }
>+
>+ sg_pcopy_from_buffer(req->dst,
>+ sg_nents_for_len(req->dst, ctx->key_size),
>+ req_ctx->out_buf, req_ctx->child_req.dst_len,
>+ sizeof(zeros));
>+ }
>+ req->dst_len = ctx->key_size;
>+
>+ kfree(req_ctx->in_buf);
>+ kzfree(req_ctx->out_buf);
>+
>+ return err;
>+}
>+
>+static void pkcs1pad_encrypt_sign_complete_cb(
>+ struct crypto_async_request *child_async_req, int err)
>+{
>+ struct akcipher_request *req = child_async_req->data;
>+ struct crypto_async_request async_req;
>+
>+ if (err == -EINPROGRESS)
>+ return;
>+
>+ async_req.data = req->base.data;
>+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
>+ async_req.flags = child_async_req->flags;
>+ req->base.complete(&async_req,
>+ pkcs1pad_encrypt_sign_complete(req, err));
>+}
>+
>+static int pkcs1pad_encrypt(struct akcipher_request *req)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ int err;
>+ unsigned int i, ps_end;
>+
>+ if (!ctx->key_size)
>+ return -EINVAL;
>+
>+ if (req->src_len > ctx->key_size - 11)
>+ return -EOVERFLOW;
>+
>+ if (req->dst_len < ctx->key_size) {
>+ req->dst_len = ctx->key_size;
>+ return -EOVERFLOW;
>+ }
>+
>+ if (ctx->key_size > PAGE_SIZE)
>+ return -ENOTSUPP;
>+
>+ /*
>+ * Replace both input and output to add the padding in the input and
>+ * the potential missing leading zeros in the output.
>+ */
>+ req_ctx->child_req.src = req_ctx->in_sg;
>+ req_ctx->child_req.src_len = ctx->key_size - 1;
>+ req_ctx->child_req.dst = req_ctx->out_sg;
>+ req_ctx->child_req.dst_len = ctx->key_size;
>+
>+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->in_buf)
>+ return -ENOMEM;
>+
>+ ps_end = ctx->key_size - req->src_len - 2;
>+ req_ctx->in_buf[0] = 0x02;
>+ for (i = 1; i < ps_end; i++)
>+ req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
>+ req_ctx->in_buf[ps_end] = 0x00;
>+
>+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
>+ ctx->key_size - 1 - req->src_len, req->src);
>+
>+ req_ctx->out_buf = kmalloc(ctx->key_size,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->out_buf) {
>+ kfree(req_ctx->in_buf);
>+ return -ENOMEM;
>+ }
>+
>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>+ ctx->key_size, NULL);
>+
>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>+ pkcs1pad_encrypt_sign_complete_cb, req);
>+
>+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
>+ if (err != -EINPROGRESS && err != -EBUSY)
>+ return pkcs1pad_encrypt_sign_complete(req, err);
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ unsigned int pos;
>+
>+ if (err == -EOVERFLOW)
>+ /* Decrypted value had no leading 0 byte */
>+ err = -EINVAL;
>+
>+ if (err)
>+ goto done;
>+
>+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+
>+ if (req_ctx->out_buf[0] != 0x02) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
>+ if (req_ctx->out_buf[pos] == 0x00)
>+ break;
>+ if (pos < 9 || pos == req_ctx->child_req.dst_len) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+ pos++;
>+
>+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
>+ err = -EOVERFLOW;
>+ req->dst_len = req_ctx->child_req.dst_len - pos;
>+
>+ if (!err)
>+ sg_copy_from_buffer(req->dst,
>+ sg_nents_for_len(req->dst, req->dst_len),
>+ req_ctx->out_buf + pos, req->dst_len);
>+
>+done:
>+ kzfree(req_ctx->out_buf);
>+
>+ return err;
>+}
>+
>+static void pkcs1pad_decrypt_complete_cb(
>+ struct crypto_async_request *child_async_req, int err)
>+{
>+ struct akcipher_request *req = child_async_req->data;
>+ struct crypto_async_request async_req;
>+
>+ if (err == -EINPROGRESS)
>+ return;
>+
>+ async_req.data = req->base.data;
>+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
>+ async_req.flags = child_async_req->flags;
>+ req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
>+}
>+
>+static int pkcs1pad_decrypt(struct akcipher_request *req)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ int err;
>+
>+ if (!ctx->key_size || req->src_len != ctx->key_size)
>+ return -EINVAL;
>+
>+ if (ctx->key_size > PAGE_SIZE)
>+ return -ENOTSUPP;
>+
>+ /* Reuse input buffer, output to a new buffer */
>+ req_ctx->child_req.src = req->src;
>+ req_ctx->child_req.src_len = req->src_len;
>+ req_ctx->child_req.dst = req_ctx->out_sg;
>+ req_ctx->child_req.dst_len = ctx->key_size - 1;
>+
>+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->out_buf)
>+ return -ENOMEM;
>+
>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>+ ctx->key_size - 1, NULL);
>+
>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>+ pkcs1pad_decrypt_complete_cb, req);
>+
>+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
>+ if (err != -EINPROGRESS && err != -EBUSY)
>+ return pkcs1pad_decrypt_complete(req, err);
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_sign(struct akcipher_request *req)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ int err;
>+ unsigned int i, ps_end;
>+
>+ if (!ctx->key_size)
>+ return -EINVAL;
>+
>+ if (req->src_len > ctx->key_size - 11)
>+ return -EOVERFLOW;
>+
>+ if (req->dst_len < ctx->key_size) {
>+ req->dst_len = ctx->key_size;
>+ return -EOVERFLOW;
>+ }
>+
>+ if (ctx->key_size > PAGE_SIZE)
>+ return -ENOTSUPP;
>+
>+ /*
>+ * Replace both input and output to add the padding in the input and
>+ * the potential missing leading zeros in the output.
>+ */
>+ req_ctx->child_req.src = req_ctx->in_sg;
>+ req_ctx->child_req.src_len = ctx->key_size - 1;
>+ req_ctx->child_req.dst = req_ctx->out_sg;
>+ req_ctx->child_req.dst_len = ctx->key_size;
>+
>+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->in_buf)
>+ return -ENOMEM;
>+
>+ ps_end = ctx->key_size - req->src_len - 2;
>+ req_ctx->in_buf[0] = 0x01;
>+ for (i = 1; i < ps_end; i++)
>+ req_ctx->in_buf[i] = 0xff;

why not use memset here?

>+ req_ctx->in_buf[ps_end] = 0x00;
>+
>+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
>+ ctx->key_size - 1 - req->src_len, req->src);
>+
>+ req_ctx->out_buf = kmalloc(ctx->key_size,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->out_buf) {
>+ kfree(req_ctx->in_buf);
>+ return -ENOMEM;
>+ }
>+
>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>+ ctx->key_size, NULL);
>+
>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>+ pkcs1pad_encrypt_sign_complete_cb, req);
>+
>+ err = crypto_akcipher_sign(&req_ctx->child_req);
>+ if (err != -EINPROGRESS && err != -EBUSY)
>+ return pkcs1pad_encrypt_sign_complete(req, err);
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ unsigned int pos;
>+
>+ if (err == -EOVERFLOW)
>+ /* Decrypted value had no leading 0 byte */
>+ err = -EINVAL;
>+
>+ if (err)
>+ goto done;
>+
>+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+
>+ if (req_ctx->out_buf[0] != 0x01) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
>+ if (req_ctx->out_buf[pos] != 0xff)
>+ break;
>+ if (pos < 9 || pos == req_ctx->child_req.dst_len ||
>+ req_ctx->out_buf[pos] != 0x00) {
>+ err = -EINVAL;
>+ goto done;
>+ }
>+ pos++;
>+
>+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
>+ err = -EOVERFLOW;
>+ req->dst_len = req_ctx->child_req.dst_len - pos;
>+
>+ if (!err)
>+ sg_copy_from_buffer(req->dst,
>+ sg_nents_for_len(req->dst, req->dst_len),
>+ req_ctx->out_buf + pos, req->dst_len);
>+
>+done:
>+ kzfree(req_ctx->out_buf);
>+
>+ return err;
>+}
>+
>+static void pkcs1pad_verify_complete_cb(
>+ struct crypto_async_request *child_async_req, int err)
>+{
>+ struct akcipher_request *req = child_async_req->data;
>+ struct crypto_async_request async_req;
>+
>+ if (err == -EINPROGRESS)
>+ return;
>+
>+ async_req.data = req->base.data;
>+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
>+ async_req.flags = child_async_req->flags;
>+ req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
>+}
>+
>+static int pkcs1pad_verify(struct akcipher_request *req)
>+{
>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>+ int err;
>+
>+ if (!ctx->key_size)
>+ return -EINVAL;
>+
>+ if (ctx->key_size > PAGE_SIZE)
>+ return -ENOTSUPP;
>+
>+ /* Reuse input buffer, output to a new buffer */
>+ req_ctx->child_req.src = req->src;
>+ req_ctx->child_req.src_len = req->src_len;
>+ req_ctx->child_req.dst = req_ctx->out_sg;
>+ req_ctx->child_req.dst_len = ctx->key_size - 1;
>+
>+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>+ GFP_KERNEL : GFP_ATOMIC);
>+ if (!req_ctx->out_buf)
>+ return -ENOMEM;
>+
>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>+ ctx->key_size - 1, NULL);
>+
>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>+ pkcs1pad_verify_complete_cb, req);
>+
>+ err = crypto_akcipher_verify(&req_ctx->child_req);
>+ if (err != -EINPROGRESS && err != -EBUSY)
>+ return pkcs1pad_verify_complete(req, err);
>+
>+ return err;
>+}
>+
>+static int pkcs1pad_init_tfm(struct crypto_tfm *tfm)
>+{
>+ struct crypto_instance *inst = (void *) tfm->__crt_alg;
>+ struct pkcs1pad_ctx *ctx = crypto_tfm_ctx(tfm);
>+ struct crypto_tfm *new_tfm;
>+
>+ new_tfm = crypto_spawn_tfm(crypto_instance_ctx(inst),
>+ CRYPTO_ALG_TYPE_AKCIPHER, CRYPTO_ALG_TYPE_MASK);
>+ if (IS_ERR(new_tfm))
>+ return PTR_ERR(new_tfm);
>+
>+ ctx->child = __crypto_akcipher_tfm(new_tfm);
>+
>+ return 0;
>+}
>+
>+static void pkcs1pad_exit_tfm(struct crypto_tfm *tfm)
>+{
>+ struct pkcs1pad_ctx *ctx = crypto_tfm_ctx(tfm);
>+
>+ crypto_free_akcipher(ctx->child);
>+}
>+
>+static struct crypto_instance *pkcs1pad_alloc(struct rtattr **tb)
>+{
>+ struct crypto_instance *inst = NULL;
>+ struct crypto_alg *alg;
>+ struct akcipher_alg *akalg;
>+ int err;
>+
>+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER);
>+ if (err)
>+ return ERR_PTR(err);
>+
>+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AKCIPHER,
>+ CRYPTO_ALG_TYPE_MASK);
>+ if (IS_ERR(alg))
>+ return ERR_CAST(alg);
>+
>+ akalg = crypto_alloc_instance2("pkcs1pad", alg,
>+ offsetof(struct akcipher_alg, base));
>+ if (IS_ERR(akalg)) {
>+ inst = ERR_CAST(akalg);
>+ goto out_put_alg;
>+ }
>+
>+ inst = container_of(&akalg->base, struct crypto_instance, alg);
>+
>+ err = crypto_init_spawn2(crypto_instance_ctx(inst), alg, inst,
>+ &crypto_akcipher_type);
>+ if (err) {
>+ inst = ERR_PTR(err);
>+ kfree(akalg);
>+
>+ goto out_put_alg;
>+ }
>+
>+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER;
>+ inst->alg.cra_priority = alg->cra_priority;
>+ inst->alg.cra_type = alg->cra_type;
>+
>+ inst->alg.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
>+
>+ inst->alg.cra_init = pkcs1pad_init_tfm;
>+ inst->alg.cra_exit = pkcs1pad_exit_tfm;
>+
>+ akalg->encrypt = pkcs1pad_encrypt;
>+ akalg->decrypt = pkcs1pad_decrypt;
>+ akalg->sign = pkcs1pad_sign;
>+ akalg->verify = pkcs1pad_verify;
>+ akalg->set_pub_key = pkcs1pad_set_pub_key;
>+ akalg->set_priv_key = pkcs1pad_set_priv_key;
>+ akalg->max_size = pkcs1pad_get_max_size;
>+ akalg->reqsize = sizeof(struct pkcs1pad_request) +
>+ __crypto_akcipher_alg(alg)->reqsize;
>+
>+out_put_alg:
>+ crypto_mod_put(alg);
>+ return inst;
>+}
>+
>+static void pkcs1pad_free(struct crypto_instance *inst)
>+{
>+ struct akcipher_alg *akalg = __crypto_akcipher_alg(&inst->alg);
>+
>+ crypto_drop_spawn(crypto_instance_ctx(inst));
>+ kfree(akalg);
>+}
>+
>+struct crypto_template rsa_pkcs1pad_tmpl = {
>+ .name = "pkcs1pad",
>+ .alloc = pkcs1pad_alloc,
>+ .free = pkcs1pad_free,
>+ .module = THIS_MODULE,
>+};
>diff --git a/crypto/rsa.c b/crypto/rsa.c
>index 58aad69..77d737f 100644
>--- a/crypto/rsa.c
>+++ b/crypto/rsa.c
>@@ -13,6 +13,7 @@
> #include <crypto/internal/rsa.h>
> #include <crypto/internal/akcipher.h>
> #include <crypto/akcipher.h>
>+#include <crypto/algapi.h>
>
> /*
> * RSAEP function [RFC3447 sec 5.1.1]
>@@ -315,11 +316,24 @@ static struct akcipher_alg rsa = {
>
> static int rsa_init(void)
> {
>- return crypto_register_akcipher(&rsa);
>+ int err;
>+
>+ err = crypto_register_akcipher(&rsa);
>+ if (err)
>+ return err;
>+
>+ err = crypto_register_template(&rsa_pkcs1pad_tmpl);
>+ if (err) {
>+ crypto_unregister_akcipher(&rsa);
>+ return err;
>+ }
>+
>+ return 0;
> }
>
> static void rsa_exit(void)
> {
>+ crypto_unregister_template(&rsa_pkcs1pad_tmpl);
> crypto_unregister_akcipher(&rsa);
> }
>
>diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
>index f997e2d..c7585bd 100644
>--- a/include/crypto/internal/rsa.h
>+++ b/include/crypto/internal/rsa.h
>@@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
> unsigned int key_len);
>
> void rsa_free_key(struct rsa_key *rsa_key);
>+
>+extern struct crypto_template rsa_pkcs1pad_tmpl;
> #endif


Ciao
Stephan

2015-11-11 13:30:56

by Marcel Holtmann

Subject: Re: [PATCH 4/4] crypto: RSA padding algorithm

Hi Stephan,

>> This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
>> This way an RSA cipher with padding can be obtained by instantiating
>> "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never
>> used without this padding (or OAEP), so it will be needed for certificate
>> work in either the kernel or userspace. I also hear that hardware RSA
>> implementations are likely to include the padding, in which case an
>> implementation of the whole "pkcs1pad(rsa)" can be provided.
>
> In general, I think that there is a PKCS 1 implementation in the kernel in
> crypto/asymmetric_keys/rsa.c
>
> Shouldn't that all somehow be synchronized?
>
> Maybe this patch should go in, but then crypto/asymmetric_keys/rsa.c should
> either be removed or point to the kernel crypto API?

I think crypto/asymmetric_keys/ needs to move to security/keys/asymmetric/ and then utilize akcipher and also PKCS 1 from crypto/

Regards

Marcel

2015-11-11 22:01:37

by Tadeusz Struk

Subject: Re: [PATCH 4/4] crypto: RSA padding algorithm

On 11/10/2015 04:58 PM, Andrew Zaborowski wrote:
> This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
> This way an RSA cipher with padding can be obtained by instantiating
> "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never
> used without this padding (or OAEP), so it will be needed for certificate
> work in either the kernel or userspace. I also hear that hardware RSA
> implementations are likely to include the padding, in which case an
> implementation of the whole "pkcs1pad(rsa)" can be provided.
>
> Signed-off-by: Andrew Zaborowski <[email protected]>
> ---
> crypto/Makefile | 1 +
> crypto/rsa-padding.c | 586 ++++++++++++++++++++++++++++++++++++++++++
> crypto/rsa.c | 16 +-
> include/crypto/internal/rsa.h | 2 +
> 4 files changed, 604 insertions(+), 1 deletion(-)
> create mode 100644 crypto/rsa-padding.c

Can we call this new file rsa-pkcs1pad.c instead?
Thanks

2015-11-13 09:52:23

by Andrew Zaborowski

Subject: Re: [PATCH 4/4] crypto: RSA padding algorithm

Hi Stephan,

On 11 November 2015 at 14:19, Stephan Mueller <[email protected]> wrote:
> On Wednesday, 11 November 2015 at 01:58:45, Andrew Zaborowski wrote:
>
> Hi Andrew,
>
>>This patch adds PKCS#1 v1.5 standard RSA padding as a separate template.
>>This way an RSA cipher with padding can be obtained by instantiating
>>"pkcs1pad(rsa)". The reason for adding this is that RSA is almost never
>>used without this padding (or OAEP), so it will be needed for certificate
>>work in either the kernel or userspace. I also hear that hardware RSA
>>implementations are likely to include the padding, in which case an
>>implementation of the whole "pkcs1pad(rsa)" can be provided.
>
> In general, I think that there is a PKCS 1 implementation in the kernel in
> crypto/asymmetric_keys/rsa.c
>
> Shouldn't that all somehow be synchronized?

Probably, as Marcel says, the certificate code should use the crypto
algorithm API. In its current form it can't take advantage of hardware
acceleration, but it must also have far less overhead than if it used
the crypto API.

>
> Maybe this patch should go in, but then crypto/asymmetric_keys/rsa.c should
> either be removed or point to the kernel crypto API?
>>
>>Signed-off-by: Andrew Zaborowski <[email protected]>
>>---
>> crypto/Makefile | 1 +
>> crypto/rsa-padding.c | 586 ++++++++++++++++++++++++++++++++++++++++++
>> crypto/rsa.c | 16 +-
>> include/crypto/internal/rsa.h | 2 +
>> 4 files changed, 604 insertions(+), 1 deletion(-)
>> create mode 100644 crypto/rsa-padding.c
>>
>>diff --git a/crypto/Makefile b/crypto/Makefile
>>index f7aba92..46fe0b4 100644
>>--- a/crypto/Makefile
>>+++ b/crypto/Makefile
>>@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
>> rsa_generic-y += rsaprivkey-asn1.o
>> rsa_generic-y += rsa.o
>> rsa_generic-y += rsa_helper.o
>>+rsa_generic-y += rsa-padding.o
>> obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
>>
>> cryptomgr-y := algboss.o testmgr.o
>>diff --git a/crypto/rsa-padding.c b/crypto/rsa-padding.c
>>new file mode 100644
>>index 0000000..b9f9f31
>>--- /dev/null
>>+++ b/crypto/rsa-padding.c
>>@@ -0,0 +1,586 @@
>>+/*
>>+ * RSA padding templates.
>>+ *
>>+ * Copyright (c) 2015 Intel Corporation
>>+ *
>>+ * This program is free software; you can redistribute it and/or modify it
>>+ * under the terms of the GNU General Public License as published by the Free
>>+ * Software Foundation; either version 2 of the License, or (at your option)
>>+ * any later version.
>>+ */
>>+
>>+#include <crypto/algapi.h>
>>+#include <crypto/akcipher.h>
>>+#include <crypto/internal/akcipher.h>
>>+#include <linux/err.h>
>>+#include <linux/init.h>
>>+#include <linux/kernel.h>
>>+#include <linux/module.h>
>>+#include <linux/random.h>
>>+
>>+struct pkcs1pad_ctx {
>>+ struct crypto_akcipher *child;
>>+
>>+ unsigned int key_size;
>>+};
>>+
>>+struct pkcs1pad_request {
>>+ struct akcipher_request child_req;
>>+
>>+ struct scatterlist in_sg[3], out_sg[2];
>>+ uint8_t *in_buf, *out_buf;
>>+};
>>+
>>+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
>>+ unsigned int keylen)
>>+{
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ int err, size;
>>+
>>+ err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
>>+
>>+ if (!err) {
>>+ /* Find out new modulus size from rsa implementation */
>>+ size = crypto_akcipher_maxsize(ctx->child);
>>+
>>+ ctx->key_size = size > 0 ? size : 0;
>>+ if (size <= 0)
>>+ err = size;
>>+ }
>>+
>>+ return err;
>>+}
>>+
>>+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
>>+ unsigned int keylen)
>>+{
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ int err, size;
>>+
>>+ err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
>>+
>>+ if (!err) {
>>+ /* Find out new modulus size from rsa implementation */
>>+ size = crypto_akcipher_maxsize(ctx->child);
>>+
>>+ ctx->key_size = size > 0 ? size : 0;
>>+ if (size <= 0)
>>+ err = size;
>>+ }
>>+
>>+ return err;
>>+}
>>+
>>+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
>>+{
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+
>>+ /*
>>+ * The maximum destination buffer size for the encrypt/sign operations
>>+ * will be the same as for RSA, even though it's smaller for
>>+ * decrypt/verify.
>>+ */
>>+
>>+ return ctx->key_size ?: -EINVAL;
>>+}
>>+
>>+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
>>+ struct scatterlist *next)
>>+{
>>+ int nsegs = next ? 1 : 0;
>>+
>>+ if (offset_in_page(buf) + len <= PAGE_SIZE) {
>>+ nsegs += 1;
>>+ sg_init_table(sg, nsegs);
>>+ sg_set_buf(sg, buf, len);
>>+ } else {
>>+ nsegs += 2;
>>+ sg_init_table(sg, nsegs);
>>+ sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
>>+ sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
>>+ offset_in_page(buf) + len - PAGE_SIZE);
>>+ }
>>+
>>+ if (next)
>>+ sg_chain(sg, nsegs, next);
>>+}
>>+
>>+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
>>+{
>>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>>+ uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len];
>>+
>>+ if (!err) {
>>+ if (req_ctx->child_req.dst_len < ctx->key_size) {
>>+ memset(zeros, 0, sizeof(zeros));
>>+ sg_copy_from_buffer(req->dst,
>>+ sg_nents_for_len(req->dst,
>>+ sizeof(zeros)),
>>+ zeros, sizeof(zeros));
>>+ }
>>+
>>+ sg_pcopy_from_buffer(req->dst,
>>+ sg_nents_for_len(req->dst, ctx->key_size),
>>+ req_ctx->out_buf, req_ctx->child_req.dst_len,
>>+ sizeof(zeros));
>>+ }
>>+ req->dst_len = ctx->key_size;
>>+
>>+ kfree(req_ctx->in_buf);
>>+ kzfree(req_ctx->out_buf);
>>+
>>+ return err;
>>+}
>>+
>>+static void pkcs1pad_encrypt_sign_complete_cb(
>>+ struct crypto_async_request *child_async_req, int err)
>>+{
>>+ struct akcipher_request *req = child_async_req->data;
>>+ struct crypto_async_request async_req;
>>+
>>+ if (err == -EINPROGRESS)
>>+ return;
>>+
>>+ async_req.data = req->base.data;
>>+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
>>+ async_req.flags = child_async_req->flags;
>>+ req->base.complete(&async_req,
>>+ pkcs1pad_encrypt_sign_complete(req, err));
>>+}
>>+
>>+static int pkcs1pad_encrypt(struct akcipher_request *req)
>>+{
>>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>>+ int err;
>>+ unsigned int i, ps_end;
>>+
>>+ if (!ctx->key_size)
>>+ return -EINVAL;
>>+
>>+ if (req->src_len > ctx->key_size - 11)
>>+ return -EOVERFLOW;
>>+
>>+ if (req->dst_len < ctx->key_size) {
>>+ req->dst_len = ctx->key_size;
>>+ return -EOVERFLOW;
>>+ }
>>+
>>+ if (ctx->key_size > PAGE_SIZE)
>>+ return -ENOTSUPP;
>>+
>>+ /*
>>+ * Replace both input and output to add the padding in the input and
>>+ * the potential missing leading zeros in the output.
>>+ */
>>+ req_ctx->child_req.src = req_ctx->in_sg;
>>+ req_ctx->child_req.src_len = ctx->key_size - 1;
>>+ req_ctx->child_req.dst = req_ctx->out_sg;
>>+ req_ctx->child_req.dst_len = ctx->key_size;
>>+
>>+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
>>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>>+ GFP_KERNEL : GFP_ATOMIC);
>>+ if (!req_ctx->in_buf)
>>+ return -ENOMEM;
>>+
>>+ ps_end = ctx->key_size - req->src_len - 2;
>>+ req_ctx->in_buf[0] = 0x02;
>>+ for (i = 1; i < ps_end; i++)
>>+ req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
>>+ req_ctx->in_buf[ps_end] = 0x00;
>>+
>>+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
>>+ ctx->key_size - 1 - req->src_len, req->src);
>>+
>>+ req_ctx->out_buf = kmalloc(ctx->key_size,
>>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>>+ GFP_KERNEL : GFP_ATOMIC);
>>+ if (!req_ctx->out_buf) {
>>+ kfree(req_ctx->in_buf);
>>+ return -ENOMEM;
>>+ }
>>+
>>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>>+ ctx->key_size, NULL);
>>+
>>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>>+ pkcs1pad_encrypt_sign_complete_cb, req);
>>+
>>+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
>>+ if (err != -EINPROGRESS && err != -EBUSY)
>>+ return pkcs1pad_encrypt_sign_complete(req, err);
>>+
>>+ return err;
>>+}
>>+
>>+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
>>+{
>>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>>+ unsigned int pos;
>>+
>>+ if (err == -EOVERFLOW)
>>+ /* Decrypted value had no leading 0 byte */
>>+ err = -EINVAL;
>>+
>>+ if (err)
>>+ goto done;
>>+
>>+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
>>+ err = -EINVAL;
>>+ goto done;
>>+ }
>>+
>>+ if (req_ctx->out_buf[0] != 0x02) {
>>+ err = -EINVAL;
>>+ goto done;
>>+ }
>>+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
>>+ if (req_ctx->out_buf[pos] == 0x00)
>>+ break;
>>+ if (pos < 9 || pos == req_ctx->child_req.dst_len) {
>>+ err = -EINVAL;
>>+ goto done;
>>+ }
>>+ pos++;
>>+
>>+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
>>+ err = -EOVERFLOW;
>>+ req->dst_len = req_ctx->child_req.dst_len - pos;
>>+
>>+ if (!err)
>>+ sg_copy_from_buffer(req->dst,
>>+ sg_nents_for_len(req->dst, req->dst_len),
>>+ req_ctx->out_buf + pos, req->dst_len);
>>+
>>+done:
>>+ kzfree(req_ctx->out_buf);
>>+
>>+ return err;
>>+}
>>+
>>+static void pkcs1pad_decrypt_complete_cb(
>>+ struct crypto_async_request *child_async_req, int err)
>>+{
>>+ struct akcipher_request *req = child_async_req->data;
>>+ struct crypto_async_request async_req;
>>+
>>+ if (err == -EINPROGRESS)
>>+ return;
>>+
>>+ async_req.data = req->base.data;
>>+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
>>+ async_req.flags = child_async_req->flags;
>>+ req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
>>+}
>>+
>>+static int pkcs1pad_decrypt(struct akcipher_request *req)
>>+{
>>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>>+ int err;
>>+
>>+ if (!ctx->key_size || req->src_len != ctx->key_size)
>>+ return -EINVAL;
>>+
>>+ if (ctx->key_size > PAGE_SIZE)
>>+ return -ENOTSUPP;
>>+
>>+ /* Reuse input buffer, output to a new buffer */
>>+ req_ctx->child_req.src = req->src;
>>+ req_ctx->child_req.src_len = req->src_len;
>>+ req_ctx->child_req.dst = req_ctx->out_sg;
>>+ req_ctx->child_req.dst_len = ctx->key_size - 1;
>>+
>>+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
>>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>>+ GFP_KERNEL : GFP_ATOMIC);
>>+ if (!req_ctx->out_buf)
>>+ return -ENOMEM;
>>+
>>+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
>>+ ctx->key_size - 1, NULL);
>>+
>>+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
>>+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
>>+ pkcs1pad_decrypt_complete_cb, req);
>>+
>>+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
>>+ if (err != -EINPROGRESS && err != -EBUSY)
>>+ return pkcs1pad_decrypt_complete(req, err);
>>+
>>+ return err;
>>+}
>>+
>>+static int pkcs1pad_sign(struct akcipher_request *req)
>>+{
>>+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>>+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
>>+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
>>+ int err;
>>+ unsigned int i, ps_end;
>>+
>>+ if (!ctx->key_size)
>>+ return -EINVAL;
>>+
>>+ if (req->src_len > ctx->key_size - 11)
>>+ return -EOVERFLOW;
>>+
>>+ if (req->dst_len < ctx->key_size) {
>>+ req->dst_len = ctx->key_size;
>>+ return -EOVERFLOW;
>>+ }
>>+
>>+ if (ctx->key_size > PAGE_SIZE)
>>+ return -ENOTSUPP;
>>+
>>+ /*
>>+ * Replace both input and output to add the padding in the input and
>>+ * the potential missing leading zeros in the output.
>>+ */
>>+ req_ctx->child_req.src = req_ctx->in_sg;
>>+ req_ctx->child_req.src_len = ctx->key_size - 1;
>>+ req_ctx->child_req.dst = req_ctx->out_sg;
>>+ req_ctx->child_req.dst_len = ctx->key_size;
>>+
>>+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
>>+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>>+ GFP_KERNEL : GFP_ATOMIC);
>>+ if (!req_ctx->in_buf)
>>+ return -ENOMEM;
>>+
>>+ ps_end = ctx->key_size - req->src_len - 2;
>>+ req_ctx->in_buf[0] = 0x01;
>>+ for (i = 1; i < ps_end; i++)
>>+ req_ctx->in_buf[i] = 0xff;
>
> why not use memset here?

I will do this, also rename the file as suggested by Tadeusz, add one
missing check, and resend.
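
For the sign path that would be something like:

	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);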

Best regards