2017-06-21 22:47:49

by Gary R Hook

[permalink] [raw]
Subject: [PATCH 0/4] Enable full RSA support on CCPs

The following series enables RSA operations on version 5 devices,
adds a set-reqsize function (to provide uniformity with other cipher
APIs), implements akcipher enablement in the crypto layer, and
makes a tweak for expanded v5 device capabilities.

---

Gary R Hook (4):
crypto: ccp - Fix base RSA function for version 5 CCPs
crypto: Add akcipher_set_reqsize() function
crypto: ccp - Add support for RSA on the CCP
crypto: ccp - Expand RSA support for a v5 ccp


drivers/crypto/ccp/Makefile | 1
drivers/crypto/ccp/ccp-crypto-main.c | 21 ++
drivers/crypto/ccp/ccp-crypto-rsa.c | 289 ++++++++++++++++++++++++++++++++++
drivers/crypto/ccp/ccp-crypto.h | 32 ++++
drivers/crypto/ccp/ccp-debugfs.c | 1
drivers/crypto/ccp/ccp-dev-v3.c | 1
drivers/crypto/ccp/ccp-dev-v5.c | 12 +
drivers/crypto/ccp/ccp-dev.c | 1
drivers/crypto/ccp/ccp-dev.h | 2
drivers/crypto/ccp/ccp-ops.c | 98 +++++++-----
include/crypto/internal/akcipher.h | 6 +
include/linux/ccp.h | 1
12 files changed, 421 insertions(+), 44 deletions(-)
create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c


2017-06-21 22:47:58

by Gary R Hook

[permalink] [raw]
Subject: [PATCH 1/4] crypto: ccp - Fix base RSA function for version 5 CCPs

Version 5 devices have requirements for buffer lengths, as well as
parameter format (e.g. bits vs. bytes). Fix the base CCP driver
code to meet the requirements of all supported versions.

Signed-off-by: Gary R Hook <[email protected]>
---
drivers/crypto/ccp/ccp-dev-v5.c | 10 ++--
drivers/crypto/ccp/ccp-ops.c | 95 ++++++++++++++++++++++++---------------
2 files changed, 64 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b10d2d2075cb..632518efd685 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -469,7 +469,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;

function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
+ CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;

CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -484,10 +484,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

- /* Exponent is in LSB memory */
- CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
- CCP5_CMD_KEY_HI(&desc) = 0;
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ /* Key (Exponent) is in external memory */
+ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+ CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

return ccp5_do_cmd(&desc, op->cmd_q);
}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c0dfdacbdff5..11155e52c52c 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1731,10 +1731,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_rsa_engine *rsa = &cmd->u.rsa;
- struct ccp_dm_workarea exp, src;
- struct ccp_data dst;
+ struct ccp_dm_workarea exp, src, dst;
struct ccp_op op;
unsigned int sb_count, i_len, o_len;
+ unsigned int key_size_bytes;
int ret;

if (rsa->key_size > CCP_RSA_MAX_WIDTH)
@@ -1743,31 +1743,41 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
return -EINVAL;

- /* The RSA modulus must precede the message being acted upon, so
- * it must be copied to a DMA area where the message and the
- * modulus can be concatenated. Therefore the input buffer
- * length required is twice the output buffer length (which
- * must be a multiple of 256-bits).
- */
- o_len = ((rsa->key_size + 255) / 256) * 32;
- i_len = o_len * 2;
-
- sb_count = o_len / CCP_SB_BYTES;
-
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

- if (!op.sb_key)
- return -EIO;
+ /* Compute o_len, i_len in bytes. */
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* The RSA modulus must precede the message being acted upon, so
+ * it must be copied to a DMA area where the message and the
+ * modulus can be concatenated. Therefore the input buffer
+ * length required is twice the output buffer length (which
+ * must be a multiple of 256-bits). sb_count is the
+ * number of storage block slots required for the modulus
+ */
+ key_size_bytes = (rsa->key_size + 7) >> 3;
+ o_len = ((rsa->key_size + 255) / 256) * CCP_SB_BYTES;
+ i_len = key_size_bytes * 2;
+
+ sb_count = o_len / CCP_SB_BYTES;
+
+ op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+ sb_count);
+ if (!op.sb_key)
+ return -EIO;
+ } else {
+ /* A version 5 device allows a modulus size that will not fit
+ * in the LSB, so the command will transfer it from memory.
+ * But more importantly, the buffer sizes must be a multiple
+ * of 32 bytes; rounding up may be required.
+ */
+ key_size_bytes = 32 * ((rsa->key_size + 255) / 256);
+ o_len = key_size_bytes;
+ i_len = o_len * 2; /* bytes */
+ op.sb_key = cmd_q->sb_key;
+ }

- /* The RSA exponent may span multiple (32-byte) SB entries and must
- * be in little endian format. Reverse copy each 32-byte chunk
- * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
- * and each byte within that chunk and do not perform any byte swap
- * operations on the passthru operation.
- */
ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
if (ret)
goto e_sb;
@@ -1775,11 +1785,23 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
if (ret)
goto e_exp;
- ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
- if (ret) {
- cmd->engine_error = cmd_q->cmd_error;
- goto e_exp;
+
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
+ /* The RSA exponent may span multiple (32-byte) KSB entries and
+ * must be in little endian format. Reverse copy each 32-byte
+ * chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
+ * E1 chunk) and each byte within that chunk and do not perform
+ * any byte swap operations on the passthru operation.
+ */
+ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+ } else {
+ op.exp.u.dma.address = exp.dma.address;
+ op.exp.u.dma.offset = 0;
}

/* Concatenate the modulus and the message. Both the modulus and
@@ -1793,13 +1815,13 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
if (ret)
goto e_src;
- ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
+ ret = ccp_reverse_set_dm_area(&src, key_size_bytes, rsa->src, 0,
+ rsa->src_len);
if (ret)
goto e_src;

/* Prepare the output area for the operation */
- ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
- o_len, DMA_FROM_DEVICE);
+ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
if (ret)
goto e_src;

@@ -1807,9 +1829,9 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.src.u.dma.address = src.dma.address;
op.src.u.dma.offset = 0;
op.src.u.dma.length = i_len;
- op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.address = dst.dma.address;
op.dst.u.dma.offset = 0;
- op.dst.u.dma.length = o_len;
+ op.dst.u.dma.length = key_size_bytes;

op.u.rsa.mod_size = rsa->key_size;
op.u.rsa.input_len = i_len;
@@ -1820,10 +1842,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}

- ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
+ ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
- ccp_free_data(&dst, cmd_q);
+ ccp_dm_free(&dst);

e_src:
ccp_dm_free(&src);
@@ -1832,7 +1854,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ccp_dm_free(&exp);

e_sb:
- cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
+ cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

return ret;
}

2017-06-21 22:48:04

by Gary R Hook

[permalink] [raw]
Subject: [PATCH 2/4] crypto: Add akcipher_set_reqsize() function

Signed-off-by: Gary R Hook <[email protected]>
---
include/crypto/internal/akcipher.h | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index 479a0078f0f7..805686ba2be4 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -38,6 +38,12 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req)
return req->__ctx;
}

+static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+}
+
static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
{
return tfm->base.__crt_ctx;

2017-06-21 22:48:13

by Gary R Hook

[permalink] [raw]
Subject: [PATCH 3/4] crypto: ccp - Add support for RSA on the CCP

Wire up the v3 CCP as a cipher provider.

Signed-off-by: Gary R Hook <[email protected]>
---
drivers/crypto/ccp/Makefile | 1
drivers/crypto/ccp/ccp-crypto-main.c | 21 ++
drivers/crypto/ccp/ccp-crypto-rsa.c | 286 ++++++++++++++++++++++++++++++++++
drivers/crypto/ccp/ccp-crypto.h | 31 ++++
drivers/crypto/ccp/ccp-debugfs.c | 1
drivers/crypto/ccp/ccp-dev.c | 1
drivers/crypto/ccp/ccp-ops.c | 2
include/linux/ccp.h | 1
8 files changed, 341 insertions(+), 3 deletions(-)
create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 59493fd3a751..439bc2fcb464 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes-xts.o \
ccp-crypto-aes-galois.o \
ccp-crypto-des3.o \
+ ccp-crypto-rsa.o \
ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8dccbddabef1..dd7d00c680e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -17,6 +17,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

@@ -37,10 +38,15 @@
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@@ -358,6 +364,14 @@ static int ccp_register_algs(void)
return ret;
}

+ if (!rsa_disable) {
+ ret = ccp_register_rsa_algs(&akcipher_algs);
+ if (ret) {
+ rsa_disable = 1;
+ return ret;
+ }
+ }
+
return 0;
}

@@ -366,6 +380,7 @@ static void ccp_unregister_algs(void)
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
+ struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@@ -384,6 +399,12 @@ static void ccp_unregister_algs(void)
list_del(&aead_alg->entry);
kfree(aead_alg);
}
+
+ list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+ crypto_unregister_akcipher(&akc_alg->alg);
+ list_del(&akc_alg->entry);
+ kfree(akc_alg);
+ }
}

static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 000000000000..4a2a71463594
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,286 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct akcipher_request *req = akcipher_request_cast(async_req);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+ if (!ret)
+ req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+ ret = 0;
+
+ return ret;
+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+ return CCP_RSA_MAXMOD;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ int ret = 0;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_RSA;
+
+ rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
+ if (encrypt) {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+ } else {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
+ }
+ rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+ rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+ rctx->cmd.u.rsa.src = req->src;
+ rctx->cmd.u.rsa.src_len = req->src_len;
+ rctx->cmd.u.rsa.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, false);
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+ /* In bits */
+ if (len < 8 || len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+ /* Clean up old key data */
+ kfree(ctx->u.rsa.e_buf);
+ ctx->u.rsa.e_buf = NULL;
+ ctx->u.rsa.e_len = 0;
+ kfree(ctx->u.rsa.n_buf);
+ ctx->u.rsa.n_buf = NULL;
+ ctx->u.rsa.n_len = 0;
+ kfree(ctx->u.rsa.d_buf);
+ ctx->u.rsa.d_buf = NULL;
+ ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key;
+ int key_len, i;
+ int ret;
+
+ ccp_rsa_free_key_bufs(ctx);
+ memset(&raw_key, 0, sizeof(raw_key));
+
+ /* Code borrowed from crypto/rsa.c */
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ goto e_key;
+
+ /* Remove leading zeroes from the modulus (n) */
+ key_len = 0;
+ for (i = 0; i < raw_key.n_sz; i++)
+ if (raw_key.n[i]) {
+ key_len = raw_key.n_sz - i;
+ break;
+ }
+ ctx->u.rsa.key_len = key_len << 3; /* bits */
+ if (ccp_check_key_length(ctx->u.rsa.key_len)) {
+ ret = -EINVAL;
+ goto e_key;
+ }
+ ctx->u.rsa.n_len = key_len;
+ sg_init_one(&ctx->u.rsa.n_sg, raw_key.n + i, key_len);
+
+ /* Remove leading zeroes from the public key (e) */
+ key_len = 0;
+ for (i = 0; i < raw_key.e_sz; i++)
+ if (raw_key.e[i]) {
+ key_len = raw_key.e_sz - i;
+ break;
+ }
+ ctx->u.rsa.e_len = key_len;
+ sg_init_one(&ctx->u.rsa.e_sg, raw_key.e + i, key_len);
+
+ if (private) {
+ /* Remove leading zeroes from the private key (d) */
+ key_len = 0;
+ for (i = 0; i < raw_key.d_sz; i++)
+ if (raw_key.d[i]) {
+ key_len = raw_key.d_sz - i;
+ break;
+ }
+ ctx->u.rsa.d_len = key_len;
+ sg_init_one(&ctx->u.rsa.d_sg, raw_key.d + i, key_len);
+ }
+
+ return 0;
+
+e_key:
+ return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+ ctx->complete = ccp_rsa_complete;
+
+ return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg ccp_rsa_defaults = {
+ .encrypt = ccp_rsa_encrypt,
+ .decrypt = ccp_rsa_decrypt,
+ .sign = ccp_rsa_decrypt,
+ .verify = ccp_rsa_encrypt,
+ .set_pub_key = ccp_rsa_setpubkey,
+ .set_priv_key = ccp_rsa_setprivkey,
+ .max_size = ccp_rsa_maxsize,
+ .init = ccp_rsa_init_tfm,
+ .exit = ccp_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-ccp",
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+ },
+};
+
+struct ccp_rsa_def {
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int reqsize;
+ struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_rsa_def rsa_algs[] = {
+ {
+ .version = CCP_VERSION(3, 0),
+ .name = "rsa",
+ .driver_name = "rsa-ccp",
+ .reqsize = sizeof(struct ccp_rsa_req_ctx),
+ .alg_defaults = &ccp_rsa_defaults,
+ }
+};
+
+int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+{
+ struct ccp_crypto_akcipher_alg *ccp_alg;
+ struct akcipher_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ ret = crypto_register_akcipher(alg);
+ if (ret) {
+ pr_err("%s akcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_rsa_algs(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ /* Register the RSA algorithm in standard mode
+ * This works for CCP v3 and later
+ */
+ for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
+ if (rsa_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index dd5bf15f06e5..5d592ecc9af5 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -24,6 +24,8 @@
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/rsa.h>

#define CCP_LOG_LEVEL KERN_INFO

@@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg {
struct ahash_alg alg;
};

+struct ccp_crypto_akcipher_alg {
+ struct list_head entry;
+
+ struct akcipher_alg alg;
+};
+
static inline struct ccp_crypto_ablkcipher_alg *
ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
@@ -227,12 +235,34 @@ struct ccp_sha_exp_ctx {
u8 buf[MAX_SHA_BLOCK_SIZE];
};

+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+ unsigned int key_len; /* in bits */
+ struct scatterlist e_sg;
+ u8 *e_buf;
+ unsigned int e_len;
+ struct scatterlist n_sg;
+ u8 *n_buf;
+ unsigned int n_len;
+ struct scatterlist d_sg;
+ u8 *d_buf;
+ unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+ struct ccp_cmd cmd;
+};
+
+#define CCP_RSA_MAXMOD (4 * 1024 / 8)
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);

union {
struct ccp_aes_ctx aes;
+ struct ccp_rsa_ctx rsa;
struct ccp_sha_ctx sha;
struct ccp_des3_ctx des3;
} u;
@@ -249,5 +279,6 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
int ccp_register_des3_algs(struct list_head *head);
+int ccp_register_rsa_algs(struct list_head *head);

#endif
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 99aba1622613..88191c45ca7d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -291,7 +291,6 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
struct dentry *debugfs_q_instance;
struct dentry *debugfs_q_stats;
unsigned long flags;
- int rc = 0;
int i;

if (!debugfs_initialized())
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2506b5025700..67cbb3e76888 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -415,6 +415,7 @@ static void ccp_do_cmd_complete(unsigned long data)
struct ccp_cmd *cmd = tdata->cmd;

cmd->callback(cmd->data, cmd->ret);
+
complete(&tdata->completion);
}

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 11155e52c52c..2cdd15a92178 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1786,7 +1786,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_exp;

- if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
/* The RSA exponent may span multiple (32-byte) KSB entries and
* must be in little endian format. Reverse copy each 32-byte
* chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 3285c944194a..c03ee844a99d 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -20,7 +20,6 @@
#include <crypto/aes.h>
#include <crypto/sha.h>

-
struct ccp_device;
struct ccp_cmd;


2017-06-21 22:48:21

by Gary R Hook

[permalink] [raw]
Subject: [PATCH 4/4] crypto: ccp - Expand RSA support for a v5 ccp

A V5 device can accommodate larger keys, as well as read the keys
directly from memory instead of requiring them to be in a local
storage block.


Signed-off-by: Gary R Hook <[email protected]>
---
drivers/crypto/ccp/ccp-crypto-rsa.c | 5 ++++-
drivers/crypto/ccp/ccp-crypto.h | 1 +
drivers/crypto/ccp/ccp-dev-v3.c | 1 +
drivers/crypto/ccp/ccp-dev-v5.c | 2 ++
drivers/crypto/ccp/ccp-dev.h | 2 ++
drivers/crypto/ccp/ccp-ops.c | 3 ++-
6 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 4a2a71463594..93e6b00ce34d 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -43,7 +43,10 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)

static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
- return CCP_RSA_MAXMOD;
+ if (ccp_version() > CCP_VERSION(3, 0))
+ return CCP5_RSA_MAXMOD;
+ else
+ return CCP_RSA_MAXMOD;
}

static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 5d592ecc9af5..40598894113b 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -255,6 +255,7 @@ struct ccp_rsa_req_ctx {
};

#define CCP_RSA_MAXMOD (4 * 1024 / 8)
+#define CCP5_RSA_MAXMOD (16 * 1024 / 8)

/***** Common Context Structure *****/
struct ccp_ctx {
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e30656f..9b159b0a891e 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -592,4 +592,5 @@ static void ccp_destroy(struct ccp_device *ccp)
.perform = &ccp3_actions,
.bar = 2,
.offset = 0x20000,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 632518efd685..6043552322fd 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1115,6 +1115,7 @@ static void ccp5other_config(struct ccp_device *ccp)
.perform = &ccp5_actions,
.bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
@@ -1124,4 +1125,5 @@ static void ccp5other_config(struct ccp_device *ccp)
.perform = &ccp5_actions,
.bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a70154ac7405..8242cf54d90f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -200,6 +200,7 @@
#define CCP_SHA_SB_COUNT 1

#define CCP_RSA_MAX_WIDTH 4096
+#define CCP5_RSA_MAX_WIDTH 16384

#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
@@ -677,6 +678,7 @@ struct ccp_vdata {
const struct ccp_actions *perform;
const unsigned int bar;
const unsigned int offset;
+ const unsigned int rsamax;
};

extern const struct ccp_vdata ccpv3;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 2cdd15a92178..ea5e4ede1eed 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1737,7 +1737,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
unsigned int key_size_bytes;
int ret;

- if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ /* Check against the maximum allowable size, in bits */
+ if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
return -EINVAL;

if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)

2017-06-22 05:15:06

by Stephan Müller

[permalink] [raw]
Subject: Re: [PATCH 3/4] crypto: ccp - Add support for RSA on the CCP

Am Donnerstag, 22. Juni 2017, 00:48:01 CEST schrieb Gary R Hook:

Hi Gary,

> Wire up the v3 CCP as a cipher provider.
>
> Signed-off-by: Gary R Hook <[email protected]>
> ---
> drivers/crypto/ccp/Makefile | 1
> drivers/crypto/ccp/ccp-crypto-main.c | 21 ++
> drivers/crypto/ccp/ccp-crypto-rsa.c | 286
> ++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h |
> 31 ++++
> drivers/crypto/ccp/ccp-debugfs.c | 1
> drivers/crypto/ccp/ccp-dev.c | 1
> drivers/crypto/ccp/ccp-ops.c | 2
> include/linux/ccp.h | 1
> 8 files changed, 341 insertions(+), 3 deletions(-)
> create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
>
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 59493fd3a751..439bc2fcb464 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
> ccp-crypto-aes-xts.o \
> ccp-crypto-aes-galois.o \
> ccp-crypto-des3.o \
> + ccp-crypto-rsa.o \
> ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c
> b/drivers/crypto/ccp/ccp-crypto-main.c index 8dccbddabef1..dd7d00c680e7
> 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -17,6 +17,7 @@
> #include <linux/ccp.h>
> #include <linux/scatterlist.h>
> #include <crypto/internal/hash.h>
> +#include <crypto/internal/akcipher.h>
>
> #include "ccp-crypto.h"
>
> @@ -37,10 +38,15 @@
> module_param(des3_disable, uint, 0444);
> MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
>
> +static unsigned int rsa_disable;
> +module_param(rsa_disable, uint, 0444);
> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
> +
> /* List heads for the supported algorithms */
> static LIST_HEAD(hash_algs);
> static LIST_HEAD(cipher_algs);
> static LIST_HEAD(aead_algs);
> +static LIST_HEAD(akcipher_algs);
>
> /* For any tfm, requests for that tfm must be returned on the order
> * received. With multiple queues available, the CCP can process more
> @@ -358,6 +364,14 @@ static int ccp_register_algs(void)
> return ret;
> }
>
> + if (!rsa_disable) {
> + ret = ccp_register_rsa_algs(&akcipher_algs);
> + if (ret) {
> + rsa_disable = 1;
> + return ret;
> + }
> + }
> +
> return 0;
> }
>
> @@ -366,6 +380,7 @@ static void ccp_unregister_algs(void)
> struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
> struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
> struct ccp_crypto_aead *aead_alg, *aead_tmp;
> + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
>
> list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
> crypto_unregister_ahash(&ahash_alg->alg);
> @@ -384,6 +399,12 @@ static void ccp_unregister_algs(void)
> list_del(&aead_alg->entry);
> kfree(aead_alg);
> }
> +
> + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
> + crypto_unregister_akcipher(&akc_alg->alg);
> + list_del(&akc_alg->entry);
> + kfree(akc_alg);
> + }
> }
>
> static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c
> b/drivers/crypto/ccp/ccp-crypto-rsa.c new file mode 100644
> index 000000000000..4a2a71463594
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -0,0 +1,286 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
> + *
> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Gary R Hook <[email protected]>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +#include <linux/scatterlist.h>
> +#include <linux/crypto.h>
> +#include <crypto/algapi.h>
> +#include <crypto/internal/rsa.h>
> +#include <crypto/internal/akcipher.h>
> +#include <crypto/akcipher.h>
> +#include <crypto/scatterwalk.h>
> +
> +#include "ccp-crypto.h"
> +
> +static inline struct akcipher_request *akcipher_request_cast(
> + struct crypto_async_request *req)
> +{
> + return container_of(req, struct akcipher_request, base);
> +}
> +
> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int
> ret) +{
> + struct akcipher_request *req = akcipher_request_cast(async_req);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +
> + if (!ret)
> + req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
> +
> + ret = 0;
> +
> + return ret;
> +}
> +
> +static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> +{
> + return CCP_RSA_MAXMOD;
> +}
> +
> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> +{
> + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> + int ret = 0;
> +
> + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> + INIT_LIST_HEAD(&rctx->cmd.entry);
> + rctx->cmd.engine = CCP_ENGINE_RSA;
> +
> + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
> + if (encrypt) {
> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
> + } else {
> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
> + }
> + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
> + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
> + rctx->cmd.u.rsa.src = req->src;
> + rctx->cmd.u.rsa.src_len = req->src_len;
> + rctx->cmd.u.rsa.dst = req->dst;
> +
> + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
> +
> + return ret;
> +}
> +
> +static int ccp_rsa_encrypt(struct akcipher_request *req)
> +{
> + return ccp_rsa_crypt(req, true);
> +}
> +
> +static int ccp_rsa_decrypt(struct akcipher_request *req)
> +{
> + return ccp_rsa_crypt(req, false);
> +}
> +
> +static int ccp_check_key_length(unsigned int len)
> +{
> + /* In bits */
> + if (len < 8 || len > 4096)
> + return -EINVAL;
> + return 0;
> +}
> +
> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
> +{
> + /* Clean up old key data */
> + kfree(ctx->u.rsa.e_buf);
> + ctx->u.rsa.e_buf = NULL;
> + ctx->u.rsa.e_len = 0;
> + kfree(ctx->u.rsa.n_buf);
> + ctx->u.rsa.n_buf = NULL;
> + ctx->u.rsa.n_len = 0;
> + kfree(ctx->u.rsa.d_buf);

kzfree, please

> + ctx->u.rsa.d_buf = NULL;
> + ctx->u.rsa.d_len = 0;
> +}
> +
> +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
> + unsigned int keylen, bool private)
> +{
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> + struct rsa_key raw_key;
> + int key_len, i;
> + int ret;
> +
> + ccp_rsa_free_key_bufs(ctx);
> + memset(&raw_key, 0, sizeof(raw_key));
> +
> + /* Code borrowed from crypto/rsa.c */
> + if (private)
> + ret = rsa_parse_priv_key(&raw_key, key, keylen);
> + else
> + ret = rsa_parse_pub_key(&raw_key, key, keylen);
> + if (ret)
> + goto e_key;
> +
> + /* Remove leading zeroes from the modulus (n) */

Three fragments doing the same thing -- wouldn't an inline helper be cleaner here?

> + key_len = 0;
> + for (i = 0; i < raw_key.n_sz; i++)
> + if (raw_key.n[i]) {
> + key_len = raw_key.n_sz - i;
> + break;
> + }
> + ctx->u.rsa.key_len = key_len << 3; /* bits */
> + if (ccp_check_key_length(ctx->u.rsa.key_len)) {
> + ret = -EINVAL;
> + goto e_key;
> + }
> + ctx->u.rsa.n_len = key_len;
> + sg_init_one(&ctx->u.rsa.n_sg, raw_key.n + i, key_len);
> +
> + /* Remove leading zeroes from the public key (e) */
> + key_len = 0;
> + for (i = 0; i < raw_key.e_sz; i++)
> + if (raw_key.e[i]) {
> + key_len = raw_key.e_sz - i;
> + break;
> + }
> + ctx->u.rsa.e_len = key_len;
> + sg_init_one(&ctx->u.rsa.e_sg, raw_key.e + i, key_len);
> +
> + if (private) {
> + /* Remove leading zeroes from the private key (d) */
> + key_len = 0;
> + for (i = 0; i < raw_key.d_sz; i++)
> + if (raw_key.d[i]) {
> + key_len = raw_key.d_sz - i;
> + break;
> + }
> + ctx->u.rsa.d_len = key_len;
> + sg_init_one(&ctx->u.rsa.d_sg, raw_key.d + i, key_len);

As I see no memcpy for the key components, how is it ensured that the caller's
memory holding the key will stay alive after a setkey call? Further, wouldn't
the ccp_rsa_free_key_bufs function cause a double free as it would act on
user-provided memory the user may also try to free?

Ciao
Stephan

2017-06-22 14:45:27

by Tom Lendacky

[permalink] [raw]
Subject: Re: [PATCH 1/4] crypto: ccp - Fix base RSA function for version 5 CCPs

On 6/21/2017 5:47 PM, Gary R Hook wrote:
> Version 5 devices have requirements for buffer lengths, as well as
> parameter format (e.g. bits vs. bytes). Fix the base CCP driver
> code to meet requirements all supported versions.
>
> Signed-off-by: Gary R Hook <[email protected]>
> ---
> drivers/crypto/ccp/ccp-dev-v5.c | 10 ++--
> drivers/crypto/ccp/ccp-ops.c | 95 ++++++++++++++++++++++++---------------
> 2 files changed, 64 insertions(+), 41 deletions(-)
>
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index b10d2d2075cb..632518efd685 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -469,7 +469,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
> CCP5_CMD_PROT(&desc) = 0;
>
> function.raw = 0;
> - CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
> + CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
> CCP5_CMD_FUNCTION(&desc) = function.raw;
>
> CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
> @@ -484,10 +484,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
> CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
> CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
>
> - /* Exponent is in LSB memory */
> - CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
> - CCP5_CMD_KEY_HI(&desc) = 0;
> - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
> + /* Key (Exponent) is in external memory */
> + CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
> + CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
> + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
>
> return ccp5_do_cmd(&desc, op->cmd_q);
> }
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index c0dfdacbdff5..11155e52c52c 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1731,10 +1731,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> {
> struct ccp_rsa_engine *rsa = &cmd->u.rsa;
> - struct ccp_dm_workarea exp, src;
> - struct ccp_data dst;
> + struct ccp_dm_workarea exp, src, dst;
> struct ccp_op op;
> unsigned int sb_count, i_len, o_len;
> + unsigned int key_size_bytes;
> int ret;
>
> if (rsa->key_size > CCP_RSA_MAX_WIDTH)
> @@ -1743,31 +1743,41 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
> return -EINVAL;
>
> - /* The RSA modulus must precede the message being acted upon, so
> - * it must be copied to a DMA area where the message and the
> - * modulus can be concatenated. Therefore the input buffer
> - * length required is twice the output buffer length (which
> - * must be a multiple of 256-bits).
> - */
> - o_len = ((rsa->key_size + 255) / 256) * 32;
> - i_len = o_len * 2;
> -
> - sb_count = o_len / CCP_SB_BYTES;
> -
> memset(&op, 0, sizeof(op));
> op.cmd_q = cmd_q;
> - op.jobid = ccp_gen_jobid(cmd_q->ccp);
> - op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
> + op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

This change isn't related to RSA support, should be a separate patch.

>
> - if (!op.sb_key)
> - return -EIO;
> + /* Compute o_len, i_len in bytes. */
> + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
> + /* The RSA modulus must precede the message being acted upon, so
> + * it must be copied to a DMA area where the message and the
> + * modulus can be concatenated. Therefore the input buffer
> + * length required is twice the output buffer length (which
> + * must be a multiple of 256-bits). sb_count is the
> + * number of storage block slots required for the modulus
> + */
> + key_size_bytes = (rsa->key_size + 7) >> 3;
> + o_len = ((rsa->key_size + 255) / 256) * CCP_SB_BYTES;

This calculation shouldn't change the "32" to CCP_SB_BYTES. This is
purely to get the 256-bit alignment.

> + i_len = key_size_bytes * 2;

This violates the comment above, key_size_bytes is byte aligned vs the
256-bit/8-byte alignment required. i_len should stay as o_len * 2.
Should key_size_bytes be moved down and set to o_len for this path?

> +
> + sb_count = o_len / CCP_SB_BYTES;
> +
> + op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
> + sb_count);
> + if (!op.sb_key)
> + return -EIO;
> + } else {
> + /* A version 5 device allows a modulus size that will not fit
> + * in the LSB, so the command will transfer it from memory.
> + * But more importantly, the buffer sizes must be a multiple
> + * of 32 bytes; rounding up may be required.
> + */
> + key_size_bytes = 32 * ((rsa->key_size + 255) / 256);
> + o_len = key_size_bytes;
> + i_len = o_len * 2; /* bytes */

Ok, so this is exactly what the previous code was doing... 32 byte (or
256-bit) alignment. So the only thing that is needed for the V3 vs V5
difference is how the key is handled. The o_len and i_len calculations
can be left as is and then key_size_bytes is no longer needed.

> + op.sb_key = cmd_q->sb_key;
> + }
>
> - /* The RSA exponent may span multiple (32-byte) SB entries and must
> - * be in little endian format. Reverse copy each 32-byte chunk
> - * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
> - * and each byte within that chunk and do not perform any byte swap
> - * operations on the passthru operation.
> - */

This comment (or part of it) should stay. The general concept and action
is still being done in the code below (ccp_init_dm_workarea() and
ccp_reverse_set_dm_area()). The only difference between V3 and V5 is
that you don't have to move it to an SB for V5.

> ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
> if (ret)
> goto e_sb;
> @@ -1775,11 +1785,23 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
> if (ret)
> goto e_exp;
> - ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
> - CCP_PASSTHRU_BYTESWAP_NOOP);
> - if (ret) {
> - cmd->engine_error = cmd_q->cmd_error;
> - goto e_exp;
> +
> + if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {

CCP_VERSION(5, 0) ?

> + /* The RSA exponent may span multiple (32-byte) KSB entries and
> + * must be in little endian format. Reverse copy each 32-byte
> + * chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
> + * E1 chunk) and each byte within that chunk and do not perform
> + * any byte swap operations on the passthru operation.
> + */

Change this to say the exponent is being copied to an SB

> + ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
> + CCP_PASSTHRU_BYTESWAP_NOOP);
> + if (ret) {
> + cmd->engine_error = cmd_q->cmd_error;
> + goto e_exp;
> + }
> + } else {

Add a comment here saying the exponent can be DMA'd directly.

> + op.exp.u.dma.address = exp.dma.address;
> + op.exp.u.dma.offset = 0;
> }
>
> /* Concatenate the modulus and the message. Both the modulus and
> @@ -1793,13 +1815,13 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
> if (ret)
> goto e_src;
> - ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
> + ret = ccp_reverse_set_dm_area(&src, key_size_bytes, rsa->src, 0,
> + rsa->src_len);
> if (ret)
> goto e_src;
>
> /* Prepare the output area for the operation */
> - ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
> - o_len, DMA_FROM_DEVICE);
> + ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
> if (ret)
> goto e_src;
>
> @@ -1807,9 +1829,9 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> op.src.u.dma.address = src.dma.address;
> op.src.u.dma.offset = 0;
> op.src.u.dma.length = i_len;
> - op.dst.u.dma.address = dst.dm_wa.dma.address;
> + op.dst.u.dma.address = dst.dma.address;
> op.dst.u.dma.offset = 0;
> - op.dst.u.dma.length = o_len;
> + op.dst.u.dma.length = key_size_bytes;

So this changes the dst DMA length for a V3 CCP from a 256 bit aligned
length to a byte aligned length. But based on above comments I think
this will be reverted anyway.

Thanks,
Tom

>
> op.u.rsa.mod_size = rsa->key_size;
> op.u.rsa.input_len = i_len;
> @@ -1820,10 +1842,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> goto e_dst;
> }
>
> - ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
> + ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
>
> e_dst:
> - ccp_free_data(&dst, cmd_q);
> + ccp_dm_free(&dst);
>
> e_src:
> ccp_dm_free(&src);
> @@ -1832,7 +1854,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> ccp_dm_free(&exp);
>
> e_sb:
> - cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
> + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
> + cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
>
> return ret;
> }
>

2017-06-22 16:17:06

by Tom Lendacky

[permalink] [raw]
Subject: Re: [PATCH 3/4] crypto: ccp - Add support for RSA on the CCP

On 6/21/2017 5:48 PM, Gary R Hook wrote:
> Wire up the v3 CCP as a cipher provider.

The V5 support will be invoked through this also. Maybe something like:

Wire up the CCP as an RSA cipher provider.

>
> Signed-off-by: Gary R Hook <[email protected]>
> ---
> drivers/crypto/ccp/Makefile | 1
> drivers/crypto/ccp/ccp-crypto-main.c | 21 ++
> drivers/crypto/ccp/ccp-crypto-rsa.c | 286 ++++++++++++++++++++++++++++++++++
> drivers/crypto/ccp/ccp-crypto.h | 31 ++++
> drivers/crypto/ccp/ccp-debugfs.c | 1
> drivers/crypto/ccp/ccp-dev.c | 1
> drivers/crypto/ccp/ccp-ops.c | 2
> include/linux/ccp.h | 1
> 8 files changed, 341 insertions(+), 3 deletions(-)
> create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
>
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 59493fd3a751..439bc2fcb464 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
> ccp-crypto-aes-xts.o \
> ccp-crypto-aes-galois.o \
> ccp-crypto-des3.o \
> + ccp-crypto-rsa.o \
> ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
> index 8dccbddabef1..dd7d00c680e7 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -17,6 +17,7 @@
> #include <linux/ccp.h>
> #include <linux/scatterlist.h>
> #include <crypto/internal/hash.h>
> +#include <crypto/internal/akcipher.h>
>
> #include "ccp-crypto.h"
>
> @@ -37,10 +38,15 @@
> module_param(des3_disable, uint, 0444);
> MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
>
> +static unsigned int rsa_disable;
> +module_param(rsa_disable, uint, 0444);
> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
> +
> /* List heads for the supported algorithms */
> static LIST_HEAD(hash_algs);
> static LIST_HEAD(cipher_algs);
> static LIST_HEAD(aead_algs);
> +static LIST_HEAD(akcipher_algs);
>
> /* For any tfm, requests for that tfm must be returned on the order
> * received. With multiple queues available, the CCP can process more
> @@ -358,6 +364,14 @@ static int ccp_register_algs(void)
> return ret;
> }
>
> + if (!rsa_disable) {
> + ret = ccp_register_rsa_algs(&akcipher_algs);
> + if (ret) {
> + rsa_disable = 1;

Not sure what this does... The return of the error code will cause the
init to fail and unregister everything. This path won't be taken again
to make use of the change in value.

> + return ret;
> + }
> + }
> +
> return 0;
> }
>
> @@ -366,6 +380,7 @@ static void ccp_unregister_algs(void)
> struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
> struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
> struct ccp_crypto_aead *aead_alg, *aead_tmp;
> + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
>
> list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
> crypto_unregister_ahash(&ahash_alg->alg);
> @@ -384,6 +399,12 @@ static void ccp_unregister_algs(void)
> list_del(&aead_alg->entry);
> kfree(aead_alg);
> }
> +
> + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
> + crypto_unregister_akcipher(&akc_alg->alg);
> + list_del(&akc_alg->entry);
> + kfree(akc_alg);
> + }
> }
>
> static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
> new file mode 100644
> index 000000000000..4a2a71463594
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -0,0 +1,286 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
> + *
> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Gary R Hook <[email protected]>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +#include <linux/scatterlist.h>
> +#include <linux/crypto.h>
> +#include <crypto/algapi.h>
> +#include <crypto/internal/rsa.h>
> +#include <crypto/internal/akcipher.h>
> +#include <crypto/akcipher.h>
> +#include <crypto/scatterwalk.h>
> +
> +#include "ccp-crypto.h"
> +
> +static inline struct akcipher_request *akcipher_request_cast(
> + struct crypto_async_request *req)
> +{
> + return container_of(req, struct akcipher_request, base);
> +}
> +
> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
> +{
> + struct akcipher_request *req = akcipher_request_cast(async_req);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +
> + if (!ret)
> + req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
> +
> + ret = 0;
> +
> + return ret;

This seems odd. You should probably make this similar to the other CCP
complete functions:

if (ret)
return ret;

req->dst_len = ...

return 0;

> +}
> +
> +static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> +{
> + return CCP_RSA_MAXMOD;
> +}
> +
> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> +{
> + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> + int ret = 0;
> +
> + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> + INIT_LIST_HEAD(&rctx->cmd.entry);
> + rctx->cmd.engine = CCP_ENGINE_RSA;
> +
> + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
> + if (encrypt) {
> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
> + } else {
> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
> + }
> + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
> + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
> + rctx->cmd.u.rsa.src = req->src;
> + rctx->cmd.u.rsa.src_len = req->src_len;
> + rctx->cmd.u.rsa.dst = req->dst;
> +
> + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
> +
> + return ret;
> +}
> +
> +static int ccp_rsa_encrypt(struct akcipher_request *req)
> +{
> + return ccp_rsa_crypt(req, true);
> +}
> +
> +static int ccp_rsa_decrypt(struct akcipher_request *req)
> +{
> + return ccp_rsa_crypt(req, false);
> +}
> +
> +static int ccp_check_key_length(unsigned int len)
> +{
> + /* In bits */
> + if (len < 8 || len > 4096)
> + return -EINVAL;
> + return 0;
> +}
> +
> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
> +{
> + /* Clean up old key data */
> + kfree(ctx->u.rsa.e_buf);

I don't see where this is ever set.

> + ctx->u.rsa.e_buf = NULL;
> + ctx->u.rsa.e_len = 0;
> + kfree(ctx->u.rsa.n_buf);

I don't see where this is ever set.

> + ctx->u.rsa.n_buf = NULL;
> + ctx->u.rsa.n_len = 0;
> + kfree(ctx->u.rsa.d_buf);

I don't see where this is ever set.

> + ctx->u.rsa.d_buf = NULL;
> + ctx->u.rsa.d_len = 0;
> +}
> +
> +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
> + unsigned int keylen, bool private)
> +{
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> + struct rsa_key raw_key;
> + int key_len, i;
> + int ret;
> +
> + ccp_rsa_free_key_bufs(ctx);
> + memset(&raw_key, 0, sizeof(raw_key));
> +
> + /* Code borrowed from crypto/rsa.c */
> + if (private)
> + ret = rsa_parse_priv_key(&raw_key, key, keylen);
> + else
> + ret = rsa_parse_pub_key(&raw_key, key, keylen);
> + if (ret)
> + goto e_key;
> +
> + /* Remove leading zeroes from the modulus (n) */
> + key_len = 0;
> + for (i = 0; i < raw_key.n_sz; i++)
> + if (raw_key.n[i]) {
> + key_len = raw_key.n_sz - i;
> + break;
> + }
> + ctx->u.rsa.key_len = key_len << 3; /* bits */
> + if (ccp_check_key_length(ctx->u.rsa.key_len)) {
> + ret = -EINVAL;
> + goto e_key;
> + }
> + ctx->u.rsa.n_len = key_len;
> + sg_init_one(&ctx->u.rsa.n_sg, raw_key.n + i, key_len);

Shouldn't this be where u.rsa.n_buf is allocated and then the key
copied to that buf. Then the sg_init_one would be performed against
the allocated buffer.

> +
> + /* Remove leading zeroes from the public key (e) */
> + key_len = 0;
> + for (i = 0; i < raw_key.e_sz; i++)
> + if (raw_key.e[i]) {
> + key_len = raw_key.e_sz - i;
> + break;
> + }
> + ctx->u.rsa.e_len = key_len;
> + sg_init_one(&ctx->u.rsa.e_sg, raw_key.e + i, key_len);

Ditto.

> +
> + if (private) {
> + /* Remove leading zeroes from the private key (d) */
> + key_len = 0;
> + for (i = 0; i < raw_key.d_sz; i++)
> + if (raw_key.d[i]) {
> + key_len = raw_key.d_sz - i;
> + break;
> + }
> + ctx->u.rsa.d_len = key_len;
> + sg_init_one(&ctx->u.rsa.d_sg, raw_key.d + i, key_len);

Ditto.

> + }
> +
> + return 0;
> +
> +e_key:
> + return ret;
> +}
> +
> +static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
> + unsigned int keylen)
> +{
> + return ccp_rsa_setkey(tfm, key, keylen, true);
> +}
> +
> +static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
> + unsigned int keylen)
> +{
> + return ccp_rsa_setkey(tfm, key, keylen, false);
> +}
> +
> +static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
> +{
> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +
> + akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
> + ctx->complete = ccp_rsa_complete;
> +
> + return 0;
> +}
> +
> +static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
> +{
> + struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
> +
> + ccp_rsa_free_key_bufs(ctx);
> +}
> +
> +static struct akcipher_alg ccp_rsa_defaults = {
> + .encrypt = ccp_rsa_encrypt,
> + .decrypt = ccp_rsa_decrypt,
> + .sign = ccp_rsa_decrypt,
> + .verify = ccp_rsa_encrypt,
> + .set_pub_key = ccp_rsa_setpubkey,
> + .set_priv_key = ccp_rsa_setprivkey,
> + .max_size = ccp_rsa_maxsize,
> + .init = ccp_rsa_init_tfm,
> + .exit = ccp_rsa_exit_tfm,
> + .base = {
> + .cra_name = "rsa",
> + .cra_driver_name = "rsa-ccp",
> + .cra_priority = CCP_CRA_PRIORITY,
> + .cra_module = THIS_MODULE,
> + .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
> + },
> +};
> +
> +struct ccp_rsa_def {
> + unsigned int version;
> + const char *name;
> + const char *driver_name;
> + unsigned int reqsize;
> + struct akcipher_alg *alg_defaults;
> +};
> +
> +static struct ccp_rsa_def rsa_algs[] = {
> + {
> + .version = CCP_VERSION(3, 0),
> + .name = "rsa",
> + .driver_name = "rsa-ccp",
> + .reqsize = sizeof(struct ccp_rsa_req_ctx),
> + .alg_defaults = &ccp_rsa_defaults,
> + }
> +};
> +
> +int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
> +{
> + struct ccp_crypto_akcipher_alg *ccp_alg;
> + struct akcipher_alg *alg;
> + int ret;
> +
> + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
> + if (!ccp_alg)
> + return -ENOMEM;
> +
> + INIT_LIST_HEAD(&ccp_alg->entry);
> +
> + alg = &ccp_alg->alg;
> + *alg = *def->alg_defaults;
> + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
> + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
> + def->driver_name);
> + ret = crypto_register_akcipher(alg);
> + if (ret) {
> + pr_err("%s akcipher algorithm registration error (%d)\n",
> + alg->base.cra_name, ret);
> + kfree(ccp_alg);
> + return ret;
> + }
> +
> + list_add(&ccp_alg->entry, head);
> +
> + return 0;
> +}
> +
> +int ccp_register_rsa_algs(struct list_head *head)
> +{
> + int i, ret;
> + unsigned int ccpversion = ccp_version();
> +
> + /* Register the RSA algorithm in standard mode
> + * This works for CCP v3 and later
> + */
> + for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
> + if (rsa_algs[i].version > ccpversion)
> + continue;
> + ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index dd5bf15f06e5..5d592ecc9af5 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -24,6 +24,8 @@
> #include <crypto/ctr.h>
> #include <crypto/hash.h>
> #include <crypto/sha.h>
> +#include <crypto/akcipher.h>
> +#include <crypto/internal/rsa.h>
>
> #define CCP_LOG_LEVEL KERN_INFO
>
> @@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg {
> struct ahash_alg alg;
> };
>
> +struct ccp_crypto_akcipher_alg {
> + struct list_head entry;
> +
> + struct akcipher_alg alg;
> +};
> +
> static inline struct ccp_crypto_ablkcipher_alg *
> ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
> {
> @@ -227,12 +235,34 @@ struct ccp_sha_exp_ctx {
> u8 buf[MAX_SHA_BLOCK_SIZE];
> };
>
> +/***** RSA related defines *****/
> +
> +struct ccp_rsa_ctx {
> + unsigned int key_len; /* in bits */
> + struct scatterlist e_sg;
> + u8 *e_buf;
> + unsigned int e_len;
> + struct scatterlist n_sg;
> + u8 *n_buf;
> + unsigned int n_len;
> + struct scatterlist d_sg;
> + u8 *d_buf;
> + unsigned int d_len;
> +};
> +
> +struct ccp_rsa_req_ctx {
> + struct ccp_cmd cmd;
> +};
> +
> +#define CCP_RSA_MAXMOD (4 * 1024 / 8)
> +
> /***** Common Context Structure *****/
> struct ccp_ctx {
> int (*complete)(struct crypto_async_request *req, int ret);
>
> union {
> struct ccp_aes_ctx aes;
> + struct ccp_rsa_ctx rsa;
> struct ccp_sha_ctx sha;
> struct ccp_des3_ctx des3;
> } u;
> @@ -249,5 +279,6 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
> int ccp_register_aes_aeads(struct list_head *head);
> int ccp_register_sha_algs(struct list_head *head);
> int ccp_register_des3_algs(struct list_head *head);
> +int ccp_register_rsa_algs(struct list_head *head);
>
> #endif
> diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
> index 99aba1622613..88191c45ca7d 100644
> --- a/drivers/crypto/ccp/ccp-debugfs.c
> +++ b/drivers/crypto/ccp/ccp-debugfs.c
> @@ -291,7 +291,6 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
> struct dentry *debugfs_q_instance;
> struct dentry *debugfs_q_stats;
> unsigned long flags;
> - int rc = 0;

Should be a separate patch.

> int i;
>
> if (!debugfs_initialized())
> diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
> index 2506b5025700..67cbb3e76888 100644
> --- a/drivers/crypto/ccp/ccp-dev.c
> +++ b/drivers/crypto/ccp/ccp-dev.c
> @@ -415,6 +415,7 @@ static void ccp_do_cmd_complete(unsigned long data)
> struct ccp_cmd *cmd = tdata->cmd;
>
> cmd->callback(cmd->data, cmd->ret);
> +

Should be a separate patch.

> complete(&tdata->completion);
> }
>
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 11155e52c52c..2cdd15a92178 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1786,7 +1786,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> if (ret)
> goto e_exp;
>
> - if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
> + if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {

Should be fixed based on the comment in the previous patch.

> /* The RSA exponent may span multiple (32-byte) KSB entries and
> * must be in little endian format. Reverse copy each 32-byte
> * chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index 3285c944194a..c03ee844a99d 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -20,7 +20,6 @@
> #include <crypto/aes.h>
> #include <crypto/sha.h>
>
> -

Should be a separate patch.

Thanks,
Tom

> struct ccp_device;
> struct ccp_cmd;
>
>

2017-06-22 16:37:46

by Tom Lendacky

[permalink] [raw]
Subject: Re: [PATCH 4/4] crypto: ccp - Expand RSA support for a v5 ccp

On 6/21/2017 5:48 PM, Gary R Hook wrote:
> A V5 device can accommodate larger keys, as well as read the keys
> directly from memory instead of requiring them to be in a local
> storage block.

The previous patch already reads them from memory so just the first
part of this sentence is needed.

>
>
> Signed-off-by: Gary R Hook <[email protected]>
> ---
> drivers/crypto/ccp/ccp-crypto-rsa.c | 5 ++++-
> drivers/crypto/ccp/ccp-crypto.h | 1 +
> drivers/crypto/ccp/ccp-dev-v3.c | 1 +
> drivers/crypto/ccp/ccp-dev-v5.c | 2 ++
> drivers/crypto/ccp/ccp-dev.h | 2 ++
> drivers/crypto/ccp/ccp-ops.c | 3 ++-
> 6 files changed, 12 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
> index 4a2a71463594..93e6b00ce34d 100644
> --- a/drivers/crypto/ccp/ccp-crypto-rsa.c
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -43,7 +43,10 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
>
> static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> {
> - return CCP_RSA_MAXMOD;
> + if (ccp_version() > CCP_VERSION(3, 0))
> + return CCP5_RSA_MAXMOD;
> + else
> + return CCP_RSA_MAXMOD;

The ccp_check_key_length() function in this file has a hardcoded 4096
that should be changed to use vdata value.

Thanks,
Tom

> }
>
> static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index 5d592ecc9af5..40598894113b 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -255,6 +255,7 @@ struct ccp_rsa_req_ctx {
> };
>
> #define CCP_RSA_MAXMOD (4 * 1024 / 8)
> +#define CCP5_RSA_MAXMOD (16 * 1024 / 8)
>
> /***** Common Context Structure *****/
> struct ccp_ctx {
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 367c2e30656f..9b159b0a891e 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -592,4 +592,5 @@ static void ccp_destroy(struct ccp_device *ccp)
> .perform = &ccp3_actions,
> .bar = 2,
> .offset = 0x20000,
> + .rsamax = CCP_RSA_MAX_WIDTH,
> };
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index 632518efd685..6043552322fd 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -1115,6 +1115,7 @@ static void ccp5other_config(struct ccp_device *ccp)
> .perform = &ccp5_actions,
> .bar = 2,
> .offset = 0x0,
> + .rsamax = CCP5_RSA_MAX_WIDTH,
> };
>
> const struct ccp_vdata ccpv5b = {
> @@ -1124,4 +1125,5 @@ static void ccp5other_config(struct ccp_device *ccp)
> .perform = &ccp5_actions,
> .bar = 2,
> .offset = 0x0,
> + .rsamax = CCP5_RSA_MAX_WIDTH,
> };
> diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
> index a70154ac7405..8242cf54d90f 100644
> --- a/drivers/crypto/ccp/ccp-dev.h
> +++ b/drivers/crypto/ccp/ccp-dev.h
> @@ -200,6 +200,7 @@
> #define CCP_SHA_SB_COUNT 1
>
> #define CCP_RSA_MAX_WIDTH 4096
> +#define CCP5_RSA_MAX_WIDTH 16384
>
> #define CCP_PASSTHRU_BLOCKSIZE 256
> #define CCP_PASSTHRU_MASKSIZE 32
> @@ -677,6 +678,7 @@ struct ccp_vdata {
> const struct ccp_actions *perform;
> const unsigned int bar;
> const unsigned int offset;
> + const unsigned int rsamax;
> };
>
> extern const struct ccp_vdata ccpv3;
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 2cdd15a92178..ea5e4ede1eed 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1737,7 +1737,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> unsigned int key_size_bytes;
> int ret;
>
> - if (rsa->key_size > CCP_RSA_MAX_WIDTH)
> + /* Check against the maximum allowable size, in bits */
> + if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
> return -EINVAL;
>
> if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
>

2017-06-22 17:09:36

by Gary R Hook

[permalink] [raw]
Subject: Re: [PATCH 3/4] crypto: ccp - Add support for RSA on the CCP

On 06/22/2017 12:15 AM, Stephan Müller wrote:
> Am Donnerstag, 22. Juni 2017, 00:48:01 CEST schrieb Gary R Hook:
>
> Hi Gary,

Thanks, Stephan. Good catch(es). I will re-work this, but it looks like
my changes should wait
until after the patch set posted by Brijesh (Introduce AMD Secure
Processor device).

Please ignore these for now.


>
>> Wire up the v3 CCP as a cipher provider.
>>
>> Signed-off-by: Gary R Hook <[email protected]>
>> ---
>> drivers/crypto/ccp/Makefile | 1
>> drivers/crypto/ccp/ccp-crypto-main.c | 21 ++
>> drivers/crypto/ccp/ccp-crypto-rsa.c | 286
>> ++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h |
>> 31 ++++
>> drivers/crypto/ccp/ccp-debugfs.c | 1
>> drivers/crypto/ccp/ccp-dev.c | 1
>> drivers/crypto/ccp/ccp-ops.c | 2
>> include/linux/ccp.h | 1
>> 8 files changed, 341 insertions(+), 3 deletions(-)
>> create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
>>
>> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
>> index 59493fd3a751..439bc2fcb464 100644
>> --- a/drivers/crypto/ccp/Makefile
>> +++ b/drivers/crypto/ccp/Makefile
>> @@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>> ccp-crypto-aes-xts.o \
>> ccp-crypto-aes-galois.o \
>> ccp-crypto-des3.o \
>> + ccp-crypto-rsa.o \
>> ccp-crypto-sha.o
>> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c
>> b/drivers/crypto/ccp/ccp-crypto-main.c index 8dccbddabef1..dd7d00c680e7
>> 100644
>> --- a/drivers/crypto/ccp/ccp-crypto-main.c
>> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
>> @@ -17,6 +17,7 @@
>> #include <linux/ccp.h>
>> #include <linux/scatterlist.h>
>> #include <crypto/internal/hash.h>
>> +#include <crypto/internal/akcipher.h>
>>
>> #include "ccp-crypto.h"
>>
>> @@ -37,10 +38,15 @@
>> module_param(des3_disable, uint, 0444);
>> MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
>>
>> +static unsigned int rsa_disable;
>> +module_param(rsa_disable, uint, 0444);
>> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
>> +
>> /* List heads for the supported algorithms */
>> static LIST_HEAD(hash_algs);
>> static LIST_HEAD(cipher_algs);
>> static LIST_HEAD(aead_algs);
>> +static LIST_HEAD(akcipher_algs);
>>
>> /* For any tfm, requests for that tfm must be returned on the order
>> * received. With multiple queues available, the CCP can process more
>> @@ -358,6 +364,14 @@ static int ccp_register_algs(void)
>> return ret;
>> }
>>
>> + if (!rsa_disable) {
>> + ret = ccp_register_rsa_algs(&akcipher_algs);
>> + if (ret) {
>> + rsa_disable = 1;
>> + return ret;
>> + }
>> + }
>> +
>> return 0;
>> }
>>
>> @@ -366,6 +380,7 @@ static void ccp_unregister_algs(void)
>> struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
>> struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
>> struct ccp_crypto_aead *aead_alg, *aead_tmp;
>> + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
>>
>> list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
>> crypto_unregister_ahash(&ahash_alg->alg);
>> @@ -384,6 +399,12 @@ static void ccp_unregister_algs(void)
>> list_del(&aead_alg->entry);
>> kfree(aead_alg);
>> }
>> +
>> + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
>> + crypto_unregister_akcipher(&akc_alg->alg);
>> + list_del(&akc_alg->entry);
>> + kfree(akc_alg);
>> + }
>> }
>>
>> static int ccp_crypto_init(void)
>> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c
>> b/drivers/crypto/ccp/ccp-crypto-rsa.c new file mode 100644
>> index 000000000000..4a2a71463594
>> --- /dev/null
>> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
>> @@ -0,0 +1,286 @@
>> +/*
>> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
>> + *
>> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
>> + *
>> + * Author: Gary R Hook <[email protected]>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + */
>> +
>> +#include <linux/module.h>
>> +#include <linux/sched.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/crypto.h>
>> +#include <crypto/algapi.h>
>> +#include <crypto/internal/rsa.h>
>> +#include <crypto/internal/akcipher.h>
>> +#include <crypto/akcipher.h>
>> +#include <crypto/scatterwalk.h>
>> +
>> +#include "ccp-crypto.h"
>> +
>> +static inline struct akcipher_request *akcipher_request_cast(
>> + struct crypto_async_request *req)
>> +{
>> + return container_of(req, struct akcipher_request, base);
>> +}
>> +
>> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int
>> ret) +{
>> + struct akcipher_request *req = akcipher_request_cast(async_req);
>> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
>> +
>> + if (!ret)
>> + req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
>> +
>> + ret = 0;
>> +
>> + return ret;
>> +}
>> +
>> +static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
>> +{
>> + return CCP_RSA_MAXMOD;
>> +}
>> +
>> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
>> +{
>> + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
>> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
>> + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
>> + int ret = 0;
>> +
>> + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
>> + INIT_LIST_HEAD(&rctx->cmd.entry);
>> + rctx->cmd.engine = CCP_ENGINE_RSA;
>> +
>> + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
>> + if (encrypt) {
>> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
>> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
>> + } else {
>> + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
>> + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
>> + }
>> + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
>> + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
>> + rctx->cmd.u.rsa.src = req->src;
>> + rctx->cmd.u.rsa.src_len = req->src_len;
>> + rctx->cmd.u.rsa.dst = req->dst;
>> +
>> + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
>> +
>> + return ret;
>> +}
>> +
>> +static int ccp_rsa_encrypt(struct akcipher_request *req)
>> +{
>> + return ccp_rsa_crypt(req, true);
>> +}
>> +
>> +static int ccp_rsa_decrypt(struct akcipher_request *req)
>> +{
>> + return ccp_rsa_crypt(req, false);
>> +}
>> +
>> +static int ccp_check_key_length(unsigned int len)
>> +{
>> + /* In bits */
>> + if (len < 8 || len > 4096)
>> + return -EINVAL;
>> + return 0;
>> +}
>> +
>> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
>> +{
>> + /* Clean up old key data */
>> + kfree(ctx->u.rsa.e_buf);
>> + ctx->u.rsa.e_buf = NULL;
>> + ctx->u.rsa.e_len = 0;
>> + kfree(ctx->u.rsa.n_buf);
>> + ctx->u.rsa.n_buf = NULL;
>> + ctx->u.rsa.n_len = 0;
>> + kfree(ctx->u.rsa.d_buf);
>
> kzfree, please
>
>> + ctx->u.rsa.d_buf = NULL;
>> + ctx->u.rsa.d_len = 0;
>> +}
>> +
>> +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
>> + unsigned int keylen, bool private)
>> +{
>> + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
>> + struct rsa_key raw_key;
>> + int key_len, i;
>> + int ret;
>> +
>> + ccp_rsa_free_key_bufs(ctx);
>> + memset(&raw_key, 0, sizeof(raw_key));
>> +
>> + /* Code borrowed from crypto/rsa.c */
>> + if (private)
>> + ret = rsa_parse_priv_key(&raw_key, key, keylen);
>> + else
>> + ret = rsa_parse_pub_key(&raw_key, key, keylen);
>> + if (ret)
>> + goto e_key;
>> +
>> + /* Remove leading zeroes from the modulus (n) */
>
> Three fragments doing the same -- isn't an inline cleaner here?
>
>> + key_len = 0;
>> + for (i = 0; i < raw_key.n_sz; i++)
>> + if (raw_key.n[i]) {
>> + key_len = raw_key.n_sz - i;
>> + break;
>> + }
>> + ctx->u.rsa.key_len = key_len << 3; /* bits */
>> + if (ccp_check_key_length(ctx->u.rsa.key_len)) {
>> + ret = -EINVAL;
>> + goto e_key;
>> + }
>> + ctx->u.rsa.n_len = key_len;
>> + sg_init_one(&ctx->u.rsa.n_sg, raw_key.n + i, key_len);
>> +
>> + /* Remove leading zeroes from the public key (e) */
>> + key_len = 0;
>> + for (i = 0; i < raw_key.e_sz; i++)
>> + if (raw_key.e[i]) {
>> + key_len = raw_key.e_sz - i;
>> + break;
>> + }
>> + ctx->u.rsa.e_len = key_len;
>> + sg_init_one(&ctx->u.rsa.e_sg, raw_key.e + i, key_len);
>> +
>> + if (private) {
>> + /* Remove leading zeroes from the private key (d) */
>> + key_len = 0;
>> + for (i = 0; i < raw_key.d_sz; i++)
>> + if (raw_key.d[i]) {
>> + key_len = raw_key.d_sz - i;
>> + break;
>> + }
>> + ctx->u.rsa.d_len = key_len;
>> + sg_init_one(&ctx->u.rsa.d_sg, raw_key.d + i, key_len);
>
> As I see no memcpy for the key components, how is it ensured that the
> caller's
> memory holding the key will stay alive after a setkey call? Further,
> wouldn't
> the ccp_rsa_free_key_bufs function cause a double free as it would act on
> user-provided memory the user may also try to free?
>
> Ciao
> Stephan