A set of new features, mostly support for CryptoCell 713,
including protected keys, security disabled mode and a new HW
revision identification interface, alongside many bug fixes.
Gilad Ben-Yossef (30):
crypto: testmgr: add missing self test entries for protected keys
crypto: ccree: move key load desc. before flow desc.
crypto: ccree: move MLLI desc. before key load
crypto: ccree: add support for sec disabled mode
crypto: ccree: add CPP completion handling
crypto: ccree: add remaining logic for CPP
crypto: ccree: add SM4 protected keys support
crypto: ccree: adapt CPP descriptor to new HW
crypto: ccree: read next IV from HW
crypto: ccree: add CID and PID support
crypto: ccree: fix backlog notifications
crypto: ccree: use proper callback completion api
crypto: ccree: remove special handling of chained sg
crypto: ccree: fix typo in debugfs error path
crypto: ccree: fix mem leak on error path
crypto: ccree: use devm_kzalloc for device data
crypto: ccree: use std api when possible
crypto: ccree: copyright header update
crypto: ccree: zero out internal struct before use
crypto: ccree: do not copy zero size MLLI table
crypto: ccree: remove unused defines
crypto: ccree: simplify fragment ICV detection
crypto: ccree: simplify AEAD ICV addr calculation
crypto: ccree: don't mangle the request assoclen
crypto: ccree: make AEAD sgl iterator well behaved
crypto: ccree: zap entire sg on aead request unmap
crypto: ccree: use correct internal state sizes for export
crypto: ccree: allow more AEAD assoc data fragments
crypto: ccree: don't map MAC key on stack
crypto: ccree: don't map AEAD key and IV on stack
Ofir Drang (5):
crypto: ccree: pm resume first enable the source clk
crypto: ccree: remove cc7x3 obsoleted AXIM configs
crypto: ccree: HOST_POWER_DOWN_EN should be the last CC access during
suspend
crypto: ccree: add function to handle cryptocell tee fips error
crypto: ccree: handle tee fips error during power management resume
crypto/testmgr.c | 20 +
drivers/crypto/ccree/Makefile | 1 +
drivers/crypto/ccree/cc_aead.c | 81 +++-
drivers/crypto/ccree/cc_aead.h | 3 +-
drivers/crypto/ccree/cc_buffer_mgr.c | 341 ++++----------
drivers/crypto/ccree/cc_buffer_mgr.h | 2 +-
drivers/crypto/ccree/cc_cipher.c | 591 +++++++++++++++---------
drivers/crypto/ccree/cc_cipher.h | 3 +-
drivers/crypto/ccree/cc_crypto_ctx.h | 10 +-
drivers/crypto/ccree/cc_debugfs.c | 44 +-
drivers/crypto/ccree/cc_debugfs.h | 2 +-
drivers/crypto/ccree/cc_driver.c | 120 ++++-
drivers/crypto/ccree/cc_driver.h | 36 +-
drivers/crypto/ccree/cc_fips.c | 29 +-
drivers/crypto/ccree/cc_fips.h | 4 +-
drivers/crypto/ccree/cc_hash.c | 64 ++-
drivers/crypto/ccree/cc_hash.h | 2 +-
drivers/crypto/ccree/cc_host_regs.h | 123 ++++-
drivers/crypto/ccree/cc_hw_queue_defs.h | 35 +-
drivers/crypto/ccree/cc_ivgen.c | 11 +-
drivers/crypto/ccree/cc_ivgen.h | 2 +-
drivers/crypto/ccree/cc_kernel_regs.h | 2 +-
drivers/crypto/ccree/cc_lli_defs.h | 4 +-
drivers/crypto/ccree/cc_pm.c | 11 +-
drivers/crypto/ccree/cc_pm.h | 2 +-
drivers/crypto/ccree/cc_request_mgr.c | 116 +++--
drivers/crypto/ccree/cc_request_mgr.h | 2 +-
drivers/crypto/ccree/cc_sram_mgr.c | 7 +-
drivers/crypto/ccree/cc_sram_mgr.h | 2 +-
29 files changed, 1068 insertions(+), 602 deletions(-)
--
2.21.0
Mark SM4 and the missing AES algorithms using protected keys, which
are identical to the same algorithms with no HW protected keys, as
tested.
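For context, this is why no real test vectors are possible: a
protected key setkey() call receives a fixed-size token naming key
slots rather than any key material. A minimal sketch, with field
names assumed from the ccree driver's struct cc_hkey_info:

        struct cc_hkey_info {
                u16 keylen;     /* length of the key held in the slot */
                u8 hw_key1;     /* slot index of the first key */
                u8 hw_key2;     /* slot index of the second key, if any */
        } __packed;

        /* the keylen passed to setkey() is the token size */
        crypto_skcipher_setkey(tfm, (const u8 *)&hki, sizeof(hki));

Since the kernel never sees the key bits, the test manager cannot run
known-answer vectors, hence alg_test_null.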
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
crypto/testmgr.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 0f6bfb6ce6a4..d04f75d84fc0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3012,6 +3012,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "cbc(paes)",
.test = alg_test_null,
.fips_allowed = 1,
+ }, {
+ /* Same as cbc(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cbc(psm4)",
+ .test = alg_test_null,
}, {
.alg = "cbc(serpent)",
.test = alg_test_skcipher,
@@ -3147,6 +3153,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+
+ /* Same as ctr(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "ctr(psm4)",
+ .test = alg_test_null,
+ }, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -3171,6 +3184,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(cts_mode_tv_template)
}
+ }, {
+ /* Same as cts(cbc(aes)) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cts(cbc(paes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
}, {
.alg = "deflate",
.test = alg_test_comp,
--
2.21.0
Refactor the descriptor setup code in order to move the key loading
descriptor to the one-before-last position. This has no effect on
current functionality but is needed for later support of Content
Protection Policy (CPP) keys.
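The resulting build order in cc_cipher_process() is sketched below,
using the driver's own helpers as split up by this patch:

        /* state (IV / XEX key) first, key load one before last,
         * data processing last
         */
        cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
        cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
        cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
                             &seq_len);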
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 107 +++++++++++++++++++++----------
1 file changed, 73 insertions(+), 34 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 0abcdc224ab0..dcab96861d6f 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -399,7 +399,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return 0;
}
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
@@ -423,11 +423,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
du_size = cc_alg->data_unit;
switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
- /* Load cipher state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
NS_BIT);
@@ -441,7 +443,71 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
- /*FALLTHROUGH*/
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* load XEX key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key2_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr + (key_len / 2)),
+ (key_len / 2), NS_BIT);
+ }
+ set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ (*seq_size)++;
+
+ /* Load IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
case DRV_CIPHER_ECB:
/* Load key */
hw_desc_init(&desc[*seq_size]);
@@ -486,35 +552,6 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_flow_mode(&desc[*seq_size], flow_mode);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
(*seq_size)++;
-
- /* load XEX key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key2_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI,
- (key_dma_addr + (key_len / 2)),
- (key_len / 2), NS_BIT);
- }
- set_xex_data_unit_size(&desc[*seq_size], du_size);
- set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
- (*seq_size)++;
-
- /* Set state */
- hw_desc_init(&desc[*seq_size]);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
- CC_AES_BLOCK_SIZE, NS_BIT);
- (*seq_size)++;
break;
default:
dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
@@ -755,8 +792,10 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup processing */
- cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup IV and XEX key used */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
&seq_len);
--
2.21.0
Refactor to move the descriptor that copies the MLLI table to SRAM
to before the key loading descriptor, in preparation for the
introduction of CPP later on.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 58 +++++++++++++++++++-------------
1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index dcab96861d6f..e4398de3aa39 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -558,12 +558,38 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
}
}
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
- struct cipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void *areq, struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ /* bypass */
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
@@ -600,21 +626,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
} else {
- /* bypass */
- dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
- &req_ctx->mlli_params.mlli_dma_addr,
- req_ctx->mlli_params.mlli_len,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
- hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI,
- req_ctx->mlli_params.mlli_dma_addr,
- req_ctx->mlli_params.mlli_len, NS_BIT);
- set_dout_sram(&desc[*seq_size],
- ctx_p->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
- set_flow_mode(&desc[*seq_size], BYPASS);
- (*seq_size)++;
-
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_MLLI,
ctx_p->drvdata->mlli_sram_addr,
@@ -794,11 +805,12 @@ static int cc_cipher_process(struct skcipher_request *req,
/* Setup IV and XEX key used */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
- &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
--
2.21.0
Add the logic needed to track and report CPP operation rejection.
The new logic will be used by the CPP feature introduced later.
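As a worked example of the reporting path added below: if a CPP
operation using, say, SM4 key slot 3 is rejected, the HW raises the
matching REE_OP_ABORTED_SM_3 bit in the IRR and proc_completions()
turns that into an -EPERM completion status:

        u32 mask = cc_cpp_int_mask(CC_CPP_SM4, 3);
        /* mask == BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT) */
        rc = (drvdata->irq & mask) ? -EPERM : 0;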
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_crypto_ctx.h | 8 ++
drivers/crypto/ccree/cc_driver.c | 25 +++++--
drivers/crypto/ccree/cc_driver.h | 28 +++++++
drivers/crypto/ccree/cc_host_regs.h | 75 ++++++++++++++++++-
drivers/crypto/ccree/cc_request_mgr.c | 103 +++++++++++++++++++-------
5 files changed, 200 insertions(+), 39 deletions(-)
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..97e56e9af01e 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -55,6 +55,14 @@
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
enum drv_engine_type {
DRV_ENGINE_NULL = 0,
DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 255d2367bfa8..1cded418f223 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -118,12 +118,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (irr & CC_COMP_IRQ_MASK) {
- /* Mask AXI completion interrupt - will be unmasked in
- * Deferred service handler
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
*/
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
- irr &= ~CC_COMP_IRQ_MASK;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -175,7 +175,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
@@ -235,6 +235,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
@@ -315,6 +317,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
+ new_drvdata->sec_disabled = cc_sec_disable;
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
@@ -328,10 +332,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
} else {
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
val &= CC_SECURITY_DISABLED_MASK;
- new_drvdata->sec_disabled = !!val;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
}
- new_drvdata->sec_disabled |= cc_sec_disable;
if (new_drvdata->sec_disabled)
dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b4273f1aec62..10439ce27b3b 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -76,6 +76,26 @@ enum cc_std_body {
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
@@ -97,6 +117,12 @@ enum cc_std_body {
* field in the HW descriptor. The DMA engine +8 that value.
*/
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
@@ -111,6 +137,7 @@ struct cc_crypto_req {
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
};
/**
@@ -144,6 +171,7 @@ struct cc_drvdata {
u32 ver_offset;
int std_bodies;
bool sec_disabled;
+ u32 comp_mask;
};
struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index c1be372a5bfe..6124b97680ac 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
+
+
+/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
-#define CC_HOST_IMR_REG_OFFSET 0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..88c97a580dd8 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
#include <linux/kernel.h>
+#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
bool notif;
};
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
+
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -579,6 +607,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +626,22 @@ static void proc_completions(struct cc_drvdata *drvdata)
cc_req = &request_mgr_handle->req_queue[*tail];
+ if (cc_req->cpp.is_cpp) {
+
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg, 0);
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +662,50 @@ static void comp_handler(unsigned long devarg)
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
-
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq;
- irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
- if (irq & CC_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it,
- * we clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
+ /* Avoid race with above clear: Test completion counter once more */
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- CC_COMP_IRQ_MASK);
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
+
/* after verifing that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
}
/*
--
2.21.0
Add the missing logic to set usage policy protection for keys.
This enables policy protected keys for AES.
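The slot number in the key token selects the key type. A sketch of
the mapping, using the slot ranges defined in this patch (HW keys in
slots 0-3, CPP keys in slots 16-23):

        cc_slot_to_key_type(2);         /* CC_HW_PROTECTED_KEY */
        cc_slot_to_key_type(17);        /* CC_POLICY_PROTECTED_KEY */
        cc_slot_to_cpp_key(17);         /* CPP slot 1 */
        cc_slot_to_key_type(9);         /* CC_INVALID_PROTECTED_KEY */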
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 186 +++++++++++++++++-------
drivers/crypto/ccree/cc_hw_queue_defs.h | 37 +++++
drivers/crypto/ccree/cc_kernel_regs.h | 6 +
3 files changed, 178 insertions(+), 51 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 8a9c664390f0..d1754d1156ee 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
int cipher_mode;
int flow_mode;
unsigned int flags;
- bool hw_key;
+ enum cc_key_type key_type;
struct cc_user_key_info user;
- struct cc_hw_key_info hw;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
struct crypto_shash *shash_tfm;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- return ctx_p->hw_key;
+ return ctx_p->key_type;
}
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {
case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
return END_OF_KEYS;
}
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
unsigned int keylen)
{
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
- /* This check the size of the hardware key token */
+ /* This checks the size of the protected key token */
if (keylen != sizeof(hki)) {
- dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
- return -EINVAL;
- }
-
memcpy(&hki, key, keylen);
/* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
- return -EINVAL;
- }
+ ctx_p->keylen = keylen;
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki.hw_key1 == hki.hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki.hw_key1, hki.hw_key2);
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
return -EINVAL;
}
- ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki.hw_key2);
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
return -EINVAL;
}
- }
- ctx_p->keylen = keylen;
- ctx_p->hw_key = true;
- dev_dbg(dev, "cc_is_hw_key ret 0");
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+ dev_dbg(dev, "HW protected key %d/%d set\n.",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+ else /* Must be SM4 due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protedcted key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
return 0;
}
@@ -338,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw_key = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
* Verify DES weak keys
@@ -451,7 +516,7 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -495,6 +560,7 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
unsigned int key_len = ctx_p->keylen;
unsigned int du_size = nbytes;
+ unsigned int din_size;
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
@@ -511,27 +577,38 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_ECB:
/* Load key */
hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (flow_mode == S_DIN_to_AES) {
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.alg,
+ cipher_mode, ctx_p->cpp.slot);
+ } else {
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ /* CC_UNPROTECTED_KEY
+ * Invalid keys are filtered out in
+ * sethkey()
+ */
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
} else {
+ /*des*/
set_din_type(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, ((key_len == 24) ?
- AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
}
- set_key_size_aes(&desc[*seq_size], key_len);
- } else {
- /*des*/
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[*seq_size], key_len);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
}
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
(*seq_size)++;
break;
case DRV_CIPHER_XTS:
@@ -541,7 +618,7 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
} else {
@@ -789,6 +866,13 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..2c8cd907d8db 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -53,6 +55,8 @@
#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
+#define WORD4_CPP_ALG CC_GENMASK(4, CPP_ALG)
+#define WORD4_CPP_SLOT CC_GENMASK(4, CPP_SLOT)
#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
@@ -176,6 +180,15 @@ enum cc_hw_crypto_key {
END_OF_KEYS = S32_MAX,
};
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -189,6 +202,8 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
+#define CC_CPP_DESC_INDICATOR 0xFF0000UL
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -248,6 +263,28 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
}
+/*
+ * Setup the special CPP descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @alg: cipher used (AES / SM4)
+ * @mode: mode used (CTR or CBC)
+ * @slot: slot number
+ * @ksize: key size
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc,
+ enum cc_cpp_alg alg,
+ enum drv_cipher_mode mode, u8 slot)
+{
+ u8 mode_val = (mode == DRV_CIPHER_CBC ? 0 : 1);
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DESC_INDICATOR);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+ pdesc->word[0] |= FIELD_PREP(WORD0_CPP_CIPHER_MODE, mode_val);
+ pdesc->word[4] |= FIELD_PREP(WORD4_CPP_ALG, alg);
+ pdesc->word[4] |= FIELD_PREP(WORD4_CPP_SLOT, slot);
+}
+
/*
* Set the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..f148d13c4b65 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -31,6 +31,8 @@
#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_CPP_CIPHER_MODE_BIT_SHIFT 0x5UL
+#define CC_DSCRPTR_QUEUE_WORD0_CPP_CIPHER_MODE_BIT_SIZE 0x3UL
#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
@@ -97,6 +99,10 @@
#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CPP_SLOT_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CPP_SLOT_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_QUEUE_WORD4_CPP_ALG_BIT_SHIFT 0xDUL
+#define CC_DSCRPTR_QUEUE_WORD4_CPP_ALG_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
--
2.21.0
Add the registration for the SM4-based policy protected key ciphers.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 36 ++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d1754d1156ee..8acedbafbcb3 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1578,6 +1578,42 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_713,
.std_body = CC_STD_OSCCA,
},
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
};
static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
--
2.21.0
Adapt the CPP descriptor to the new HW interface.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 42 +++++++++++++------------
drivers/crypto/ccree/cc_hw_queue_defs.h | 18 +++++------
drivers/crypto/ccree/cc_kernel_regs.h | 6 ----
3 files changed, 29 insertions(+), 37 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 8acedbafbcb3..4c7231d24631 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -546,6 +546,19 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
}
}
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return DIN_AES_DOUT;
+ case S_DIN_to_DES:
+ return DIN_DES_DOUT;
+ case S_DIN_to_SM4:
+ return DIN_SM4_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
static void cc_setup_key_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
@@ -577,12 +590,15 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_ECB:
/* Load key */
hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+
if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
- set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.alg,
- cipher_mode, ctx_p->cpp.slot);
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
} else {
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
if (flow_mode == S_DIN_to_AES) {
if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
@@ -606,9 +622,9 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
key_dma_addr, key_len, NS_BIT);
set_key_size_des(&desc[*seq_size], key_len);
}
- set_flow_mode(&desc[*seq_size], flow_mode);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
}
+ set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
break;
case DRV_CIPHER_XTS:
@@ -670,22 +686,8 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- unsigned int flow_mode = ctx_p->flow_mode;
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
- switch (ctx_p->flow_mode) {
- case S_DIN_to_AES:
- flow_mode = DIN_AES_DOUT;
- break;
- case S_DIN_to_DES:
- flow_mode = DIN_DES_DOUT;
- break;
- case S_DIN_to_SM4:
- flow_mode = DIN_SM4_DOUT;
- break;
- default:
- dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
- return;
- }
/* Process */
if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 2c8cd907d8db..fd693681808e 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -55,8 +55,6 @@
#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
-#define WORD4_CPP_ALG CC_GENMASK(4, CPP_ALG)
-#define WORD4_CPP_SLOT CC_GENMASK(4, CPP_SLOT)
#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
@@ -202,7 +200,8 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
-#define CC_CPP_DESC_INDICATOR 0xFF0000UL
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
/*****************************/
/* Descriptor packing macros */
@@ -272,17 +271,14 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
* @slot: slot number
* @ksize: key size
*/
-static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc,
- enum cc_cpp_alg alg,
- enum drv_cipher_mode mode, u8 slot)
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
{
- u8 mode_val = (mode == DRV_CIPHER_CBC ? 0 : 1);
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
- pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DESC_INDICATOR);
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
- pdesc->word[0] |= FIELD_PREP(WORD0_CPP_CIPHER_MODE, mode_val);
- pdesc->word[4] |= FIELD_PREP(WORD4_CPP_ALG, alg);
- pdesc->word[4] |= FIELD_PREP(WORD4_CPP_SLOT, slot);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
}
/*
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index f148d13c4b65..8d7262a35156 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -31,8 +31,6 @@
#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
-#define CC_DSCRPTR_QUEUE_WORD0_CPP_CIPHER_MODE_BIT_SHIFT 0x5UL
-#define CC_DSCRPTR_QUEUE_WORD0_CPP_CIPHER_MODE_BIT_SIZE 0x3UL
#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
@@ -99,10 +97,6 @@
#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
-#define CC_DSCRPTR_QUEUE_WORD4_CPP_SLOT_BIT_SHIFT 0xAUL
-#define CC_DSCRPTR_QUEUE_WORD4_CPP_SLOT_BIT_SIZE 0x3UL
-#define CC_DSCRPTR_QUEUE_WORD4_CPP_ALG_BIT_SHIFT 0xDUL
-#define CC_DSCRPTR_QUEUE_WORD4_CPP_ALG_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
--
2.21.0
We were computing the next IV in software instead of reading it from
the HW on the premise that this could be quicker due to the small
size of IVs, but this proved to be much more hassle- and bug-ridden
than expected.
Move to reading the next IV as computed by the HW.
This fixes a number of issues with the next IV being wrong for OFB,
CTS-CBC and probably most of the other ciphers as well.
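For reference, the skcipher API contract being fixed is that on
completion req->iv must hold the IV needed to continue the stream, so
that chained calls like the sketch below (error handling and async
completion elided) give the same result as a single call over the
concatenated data:

        skcipher_request_set_crypt(req, sg1, sg1, 2 * AES_BLOCK_SIZE, iv);
        crypto_skcipher_encrypt(req);   /* iv now holds the next IV */
        skcipher_request_set_crypt(req, sg2, sg2, AES_BLOCK_SIZE, iv);
        crypto_skcipher_encrypt(req);   /* continues the same stream */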
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_buffer_mgr.c | 4 +-
drivers/crypto/ccree/cc_cipher.c | 179 +++++++++++++--------------
drivers/crypto/ccree/cc_cipher.h | 1 -
3 files changed, 85 insertions(+), 99 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..adef3cfa1251 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -457,7 +457,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +499,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 4c7231d24631..15da3a35a6a1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -464,6 +464,76 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return 0;
}
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -681,12 +751,14 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
static void cc_setup_flow_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes, void *areq,
- struct cc_hw_desc desc[], unsigned int *seq_size)
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
/* Process */
if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
@@ -698,8 +770,8 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
nbytes, NS_BIT);
set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
- nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq)
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -716,7 +788,7 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -727,9 +799,9 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
(LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents)),
req_ctx->out_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
}
- if (areq)
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -737,38 +809,6 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
}
}
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- IS_ALIGNED((unsigned long)ctr, 8)) {
-
- __be64 *high_be = (__be64 *)ctr;
- __be64 *low_be = high_be + 1;
- u64 orig_low = __be64_to_cpu(*low_be);
- u64 new_low = orig_low + (u64)increment;
-
- *low_be = __cpu_to_be64(new_low);
-
- if (new_low < orig_low)
- *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
- } else {
- u8 *pos = (ctr + AES_BLOCK_SIZE);
- u8 val;
- unsigned int size;
-
- for (; increment; increment--)
- for (size = AES_BLOCK_SIZE; size; size--) {
- val = *--pos + 1;
- *pos = val;
- if (val)
- break;
- }
- }
-}
-
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -776,44 +816,11 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- unsigned int len;
cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
- switch (ctx_p->cipher_mode) {
- case DRV_CIPHER_CBC:
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req->iv, req->dst, len,
- ivsize, 0);
- }
- break;
-
- case DRV_CIPHER_CTR:
- /* Compute the counter of the last block */
- len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
- cc_update_ctr((u8 *)req->iv, len);
- break;
-
- default:
- break;
- }
-
+ memcpy(req->iv, req_ctx->iv, ivsize);
kzfree(req_ctx->iv);
-
skcipher_request_complete(req, err);
}
@@ -896,7 +903,9 @@ static int cc_cipher_process(struct skcipher_request *req,
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
@@ -911,7 +920,6 @@ static int cc_cipher_process(struct skcipher_request *req,
exit_process:
if (rc != -EINPROGRESS && rc != -EBUSY) {
- kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
}
@@ -929,31 +937,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- gfp_t flags = cc_gfp_flags(&req->base);
- unsigned int len;
memset(req_ctx, 0, sizeof(*req_ctx));
- if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
- (req->cryptlen >= ivsize)) {
-
- /* Allocate and save the last IV sized bytes of the source,
- * which will be lost in case of in-place decryption.
- */
- req_ctx->backup_info = kzalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
-
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
- ivsize, 0);
- }
-
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..312d67f88414 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
u32 in_mlli_nents;
u32 out_nents;
u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
struct mlli_params mlli_params;
};
--
2.21.0
The new HW uses the standard product and component ID registers,
replacing the old ad-hoc version and signature register schemes.
Update the driver to support the new HW ID registers.
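For illustration, this is roughly how one 32-bit ID word is composed out
of the four byte-wide ID registers (a minimal sketch with a hypothetical
helper name; the driver's actual helper is cc_read_idr() in the diff
below, and the expected composed values live in the cc_hw_data table):

	/* each ID register holds one significant byte, LSB first */
	static u32 cc_compose_id(struct cc_drvdata *drvdata, const u32 *offsets)
	{
		u32 id = 0;
		int i;

		for (i = 0; i < 4; i++)
			id |= (cc_ioread(drvdata, offsets[i]) & 0xff) << (i * 8);

		return id;
	}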
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_debugfs.c | 44 ++++++++++++++++---
drivers/crypto/ccree/cc_driver.c | 68 +++++++++++++++++++++++++----
drivers/crypto/ccree/cc_host_regs.h | 45 ++++++++++++++++++-
3 files changed, 140 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..989dd624f135 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
*/
static struct dentry *cc_debugfs_dir;
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "SIGNATURE" }, /* Must be 0th */
{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
- struct debugfs_regset32 *regset;
-
- debug_regs[0].offset = drvdata->sig_offset;
- debug_regs[1].offset = drvdata->ver_offset;
+ struct debugfs_regset32 *regset, *verset;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
debugfs_create_regset32("regs", 0400, ctx->dir, regset);
debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
- drvdata->debugfs = ctx;
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!regset)
+ goto out;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
+ }
+ verset->base = drvdata->cc_base;
+
+ debugfs_create_regset32("version", 0400, ctx->dir, verset);
+out:
+ drvdata->debugfs = ctx;
return 0;
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 1cded418f223..a28548192211 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,7 +30,6 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
@@ -43,18 +42,35 @@ struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
int std_bodies;
};
+#define CC_NUM_IDRS 4
+
+/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
/* Hardware revisions defs. */
/* The 703 is a OSCCA only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
- .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};
static const struct cc_hw_data cc713_hw = {
- .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc712_hw = {
@@ -82,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ u32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
+
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -205,7 +235,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 val;
+ u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -328,8 +358,29 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = -EINVAL;
goto post_clk_err;
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", val);
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
} else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+
+ /* Check security disable state */
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
val &= CC_SECURITY_DISABLED_MASK;
new_drvdata->sec_disabled |= !!val;
@@ -345,9 +396,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
/* Display HW versions */
- dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
- DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 6124b97680ac..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -204,6 +204,49 @@
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
+// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
--
2.21.0
Use the proper hash callback completion API instead of open-coding it.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_hash.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index e824ab60b59c..8f15ce3deecd 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -286,7 +286,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
cc_unmap_req(dev, state, ctx);
}
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
cc_unmap_req(dev, state, ctx);
}
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
cc_unmap_req(dev, state, ctx);
}
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
--
2.21.0
Fix a typo in the debugfs interface error path which could result in a
panic following a memory allocation failure.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_debugfs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 989dd624f135..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -89,7 +89,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
/* Failing here is not important enough to fail the module load */
- if (!regset)
+ if (!verset)
goto out;
if (drvdata->hw_rev <= CC_HW_REV_712) {
--
2.21.0
Fix a memory leak on the error path of the IV generation code.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_ivgen.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..1abec3896a78 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
-
- /* release "this" context */
- kfree(ivgen_ctx);
}
/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;
/* Allocate "this" context */
- ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;
+ drvdata->ivgen_handle = ivgen_ctx;
+
/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}
- drvdata->ivgen_handle = ivgen_ctx;
-
return cc_init_iv_sram(drvdata);
out:
--
2.21.0
Move some remaining device data allocations to the safer devm_*
interface.
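As a reminder of why devm_* is safer (a minimal sketch, not code from
this driver): the allocation is tied to the device lifetime, so the
matching free on the teardown and error paths simply goes away.

	/* before: manual lifetime management, must kfree() in fini/error paths */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	/* after: freed automatically when the device is unbound */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;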
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_fips.c | 4 +---
drivers/crypto/ccree/cc_sram_mgr.c | 5 ++---
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..9c03ab238a75 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);
-
- kfree(fips_h);
drvdata->fips_handle = NULL;
}
@@ -104,7 +102,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;
- fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..86319788e390 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- /* Free "this" context */
- kfree(drvdata->sram_mgr_handle);
+ /* Nothing needed */
}
/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
}
/* Allocate "this" context */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
--
2.21.0
Add support for the Security Disabled mode, under which only
pure cryptographic functionality is enabled and protected key
services are unavailable.
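The gist of the change, in sketch form (the full logic is in the diff
below): protected key algs are marked with a sec_func flag and skipped
at registration time when security is disabled.

	if (drvdata->sec_disabled && skcipher_algs[alg].sec_func)
		continue; /* no protected key algs in sec disabled mode */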
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_cipher.c | 17 ++++++++++++++++-
drivers/crypto/ccree/cc_driver.c | 22 +++++++++++++++++-----
drivers/crypto/ccree/cc_driver.h | 4 ++++
drivers/crypto/ccree/cc_host_regs.h | 3 +++
4 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index e4398de3aa39..8a9c664390f0 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -889,6 +889,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts512(paes)",
@@ -907,6 +908,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts4096(paes)",
@@ -925,6 +927,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv(paes)",
@@ -942,6 +945,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv512(paes)",
@@ -960,6 +964,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv4096(paes)",
@@ -978,6 +983,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker(paes)",
@@ -995,6 +1001,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker512(paes)",
@@ -1013,6 +1020,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker4096(paes)",
@@ -1031,6 +1039,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ecb(paes)",
@@ -1048,6 +1057,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cbc(paes)",
@@ -1065,6 +1075,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ofb(paes)",
@@ -1082,6 +1093,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cts(cbc(paes))",
@@ -1099,6 +1111,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ctr(paes)",
@@ -1116,6 +1129,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts(aes)",
@@ -1555,7 +1569,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
- !(drvdata->std_bodies & skcipher_algs[alg].std_body))
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..255d2367bfa8 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -35,6 +35,10 @@ bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
@@ -201,7 +205,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 signature_val;
+ u32 val;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -313,16 +317,24 @@ static int init_cc_resources(struct platform_device *plat_dev)
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
+ val, hw_rev->sig);
rc = -EINVAL;
goto post_clk_err;
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", val);
+ } else {
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled = !!val;
}
+ new_drvdata->sec_disabled |= cc_sec_disable;
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 384a886f613a..b4273f1aec62 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -70,6 +70,8 @@ enum cc_std_body {
#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
@@ -141,6 +143,7 @@ struct cc_drvdata {
u32 sig_offset;
u32 ver_offset;
int std_bodies;
+ bool sec_disabled;
};
struct cc_crypto_alg {
@@ -167,6 +170,7 @@ struct cc_alg_template {
int auth_mode;
u32 min_hw_rev;
enum cc_std_body std_body;
+ bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..c1be372a5bfe 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -45,6 +45,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
--
2.21.0
Move to the standard API sg_nents_for_len() where we do not in fact
require the extra information (the number of bytes in the last entry)
provided by the in-driver variant cc_get_sgl_nents().
This also resolves a Coverity warning caused by not using the
output value.
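For reference, the standard helper is used like this (a minimal sketch;
unlike cc_get_sgl_nents() it can return a negative errno when the list
is shorter than the requested length, which callers may want to check):

	int nents;

	nents = sg_nents_for_len(sg, nbytes);
	if (nents < 0)
		return nents; /* sg list too short to cover nbytes */

	sg_copy_to_buffer(sg, nents, buf, nbytes);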
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_buffer_mgr.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 7c92373df351..ccb0257bdc58 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -133,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
- u32 nents, lbytes;
+ u32 nents;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
+ nents = sg_nents_for_len(sg, end);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -519,7 +519,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
@@ -583,15 +582,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
+ dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, size_to_unmap),
DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy),
+ sg_nents_for_len(req->dst, size_to_unmap),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -1429,8 +1426,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
--
2.21.0
This sacrificial copyright header update is offered to the legal department
as atonement for any changes made in this driver files in the course of
the current year which have not been duly recorded as such.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/Makefile | 1 +
drivers/crypto/ccree/cc_aead.c | 2 +-
drivers/crypto/ccree/cc_aead.h | 2 +-
drivers/crypto/ccree/cc_buffer_mgr.c | 2 +-
drivers/crypto/ccree/cc_buffer_mgr.h | 2 +-
drivers/crypto/ccree/cc_cipher.c | 2 +-
drivers/crypto/ccree/cc_cipher.h | 2 +-
drivers/crypto/ccree/cc_crypto_ctx.h | 2 +-
drivers/crypto/ccree/cc_debugfs.h | 2 +-
drivers/crypto/ccree/cc_driver.h | 2 +-
drivers/crypto/ccree/cc_fips.c | 2 +-
drivers/crypto/ccree/cc_fips.h | 2 +-
drivers/crypto/ccree/cc_hash.c | 2 +-
drivers/crypto/ccree/cc_hash.h | 2 +-
drivers/crypto/ccree/cc_hw_queue_defs.h | 2 +-
drivers/crypto/ccree/cc_ivgen.c | 2 +-
drivers/crypto/ccree/cc_ivgen.h | 2 +-
drivers/crypto/ccree/cc_kernel_regs.h | 2 +-
drivers/crypto/ccree/cc_lli_defs.h | 2 +-
drivers/crypto/ccree/cc_pm.c | 2 +-
drivers/crypto/ccree/cc_pm.h | 2 +-
drivers/crypto/ccree/cc_request_mgr.c | 2 +-
drivers/crypto/ccree/cc_request_mgr.h | 2 +-
drivers/crypto/ccree/cc_sram_mgr.c | 2 +-
drivers/crypto/ccree/cc_sram_mgr.h | 2 +-
25 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 8c08a50a4008..ed306a612c0f 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..6cfae15f85e5 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index ccb0257bdc58..09d352591a30 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_buffer_mgr.h
* Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 1ba7c8a7bd52..d9f8cd543ee3 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 312d67f88414..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_cipher.h
* ARM CryptoCell Cipher Crypto API
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index 97e56e9af01e..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 10439ce27b3b..d21e0f0b5218 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 9c03ab238a75..4a67248f5625 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/fips.h>
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..2c287faf10ff 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 8f15ce3deecd..940101fee68e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index fd693681808e..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 1abec3896a78..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/ctr.h>
#include "cc_driver.h"
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..43aca6b79b9b 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..6f278a0d8ee6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/interrupt.h>
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_pm.h
*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index c2e8190bb067..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/nospec.h>
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_request_mgr.h
* Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index 86319788e390..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include "cc_driver.h"
#include "cc_sram_mgr.h"
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__
--
2.21.0
We did not zero out the internal request context struct before use,
causing problems in some rare error code paths.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_aead.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index ed306a612c0f..0fa676b5603c 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2062,6 +2062,8 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2091,6 +2093,8 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2110,6 +2114,8 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2137,6 +2143,8 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2254,6 +2262,8 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2277,6 +2287,8 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
@@ -2309,6 +2321,8 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
@@ -2332,6 +2346,8 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
--
2.21.0
When given a zero-sized cryptlen and assoclen in a scatterlist
with two entries, we wrongly tried to create a zero-length MLLI
table, causing the HW to choke.
Don't try to copy a zero-sized MLLI table.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_aead.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 0fa676b5603c..0e8d2381c175 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1163,9 +1163,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
--
2.21.0
Remove unused definitions from AEAD driver code.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_aead.c | 4 ----
1 file changed, 4 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 0e8d2381c175..f9e410de66a8 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
--
2.21.0
The function cc_prepare_aead_data_dlli(), which calculates ICV addresses,
was needlessly complicated. This patch simplifies it without altering
its functionality.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_buffer_mgr.c | 28 +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 8269474cb9fa..8554cfb2963a 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -749,27 +749,21 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
areq_ctx->is_icv_fragmented = false;
- if (req->src == req->dst) {
- /*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- /*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
+
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
} else {
- /*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
}
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
--
2.21.0
We were mangling the assoclen field of the request struct.
Fix this by keeping an internal copy and working on that instead.
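The pattern, in sketch form (the actual changes are in the diff below):
snapshot the caller's value once at request entry and let every later
adjustment hit the copy only.

	/* at the request entry points (encrypt/decrypt) */
	areq_ctx->assoclen = req->assoclen;

	/* everywhere else, e.g. the rfc4309 header stripping */
	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; /* was: req->assoclen -= ... */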
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_aead.c | 40 +++++++++++++++++-----------
drivers/crypto/ccree/cc_aead.h | 1 +
drivers/crypto/ccree/cc_buffer_mgr.c | 22 +++++++--------
3 files changed, 37 insertions(+), 26 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f9e410de66a8..7447fd0ff48e 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -731,7 +731,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1080,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1310,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1471,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1563,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1606,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1814,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1838,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1855,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1865,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1886,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1911,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2062,6 +2064,7 @@ static int cc_aead_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2093,6 +2096,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2114,6 +2118,7 @@ static int cc_aead_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2143,6 +2148,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2262,6 +2268,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2290,6 +2297,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2321,6 +2329,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2349,6 +2358,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 6cfae15f85e5..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 8554cfb2963a..86e498bee0bb 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -575,8 +575,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
+ areq_ctx->assoclen, req->cryptlen);
+ size_to_unmap = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
if (areq_ctx->is_gcm4543)
@@ -663,7 +663,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct scatterlist *current_sg = req->src;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ u32 size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -674,7 +674,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -734,7 +734,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -893,11 +893,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -941,7 +941,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
if (is_gcm4543)
@@ -1107,7 +1107,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->ccm_iv0_dma_addr = dma_addr;
rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen);
+ &sg_data, areq_ctx->assoclen);
if (rc)
goto aead_map_failure;
}
@@ -1158,7 +1158,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
--
2.21.0
We were needlessly handling chained scatter/gather lists with
specialized code, even though the regular sg APIs handle them just
fine. The code handling this also had an (unused) code path with a
use-before-init error, flagged by Coverity.
Remove all special handling of chained sg and leave their handling
to the regular sg APIs.
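For reference, a minimal sketch of the walk the regular sg API
already performs; sg_next() transparently follows chain entries, so
no driver-side special casing is needed (count_sg_ents is an
illustrative name, not a driver function):

    #include <linux/scatterlist.h>

    /* Count the entries covering nbytes. sg_next() already follows
     * chained sg tables, so chain entries need no special casing.
     */
    static unsigned int count_sg_ents(struct scatterlist *sg,
                                      unsigned int nbytes)
    {
            unsigned int nents = 0;

            while (nbytes && sg) {
                    nents++;
                    nbytes -= min(nbytes, sg->length);
                    sg = sg_next(sg);
            }
            return nents;
    }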
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_buffer_mgr.c | 98 +++++++---------------------
1 file changed, 22 insertions(+), 76 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index adef3cfa1251..7c92373df351 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes,
- bool *is_chained)
+ unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
- if (sg_list->length) {
- nents++;
- /* get the number of bytes in the last entry */
- *lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ?
- nbytes : sg_list->length;
- sg_list = sg_next(sg_list);
- } else {
- sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained)
- *is_chained = true;
- }
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
{
u32 nents, lbytes;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->num_of_buffers++;
}
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
-{
- u32 i, j;
- struct scatterlist *l_sg = sg;
-
- for (i = 0; i < nents; i++) {
- if (!l_sg)
- break;
- if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_page() sg buffer failed\n");
- goto err;
- }
- l_sg = sg_next(l_sg);
- }
- return nents;
-
-err:
- /* Restore mapped parts */
- for (j = 0; j < i; j++) {
- if (!sg)
- break;
- dma_unmap_sg(dev, sg, 1, direction);
- sg = sg_next(sg);
- }
- return 0;
-}
-
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- bool is_chained = false;
-
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
- if (!is_chained) {
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
- } else {
- /*In this case the driver maps entry by entry so it
- * must have the same nents before and after map
- */
- *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
- direction);
- if (*mapped_nents != *nents) {
- *nents = *mapped_nents;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ /* In case of mmu the number of mapped nents might
+ * be changed from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
}
}
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
- bool chained;
u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
@@ -636,15 +584,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
size_to_unmap += crypto_aead_ivsize(tfm);
dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ &dummy),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -1022,7 +969,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
unsigned int size_for_map = req->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
- bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
u32 size_to_skip = req->assoclen;
@@ -1043,7 +989,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
- &src_last_bytes, &chained);
+ &src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1083,7 +1029,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
- &dst_last_bytes, &chained);
+ &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1484,7 +1430,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ cc_get_sgl_nents(dev, src, nbytes, &dummy);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
--
2.21.0
Fix the scatter list iteration code so that it gracefully handles
scatter lists that are shorter than expected.
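The fix leans on sg_nents_for_len() from the core sg API, which
returns the number of entries needed to cover a byte count, or
-EINVAL if the list ends early. A hedged sketch of the resulting
flow (check_assoc_sgl and max_ents are illustrative names):

    #include <linux/scatterlist.h>

    static int check_assoc_sgl(struct scatterlist *src,
                               unsigned int assoclen, int max_ents)
    {
            int nents = sg_nents_for_len(src, assoclen);

            if (nents < 0)        /* -EINVAL: sgl shorter than assoclen */
                    return nents;
            if (nents > max_ents) /* too fragmented for one MLLI table */
                    return -ENOMEM;
            return nents;
    }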
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_buffer_mgr.c | 55 +++++++++-------------------
1 file changed, 17 insertions(+), 38 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 86e498bee0bb..fa625bdde3f9 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -659,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
- u32 mapped_nents = 0;
- struct scatterlist *current_sg = req->src;
+ int mapped_nents = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int sg_index = 0;
- u32 size_of_assoc = areq_ctx->assoclen;
+ unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -684,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- //iterate over the sgl to see how many entries are for associated data
- //it is assumed that if we reach here , the sgl is already mapped
- sg_index = current_sg->length;
- //the first entry in the scatter list contains all the associated data
- if (sg_index > size_of_assoc) {
- mapped_nents++;
- } else {
- while (sg_index <= size_of_assoc) {
- current_sg = sg_next(current_sg);
- /* if have reached the end of the sgl, then this is
- * unexpected
- */
- if (!current_sg) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += current_sg->length;
- mapped_nents++;
- }
- }
+ mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -898,6 +880,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 sg_index = 0;
bool is_gcm4543 = areq_ctx->is_gcm4543;
u32 size_to_skip = areq_ctx->assoclen;
+ struct scatterlist *sgl;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -920,15 +903,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
+ src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
- areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->src_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
sg_index += areq_ctx->src_sgl->length;
- src_mapped_nents--;
}
if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -962,15 +943,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
//check where the data starts
while (sg_index <= size_to_skip) {
+ dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
- areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dst_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
sg_index += areq_ctx->dst_sgl->length;
- dst_mapped_nents--;
}
if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
--
2.21.0
Increase the maximum supported AEAD associated data fragments.
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_lli_defs.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 43aca6b79b9b..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
--
2.21.0
The AEAD authenc key and IVs might be passed to us on the stack.
Copy them to a slab buffer before mapping to guarantee a proper DMA
mapping.
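dma_map_single() is only valid for lowmem/slab addresses, while
callers may legitimately place the IV or key on their stack; hence
the copy-then-map pattern. A minimal sketch of that pattern
(map_copy is an illustrative helper; the driver derives its GFP
flags from the request via cc_gfp_flags() rather than hardcoding
GFP_KERNEL):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static void *map_copy(struct device *dev, const void *buf,
                          size_t len, dma_addr_t *dma,
                          enum dma_data_direction dir)
    {
            /* Duplicate the possibly-on-stack buffer into slab memory */
            void *copy = kmemdup(buf, len, GFP_KERNEL);

            if (!copy)
                    return NULL;

            *dma = dma_map_single(dev, copy, len, dir);
            if (dma_mapping_error(dev, *dma)) {
                    kzfree(copy);  /* wipe and free the sensitive copy */
                    return NULL;
            }
            return copy;
    }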
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_aead.c | 11 ++++++++++-
drivers/crypto/ccree/cc_buffer_mgr.c | 15 ++++++++++++---
drivers/crypto/ccree/cc_driver.h | 1 +
3 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 7447fd0ff48e..ca44a227b211 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepers the user key so it can pass to the hmac processing
* (copy to intenral buffer or hash in case of key longer than block
*/
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 09dceec7d828..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -557,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -607,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 935ae0ba75c0..cc403d705c9d 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -204,6 +204,7 @@ struct cc_alg_template {
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};
--
2.21.0
From: Ofir Drang <[email protected]>
In the power management resume function, first enable the device
clock source to allow access to the device registers.
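Condensed, the fixed ordering looks as follows (a sketch eliding the
request queue and ivgen resume steps of the full function):

    int cc_pm_resume(struct device *dev)
    {
            struct cc_drvdata *drvdata = dev_get_drvdata(dev);
            int rc;

            rc = cc_clk_on(drvdata);       /* 1. enable the source clk */
            if (rc)
                    return rc;

            /* 2. register access is safe only once the clk is on */
            cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN),
                       POWER_DOWN_DISABLE);

            return init_cc_regs(drvdata, false);  /* 3. reinit regs */
    }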
Signed-off-by: Ofir Drang <[email protected]>
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_pm.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6f278a0d8ee6..9a5abe406996 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -42,14 +42,15 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+ /* Enables the device source clk */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
--
2.21.0
From: Ofir Drang <[email protected]>
Add a function that checks whether a CryptoCell TEE FIPS error has
occurred and, in such a case, triggers a system error through a
kernel panic. Change the FIPS code to use this new routine.
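In effect, call sites collapse from an open-coded check to a single
entry point, as sketched below:

    /* Before: each caller open-coded the status check */
    if (!cc_get_tee_fips_status(p_drvdata))
            tee_fips_error(dev);

    /* After: one routine, also callable from PM resume */
    cc_tee_handle_fips_error(p_drvdata);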
Signed-off-by: Ofir Drang <[email protected]>
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_fips.c | 23 +++++++++++++++--------
drivers/crypto/ccree/cc_fips.h | 2 ++
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 4a67248f5625..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -70,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}
+/*
+ * This function check if cryptocell tee fips error occurred
+ * and in such case triggers system error
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
- struct device *dev = drvdata_to_dev(drvdata);
- u32 irq, state, val;
+ u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
- state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
- if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(drvdata);
}
/* after verifing that there is nothing to do,
@@ -111,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
- if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(p_drvdata);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 2c287faf10ff..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
--
2.21.0
From: Ofir Drang <[email protected]>
In order to support a CryptoCell TEE FIPS error that may occur while
the CryptoCell REE is suspended, add a cc_tee_handle_fips_error call
to the cc_pm_resume function.
Signed-off-by: Ofir Drang <[email protected]>
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_pm.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 77025da2dcb6..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
+#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -50,12 +51,13 @@ int cc_pm_resume(struct device *dev)
}
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
+ /* check if tee fips error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
--
2.21.0
The code detecting whether the ICV is fragmented was overly complex
and, for no reason required by the current code, limited the number
of fragments an ICV may be comprised of, causing the new testmgr
tests to fail.
This patch removes this legacy limitation and greatly simplifies
the code.
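The whole decision reduces to a single predicate: the ICV is
fragmented exactly when the list has more than one entry and the
last entry holds fewer than authsize bytes. A sketch with
illustrative inputs:

    #include <linux/types.h>

    /*   nents=1, last=16, authsize=16 -> false (single-entry list)
     *   nents=3, last=20, authsize=16 -> false (ICV fits in last entry)
     *   nents=3, last= 8, authsize=16 -> true  (ICV spans two entries)
     */
    static bool icv_is_fragmented(unsigned int sgl_nents,
                                  unsigned int authsize, u32 last_bytes)
    {
            return sgl_nents > 1 && last_bytes < authsize;
    }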
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_buffer_mgr.c | 106 ++++++---------------------
drivers/crypto/ccree/cc_driver.h | 1 -
2 files changed, 21 insertions(+), 86 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 09d352591a30..8269474cb9fa 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -602,55 +602,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
}
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
- unsigned int sgl_nents, unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
{
- unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ?
- (authsize - last_entry_data_size) :
- authsize;
- unsigned int nents;
- unsigned int i;
-
- if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
- *is_icv_fragmented = false;
- return 0;
- }
-
- for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (!sgl)
- break;
- sgl = sg_next(sgl);
- }
-
- if (sgl)
- icv_max_size = sgl->length;
-
- if (last_entry_data_size > authsize) {
- /* ICV attached to data in last entry (not fragmented!) */
- nents = 0;
- *is_icv_fragmented = false;
- } else if (last_entry_data_size == authsize) {
- /* ICV placed in whole last entry (not fragmented!) */
- nents = 1;
- *is_icv_fragmented = false;
- } else if (icv_max_size > icv_required_size) {
- nents = 1;
- *is_icv_fragmented = true;
- } else if (icv_max_size == icv_required_size) {
- nents = 2;
- *is_icv_fragmented = true;
- } else {
- dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
- nents = -1; /*unsupported*/
- }
- dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
-
- return nents;
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -817,16 +772,15 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
}
}
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
- int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
struct scatterlist *sg;
@@ -837,14 +791,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
@@ -886,16 +835,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->dst_offset, is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
-
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
@@ -923,14 +867,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize, *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
if (!areq_ctx->is_icv_fragmented) {
sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -944,9 +883,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
}
}
-
-prepare_data_mlli_exit:
- return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1053,9 +989,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
dst_mapped_nents > 1 ||
do_chain) {
areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
- rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes,
- &dst_last_bytes, is_last_table);
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
cc_prepare_aead_data_dlli(req, &src_last_bytes,
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d21e0f0b5218..935ae0ba75c0 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -108,7 +108,6 @@ enum cc_std_body {
#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
--
2.21.0
We were doing backlog notification callbacks via a cipher/hash/aead
request structure cast to the base structure, which may or may not
work depending on how the structure is laid out in memory and is
not safe.
Fix it by delegating the backlog notification to the appropriate
internal callbacks which are type aware.
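A sketch of the difference (creq is the cc_crypto_req embedded in
the backlog item):

    /* Unsafe: user_arg is really a skcipher/ahash/aead request;
     * crypto_async_request is not guaranteed to sit at offset 0.
     */
    struct crypto_async_request *req = creq->user_arg;
    req->complete(req, -EINPROGRESS);

    /* Safe: the registered completion knows the true request type
     * and treats -EINPROGRESS as a backlog notification only.
     */
    creq->user_cb(dev, creq->user_arg, -EINPROGRESS);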
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_aead.c | 4 ++++
drivers/crypto/ccree/cc_cipher.c | 10 +++++++---
drivers/crypto/ccree/cc_hash.c | 28 +++++++++++++++++++--------
drivers/crypto/ccree/cc_request_mgr.c | 11 ++++++++---
4 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..8c08a50a4008 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -220,6 +220,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 15da3a35a6a1..1ba7c8a7bd52 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -818,9 +818,13 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- memcpy(req->iv, req_ctx->iv, ivsize);
- kzfree(req_ctx->iv);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kzfree(req_ctx->iv);
+ }
+
skcipher_request_complete(req, err);
}
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..e824ab60b59c 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -280,8 +280,12 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_req(dev, state, ctx);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
req->base.complete(&req->base, err);
}
@@ -295,9 +299,13 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
req->base.complete(&req->base, err);
}
@@ -311,9 +319,13 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
req->base.complete(&req->base, err);
}
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 88c97a580dd8..c2e8190bb067 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -364,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -377,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
- struct crypto_async_request *req;
+ void *req;
bool ivgen;
unsigned int total_len;
struct device *dev = drvdata_to_dev(drvdata);
@@ -387,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
spin_unlock(&mgr->bl_lock);
+
creq = &bli->creq;
- req = (struct crypto_async_request *)creq->user_arg;
+ req = creq->user_arg;
/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
- req->complete(req, -EINPROGRESS);
+ creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}
--
2.21.0
We were trying to be clever, zapping out of the cache only the
required length of the scatter list on AEAD request completion, and
getting it wrong.
As Knuth said: "when in doubt, use brute force". Zap the whole
length of the scatter list.
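A sketch of the contrast: sg_nents() counts every entry in the
list, so the unmap matches what was mapped instead of a recomputed
byte length:

    /* Fragile: recompute the byte length and hope it matches the map */
    dma_unmap_sg(dev, req->src,
                 sg_nents_for_len(req->src, size_to_unmap),
                 DMA_BIDIRECTIONAL);

    /* Brute force: unmap every entry of the list */
    dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);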
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_buffer_mgr.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index fa625bdde3f9..09dceec7d828 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -517,9 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -576,19 +574,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- size_to_unmap = areq_ctx->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, size_to_unmap),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- sg_nents_for_len(req->dst, size_to_unmap),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
--
2.21.0
We were computing the size of the import buffer based on the digest
size, but the 384 and 224 bit variants use the 512 and 256 bit
internal state sizes respectively, thus causing the import buffer
to overrun.
Fix it by using the right sizes.
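statesize is the buffer size the crypto API allocates for
.export()/.import(); since SHA-224 and SHA-384 are truncations that
run the SHA-256 and SHA-512 compression functions, their exported
running state is the larger variant's. Illustrative excerpt:

    .halg = {
            .digestsize = SHA224_DIGEST_SIZE, /* 28-byte final digest */
            /* but the running state is a full SHA-256 state */
            .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
    },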
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_hash.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 940101fee68e..36e9fb4141f8 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1633,7 +1633,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1660,7 +1660,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
--
2.21.0
The MAC hash key might be passed to us on the stack. Copy it to a
slab buffer before mapping to guarantee a proper DMA mapping.
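This is the same root cause as the AEAD key fix earlier in the
series; sketched below is the key lifecycle inside a setkey handler
(error paths elided):

    ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
    if (!ctx->key_params.key)
            return -ENOMEM;

    ctx->key_params.key_dma_addr =
            dma_map_single(dev, ctx->key_params.key, keylen,
                           DMA_TO_DEVICE);

    /* ... HW consumes the key via the descriptor queue ... */

    dma_unmap_single(dev, ctx->key_params.key_dma_addr, keylen,
                     DMA_TO_DEVICE);
    kzfree(ctx->key_params.key);    /* wipe the slab copy */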
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_hash.c | 24 +++++++++++++++++++++---
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 36e9fb4141f8..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -69,6 +69,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
+ u8 *key;
};
/* hash per-session context */
@@ -742,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
- key, keylen);
+ ctx->key_params.key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -899,6 +907,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
+
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -925,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -981,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ kzfree(ctx->key_params.key);
+
return rc;
}
--
2.21.0
From: Ofir Drang <[email protected]>
During power management suspend, the driver needs to prepare the
device for the power down operation and, as a last indication,
write to the HOST_POWER_DOWN_EN register, which signals to the
hardware that the CryptoCell is ready for power down.
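Condensed, the suspend-side ordering mirrors the resume fix:
quiesce, tear down, and only then signal power-down (a sketch of
the fixed flow):

    int cc_pm_suspend(struct device *dev)
    {
            struct cc_drvdata *drvdata = dev_get_drvdata(dev);
            int rc;

            rc = cc_suspend_req_queue(drvdata);   /* 1. quiesce queue */
            if (rc)
                    return rc;

            fini_cc_regs(drvdata);                /* 2. tear down regs */

            /* 3. last CC access: signal readiness for power down */
            cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN),
                       POWER_DOWN_ENABLE);

            cc_clk_off(drvdata);
            return 0;
    }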
Signed-off-by: Ofir Drang <[email protected]>
Signed-off-by: Gilad Ben-Yossef <[email protected]>
Cc: [email protected] # v4.19+
---
drivers/crypto/ccree/cc_pm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 9a5abe406996..77025da2dcb6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -25,13 +25,13 @@ int cc_pm_suspend(struct device *dev)
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
--
2.21.0
From: Ofir Drang <[email protected]>
The AXIM configuration register was modified in cc7x3 and no longer
includes the AXI interrupt masking fields.
Signed-off-by: Ofir Drang <[email protected]>
Signed-off-by: Gilad Ben-Yossef <[email protected]>
---
drivers/crypto/ccree/cc_driver.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index a28548192211..902f196d4be1 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -193,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
- /* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
- dev_dbg(dev, "AXIM_CFG=0x%08X\n",
- cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ /* Unmask all AXI interrupt sources AXI_CFG1 register */
+ /* AXI interrupt config are obsoleted startign at cc7x3 */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
--
2.21.0
On Sun, May 19, 2019 at 11:28:05AM +0300, Gilad Ben-Yossef wrote:
> On Sat, May 18, 2019 at 10:36 AM Gilad Ben-Yossef <[email protected]> wrote:
> >
> > Hi
> >
> > On Fri, May 17, 2019 at 5:52 PM Greg KH <[email protected]> wrote:
> > >
> > > On Sun, Apr 21, 2019 at 11:52:55AM +0300, Gilad Ben-Yossef wrote:
> > > > On Thu, Apr 18, 2019 at 4:39 PM Gilad Ben-Yossef <[email protected]> wrote:
> > > > >
> > > > > A set of new features, mostly support for CryptoCell 713
> > > > > features including protected keys, security disable mode and
> > > > > new HW revision indetification interface alongside many bug fixes.
> > > >
> > > > FYI,
> > > >
> > > > A port of those patches from this patch series which have been marked
> > > > for stable is available at
> > > > https://github.com/gby/linux/tree/4.19-ccree
> > >
> > > Hm, all I seem to need are 2 patches that failed to apply. Can you just
> > > provide backports for them?
> >
> > Sure, I'll send them early next week.
>
> hm... I've just fetched the latest from
> git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git,
> rebased that branch against the linux-4.19.y branch and it all went
> smooth.
>
> What am I missing? Is there some other tree I should be doing this on?
I do not know, can you just send the 2 patches that I said failed for
me? Those are the only ones that I need here.
I can't use random github trees, sorry, let's stick to email for patches
please.
thanks,
greg k-h