This patch series contains support for the crypto engine which can be found
on a few Marvell SoCs. The crypto driver currently uses dmac_flush_range()
to flush the result back to physical memory. This hack goes away once I've
implemented DMA support. The alternative temporary solution would be to use
flush_kernel_dcache_page().
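To illustrate, the copy-out path in question (dequeue_complete_req() in the
patch below) currently does:

	memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
	/* temporary hack: flush the dirtied lines back to phys mem */
	dmac_flush_range(buf, buf + dst_copy);

A sketch of the flush_kernel_dcache_page() alternative, assuming the page
is taken from the sg mapping iterator:

	memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
	/* page-based flush instead of the range-based hack */
	flush_kernel_dcache_page(cpg->p.dst_sg_it.page);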
The driver was tested against dm-crypt on my QNAP TS-209 with no problems
so far. Here are some numbers:
Native, no crypto at all
~~~~~~~~~~~~~~~~~~~~~~~~
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 5.67222 s, 18.5 MB/s
The crypto driver with flush_kernel_dcache_page()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|AES-ECB-128
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 31.3831 s, 3.3 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 30.6151 s, 3.4 MB/s
|
|AES-ECB-256
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 32.8273 s, 3.2 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 31.301 s, 3.3 MB/s
|
|AES-CBC-128
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 41.9262 s, 2.5 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 39.2683 s, 2.7 MB/s
|
|AES-CBC-256
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 37.599 s, 2.8 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 36.4306 s, 2.9 MB/s
The crypto driver with dmac_flush_range()
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|AES-CBC-256
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 31.3163 s, 3.3 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 29.9738 s, 3.5 MB/s
|
|AES-ECB-128
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 23.6008 s, 4.4 MB/s
|
|# dd if=file bs=1048576 of=/dev/null
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 22.9579 s, 4.6 MB/s
Pure software
~~~~~~~~~~~~~
|AES-CBC-128
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 54.6493 s, 1.9 MB/s
|
|AES-CBC-256
|# dd if=/dev/zero of=file bs=1048576 count=100
|100+0 records in
|100+0 records out
|104857600 bytes (105 MB) copied, 64.3429 s, 1.6 MB/s
Sebastian
From: Sebastian Andrzej Siewior <[email protected]>
use the new driver for the crypto engine
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
arch/arm/mach-orion5x/ts209-setup.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 9d68905..fbea307 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -302,6 +302,7 @@ static void __init qnap_ts209_init(void)
orion5x_uart0_init();
orion5x_uart1_init();
orion5x_xor_init();
+ orion5x_crypto_init();
platform_device_register(&qnap_ts209_button_device);
--
1.6.0.6
From: Sebastian Andrzej Siewior <[email protected]>
This adds support for Marvell's Cryptographic Engines and Security
Accelerator (CESA) which can be found on a few SoCs.
Tested with dm-crypt.
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
drivers/crypto/Kconfig | 13 +
drivers/crypto/Makefile | 1 +
drivers/crypto/mv_cesa.c | 610 ++++++++++++++++++++++++++++++++++++++++++++++
drivers/crypto/mv_cesa.h | 120 +++++++++
4 files changed, 744 insertions(+), 0 deletions(-)
create mode 100644 drivers/crypto/mv_cesa.c
create mode 100644 drivers/crypto/mv_cesa.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd75..0320187 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on certain SoCs, such
+	  as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2b..6ffcb3f 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 0000000..22ea1b1
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,610 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPL
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+#include "mv_cesa.h"
+/*
+ * STM:
+ * /---------------------------------------\
+ * | | request complete
+ * \./ |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ * /°\ |
+ * | | more scatter entries
+ * \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * The sg helpers are used to iterate over the scatterlist. Since the size
+ * of the SRAM may be less than the scatter size, this struct is used to
+ * keep track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+ /* fall */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+ memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, MAX_REQ_SIZE);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+ return ;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+ /*
+ * XXX This is a temporary hack to flush data from cache back
+ * into phys ram so user space sees correct data. This goes
+ * away as soon as DMA support is added to this driver
+ */
+ dmac_flush_range(buf, buf + dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+ total_bytes -= sl[i].length;
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, 0);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, 0);
+ mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+		struct crypto_async_request *backlog = NULL;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int m_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+
+ cp->sram = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+ ret = irq;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+	if (IS_ERR(cp->queue_th)) {
+		ret = PTR_ERR(cp->queue_th);
+		goto err_unmap_sram;
+	}
+
+	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+			cp);
+	if (ret)
+		goto err_thread;
+
+	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+	ret = crypto_register_alg(&mv_aes_alg_ecb);
+	if (ret)
+		goto err_irq;
+
+	ret = crypto_register_alg(&mv_aes_alg_cbc);
+	if (ret)
+		goto err_unreg_ecb;
+	return 0;
+err_unreg_ecb:
+	crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+	free_irq(irq, cp);
+err_thread:
+	kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int m_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, 8 * 1024);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = m_probe,
+ .remove = m_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv,orion5x-crypto",
+ },
+};
+
+static int __init crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(crypto_init);
+
+static void __exit crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 0000000..7198be9
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,120 @@
+#ifndef __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is undocumented. I assume
+ * that it was part of an IRQ controller in the FPGA and someone forgot to
+ * remove it while switching to the core and moving to SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+}__attribute__ ((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN | 16 * x (max MAX_REQ_SIZE)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT | 16 * x (max MAX_REQ_SIZE)
+ * |-----------| 0x1f90
+ * | nuttin |
+ * \-----------/ 0x2000
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define MAX_REQ_SIZE (8000)
+
+#endif
--
1.6.0.6
From: Sebastian Andrzej Siewior <[email protected]>
The security accelerator, which can act as a puppet player for the crypto
engine, requires its commands to be placed in the SRAM. This patch adds
support for the physical mapping and creates a platform device for the
actual driver.
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
arch/arm/mach-orion5x/addr-map.c | 14 ++++++++-
arch/arm/mach-orion5x/common.c | 36 ++++++++++++++++++++++++++
arch/arm/mach-orion5x/common.h | 2 +
arch/arm/mach-orion5x/include/mach/orion5x.h | 6 ++++
4 files changed, 56 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index c14d121..d78731e 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
+#include <linux/errno.h>
#include <mach/hardware.h>
#include "common.h"
@@ -44,6 +45,7 @@
#define TARGET_DEV_BUS 1
#define TARGET_PCI 3
#define TARGET_PCIE 4
+#define TARGET_SRAM 9
#define ATTR_PCIE_MEM 0x59
#define ATTR_PCIE_IO 0x51
#define ATTR_PCIE_WA 0x79
@@ -53,6 +55,7 @@
#define ATTR_DEV_CS1 0x1d
#define ATTR_DEV_CS2 0x1b
#define ATTR_DEV_BOOT 0xf
+#define ATTR_SRAM 0x0
/*
* Helpers to get DDR bank info
@@ -87,13 +90,13 @@ static int __init orion5x_cpu_win_can_remap(int win)
return 0;
}
-static void __init setup_cpu_win(int win, u32 base, u32 size,
+static int __init setup_cpu_win(int win, u32 base, u32 size,
u8 target, u8 attr, int remap)
{
if (win >= 8) {
printk(KERN_ERR "setup_cpu_win: trying to allocate "
"window %d\n", win);
- return;
+ return -ENOSPC;
}
writel(base & 0xffff0000, CPU_WIN_BASE(win));
@@ -107,6 +110,7 @@ static void __init setup_cpu_win(int win, u32 base, u32 size,
writel(remap & 0xffff0000, CPU_WIN_REMAP_LO(win));
writel(0, CPU_WIN_REMAP_HI(win));
}
+ return 0;
}
void __init orion5x_setup_cpu_mbus_bridge(void)
@@ -193,3 +197,9 @@ void __init orion5x_setup_pcie_wa_win(u32 base, u32 size)
setup_cpu_win(win_alloc_count++, base, size,
TARGET_PCIE, ATTR_PCIE_WA, -1);
}
+
+int __init orion5x_setup_sram_win(void)
+{
+ return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
+ ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
+}
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index b1c7778..f290582 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -536,6 +536,42 @@ void __init orion5x_xor_init(void)
platform_device_register(&orion5x_xor1_channel);
}
+static struct resource orion5x_crypto_res[] = {
+ {
+ .name = "regs",
+ .start = ORION5X_CRYPTO_PHYS_BASE,
+ .end = ORION5X_CRYPTO_PHYS_BASE + 0xffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "sram",
+ .start = ORION5X_SRAM_PHYS_BASE,
+		.end		= ORION5X_SRAM_PHYS_BASE + 8 * 1024 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "crypto interrupt",
+ .start = IRQ_ORION5X_CESA,
+ .end = IRQ_ORION5X_CESA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device orion5x_crypto_device = {
+ .name = "mv,orion5x-crypto",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(orion5x_crypto_res),
+ .resource = orion5x_crypto_res,
+};
+
+int __init orion5x_crypto_init(void)
+{
+ int ret;
+
+ ret = orion5x_setup_sram_win();
+ if (ret)
+ return ret;
+
+ return platform_device_register(&orion5x_crypto_device);
+}
/*****************************************************************************
* Watchdog
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 798b9a5..de483e8 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -26,6 +26,7 @@ void orion5x_setup_dev0_win(u32 base, u32 size);
void orion5x_setup_dev1_win(u32 base, u32 size);
void orion5x_setup_dev2_win(u32 base, u32 size);
void orion5x_setup_pcie_wa_win(u32 base, u32 size);
+int orion5x_setup_sram_win(void);
void orion5x_ehci0_init(void);
void orion5x_ehci1_init(void);
@@ -37,6 +38,7 @@ void orion5x_spi_init(void);
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
void orion5x_xor_init(void);
+int orion5x_crypto_init(void);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index 377a773..2d87665 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -24,6 +24,7 @@
* f1000000 on-chip peripheral registers
* f2000000 PCIe I/O space
* f2100000 PCI I/O space
+ * f2200000 SRAM dedicated for the crypto unit
* f4000000 device bus mappings (boot)
* fa000000 device bus mappings (cs0)
* fa800000 device bus mappings (cs2)
@@ -49,6 +50,9 @@
#define ORION5X_PCI_IO_BUS_BASE 0x00100000
#define ORION5X_PCI_IO_SIZE SZ_1M
+#define ORION5X_SRAM_PHYS_BASE (0xf2200000)
+#define ORION5X_SRAM_SIZE SZ_8K
+
/* Relevant only for Orion-1/Orion-NAS */
#define ORION5X_PCIE_WA_PHYS_BASE 0xf0000000
#define ORION5X_PCIE_WA_VIRT_BASE 0xfe000000
@@ -94,6 +98,8 @@
#define ORION5X_SATA_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x80000)
#define ORION5X_SATA_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x80000)
+#define ORION5X_CRYPTO_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x90000)
+
#define ORION5X_USB1_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0xa0000)
#define ORION5X_USB1_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0xa0000)
--
1.6.0.6
On Thu, 11 Jun 2009, [email protected] wrote:
> From: Sebastian Andrzej Siewior <[email protected]>
>
> The security accelerator which can act as a puppet player for the crypto
> engine requires its commands in the sram. This patch adds support for the
> phys mapping and creates a platform device the actual driver.
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
This one is already merged in the Orion and ARM tree, with a minor fix
and device renamed to be more generic. The equivalent registration for
Kirkwood is merged as well already.
> ---
> arch/arm/mach-orion5x/addr-map.c | 14 ++++++++-
> arch/arm/mach-orion5x/common.c | 36 ++++++++++++++++++++++++++
> arch/arm/mach-orion5x/common.h | 2 +
> arch/arm/mach-orion5x/include/mach/orion5x.h | 6 ++++
> 4 files changed, 56 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
> index c14d121..d78731e 100644
> --- a/arch/arm/mach-orion5x/addr-map.c
> +++ b/arch/arm/mach-orion5x/addr-map.c
> @@ -14,6 +14,7 @@
> #include <linux/init.h>
> #include <linux/mbus.h>
> #include <linux/io.h>
> +#include <linux/errno.h>
> #include <mach/hardware.h>
> #include "common.h"
>
> @@ -44,6 +45,7 @@
> #define TARGET_DEV_BUS 1
> #define TARGET_PCI 3
> #define TARGET_PCIE 4
> +#define TARGET_SRAM 9
> #define ATTR_PCIE_MEM 0x59
> #define ATTR_PCIE_IO 0x51
> #define ATTR_PCIE_WA 0x79
> @@ -53,6 +55,7 @@
> #define ATTR_DEV_CS1 0x1d
> #define ATTR_DEV_CS2 0x1b
> #define ATTR_DEV_BOOT 0xf
> +#define ATTR_SRAM 0x0
>
> /*
> * Helpers to get DDR bank info
> @@ -87,13 +90,13 @@ static int __init orion5x_cpu_win_can_remap(int win)
> return 0;
> }
>
> -static void __init setup_cpu_win(int win, u32 base, u32 size,
> +static int __init setup_cpu_win(int win, u32 base, u32 size,
> u8 target, u8 attr, int remap)
> {
> if (win >= 8) {
> printk(KERN_ERR "setup_cpu_win: trying to allocate "
> "window %d\n", win);
> - return;
> + return -ENOSPC;
> }
>
> writel(base & 0xffff0000, CPU_WIN_BASE(win));
> @@ -107,6 +110,7 @@ static void __init setup_cpu_win(int win, u32 base, u32 size,
> writel(remap & 0xffff0000, CPU_WIN_REMAP_LO(win));
> writel(0, CPU_WIN_REMAP_HI(win));
> }
> + return 0;
> }
>
> void __init orion5x_setup_cpu_mbus_bridge(void)
> @@ -193,3 +197,9 @@ void __init orion5x_setup_pcie_wa_win(u32 base, u32 size)
> setup_cpu_win(win_alloc_count++, base, size,
> TARGET_PCIE, ATTR_PCIE_WA, -1);
> }
> +
> +int __init orion5x_setup_sram_win(void)
> +{
> + return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
> + ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
> +}
> diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
> index b1c7778..f290582 100644
> --- a/arch/arm/mach-orion5x/common.c
> +++ b/arch/arm/mach-orion5x/common.c
> @@ -536,6 +536,42 @@ void __init orion5x_xor_init(void)
> platform_device_register(&orion5x_xor1_channel);
> }
>
> +static struct resource orion5x_crypto_res[] = {
> + {
> + .name = "regs",
> + .start = ORION5X_CRYPTO_PHYS_BASE,
> + .end = ORION5X_CRYPTO_PHYS_BASE + 0xffff,
> + .flags = IORESOURCE_MEM,
> + }, {
> + .name = "sram",
> + .start = ORION5X_SRAM_PHYS_BASE,
> + .end = ORION5X_SRAM_PHYS_BASE + 8 * 1024,
> + .flags = IORESOURCE_MEM,
> + }, {
> + .name = "crypto interrupt",
> + .start = IRQ_ORION5X_CESA,
> + .end = IRQ_ORION5X_CESA,
> + .flags = IORESOURCE_IRQ,
> + },
> +};
> +
> +static struct platform_device orion5x_crypto_device = {
> + .name = "mv,orion5x-crypto",
> + .id = 0,
> + .num_resources = ARRAY_SIZE(orion5x_crypto_res),
> + .resource = orion5x_crypto_res,
> +};
> +
> +int __init orion5x_crypto_init(void)
> +{
> + int ret;
> +
> + ret = orion5x_setup_sram_win();
> + if (ret)
> + return ret;
> +
> + return platform_device_register(&orion5x_crypto_device);
> +}
>
> /*****************************************************************************
> * Watchdog
> diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
> index 798b9a5..de483e8 100644
> --- a/arch/arm/mach-orion5x/common.h
> +++ b/arch/arm/mach-orion5x/common.h
> @@ -26,6 +26,7 @@ void orion5x_setup_dev0_win(u32 base, u32 size);
> void orion5x_setup_dev1_win(u32 base, u32 size);
> void orion5x_setup_dev2_win(u32 base, u32 size);
> void orion5x_setup_pcie_wa_win(u32 base, u32 size);
> +int orion5x_setup_sram_win(void);
>
> void orion5x_ehci0_init(void);
> void orion5x_ehci1_init(void);
> @@ -37,6 +38,7 @@ void orion5x_spi_init(void);
> void orion5x_uart0_init(void);
> void orion5x_uart1_init(void);
> void orion5x_xor_init(void);
> +int orion5x_crypto_init(void);
>
> /*
> * PCIe/PCI functions.
> diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
> index 377a773..2d87665 100644
> --- a/arch/arm/mach-orion5x/include/mach/orion5x.h
> +++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
> @@ -24,6 +24,7 @@
> * f1000000 on-chip peripheral registers
> * f2000000 PCIe I/O space
> * f2100000 PCI I/O space
> + * f2200000 SRAM dedicated for the crypto unit
> * f4000000 device bus mappings (boot)
> * fa000000 device bus mappings (cs0)
> * fa800000 device bus mappings (cs2)
> @@ -49,6 +50,9 @@
> #define ORION5X_PCI_IO_BUS_BASE 0x00100000
> #define ORION5X_PCI_IO_SIZE SZ_1M
>
> +#define ORION5X_SRAM_PHYS_BASE (0xf2200000)
> +#define ORION5X_SRAM_SIZE SZ_8K
> +
> /* Relevant only for Orion-1/Orion-NAS */
> #define ORION5X_PCIE_WA_PHYS_BASE 0xf0000000
> #define ORION5X_PCIE_WA_VIRT_BASE 0xfe000000
> @@ -94,6 +98,8 @@
> #define ORION5X_SATA_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x80000)
> #define ORION5X_SATA_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x80000)
>
> +#define ORION5X_CRYPTO_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x90000)
> +
> #define ORION5X_USB1_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0xa0000)
> #define ORION5X_USB1_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0xa0000)
>
> --
> 1.6.0.6
>
On Thu, 11 Jun 2009, [email protected] wrote:
> From: Sebastian Andrzej Siewior <[email protected]>
>
> use the new driver for the crypto engine
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Since the crypto engine is part of the SoC, this is not a board-specific
thing. Hence it should be initialized for all targets, not only the
TS-209, just like the watchdog driver is.
> ---
> arch/arm/mach-orion5x/ts209-setup.c | 1 +
> 1 files changed, 1 insertions(+), 0 deletions(-)
>
> diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
> index 9d68905..fbea307 100644
> --- a/arch/arm/mach-orion5x/ts209-setup.c
> +++ b/arch/arm/mach-orion5x/ts209-setup.c
> @@ -302,6 +302,7 @@ static void __init qnap_ts209_init(void)
> orion5x_uart0_init();
> orion5x_uart1_init();
> orion5x_xor_init();
> + orion5x_crypto_init();
>
> platform_device_register(&qnap_ts209_button_device);
>
> --
> 1.6.0.6
>
* Nicolas Pitre | 2009-06-11 15:17:09 [-0400]:
>This one is already merged in the Orion and ARM tree, with a minor fix
>and device renamed to be more generic. The equivalent registration for
>Kirkwood is merged as well already.
Adding a revision history is a good thing... I could not find the ARM tree
but I've rebased this patch against the orion tree [0].
Since the driver got renamed, I'm going to send a delta if nothing else
comes up.
[0] git://git.marvell.com/orion for-rmk
From: Sebastian Andrzej Siewior <[email protected]>
Subject: [PATCH] arm/orion5x: increment window counter after adding sram mapping
Without incrementing the counter the next window setup will overwrite
the SRAM mapping.
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
arch/arm/mach-orion5x/addr-map.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index 6f3f77d..d78731e 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -200,6 +200,6 @@ void __init orion5x_setup_pcie_wa_win(u32 base, u32 size)
int __init orion5x_setup_sram_win(void)
{
- return setup_cpu_win(win_alloc_count, ORION5X_SRAM_PHYS_BASE,
+ return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
}
--
1.6.0.6
From: Sebastian Andrzej Siewior <[email protected]>
init & register the crypto device.
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
arch/arm/mach-orion5x/common.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index eafcc49..745af50 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -700,6 +700,7 @@ void __init orion5x_init(void)
* Register watchdog driver
*/
orion5x_wdt_init();
+ orion5x_crypto_init();
}
/*
--
1.6.0.6
On Thu, 11 Jun 2009, Sebastian Andrzej Siewior wrote:
> * Nicolas Pitre | 2009-06-11 15:17:09 [-0400]:
>
> >This one is already merged in the Orion and ARM tree, with a minor fix
> >and device renamed to be more generic. The equivalent registration for
> >Kirkwood is merged as well already.
>
> Adding a revision history is good thing... I could not find the ARM tree
> but I've rebased this patch against the orion tree [0].
Actually, I'm leaning towards removing such dynamic mappings altogether
and keeping an unconditional static mapping instead, just like Kirkwood
does.
> Since the driver got renamed, I'm going to send a delta if nothing else
> comes up.
What is your plan for this driver? Submit it now and add incremental
improvements afterward or wait until it is more functional?
Nicolas
* Nicolas Pitre | 2009-06-11 16:36:42 [-0400]:
>> Adding a revision history is good thing... I could not find the ARM tree
>> but I've rebased this patch against the orion tree [0].
>
>Actually, I'm leaning towards the removal of such dynamic mappings
>altogether and keep an unconditional static mapping instead, just like
>Kirkwood does.
Oh, now I remember: I counted the number of possible window mappings and
they exceeded the number of available slots. That's why I made it dynamic
and board specific. However, if this is not an issue, then a static
mapping is probably the better way.
>> Since the driver got renamed, I'm going to send a delta if nothing else
>> comes up.
>
>What is your plan for this driver? Submit it now and add incremental
>improvements afterward or wait until it is more functional?
I would like to get it squeezed into this merge window, unless there are
any objections, and improve it afterwards.
If you think it is too early, I can keep hacking in my own git tree until
I get the dmac_flush_range() hack out.
>Nicolas
Sebastian
On Thu, Jun 11, 2009 at 11:07:34PM +0200, Sebastian Andrzej Siewior wrote:
> If you thing it is too early I can keep hacking in my own git tree until
> I get the dmac_flush_range() hack out or so.
The problem that I perceive with these kinds of hacks is that they
tend to spread into other code, and then we end up with problems
when new architectures come along.
For these interfaces, I am a strong believer in purpose-defined
interfaces to caches and the like. If what we have doesn't provide
what's required, we need to provide something else.
So, the question is what are you trying to do with this
dmac_flush_range() and your SRAM? Are you trying to achieve I/D cache
coherency for data written there, so you can execute it later, or are
you trying to ensure DMA coherency from SRAM?
On Thu, 11 Jun 2009, Sebastian Andrzej Siewior wrote:
> * Nicolas Pitre | 2009-06-11 16:36:42 [-0400]:
>
> >> Adding a revision history is good thing... I could not find the ARM tree
> >> but I've rebased this patch against the orion tree [0].
> >
> >Actually, I'm leaning towards the removal of such dynamic mappings
> >altogether and keep an unconditional static mapping instead, just like
> >Kirkwood does.
> Oh now I remember: I've been counting the number possible window
> mappings and they exceeded the number of availble slots. That's why I've
> made it dynamic and board specific. However if this is not an issue than
> static is probaly the better way.
There is no need for other physical mappings that I can see in the set
of boards we currently support. So I'll make it static until there is a
real need for dynamic mapping.
> >> Since the driver got renamed, I'm going to send a delta if nothing else
> >> comes up.
> >
> >What is your plan for this driver? Submit it now and add incremental
> >improvements afterward or wait until it is more functional?
> I would like to get it squeezed into this merge window unless there are
> any objections and improve it afterwards.
> If you thing it is too early I can keep hacking in my own git tree until
> I get the dmac_flush_range() hack out or so.
I have no problem with you submitting it now. It is not complete yet,
but what is there is plenty functional. However, for mainline inclusion
I'd prefer if you used the API based on sg_copy_buffer(), which already
includes a call to flush_kernel_dcache_page(), so as to have good style
up front. (A patch to fix flush_kernel_dcache_page() on ARM is already
queued.)
Nicolas
On Thu, 11 Jun 2009, Russell King - ARM Linux wrote:
> For these interfaces, I am a strong believer in purpose-defined
> interfaces to caches and the like. If what we have doesn't provide
> what's required, we need to provide something else.
>
> So, the question is what are you trying to do with this
> dmac_flush_range() and your SRAM? Are you trying to achieve I/D cache
> coherency for data written there, so you can execute it later, or are
> you trying to ensure DMA coherency from SRAM?
It is a typical case of cache coherency between kernel and user space.
The Orion crypto engine works on data stored in a block of SRAM.
Currently the driver does not (yet) make use of the available DMA to
copy data back and forth between SRAM and main memory. At the
moment it uses memcpy(), which dirties the cache when the data is copied
back to kernel memory.
Amongst users of the crypto API you have dm_crypt which makes decrypted
data visible to user space. So we get the same classic corruption
effect on ARM as with a hardware block driver using PIO without
flush_dcache_page(). Other kernel based crypto users don't have that
issue of course.
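A minimal sketch of that classic pattern (the names here are illustrative,
not from any particular driver):

	/* PIO completion of a read into a page cache page */
	buf = kmap(page);
	memcpy(buf, pio_data, len);	/* dirties the kernel-side D-cache */
	kunmap(page);
	flush_dcache_page(page);	/* without this, user space may see
					 * stale data via its own mapping */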
It is of course the goal to use DMA instead of memcpy() eventually, at
which point there won't be any dirty data in the cache and this issue
will go away.
Nicolas
On Thu, 11 Jun 2009, Nicolas Pitre wrote:
> On Thu, 11 Jun 2009, Sebastian Andrzej Siewior wrote:
>
> > * Nicolas Pitre | 2009-06-11 16:36:42 [-0400]:
> >
> > >> Adding a revision history is good thing... I could not find the ARM tree
> > >> but I've rebased this patch against the orion tree [0].
> > >
> > >Actually, I'm leaning towards the removal of such dynamic mappings
> > >altogether and keep an unconditional static mapping instead, just like
> > >Kirkwood does.
> > Oh now I remember: I've been counting the number possible window
> > mappings and they exceeded the number of availble slots. That's why I've
> > made it dynamic and board specific. However if this is not an issue than
> > static is probaly the better way.
>
> There is no need for other physical mappings that I can see in the set
> of boards we currently support. So I'll make it static until there is a
> real need for dynamic mapping.
I changed my mind. Actually, not all Orion SoC variants support the
crypto unit. I therefore kept the dynamic mapping, and initialized it and
registered the device only on those SoCs that have crypto support.
Nicolas
[email protected] wrote:
> The crypto driver with flush_kernel_dcache_page()
> [...]
> |104857600 bytes (105 MB) copied, 31.3831 s, 3.3 MB/s
FYI I did tests with blocks of 4096 bytes (instead of dm-crypt's 512)
and the results suggest that the hardware can do at least 68 MB/s.
This makes sense since some of these devices come with gigabit ethernet.
Is there hope of achieving such throughputs with Linux and the crypto API
on these SoCs? Or would this require very aggressive optimizations,
e.g. DMA'ing directly from the SATA interface to the CESA SRAM, and
then from the CESA SRAM to the ethernet interface?
Pascal
* Nicolas Pitre | 2009-06-11 17:19:22 [-0400]:
>> >What is your plan for this driver? Submit it now and add incremental
>> >improvements afterward or wait until it is more functional?
>> I would like to get it squeezed into this merge window unless there are
>> any objections and improve it afterwards.
>> If you thing it is too early I can keep hacking in my own git tree until
>> I get the dmac_flush_range() hack out or so.
>
>I have no problem with you submitting it now. It is not complete yet
>but what is there is plenty functional. However I'd prefer if you used
>the API based on sg_copy_buffer() which includes a call to
>flush_kernel_dcache_page() already for mainline inclusion, so to have
>good style up front. ( a patch to fix flush_kernel_dcache_page() on ARM
>is already queued).
sg_copy_from_buffer(), since the other one is static. I'm a little
unhappy with it because it forces me to have the entire buffer "ready".
This is not a problem for dm-crypt, because I get just one scatterlist
entry of 512 bytes. I don't know about other users, but this would
impose a limitation on them or break them.
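For reference, a sketch of how that would look (linear_buf being a
hypothetical bounce buffer that would have to hold the complete output
up front):

	sg_copy_from_buffer(req->dst, num_sgs, linear_buf, req->nbytes);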
It looks like sg_miter_start(), which is exported, doesn't provide the
required flush interface. What about:
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -259,12 +259,6 @@ static void dequeue_complete_req(void)
dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
- /*
- * XXX This is a temporary hack to flush data from cache back
- * into phys ram so user space sees correct data. This goes
- * away as soon as DMA support is added to this driver
- */
- dmac_flush_range(buf, buf + dst_copy);
cpg->p.sg_dst_left -= dst_copy;
cpg->p.crypt_len -= dst_copy;
@@ -309,7 +303,7 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, 0);
num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, 0);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_FLUSH_CACHE);
mv_process_current_q(1);
}
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e599698..8f98450 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -242,6 +242,7 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
*/
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
+#define SG_MITER_FLUSH_CACHE	(1 << 1)	/* flush cache on unmap */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e40..4b07f7a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,6 +394,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
if (miter->addr) {
miter->__offset += miter->consumed;
+ if (miter->__flags & SG_MITER_FLUSH_CACHE)
+ flush_kernel_dcache_page(miter->page);
+
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON(!irqs_disabled());
kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
--
1.6.0.6
>
>
>Nicolas
Sebastian
Hello,
Does anyone know the status of the Orion5X crypto driver?
Will it be merged into the mainline kernel (2.6.32)?
Will it get SHA1 and 3DES support?
Thanks in advance,
Uri Yosef
On Fri, Jun 12, 2009 at 3:09 PM, [email protected] <[email protected]> wrote:
> [email protected] wrote:
> > The crypto driver with flush_kernel_dcache_page()
> > [...]
> > |104857600 bytes (105 MB) copied, 31.3831 s, 3.3 MB/s
>
> FYI I did tests with blocks of 4096 bytes (instead of dm-dcrypt's 512)
> and the results suggest that the hardware can do at least 68 MB/s.
> This makes sense since some of these devices come with gigabit ethernet.
>
> Is there hope of achieving such throughputs with Linux and cryptoapi
> on these SoCs ? Or would this require very aggressive optimizations,
> e.g. DMA'ing directly from the SATA interface to the CESA SRAM, and
> then from the CESA SRAM to the ethernet interface ?
>
> Pascal
* Nicolas Pitre | 2009-06-11 17:19:22 [-0400]:
>I have no problem with you submitting it now. It is not complete yet
>but what is there is plenty functional. However I'd prefer if you used
>the API based on sg_copy_buffer() which includes a call to
>flush_kernel_dcache_page() already for mainline inclusion, so to have
>good style up front. ( a patch to fix flush_kernel_dcache_page() on ARM
>is already queued).
I just wanted to let you know that, since -rc5, the change I need is in
the sg code [0]. On the other hand, I have the IDMA somehow working, so
I probably should sit my ass down and make it work with the CESA...
[0] http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=6de7e356faf54aa75de5b624bbce28a5b776dfa8
>Nicolas
Sebastian
On Sat, 1 Aug 2009, Sebastian Andrzej Siewior wrote:
> * Nicolas Pitre | 2009-06-11 17:19:22 [-0400]:
>
> >I have no problem with you submitting it now. It is not complete yet
> >but what is there is plenty functional. However I'd prefer if you used
> >the API based on sg_copy_buffer() which includes a call to
> >flush_kernel_dcache_page() already for mainline inclusion, so to have
> >good style up front. ( a patch to fix flush_kernel_dcache_page() on ARM
> >is already queued).
> I just wanted to let you know, that since -rc5 I have the needful change
> in sg code [0]. On the other hand I have the IDMA somehow working so I
> probably should sit my ass down and make it work with cesa...
Please submit it with the sg code as is. You can submit incremental
updates later. That would make it clearer in the git history what the
DMA support entails, and would allow more people to test it more easily
in the meantime, especially since DMA is somewhat different on Orion and
Kirkwood.
Nicolas
From: Sebastian Andrzej Siewior <[email protected]>
This adds support for Marvell's Cryptographic Engines and Security
Accelerator (CESA) which can be found on a few SoCs.
Tested with dm-crypt.
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
* Nicolas Pitre | 2009-08-02 10:14:57 [-0400]:
>Please submit it with the sg as is.
Okay, here it is. The change is really minimal: flush_dcache out,
SG_MITER_FROM_SG & SG_MITER_TO_SG in. Those slipped in just before -rc5.
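For reference, a sketch of what the iterator setup in mv_enqueue_new_req()
looks like with those flags (SG_MITER_TO_SG implies the flush in
sg_miter_stop()):

	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);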
Herbert: are you in a good mood? Good enough to merge this new driver in .31?
drivers/crypto/Kconfig | 13 +
drivers/crypto/Makefile | 1 +
drivers/crypto/mv_cesa.c | 606 ++++++++++++++++++++++++++++++++++++++++++++++
drivers/crypto/mv_cesa.h | 119 +++++++++
4 files changed, 739 insertions(+), 0 deletions(-)
create mode 100644 drivers/crypto/mv_cesa.c
create mode 100644 drivers/crypto/mv_cesa.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692..c463896 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on certain SoCs, such
+	  as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2b..6ffcb3f 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 0000000..f28502c
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPL
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ * /---------------------------------------\
+ * | | request complete
+ * \./ |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ * /°\ |
+ * | | more scatter entries
+ * \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * The sg helpers are used to iterate over the scatterlist. Since the size
+ * of the SRAM may be less than the scatter size, this struct is used to
+ * keep track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+ /* fall */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+ memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
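+	/* Clamp the chunk to what fits into the SRAM data-in window. */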
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+		return;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
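+	/*
+	 * Copy the engine's output out of SRAM; one processed chunk may
+	 * span several entries of the destination scatterlist.
+	 */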
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+		total_bytes -= min(total_bytes, sl[i].length);
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ mv_process_current_q(1);
+}
+
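+/*
+ * The queue manager kthread drives the state machine sketched at the top
+ * of this file: it feeds the engine a new request while IDLE and copies
+ * the result back once the interrupt handler has set W_DEQUEUE.
+ */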
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+		struct crypto_async_request *backlog = NULL;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+static irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
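+	/*
+	 * Clear the ACCEL0 done bit in both status registers; see the
+	 * FPGA_INT_STATUS note in mv_cesa.h.
+	 */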
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+static struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+static struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+ cp->sram_size = res->end - res->start + 1;
+ cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+ cp->sram = ioremap(res->start, cp->sram_size);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+		ret = irq ? irq : -ENXIO;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+ cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+ if (IS_ERR(cp->queue_th)) {
+ ret = PTR_ERR(cp->queue_th);
+		goto err_unmap_sram;
+ }
+
+ ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ cp);
+ if (ret)
+		goto err_thread;
+
+ writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+ writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+ ret = crypto_register_alg(&mv_aes_alg_ecb);
+ if (ret)
+		goto err_irq;
+
+ ret = crypto_register_alg(&mv_aes_alg_cbc);
+ if (ret)
+ goto err_unreg_ecb;
+ return 0;
+err_unreg_ecb:
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+	free_irq(irq, cp);
+err_thread:
+ kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, cp->sram_size);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = mv_probe,
+ .remove = mv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 0000000..c3e25d3
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in
+ * Errata 4.12. It seems it was part of an IRQ controller in the FPGA and
+ * someone forgot to remove it while switching to the real core and moving
+ * to SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+} __attribute__((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN | 16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT | 16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define SRAM_CFG_SPACE 0x50
+
+#endif
--
1.6.2.5
On Mon, 3 Aug 2009, Sebastian Andrzej Siewior wrote:
> From: Sebastian Andrzej Siewior <[email protected]>
>
> This adds support for Marvell's Cryptographic Engines and Security
> Accelerator (CESA) which can be found on a few SoCs.
> Tested with dm-crypt.
>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
> ---
> * Nicolas Pitre | 2009-08-02 10:14:57 [-0400]:
> >Please submit it with the sg as is.
>
> Okay, here it is. The change is really minimal: flush_dcache out,
> SG_MITER_FROM_SG & SG_MITER_TO_SG in. Those slipped in just before -rc5.
>
> Herbert: are you in a good mood? Good enough to submit this new driver in 2.6.31?
>
> drivers/crypto/Kconfig | 13 +
> drivers/crypto/Makefile | 1 +
> drivers/crypto/mv_cesa.c | 606 ++++++++++++++++++++++++++++++++++++++++++++++
> drivers/crypto/mv_cesa.h | 119 +++++++++
> 4 files changed, 739 insertions(+), 0 deletions(-)
> create mode 100644 drivers/crypto/mv_cesa.c
> create mode 100644 drivers/crypto/mv_cesa.h
>
> diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
> index 5b27692..c463896 100644
> --- a/drivers/crypto/Kconfig
> +++ b/drivers/crypto/Kconfig
> @@ -157,6 +157,19 @@ config S390_PRNG
> ANSI X9.17 standard. The PRNG is usable via the char device
> /dev/prandom.
>
> +config CRYPTO_DEV_MV_CESA
> + tristate "Marvell's Cryptographic Engine"
> + depends on PLAT_ORION
> + select CRYPTO_ALGAPI
> + select CRYPTO_AES
> + select CRYPTO_BLKCIPHER2
> + help
> + This driver allows you utilize the Cryptographic Engines and Security
> + Accelerator (CESA) which can be found on certain SoC like QNAP's
> + TS-209.
The TS-209 is not a SoC. You should probably say "... the Marvell Orion
and Kirkwood SoCs, such as found in QNAP's TS-209" instead.
Other than that, you can add
Acked-by: Nicolas Pitre <[email protected]>
From: Sebastian Andrzej Siewior <[email protected]>
This adds support for Marvell's Cryptographic Engines and Security
Accelerator (CESA) which can be found on a few SoCs.
Tested with dm-crypt.
Acked-by: Nicolas Pitre <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
v3..v4: changed the Kconfig help text & added Nicolas' ack.
drivers/crypto/Kconfig | 13 +
drivers/crypto/Makefile | 1 +
drivers/crypto/mv_cesa.c | 606 ++++++++++++++++++++++++++++++++++++++++++++++
drivers/crypto/mv_cesa.h | 119 +++++++++
4 files changed, 739 insertions(+), 0 deletions(-)
create mode 100644 drivers/crypto/mv_cesa.c
create mode 100644 drivers/crypto/mv_cesa.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692..efc9484 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on the Marvell Orion
+ and Kirkwood SoCs, such as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2b..6ffcb3f 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 0000000..f28502c
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPLv2
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ * /---------------------------------------\
+ * | | request complete
+ * \./ |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ * /°\ |
+ * | | more scatter entries
+ * \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left in dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * sg helpers are used to iterate over the scatterlist. Since the size of the
+ * SRAM may be less than the scatter size, this struct is used to keep
+ * track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
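+	/*
+	 * The engine apparently wants the decryption key taken from the
+	 * tail of the expanded encryption schedule: key_enc[] holds 32-bit
+	 * words, so the last round key starts at word key_len + 24
+	 * (40/48/56 words for 128/192/256 bit keys).
+	 */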
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+		/* fall through */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
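+	/*
+	 * Remember the key; the matching decryption key is derived
+	 * lazily on the first decrypt call.
+	 */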
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+	memcpy(ctx->aes_enc_key, key, len);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
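+	/* Clamp the chunk to what fits into the SRAM data-in window. */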
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+		return;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
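+	/*
+	 * Copy the engine's output out of SRAM; one processed chunk may
+	 * span several entries of the destination scatterlist.
+	 */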
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+		total_bytes -= min(total_bytes, sl[i].length);
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ mv_process_current_q(1);
+}
+
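+/*
+ * The queue manager kthread drives the state machine sketched at the top
+ * of this file: it feeds the engine a new request while IDLE and copies
+ * the result back once the interrupt handler has set W_DEQUEUE.
+ */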
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+		struct crypto_async_request *backlog = NULL;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+static irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
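+	/*
+	 * Clear the ACCEL0 done bit in both status registers; see the
+	 * FPGA_INT_STATUS note in mv_cesa.h.
+	 */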
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+static struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+static struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+ cp->sram_size = res->end - res->start + 1;
+ cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+ cp->sram = ioremap(res->start, cp->sram_size);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+		ret = irq ? irq : -ENXIO;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+ cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+ if (IS_ERR(cp->queue_th)) {
+ ret = PTR_ERR(cp->queue_th);
+		goto err_unmap_sram;
+ }
+
+ ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ cp);
+ if (ret)
+		goto err_thread;
+
+ writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+ writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+ ret = crypto_register_alg(&mv_aes_alg_ecb);
+ if (ret)
+		goto err_irq;
+
+ ret = crypto_register_alg(&mv_aes_alg_cbc);
+ if (ret)
+ goto err_unreg_ecb;
+ return 0;
+err_unreg_ecb:
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+	free_irq(irq, cp);
+err_thread:
+ kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, cp->sram_size);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = mv_probe,
+ .remove = mv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 0000000..c3e25d3
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in
+ * Errata 4.12. It seems it was part of an IRQ controller in the FPGA and
+ * someone forgot to remove it while switching to the real core and moving
+ * to SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+} __attribute__((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN | 16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT | 16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define SRAM_CFG_SPACE 0x50
+
+#endif
--
1.6.2.5
On Wed, Aug 05, 2009 at 09:24:25AM +0200, Sebastian Andrzej Siewior wrote:
> From: Sebastian Andrzej Siewior <[email protected]>
>
> This adds support for Marvell's Cryptographic Engines and Security
> Accelerator (CESA) which can be found on a few SoCs.
> Tested with dm-crypt.
>
> Acked-by: Nicolas Pitre <[email protected]>
> Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Patch applied. Thanks Sebastian!
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt