This series does some basic cleanup and adds support for
AES GCM mode for omap aes driver.
Also adds a test case for async aead algos.
Tested on BeagleBone Black: http://pastebin.ubuntu.com/11808341/
Lokesh Vutla (10):
crypto: omap-aes: Add support for lengths not aligned with
AES_BLOCK_SIZE
crypto: omap-aes: Fix configuring of AES mode
crypto: aead: Add aead_request_cast() api
crypto: omap-aes: Use BIT() macro
crypto: omap-aes: Add support for GCM mode
crypto: omap-aes: gcm: Handle inputs properly
crypto: omap-aes: gcm: Add support for unaligned lengths
crypto: omap-aes: gcm: Add algo info
crypto: omap-aes: gcm: Add support for PIO mode
crypto: tcrypt: Added speed tests for Async AEAD crypto algorithms
crypto/tcrypt.c | 233 +++++++++++++++++++++++++
crypto/tcrypt.h | 1 +
drivers/crypto/Makefile | 3 +-
drivers/crypto/omap-aes-gcm.c | 386 +++++++++++++++++++++++++++++++++++++++++
drivers/crypto/omap-aes.c | 322 +++++++++++++---------------------
drivers/crypto/omap-aes.h | 206 ++++++++++++++++++++++
include/linux/crypto.h | 6 +
7 files changed, 955 insertions(+), 202 deletions(-)
create mode 100644 drivers/crypto/omap-aes-gcm.c
create mode 100644 drivers/crypto/omap-aes.h
--
1.7.9.5
The OMAP AES driver returns an error if the data is not aligned to
AES_BLOCK_SIZE bytes.
However, the OMAP AES HW allows input data aligned to as little as 1 byte;
zeros must still be appended so that a complete AES_BLOCK_SIZE is written,
and the correct (unpadded) length must be passed in the LENGTH field.
Add support for inputs not aligned to AES_BLOCK_SIZE.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes.c | 33 ++++++++++++++++-----------------
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9a28b7e..a923101 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -558,6 +558,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
int len = 0;
+ if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+ return -1;
+
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
return -1;
@@ -577,9 +580,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
void *buf_in, *buf_out;
- int pages;
+ int pages, total;
- pages = get_order(dd->total);
+ total = ALIGN(dd->total, AES_BLOCK_SIZE);
+ pages = get_order(total);
buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +598,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
sg_init_table(&dd->in_sgl, 1);
- sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+ sg_set_buf(&dd->in_sgl, buf_in, total);
dd->in_sg = &dd->in_sgl;
sg_init_table(&dd->out_sgl, 1);
- sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+ sg_set_buf(&dd->out_sgl, buf_out, total);
dd->out_sg = &dd->out_sgl;
return 0;
@@ -611,7 +615,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
unsigned long flags;
- int err, ret = 0;
+ int err, ret = 0, len;
spin_lock_irqsave(&dd->lock, flags);
if (req)
@@ -650,8 +654,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->sgs_copied = 0;
}
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+ len = ALIGN(dd->total, AES_BLOCK_SIZE);
+ dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+ dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
rctx = ablkcipher_request_ctx(req);
@@ -678,7 +683,7 @@ static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
void *buf_in, *buf_out;
- int pages;
+ int pages, len;
pr_debug("enter done_task\n");
@@ -697,7 +702,8 @@ static void omap_aes_done_task(unsigned long data)
sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
- pages = get_order(dd->total_save);
+ len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+ pages = get_order(len);
free_pages((unsigned long)buf_in, pages);
free_pages((unsigned long)buf_out, pages);
}
@@ -726,11 +732,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
- }
-
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
@@ -1046,9 +1047,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
}
}
- dd->total -= AES_BLOCK_SIZE;
-
- BUG_ON(dd->total < 0);
+ dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
/* Clear IRQ status */
status &= ~AES_REG_IRQ_DATA_OUT;
--
1.7.9.5
AES_CTRL_REG is used to configure the AES mode. Before configuring
any mode we need to make sure all other modes are reset, or else the
driver will misbehave. So mask out all mode bits before configuring
any AES mode.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a923101..96fc7f7 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -63,6 +63,7 @@
#define AES_REG_CTRL_DIRECTION (1 << 2)
#define AES_REG_CTRL_INPUT_READY (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
+#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
- u32 val, mask = 0;
+ u32 val;
err = omap_aes_hw_init(dd);
if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
- if (dd->flags & FLAGS_CTR) {
+ if (dd->flags & FLAGS_CTR)
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
- mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
- }
+
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
- mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
- AES_REG_CTRL_KEY_SIZE;
-
- omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
return 0;
}
--
1.7.9.5
Add an aead_request_cast() API to get a pointer to the aead_request
from a crypto_async_request.
Signed-off-by: Lokesh Vutla <[email protected]>
---
include/linux/crypto.h | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 10df5d2..20fac3d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1460,6 +1460,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}
+static inline struct aead_request *aead_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct aead_request, base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
--
1.7.9.5
Use BIT()/GENMASK() macros for all register definitions instead of
hand-writing bit masks.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 96fc7f7..d974ab6 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -52,30 +52,30 @@
#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
-#define AES_REG_CTRL_CTR (1 << 6)
-#define AES_REG_CTRL_CBC (1 << 5)
-#define AES_REG_CTRL_KEY_SIZE (3 << 3)
-#define AES_REG_CTRL_DIRECTION (1 << 2)
-#define AES_REG_CTRL_INPUT_READY (1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
-#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
+#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 0
+#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
+#define AES_REG_CTRL_CTR BIT(6)
+#define AES_REG_CTRL_CBC BIT(5)
+#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION BIT(2)
+#define AES_REG_CTRL_INPUT_READY BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY BIT(0)
+#define AES_REG_CTRL_MASK GENMASK(24, 2)
#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE (1 << 6)
-#define AES_REG_MASK_START (1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
-#define AES_REG_MASK_DMA_IN_EN (1 << 2)
-#define AES_REG_MASK_SOFTRESET (1 << 1)
-#define AES_REG_AUTOIDLE (1 << 0)
+#define AES_REG_MASK_SIDLE BIT(6)
+#define AES_REG_MASK_START BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN BIT(3)
+#define AES_REG_MASK_DMA_IN_EN BIT(2)
+#define AES_REG_MASK_SOFTRESET BIT(1)
+#define AES_REG_AUTOIDLE BIT(0)
#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
--
1.7.9.5
The OMAP AES HW supports AES GCM mode.
Add support for GCM mode to the omap-aes driver.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/Makefile | 3 +-
drivers/crypto/omap-aes-gcm.c | 304 +++++++++++++++++++++++++++++++++++++++++
drivers/crypto/omap-aes.c | 238 +++++++++-----------------------
drivers/crypto/omap-aes.h | 205 +++++++++++++++++++++++++++
4 files changed, 575 insertions(+), 175 deletions(-)
create mode 100644 drivers/crypto/omap-aes-gcm.c
create mode 100644 drivers/crypto/omap-aes.h
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fb84be7..3afad7b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
-obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
+omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
new file mode 100644
index 0000000..1be9d91
--- /dev/null
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -0,0 +1,304 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES GCM HW acceleration.
+ *
+ * Copyright (c) 2015 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include "omap-aes.h"
+
+static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
+ struct aead_request *req);
+
+static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
+{
+ struct aead_request *req = dd->aead_req;
+
+ dd->flags &= ~FLAGS_BUSY;
+ dd->in_sg = NULL;
+ dd->out_sg = NULL;
+
+ req->base.complete(&req->base, ret);
+}
+
+static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
+{
+ u8 *tag;
+ int alen, clen, i, ret = 0, nsg;
+
+ alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
+ clen = ALIGN(dd->total, AES_BLOCK_SIZE);
+
+ nsg = 1 + !!(dd->assoc_len && dd->total);
+
+ if (!dd->pio_only) {
+ dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ omap_aes_crypt_dma_stop(dd);
+ }
+
+ if (dd->flags & FLAGS_ENCRYPT)
+ scatterwalk_map_and_copy(dd->ctx->auth_tag, dd->aead_req->dst,
+ dd->total, dd->authsize, 1);
+
+ if (!(dd->flags & FLAGS_ENCRYPT)) {
+ tag = (u8 *)dd->ctx->auth_tag;
+ for (i = 0; i < dd->authsize; i++) {
+ if (tag[i]) {
+ dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
+ ret = -EBADMSG;
+ }
+ }
+ }
+
+ omap_aes_gcm_finish_req(dd, ret);
+ omap_aes_gcm_handle_queue(dd, NULL);
+}
+
+static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
+ struct aead_request *req)
+{
+ void *buf_in;
+ int alen, clen;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int authlen = crypto_aead_authsize(aead);
+ u32 dec = !(dd->flags & FLAGS_ENCRYPT);
+
+ alen = req->assoclen;
+ clen = req->cryptlen - (dec * authlen);
+
+ dd->sgs_copied = 0;
+
+ sg_init_table(dd->in_sgl, 2);
+ buf_in = sg_virt(req->assoc);
+ sg_set_buf(dd->in_sgl, buf_in, alen);
+
+ buf_in = sg_virt(req->src);
+ sg_set_buf(&dd->in_sgl[1], buf_in, clen);
+
+ dd->in_sg = dd->in_sgl;
+ dd->total = clen;
+ dd->assoc_len = req->assoclen;
+ dd->authsize = authlen;
+ dd->out_sg = req->dst;
+
+ dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, alen + clen);
+ dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, clen);
+
+ return 0;
+}
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+ struct tcrypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static int do_encrypt_iv(struct aead_request *req, u32 *tag)
+{
+ struct scatterlist iv_sg;
+ struct ablkcipher_request *ablk_req;
+ struct crypto_ablkcipher *tfm;
+ struct tcrypt_result result;
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ int ret = 0;
+
+ tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
+ ablk_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!ablk_req) {
+ pr_err("skcipher: Failed to allocate request\n");
+ return -1;
+ }
+
+ init_completion(&result.completion);
+
+ sg_init_one(&iv_sg, tag, AES_BLOCK_SIZE);
+ ablkcipher_request_set_callback(ablk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+ ret = crypto_ablkcipher_setkey(tfm, (u8 *)ctx->key, ctx->keylen);
+ ablkcipher_request_set_crypt(ablk_req, &iv_sg, &iv_sg, AES_BLOCK_SIZE,
+ req->iv);
+ ret = crypto_ablkcipher_encrypt(ablk_req);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(&result.completion);
+ if (!ret) {
+ ret = result.err;
+ if (!ret) {
+ reinit_completion(&result.completion);
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ pr_err("Encryptio of IV failed for GCM mode");
+ break;
+ }
+
+ ablkcipher_request_free(ablk_req);
+ crypto_free_ablkcipher(tfm);
+ return ret;
+}
+
+void omap_aes_gcm_dma_out_callback(void *data)
+{
+ struct omap_aes_dev *dd = data;
+ int i, val;
+ u32 *auth_tag, tag[4];
+
+ if (!(dd->flags & FLAGS_ENCRYPT))
+ scatterwalk_map_and_copy(tag, dd->aead_req->src, dd->total,
+ dd->authsize, 0);
+
+ auth_tag = dd->ctx->auth_tag;
+ for (i = 0; i < 4; i++) {
+ val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
+ auth_tag[i] = val ^ auth_tag[i];
+ if (!(dd->flags & FLAGS_ENCRYPT))
+ auth_tag[i] = auth_tag[i] ^ tag[i];
+ }
+
+ /* dma_lch_out - completed */
+ omap_aes_gcm_done_task(dd);
+}
+
+static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
+ struct aead_request *req)
+{
+ struct omap_aes_ctx *ctx;
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_aes_reqctx *rctx;
+ unsigned long flags;
+ int err, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = crypto_enqueue_request(&dd->aead_queue, &req->base);
+ if (dd->flags & FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+ backlog = crypto_get_backlog(&dd->aead_queue);
+ async_req = crypto_dequeue_request(&dd->aead_queue);
+ if (async_req)
+ dd->flags |= FLAGS_BUSY;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = aead_request_cast(async_req);
+
+ ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ rctx = aead_request_ctx(req);
+
+ dd->ctx = ctx;
+ ctx->dd = dd;
+ dd->aead_req = req;
+
+ rctx->mode &= FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ err = omap_aes_gcm_copy_buffers(dd, req);
+ if (err)
+ return err;
+
+ err = omap_aes_write_ctrl(dd);
+ if (!err)
+ err = omap_aes_crypt_dma_start(dd);
+
+ if (err) {
+ omap_aes_gcm_finish_req(dd, err);
+ omap_aes_gcm_handle_queue(dd, NULL);
+ }
+
+ return ret;
+}
+
+static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct omap_aes_dev *dd;
+ __be32 counter = cpu_to_be32(1);
+ int err;
+
+ memset(ctx->auth_tag, 0, sizeof(ctx->auth_tag));
+ memcpy(req->iv + 12, &counter, 4);
+
+ /* Create E(K, IV) */
+ err = do_encrypt_iv(req, ctx->auth_tag);
+ if (err)
+ return err;
+
+ dd = omap_aes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+ rctx->mode = mode;
+
+ return omap_aes_gcm_handle_queue(dd, req);
+}
+
+int omap_aes_gcm_encrypt(struct aead_request *req)
+{
+ return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
+}
+
+int omap_aes_gcm_decrypt(struct aead_request *req)
+{
+ return omap_aes_gcm_crypt(req, FLAGS_GCM);
+}
+
+int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index d974ab6..e5e9a19 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -36,157 +36,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-
-#define DST_MAXBURST 4
-#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
-/* OMAP TRM gives bitfields as start:end, where start is the higher bit
- number. For example 7:0 */
-#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-
-#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
- ((x ^ 0x01) * 0x04))
-#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
-
-#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
-#define AES_REG_CTRL_CTR_WIDTH_32 0
-#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
-#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
-#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
-#define AES_REG_CTRL_CTR BIT(6)
-#define AES_REG_CTRL_CBC BIT(5)
-#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
-#define AES_REG_CTRL_DIRECTION BIT(2)
-#define AES_REG_CTRL_INPUT_READY BIT(1)
-#define AES_REG_CTRL_OUTPUT_READY BIT(0)
-#define AES_REG_CTRL_MASK GENMASK(24, 2)
-
-#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
-
-#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
-
-#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE BIT(6)
-#define AES_REG_MASK_START BIT(5)
-#define AES_REG_MASK_DMA_OUT_EN BIT(3)
-#define AES_REG_MASK_DMA_IN_EN BIT(2)
-#define AES_REG_MASK_SOFTRESET BIT(1)
-#define AES_REG_AUTOIDLE BIT(0)
-
-#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
-
-#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
-#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
-#define AES_REG_IRQ_DATA_IN BIT(1)
-#define AES_REG_IRQ_DATA_OUT BIT(2)
-#define DEFAULT_TIMEOUT (5*HZ)
-
-#define FLAGS_MODE_MASK 0x000f
-#define FLAGS_ENCRYPT BIT(0)
-#define FLAGS_CBC BIT(1)
-#define FLAGS_GIV BIT(2)
-#define FLAGS_CTR BIT(3)
-
-#define FLAGS_INIT BIT(4)
-#define FLAGS_FAST BIT(5)
-#define FLAGS_BUSY BIT(6)
-
-#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
-
-struct omap_aes_ctx {
- struct omap_aes_dev *dd;
-
- int keylen;
- u32 key[AES_KEYSIZE_256 / sizeof(u32)];
- unsigned long flags;
-};
-
-struct omap_aes_reqctx {
- unsigned long mode;
-};
-
-#define OMAP_AES_QUEUE_LENGTH 1
-#define OMAP_AES_CACHE_SIZE 0
-
-struct omap_aes_algs_info {
- struct crypto_alg *algs_list;
- unsigned int size;
- unsigned int registered;
-};
-
-struct omap_aes_pdata {
- struct omap_aes_algs_info *algs_info;
- unsigned int algs_info_size;
-
- void (*trigger)(struct omap_aes_dev *dd, int length);
-
- u32 key_ofs;
- u32 iv_ofs;
- u32 ctrl_ofs;
- u32 data_ofs;
- u32 rev_ofs;
- u32 mask_ofs;
- u32 irq_enable_ofs;
- u32 irq_status_ofs;
-
- u32 dma_enable_in;
- u32 dma_enable_out;
- u32 dma_start;
-
- u32 major_mask;
- u32 major_shift;
- u32 minor_mask;
- u32 minor_shift;
-};
-
-struct omap_aes_dev {
- struct list_head list;
- unsigned long phys_base;
- void __iomem *io_base;
- struct omap_aes_ctx *ctx;
- struct device *dev;
- unsigned long flags;
- int err;
-
- spinlock_t lock;
- struct crypto_queue queue;
-
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
-
- struct ablkcipher_request *req;
-
- /*
- * total is used by PIO mode for book keeping so introduce
- * variable total_save as need it to calc page_order
- */
- size_t total;
- size_t total_save;
-
- struct scatterlist *in_sg;
- struct scatterlist *out_sg;
-
- /* Buffers for copying for unaligned cases */
- struct scatterlist in_sgl;
- struct scatterlist out_sgl;
- struct scatterlist *orig_out;
- int sgs_copied;
-
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
- int dma_in;
- struct dma_chan *dma_lch_in;
- int dma_out;
- struct dma_chan *dma_lch_out;
- int in_sg_len;
- int out_sg_len;
- int pio_only;
- const struct omap_aes_pdata *pdata;
-};
+#include "omap-aes.h"
/* keep registered devices data here */
static LIST_HEAD(dev_list);
@@ -202,7 +52,7 @@ static DEFINE_SPINLOCK(list_lock);
_read_ret; \
})
#else
-static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
+inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
return __raw_readl(dd->io_base + offset);
}
@@ -216,7 +66,7 @@ static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
__raw_writel(value, dd->io_base + offset); \
} while (0)
#else
-static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
+inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
u32 value)
{
__raw_writel(value, dd->io_base + offset);
@@ -251,7 +101,7 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
return 0;
}
-static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
+int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
int i, err;
@@ -263,7 +113,11 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
key32 = dd->ctx->keylen / sizeof(u32);
- /* it seems a key should always be set even if it has not changed */
+ /* RESET the key as previous HASH keys should not get affected*/
+ if (dd->flags & FLAGS_GCM)
+ for (i = 0; i < 0x40; i = i + 4)
+ omap_aes_write(dd, i, 0x0);
+
for (i = 0; i < key32; i++) {
omap_aes_write(dd, AES_REG_KEY(dd, i),
__le32_to_cpu(dd->ctx->key[i]));
@@ -272,12 +126,20 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0),
+ (u32 *)dd->aead_req->iv, 4);
+
val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
if (dd->flags & FLAGS_CBC)
val |= AES_REG_CTRL_CBC;
- if (dd->flags & FLAGS_CTR)
+
+ if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
+ if (dd->flags & FLAGS_GCM)
+ val |= AES_REG_CTRL_GCM;
+
if (dd->flags & FLAGS_ENCRYPT)
val |= AES_REG_CTRL_DIRECTION;
@@ -308,6 +170,8 @@ static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
+ if (dd->flags & FLAGS_GCM)
+ omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
omap_aes_dma_trigger_omap2(dd, length);
}
@@ -322,7 +186,7 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd)
omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
-static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
+struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
struct omap_aes_dev *dd = NULL, *tmp;
@@ -410,12 +274,11 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
scatterwalk_done(&walk, out, 0);
}
-static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
- struct scatterlist *in_sg, struct scatterlist *out_sg,
- int in_sg_len, int out_sg_len)
+static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
+ struct scatterlist *in_sg,
+ struct scatterlist *out_sg,
+ int in_sg_len, int out_sg_len)
{
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct omap_aes_dev *dd = ctx->dd;
struct dma_async_tx_descriptor *tx_in, *tx_out;
struct dma_slave_config cfg;
int ret;
@@ -476,7 +339,10 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
return -EINVAL;
}
- tx_out->callback = omap_aes_dma_out_callback;
+ if (dd->flags & FLAGS_GCM)
+ tx_out->callback = omap_aes_gcm_dma_out_callback;
+ else
+ tx_out->callback = omap_aes_dma_out_callback;
tx_out->callback_param = dd;
dmaengine_submit(tx_in);
@@ -491,10 +357,8 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
return 0;
}
-static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
+int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
- crypto_ablkcipher_reqtfm(dd->req));
int err;
pr_debug("total: %d\n", dd->total);
@@ -515,7 +379,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
}
}
- err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+ err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
@@ -537,7 +401,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
req->base.complete(&req->base, err);
}
-static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
int err = 0;
@@ -551,7 +415,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}
-static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
int len = 0;
@@ -594,9 +458,9 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
- sg_init_table(&dd->in_sgl, 1);
- sg_set_buf(&dd->in_sgl, buf_in, total);
- dd->in_sg = &dd->in_sgl;
+ sg_init_table(dd->in_sgl, 1);
+ sg_set_buf(dd->in_sgl, buf_in, total);
+ dd->in_sg = dd->in_sgl;
sg_init_table(&dd->out_sgl, 1);
sg_set_buf(&dd->out_sgl, buf_out, total);
@@ -665,6 +529,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
ctx->dd = dd;
err = omap_aes_write_ctrl(dd);
+
if (!err)
err = omap_aes_crypt_dma_start(dd);
if (err) {
@@ -694,7 +559,7 @@ static void omap_aes_done_task(unsigned long data)
}
if (dd->sgs_copied) {
- buf_in = sg_virt(&dd->in_sgl);
+ buf_in = sg_virt(dd->in_sgl);
buf_out = sg_virt(&dd->out_sgl);
sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
@@ -811,6 +676,30 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int omap_aes_gcm_cra_init(struct crypto_tfm *tfm)
+{
+ struct omap_aes_dev *dd = NULL;
+ int err;
+
+ /* Find AES device, currently picks the first device */
+ spin_lock_bh(&list_lock);
+ list_for_each_entry(dd, &dev_list, list) {
+ break;
+ }
+ spin_unlock_bh(&list_lock);
+
+ err = pm_runtime_get_sync(dd->dev);
+ if (err < 0) {
+ dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return err;
+ }
+
+ tfm->crt_aead.reqsize = sizeof(struct omap_aes_reqctx);
+
+ return 0;
+}
+
static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
struct omap_aes_dev *dd = NULL;
@@ -899,7 +788,7 @@ static struct crypto_alg algs_ctr[] = {
.encrypt = omap_aes_ctr_encrypt,
.decrypt = omap_aes_ctr_decrypt,
}
-} ,
+},
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -1179,6 +1068,7 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_lock_init(&dd->lock);
crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
+ crypto_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
omap_aes_get_res_pdev(dd, pdev, &res);
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
new file mode 100644
index 0000000..0863874
--- /dev/null
+++ b/drivers/crypto/omap-aes.h
@@ -0,0 +1,205 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES HW ACCELERATOR defines
+ *
+ * Copyright (c) 2015 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef __OMAP_AES_REGS_H__
+#define __OMAP_AES_REGS_H__
+
+#define DST_MAXBURST 4
+#define DMA_MIN (DST_MAXBURST * sizeof(u32))
+
+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
+
+/* OMAP TRM gives bitfields as start:end, where start is the higher bit
+ number. For example 7:0 */
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+
+#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
+ ((x ^ 0x01) * 0x04))
+#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
+
+#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
+#define AES_REG_CTRL_CONTEXT_READY BIT(31)
+#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32 0
+#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
+#define AES_REG_CTRL_GCM GENMASK(17, 16)
+#define AES_REG_CTRL_CTR BIT(6)
+#define AES_REG_CTRL_CBC BIT(5)
+#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION BIT(2)
+#define AES_REG_CTRL_INPUT_READY BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY BIT(0)
+#define AES_REG_CTRL_MASK GENMASK(24, 2)
+
+#define AES_REG_C_LEN_0 0x54
+#define AES_REG_C_LEN_1 0x58
+#define AES_REG_A_LEN 0x5C
+
+#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
+#define AES_REG_TAG_N(dd, x) (0x70 + ((x) * 0x04))
+
+#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
+
+#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
+#define AES_REG_MASK_SIDLE BIT(6)
+#define AES_REG_MASK_START BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN BIT(3)
+#define AES_REG_MASK_DMA_IN_EN BIT(2)
+#define AES_REG_MASK_SOFTRESET BIT(1)
+#define AES_REG_AUTOIDLE BIT(0)
+
+#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
+
+#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
+#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
+#define AES_REG_IRQ_DATA_IN BIT(1)
+#define AES_REG_IRQ_DATA_OUT BIT(2)
+#define DEFAULT_TIMEOUT (5 * HZ)
+
+#define FLAGS_MODE_MASK 0x001f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_GIV BIT(2)
+#define FLAGS_CTR BIT(3)
+#define FLAGS_GCM BIT(4)
+
+#define FLAGS_INIT BIT(5)
+#define FLAGS_FAST BIT(6)
+#define FLAGS_BUSY BIT(7)
+
+#define AES_ASSOC_DATA_COPIED BIT(0)
+#define AES_IN_DATA_COPIED BIT(1)
+#define AES_OUT_DATA_COPIED BIT(2)
+
+#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
+
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+struct omap_aes_ctx {
+ struct omap_aes_dev *dd;
+
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct omap_aes_reqctx {
+ unsigned long mode;
+};
+
+#define OMAP_AES_QUEUE_LENGTH 1
+#define OMAP_AES_CACHE_SIZE 0
+
+struct omap_aes_algs_info {
+ struct crypto_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
+};
+
+struct omap_aes_pdata {
+ struct omap_aes_algs_info *algs_info;
+ unsigned int algs_info_size;
+
+ void (*trigger)(struct omap_aes_dev *dd, int length);
+
+ u32 key_ofs;
+ u32 iv_ofs;
+ u32 ctrl_ofs;
+ u32 data_ofs;
+ u32 rev_ofs;
+ u32 mask_ofs;
+ u32 irq_enable_ofs;
+ u32 irq_status_ofs;
+
+ u32 dma_enable_in;
+ u32 dma_enable_out;
+ u32 dma_start;
+
+ u32 major_mask;
+ u32 major_shift;
+ u32 minor_mask;
+ u32 minor_shift;
+};
+
+struct omap_aes_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ void __iomem *io_base;
+ struct omap_aes_ctx *ctx;
+ struct device *dev;
+ unsigned long flags;
+ int err;
+
+ /* Lock to acquire omap_aes_dd */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct crypto_queue aead_queue;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct ablkcipher_request *req;
+ struct aead_request *aead_req;
+
+ /*
+ * total is used by PIO mode for book keeping so introduce
+ * variable total_save as need it to calc page_order
+ */
+ size_t total;
+ size_t total_save;
+ size_t assoc_len;
+ size_t authsize;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *assoc_sg;
+ struct scatterlist *out_sg;
+
+ /* Buffers for copying for unaligned cases */
+ struct scatterlist in_sgl[2];
+ struct scatterlist out_sgl;
+ struct scatterlist aead_sgl[2];
+ struct scatterlist *orig_out;
+ int sgs_copied;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+ int dma_in;
+ struct dma_chan *dma_lch_in;
+ int dma_out;
+ struct dma_chan *dma_lch_out;
+ int in_sg_len;
+ int out_sg_len;
+ int pio_only;
+ const struct omap_aes_pdata *pdata;
+};
+
+u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset);
+void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value);
+struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx);
+int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+int omap_aes_gcm_encrypt(struct aead_request *req);
+int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_write_ctrl(struct omap_aes_dev *dd);
+int omap_aes_check_aligned(struct scatterlist *sg, int total);
+int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
+void omap_aes_gcm_dma_out_callback(void *data);
+int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
+
+#endif
--
1.7.9.5
It's not necessary that assoc data and plain text are always passed.
Add these checks before processing the input.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes-gcm.c | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 1be9d91..72815af 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -87,7 +87,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
struct aead_request *req)
{
void *buf_in;
- int alen, clen;
+ int alen, clen, nsg;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authlen = crypto_aead_authsize(aead);
u32 dec = !(dd->flags & FLAGS_ENCRYPT);
@@ -97,12 +97,18 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->sgs_copied = 0;
- sg_init_table(dd->in_sgl, 2);
- buf_in = sg_virt(req->assoc);
- sg_set_buf(dd->in_sgl, buf_in, alen);
+ nsg = 1 + !!(req->assoclen && req->cryptlen);
- buf_in = sg_virt(req->src);
- sg_set_buf(&dd->in_sgl[1], buf_in, clen);
+ sg_init_table(dd->in_sgl, nsg);
+ if (req->assoclen) {
+ buf_in = sg_virt(req->assoc);
+ sg_set_buf(dd->in_sgl, buf_in, alen);
+ }
+
+ if (req->cryptlen) {
+ buf_in = sg_virt(req->src);
+ sg_set_buf(&dd->in_sgl[nsg - 1], buf_in, clen);
+ }
dd->in_sg = dd->in_sgl;
dd->total = clen;
@@ -258,6 +264,8 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int authlen = crypto_aead_authsize(aead);
struct omap_aes_dev *dd;
__be32 counter = cpu_to_be32(1);
int err;
@@ -270,6 +278,12 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
if (err)
return err;
+ if (req->assoclen + req->cryptlen == 0) {
+ scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
+ 1);
+ return 0;
+ }
+
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
--
1.7.9.5
Check if the inputs are not aligned, if not process
the input before starting the hw acceleration.
Similarly, process the output after completion of hw acceleration.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes-gcm.c | 82 +++++++++++++++++++++++++++++++++++++----
1 file changed, 74 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 72815af..9c68ff0 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -48,8 +48,9 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
+ void *buf;
u8 *tag;
- int alen, clen, i, ret = 0, nsg;
+ int pages, alen, clen, i, ret = 0, nsg;
alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
clen = ALIGN(dd->total, AES_BLOCK_SIZE);
@@ -65,10 +66,29 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
omap_aes_crypt_dma_stop(dd);
}
+ if (dd->sgs_copied & AES_OUT_DATA_COPIED) {
+ buf = sg_virt(&dd->out_sgl);
+ scatterwalk_map_and_copy(buf, dd->orig_out, 0, dd->total, 1);
+
+ pages = get_order(clen);
+ free_pages((unsigned long)buf, pages);
+ }
+
if (dd->flags & FLAGS_ENCRYPT)
scatterwalk_map_and_copy(dd->ctx->auth_tag, dd->aead_req->dst,
dd->total, dd->authsize, 1);
+ if (dd->sgs_copied & AES_ASSOC_DATA_COPIED) {
+ buf = sg_virt(&dd->in_sgl[0]);
+ pages = get_order(alen);
+ free_pages((unsigned long)buf, pages);
+ }
+ if (dd->sgs_copied & AES_IN_DATA_COPIED) {
+ buf = sg_virt(&dd->in_sgl[nsg - 1]);
+ pages = get_order(clen);
+ free_pages((unsigned long)buf, pages);
+ }
+
if (!(dd->flags & FLAGS_ENCRYPT)) {
tag = (u8 *)dd->ctx->auth_tag;
for (i = 0; i < dd->authsize; i++) {
@@ -87,13 +107,14 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
struct aead_request *req)
{
void *buf_in;
- int alen, clen, nsg;
+ int pages, alen, clen, cryptlen, nsg;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authlen = crypto_aead_authsize(aead);
u32 dec = !(dd->flags & FLAGS_ENCRYPT);
- alen = req->assoclen;
- clen = req->cryptlen - (dec * authlen);
+ alen = ALIGN(req->assoclen, AES_BLOCK_SIZE);
+ cryptlen = req->cryptlen - (dec * authlen);
+ clen = ALIGN(cryptlen, AES_BLOCK_SIZE);
dd->sgs_copied = 0;
@@ -101,20 +122,65 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
sg_init_table(dd->in_sgl, nsg);
if (req->assoclen) {
- buf_in = sg_virt(req->assoc);
+ if (omap_aes_check_aligned(req->assoc, req->assoclen)) {
+ dd->sgs_copied |= AES_ASSOC_DATA_COPIED;
+ pages = get_order(alen);
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf_in) {
+ pr_err("Couldn't allocate for unaligncases.\n");
+ return -1;
+ }
+
+ scatterwalk_map_and_copy(buf_in, req->assoc, 0,
+ req->assoclen, 0);
+ memset(buf_in + req->assoclen, 0, alen - req->assoclen);
+ } else {
+ buf_in = sg_virt(req->assoc);
+ }
sg_set_buf(dd->in_sgl, buf_in, alen);
}
if (req->cryptlen) {
- buf_in = sg_virt(req->src);
+ if (omap_aes_check_aligned(req->src, req->cryptlen)) {
+ dd->sgs_copied |= AES_IN_DATA_COPIED;
+ pages = get_order(clen);
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf_in) {
+ pr_err("Couldn't allocate for unaligncases.\n");
+ return -1;
+ }
+
+ memset(buf_in + cryptlen, 0, clen - cryptlen);
+ scatterwalk_map_and_copy(buf_in, req->src, 0, cryptlen,
+ 0);
+ } else {
+ buf_in = sg_virt(req->src);
+ }
sg_set_buf(&dd->in_sgl[nsg - 1], buf_in, clen);
}
dd->in_sg = dd->in_sgl;
- dd->total = clen;
+ dd->total = cryptlen;
dd->assoc_len = req->assoclen;
dd->authsize = authlen;
- dd->out_sg = req->dst;
+
+ if (omap_aes_check_aligned(req->dst, cryptlen)) {
+ pages = get_order(clen);
+
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in) {
+ pr_err("Couldn't allocate for unaligned cases.\n");
+ return -1;
+ }
+
+ sg_init_one(&dd->out_sgl, buf_in, clen);
+ dd->out_sg = &dd->out_sgl;
+ dd->orig_out = req->dst;
+ dd->sgs_copied |= AES_OUT_DATA_COPIED;
+ } else {
+ dd->out_sg = req->dst;
+ }
dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, alen + clen);
dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, clen);
--
1.7.9.5
Now that the driver supports GCM mode, add the omap-aes-gcm
algo info to the omap-aes driver.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index e5e9a19..11f3850 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -789,6 +789,28 @@ static struct crypto_alg algs_ctr[] = {
.decrypt = omap_aes_ctr_decrypt,
}
},
+{
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-omap",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_aes_gcm_cra_init,
+ .cra_exit = omap_aes_cra_exit,
+ .cra_u.aead = {
+ .maxauthsize = AES_BLOCK_SIZE,
+ .geniv = "eseqiv",
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_gcm_setkey,
+ .encrypt = omap_aes_gcm_encrypt,
+ .decrypt = omap_aes_gcm_decrypt,
+ }
+},
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
--
1.7.9.5
Add support for PIO mode for GCM mode.
Signed-off-by: Lokesh Vutla <[email protected]>
---
drivers/crypto/omap-aes-gcm.c | 10 ++++++----
drivers/crypto/omap-aes.c | 24 ++++++++++++++++++------
drivers/crypto/omap-aes.h | 3 ++-
3 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9c68ff0..370891b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -52,8 +52,8 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
u8 *tag;
int pages, alen, clen, i, ret = 0, nsg;
- alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
- clen = ALIGN(dd->total, AES_BLOCK_SIZE);
+ alen = ALIGN(dd->assoc_len_save, AES_BLOCK_SIZE);
+ clen = ALIGN(dd->total_save, AES_BLOCK_SIZE);
nsg = 1 + !!(dd->assoc_len && dd->total);
@@ -161,7 +161,9 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd->in_sg = dd->in_sgl;
dd->total = cryptlen;
+ dd->total_save = cryptlen;
dd->assoc_len = req->assoclen;
+ dd->assoc_len_save = req->assoclen;
dd->authsize = authlen;
if (omap_aes_check_aligned(req->dst, cryptlen)) {
@@ -248,14 +250,14 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag)
return ret;
}
-void omap_aes_gcm_dma_out_callback(void *data)
+void omap_aes_gcm_process_auth_tag(void *data)
{
struct omap_aes_dev *dd = data;
int i, val;
u32 *auth_tag, tag[4];
if (!(dd->flags & FLAGS_ENCRYPT))
- scatterwalk_map_and_copy(tag, dd->aead_req->src, dd->total,
+ scatterwalk_map_and_copy(tag, dd->aead_req->src, dd->total_save,
dd->authsize, 0);
auth_tag = dd->ctx->auth_tag;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 11f3850..8aeb913 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -340,7 +340,7 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ tx_out->callback = omap_aes_gcm_process_auth_tag;
else
tx_out->callback = omap_aes_dma_out_callback;
tx_out->callback_param = dd;
@@ -927,8 +927,15 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
status &= ~AES_REG_IRQ_DATA_IN;
omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
- /* Enable DATA_OUT interrupt */
- omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+ /*
+ * if GCM mode enable DATA_IN till assoc data is copied
+ * else Enable DATA_OUT interrupt
+ * */
+ if ((dd->flags & FLAGS_GCM) && dd->assoc_len)
+ dd->assoc_len -= min((size_t)AES_BLOCK_SIZE,
+ dd->assoc_len);
+ else
+ omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
} else if (status & AES_REG_IRQ_DATA_OUT) {
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
@@ -961,12 +968,17 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
status &= ~AES_REG_IRQ_DATA_OUT;
omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
- if (!dd->total)
+ if (!dd->total) {
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
- else
+ if (dd->flags & FLAGS_GCM)
+ /* Process auth tag and call done_task */
+ omap_aes_gcm_process_auth_tag(dd);
+ else
+ tasklet_schedule(&dd->done_task);
+ } else {
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+ }
}
return IRQ_HANDLED;
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0863874..e0621dd 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -164,6 +164,7 @@ struct omap_aes_dev {
size_t total;
size_t total_save;
size_t assoc_len;
+ size_t assoc_len_save;
size_t authsize;
struct scatterlist *in_sg;
@@ -199,7 +200,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req);
int omap_aes_write_ctrl(struct omap_aes_dev *dd);
int omap_aes_check_aligned(struct scatterlist *sg, int total);
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
-void omap_aes_gcm_dma_out_callback(void *data);
+void omap_aes_gcm_process_auth_tag(void *data);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
#endif
--
1.7.9.5
Add simple speed tests covering a range of block sizes for async AEAD
crypto algorithms.
Signed-off-by: Lokesh Vutla <[email protected]>
---
crypto/tcrypt.c | 233 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
crypto/tcrypt.h | 1 +
2 files changed, 234 insertions(+)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1a28001..b37f3f4 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -992,6 +992,234 @@ out:
crypto_free_ahash(tfm);
}
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ struct tcrypt_result *tr = req->base.data;
+
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ reinit_completion(&tr->completion);
+ }
+
+ return ret;
+}
+
+static int test_aaead_jiffies(struct aead_request *req, int enc,
+ int blen, int sec)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ if (enc)
+ ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+ else
+ ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount, sec, (long)bcount * blen);
+ return 0;
+}
+
+static int test_aaead_cycles(struct aead_request *req, int enc, int blen)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ if (enc)
+ ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+ else
+ ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ if (enc)
+ ret = do_one_aead_op(req, crypto_aead_encrypt(req));
+ else
+ ret = do_one_aead_op(req, crypto_aead_decrypt(req));
+
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / 8, blen);
+
+ return ret;
+}
+
+static void test_aaead_speed(const char *algo, int enc, unsigned int sec,
+ struct aead_speed_template *template,
+ unsigned int tcount, u8 authsize,
+ unsigned int aad_size, u8 *keysize)
+{
+ unsigned int i, j;
+ struct crypto_aead *tfm;
+ int ret = -ENOMEM;
+ const char *key;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ struct scatterlist *asg;
+ struct scatterlist *sgout;
+ const char *e;
+ void *assoc;
+ char iv[MAX_IVLEN];
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+ unsigned int *b_size;
+ unsigned int iv_len;
+ struct tcrypt_result result;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ if (testmgr_alloc_buf(xbuf))
+ goto out_noxbuf;
+ if (testmgr_alloc_buf(axbuf))
+ goto out_noaxbuf;
+ if (testmgr_alloc_buf(xoutbuf))
+ goto out_nooutbuf;
+
+ sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
+ if (!sg)
+ goto out_nosg;
+ asg = &sg[8];
+ sgout = &asg[8];
+
+ init_completion(&result.completion);
+ pr_info("\ntesting speed of %s %s\n", algo, e);
+
+ tfm = crypto_alloc_aead(algo, 0, 0);
+
+ if (IS_ERR(tfm)) {
+ pr_err("alg: aead: Failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ return;
+ }
+
+ req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: aead: Failed to allocate request for %s\n",
+ algo);
+ goto out;
+ }
+
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ i = 0;
+ do {
+ b_size = aead_sizes;
+ do {
+ assoc = axbuf[0];
+
+ if (aad_size < PAGE_SIZE) {
+ memset(assoc, 0xff, aad_size);
+ } else {
+ pr_err("associate data length (%u) too big\n",
+ aad_size);
+ goto out_nosg;
+ }
+ sg_init_one(&asg[0], assoc, aad_size);
+
+ if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ *keysize + *b_size,
+ TVMEMSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+ ret = crypto_aead_setkey(tfm, key, *keysize);
+ ret = crypto_aead_setauthsize(tfm, authsize);
+
+ iv_len = crypto_aead_ivsize(tfm);
+ if (iv_len)
+ memset(&iv, 0xff, iv_len);
+
+ crypto_aead_clear_flags(tfm, ~0);
+ pr_info("test %u (%d bit key, %d byte blocks): ",
+ i, *keysize * 8, *b_size);
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_aead_get_flags(tfm));
+ goto out;
+ }
+
+ sg_init_aead(&sg[0], xbuf,
+ *b_size + (enc ? authsize : 0));
+
+ sg_init_aead(&sgout[0], xoutbuf,
+ *b_size + (enc ? authsize : 0));
+
+ aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+ aead_request_set_assoc(req, asg, aad_size);
+
+ if (sec)
+ ret = test_aaead_jiffies(req, enc, *b_size,
+ sec);
+ else
+ ret = test_aaead_cycles(req, enc, *b_size);
+
+ if (ret) {
+ pr_err("%s() failed return code=%d\n", e, ret);
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ crypto_free_aead(tfm);
+ kfree(sg);
+out_nosg:
+ testmgr_free_buf(xoutbuf);
+out_nooutbuf:
+ testmgr_free_buf(axbuf);
+out_noaxbuf:
+ testmgr_free_buf(xbuf);
+out_noxbuf:
+ return;
+}
+
static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -2139,6 +2367,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_8_32);
break;
+ case 510:
+ test_aaead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
+ aead_speed_template_16_24_32);
+ break;
+
case 1000:
test_available();
break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 6c7e21a..9810edd 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -66,6 +66,7 @@ static u8 speed_template_32_64[] = {32, 64, 0};
* AEAD speed tests
*/
static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_16_24_32[] = {16, 24, 32, 0};
/*
* Digest speed tests
--
1.7.9.5
On Thu, Jul 02, 2015 at 10:48:31AM +0530, Lokesh Vutla wrote:
> OMAP AES driver returns an error if the data is not aligned with
> AES_BLOCK_SIZE bytes.
> But OMAP AES hw allows data input upto 1 byte aligned, but still
> zeros are to be appended and complete AES_BLOCK_SIZE has to be written.
> And correct length has to be passed in LENGTH field.
> Adding support for inputs not aligned with AES_BLOCK_SIZE.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> drivers/crypto/omap-aes.c | 33 ++++++++++++++++-----------------
> 1 file changed, 16 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
> index 9a28b7e..a923101 100644
> --- a/drivers/crypto/omap-aes.c
> +++ b/drivers/crypto/omap-aes.c
> @@ -558,6 +558,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
> {
> int len = 0;
>
> + if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
> + return -1;
-EINVAL?
--
balbi
On Thu, Jul 02, 2015 at 10:48:32AM +0530, Lokesh Vutla wrote:
> AES_CTRL_REG is used to configure AES mode. Before configuring
> any mode we need to make sure all other modes are reset or else
> driver will misbehave. So mask all modes before configuring
> any AES mode.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> drivers/crypto/omap-aes.c | 13 +++++--------
> 1 file changed, 5 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
> index a923101..96fc7f7 100644
> --- a/drivers/crypto/omap-aes.c
> +++ b/drivers/crypto/omap-aes.c
> @@ -63,6 +63,7 @@
> #define AES_REG_CTRL_DIRECTION (1 << 2)
> #define AES_REG_CTRL_INPUT_READY (1 << 1)
> #define AES_REG_CTRL_OUTPUT_READY (1 << 0)
> +#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
you end up masking bits which aren't even defined in this driver. What
are those bits ? Perhaps add macros for them and define
AES_REG_CTRL_MASK by explicitly ORing those macros ? That would, at
least, be clearer
--
balbi
On Thu, Jul 02, 2015 at 10:48:33AM +0530, Lokesh Vutla wrote:
> Add aead_request_cast() api to get pointer to aead_request
> from cryto_async_request.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> include/linux/crypto.h | 6 ++++++
> 1 file changed, 6 insertions(+)
>
> diff --git a/include/linux/crypto.h b/include/linux/crypto.h
> index 10df5d2..20fac3d 100644
> --- a/include/linux/crypto.h
> +++ b/include/linux/crypto.h
> @@ -1460,6 +1460,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
> req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
> }
>
> +static inline struct aead_request *aead_request_cast(
> + struct crypto_async_request *req)
> +{
> + return container_of(req, struct aead_request, base);
container_of() ensures type safety, this can be a macro just fine.
--
balbi
On Thu, Jul 02, 2015 at 10:48:34AM +0530, Lokesh Vutla wrote:
> Use BIT()/GENMASK() macros for all register definitions instead of
> hand-writing bit masks.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> drivers/crypto/omap-aes.c | 36 ++++++++++++++++++------------------
> 1 file changed, 18 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
> index 96fc7f7..d974ab6 100644
> --- a/drivers/crypto/omap-aes.c
> +++ b/drivers/crypto/omap-aes.c
> @@ -52,30 +52,30 @@
> #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
>
> #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
> -#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
> -#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
> -#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
> -#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
> -#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
> -#define AES_REG_CTRL_CTR (1 << 6)
> -#define AES_REG_CTRL_CBC (1 << 5)
> -#define AES_REG_CTRL_KEY_SIZE (3 << 3)
> -#define AES_REG_CTRL_DIRECTION (1 << 2)
> -#define AES_REG_CTRL_INPUT_READY (1 << 1)
> -#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
> -#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
> +#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
> +#define AES_REG_CTRL_CTR_WIDTH_32 0
> +#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
> +#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
> +#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
> +#define AES_REG_CTRL_CTR BIT(6)
> +#define AES_REG_CTRL_CBC BIT(5)
> +#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
> +#define AES_REG_CTRL_DIRECTION BIT(2)
> +#define AES_REG_CTRL_INPUT_READY BIT(1)
> +#define AES_REG_CTRL_OUTPUT_READY BIT(0)
> +#define AES_REG_CTRL_MASK GENMASK(24, 2)
this was defined a couple patches ago, why didn't you define it with
GENMASK() to start with ?
--
balbi
Am Donnerstag, 2. Juli 2015, 10:48:38 schrieb Lokesh Vutla:
Hi Lokesh,
>Now the driver supports gcm mode, add omap-aes-gcm
>algo info to omap-aes driver.
>
>Signed-off-by: Lokesh Vutla <[email protected]>
>---
> drivers/crypto/omap-aes.c | 22 ++++++++++++++++++++++
> 1 file changed, 22 insertions(+)
>
>diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
>index e5e9a19..11f3850 100644
>--- a/drivers/crypto/omap-aes.c
>+++ b/drivers/crypto/omap-aes.c
>@@ -789,6 +789,28 @@ static struct crypto_alg algs_ctr[] = {
> .decrypt = omap_aes_ctr_decrypt,
> }
> },
>+{
>+ .cra_name = "gcm(aes)",
>+ .cra_driver_name = "gcm-aes-omap",
>+ .cra_priority = 100,
Why did you choose the priority 100? The software implementations commonly use
100. crypto/gcm.c uses the prio of the underlying cipher. In case of ARM,
there seem to be assembler implementations of AES which have the prio of 200
or 300. So, such software implementation of gcm(aes) would have a higher
precedence than your hw implementation.
So, if a user would use gcm(aes), isn't it more likely that he gets the
software implementation?
>+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
>+ CRYPTO_ALG_KERN_DRIVER_ONLY,
>+ .cra_blocksize = AES_BLOCK_SIZE,
>+ .cra_ctxsize = sizeof(struct omap_aes_ctx),
>+ .cra_alignmask = 0xf,
>+ .cra_type = &crypto_aead_type,
>+ .cra_module = THIS_MODULE,
>+ .cra_init = omap_aes_gcm_cra_init,
>+ .cra_exit = omap_aes_cra_exit,
>+ .cra_u.aead = {
>+ .maxauthsize = AES_BLOCK_SIZE,
>+ .geniv = "eseqiv",
>+ .ivsize = AES_BLOCK_SIZE,
>+ .setkey = omap_aes_gcm_setkey,
>+ .encrypt = omap_aes_gcm_encrypt,
>+ .decrypt = omap_aes_gcm_decrypt,
>+ }
>+},
> };
>
> static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
Ciao
Stephan
On Thu, Jul 02, 2015 at 10:48:35AM +0530, Lokesh Vutla wrote:
> OMAP AES hw supports aes gcm mode.
here you refer to it as 'gcm'
> Adding support for GCM mode in omap-aes driver.
while here and in subject as 'GCM'.
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> drivers/crypto/Makefile | 3 +-
> drivers/crypto/omap-aes-gcm.c | 304 +++++++++++++++++++++++++++++++++++++++++
why does this have to be a separate source file ? Patch gets really
large with all the macro and structure definition being shuffled around.
> drivers/crypto/omap-aes.c | 238 +++++++++-----------------------
> drivers/crypto/omap-aes.h | 205 +++++++++++++++++++++++++++
> 4 files changed, 575 insertions(+), 175 deletions(-)
> create mode 100644 drivers/crypto/omap-aes-gcm.c
> create mode 100644 drivers/crypto/omap-aes.h
>
> diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
> index fb84be7..3afad7b 100644
> --- a/drivers/crypto/Makefile
> +++ b/drivers/crypto/Makefile
> @@ -13,7 +13,8 @@ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
> obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
> n2_crypto-y := n2_core.o n2_asm.o
> obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
> -obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
> +obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
> +omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
... I mean, considering you unconditionally link these two together...
> diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
> new file mode 100644
> index 0000000..1be9d91
> --- /dev/null
> +++ b/drivers/crypto/omap-aes-gcm.c
> @@ -0,0 +1,304 @@
> +/*
> + * Cryptographic API.
> + *
> + * Support for OMAP AES GCM HW acceleration.
> + *
> + * Copyright (c) 2015 Texas Instruments Incorporated
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as published
> + * by the Free Software Foundation.
> + *
> + */
> +
> +#include <linux/err.h>
> +#include <linux/module.h>
> +#include <linux/init.h>
> +#include <linux/errno.h>
> +#include <linux/kernel.h>
> +#include <linux/platform_device.h>
> +#include <linux/scatterlist.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dmaengine.h>
> +#include <linux/omap-dma.h>
> +#include <linux/pm_runtime.h>
> +#include <linux/of.h>
> +#include <linux/of_device.h>
> +#include <linux/of_address.h>
> +#include <linux/io.h>
> +#include <linux/crypto.h>
> +#include <linux/interrupt.h>
> +#include <crypto/scatterwalk.h>
> +#include <crypto/aes.h>
> +#include "omap-aes.h"
> +
> +static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
> + struct aead_request *req);
> +
> +static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
> +{
> + struct aead_request *req = dd->aead_req;
> +
> + dd->flags &= ~FLAGS_BUSY;
> + dd->in_sg = NULL;
> + dd->out_sg = NULL;
> +
> + req->base.complete(&req->base, ret);
> +}
> +
> +static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
> +{
> + u8 *tag;
> + int alen, clen, i, ret = 0, nsg;
> +
> + alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
> + clen = ALIGN(dd->total, AES_BLOCK_SIZE);
> +
> + nsg = 1 + !!(dd->assoc_len && dd->total);
> +
> + if (!dd->pio_only) {
> + dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
> + DMA_FROM_DEVICE);
> + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
> + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
> + DMA_FROM_DEVICE);
> + omap_aes_crypt_dma_stop(dd);
> + }
> +
> + if (dd->flags & FLAGS_ENCRYPT)
> + scatterwalk_map_and_copy(dd->ctx->auth_tag, dd->aead_req->dst,
> + dd->total, dd->authsize, 1);
> +
> + if (!(dd->flags & FLAGS_ENCRYPT)) {
> + tag = (u8 *)dd->ctx->auth_tag;
> + for (i = 0; i < dd->authsize; i++) {
> + if (tag[i]) {
> + dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
> + ret = -EBADMSG;
> + }
> + }
> + }
> +
> + omap_aes_gcm_finish_req(dd, ret);
> + omap_aes_gcm_handle_queue(dd, NULL);
> +}
> +
> +static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
> + struct aead_request *req)
> +{
> + void *buf_in;
> + int alen, clen;
> + struct crypto_aead *aead = crypto_aead_reqtfm(req);
> + unsigned int authlen = crypto_aead_authsize(aead);
> + u32 dec = !(dd->flags & FLAGS_ENCRYPT);
> +
> + alen = req->assoclen;
> + clen = req->cryptlen - (dec * authlen);
> +
> + dd->sgs_copied = 0;
> +
> + sg_init_table(dd->in_sgl, 2);
> + buf_in = sg_virt(req->assoc);
> + sg_set_buf(dd->in_sgl, buf_in, alen);
> +
> + buf_in = sg_virt(req->src);
> + sg_set_buf(&dd->in_sgl[1], buf_in, clen);
> +
> + dd->in_sg = dd->in_sgl;
> + dd->total = clen;
> + dd->assoc_len = req->assoclen;
> + dd->authsize = authlen;
> + dd->out_sg = req->dst;
> +
> + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, alen + clen);
> + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, clen);
> +
> + return 0;
> +}
> +
> +static void tcrypt_complete(struct crypto_async_request *req, int err)
> +{
> + struct tcrypt_result *res = req->data;
> +
> + if (err == -EINPROGRESS)
> + return;
> +
> + res->err = err;
> + complete(&res->completion);
> +}
> +
> +static int do_encrypt_iv(struct aead_request *req, u32 *tag)
> +{
> + struct scatterlist iv_sg;
> + struct ablkcipher_request *ablk_req;
> + struct crypto_ablkcipher *tfm;
> + struct tcrypt_result result;
> + struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
> + int ret = 0;
> +
> + tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
> + ablk_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
> + if (!ablk_req) {
> + pr_err("skcipher: Failed to allocate request\n");
> + return -1;
> + }
> +
> + init_completion(&result.completion);
> +
> + sg_init_one(&iv_sg, tag, AES_BLOCK_SIZE);
> + ablkcipher_request_set_callback(ablk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
> + tcrypt_complete, &result);
> + ret = crypto_ablkcipher_setkey(tfm, (u8 *)ctx->key, ctx->keylen);
Looks like you should check the return value of crypto_ablkcipher_setkey() here before proceeding to the encrypt call.
> + ablkcipher_request_set_crypt(ablk_req, &iv_sg, &iv_sg, AES_BLOCK_SIZE,
> + req->iv);
> + ret = crypto_ablkcipher_encrypt(ablk_req);
> + switch (ret) {
> + case 0:
> + break;
> + case -EINPROGRESS:
> + case -EBUSY:
> + ret = wait_for_completion_interruptible(&result.completion);
> + if (!ret) {
> + ret = result.err;
> + if (!ret) {
> + reinit_completion(&result.completion);
> + break;
> + }
> + }
> + /* fall through */
> + default:
> + pr_err("Encryptio of IV failed for GCM mode");
Typo: "Encryptio" should be "Encryption". The message is also missing a trailing "\n".
> + break;
> + }
> +
> + ablkcipher_request_free(ablk_req);
> + crypto_free_ablkcipher(tfm);
> + return ret;
> +}
> +
> +void omap_aes_gcm_dma_out_callback(void *data)
> +{
> + struct omap_aes_dev *dd = data;
> + int i, val;
> + u32 *auth_tag, tag[4];
> +
> + if (!(dd->flags & FLAGS_ENCRYPT))
> + scatterwalk_map_and_copy(tag, dd->aead_req->src, dd->total,
> + dd->authsize, 0);
> +
> + auth_tag = dd->ctx->auth_tag;
> + for (i = 0; i < 4; i++) {
> + val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
> + auth_tag[i] = val ^ auth_tag[i];
> + if (!(dd->flags & FLAGS_ENCRYPT))
> + auth_tag[i] = auth_tag[i] ^ tag[i];
> + }
> +
> + /* dma_lch_out - completed */
> + omap_aes_gcm_done_task(dd);
> +}
> +
> +static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
> + struct aead_request *req)
> +{
> + struct omap_aes_ctx *ctx;
> + struct crypto_async_request *async_req, *backlog;
> + struct omap_aes_reqctx *rctx;
> + unsigned long flags;
> + int err, ret = 0;
> +
> + spin_lock_irqsave(&dd->lock, flags);
> + if (req)
> + ret = crypto_enqueue_request(&dd->aead_queue, &req->base);
> + if (dd->flags & FLAGS_BUSY) {
> + spin_unlock_irqrestore(&dd->lock, flags);
> + return ret;
> + }
> + backlog = crypto_get_backlog(&dd->aead_queue);
> + async_req = crypto_dequeue_request(&dd->aead_queue);
> + if (async_req)
> + dd->flags |= FLAGS_BUSY;
> + spin_unlock_irqrestore(&dd->lock, flags);
> +
> + if (!async_req)
> + return ret;
> +
> + if (backlog)
> + backlog->complete(backlog, -EINPROGRESS);
> +
> + req = aead_request_cast(async_req);
> +
> + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
> + rctx = aead_request_ctx(req);
> +
> + dd->ctx = ctx;
> + ctx->dd = dd;
> + dd->aead_req = req;
> +
> + rctx->mode &= FLAGS_MODE_MASK;
> + dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
> +
> + err = omap_aes_gcm_copy_buffers(dd, req);
> + if (err)
> + return err;
> +
> + err = omap_aes_write_ctrl(dd);
> + if (!err)
> + err = omap_aes_crypt_dma_start(dd);
> +
> + if (err) {
> + omap_aes_gcm_finish_req(dd, err);
> + omap_aes_gcm_handle_queue(dd, NULL);
> + }
> +
> + return ret;
> +}
> +
> +static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
> +{
> + struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
> + struct omap_aes_reqctx *rctx = aead_request_ctx(req);
> + struct omap_aes_dev *dd;
> + __be32 counter = cpu_to_be32(1);
> + int err;
> +
> + memset(ctx->auth_tag, 0, sizeof(ctx->auth_tag));
> + memcpy(req->iv + 12, &counter, 4);
> +
> + /* Create E(K, IV) */
> + err = do_encrypt_iv(req, ctx->auth_tag);
> + if (err)
> + return err;
> +
> + dd = omap_aes_find_dev(ctx);
> + if (!dd)
> + return -ENODEV;
> + rctx->mode = mode;
> +
> + return omap_aes_gcm_handle_queue(dd, req);
> +}
> +
> +int omap_aes_gcm_encrypt(struct aead_request *req)
> +{
> + return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
> +}
> +
> +int omap_aes_gcm_decrypt(struct aead_request *req)
> +{
> + return omap_aes_gcm_crypt(req, FLAGS_GCM);
> +}
> +
> +int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
> + unsigned int keylen)
> +{
> + struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
> +
> + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
> + keylen != AES_KEYSIZE_256)
> + return -EINVAL;
> +
> + memcpy(ctx->key, key, keylen);
> + ctx->keylen = keylen;
> +
> + return 0;
> +}
> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
> index d974ab6..e5e9a19 100644
> --- a/drivers/crypto/omap-aes.c
> +++ b/drivers/crypto/omap-aes.c
> @@ -36,157 +36,7 @@
> #include <linux/interrupt.h>
> #include <crypto/scatterwalk.h>
> #include <crypto/aes.h>
> -
> -#define DST_MAXBURST 4
> -#define DMA_MIN (DST_MAXBURST * sizeof(u32))
> -
> -#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
> -
> -/* OMAP TRM gives bitfields as start:end, where start is the higher bit
> - number. For example 7:0 */
> -#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
> -#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
> -
> -#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
> - ((x ^ 0x01) * 0x04))
> -#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
> -
> -#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
> -#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
> -#define AES_REG_CTRL_CTR_WIDTH_32 0
> -#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
> -#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
> -#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
> -#define AES_REG_CTRL_CTR BIT(6)
> -#define AES_REG_CTRL_CBC BIT(5)
> -#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
> -#define AES_REG_CTRL_DIRECTION BIT(2)
> -#define AES_REG_CTRL_INPUT_READY BIT(1)
> -#define AES_REG_CTRL_OUTPUT_READY BIT(0)
> -#define AES_REG_CTRL_MASK GENMASK(24, 2)
> -
> -#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
> -
> -#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
> -
> -#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
> -#define AES_REG_MASK_SIDLE BIT(6)
> -#define AES_REG_MASK_START BIT(5)
> -#define AES_REG_MASK_DMA_OUT_EN BIT(3)
> -#define AES_REG_MASK_DMA_IN_EN BIT(2)
> -#define AES_REG_MASK_SOFTRESET BIT(1)
> -#define AES_REG_AUTOIDLE BIT(0)
> -
> -#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
> -
> -#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
> -#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
> -#define AES_REG_IRQ_DATA_IN BIT(1)
> -#define AES_REG_IRQ_DATA_OUT BIT(2)
> -#define DEFAULT_TIMEOUT (5*HZ)
> -
> -#define FLAGS_MODE_MASK 0x000f
> -#define FLAGS_ENCRYPT BIT(0)
> -#define FLAGS_CBC BIT(1)
> -#define FLAGS_GIV BIT(2)
> -#define FLAGS_CTR BIT(3)
> -
> -#define FLAGS_INIT BIT(4)
> -#define FLAGS_FAST BIT(5)
> -#define FLAGS_BUSY BIT(6)
> -
> -#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
> -
> -struct omap_aes_ctx {
> - struct omap_aes_dev *dd;
> -
> - int keylen;
> - u32 key[AES_KEYSIZE_256 / sizeof(u32)];
> - unsigned long flags;
> -};
> -
> -struct omap_aes_reqctx {
> - unsigned long mode;
> -};
> -
> -#define OMAP_AES_QUEUE_LENGTH 1
> -#define OMAP_AES_CACHE_SIZE 0
> -
> -struct omap_aes_algs_info {
> - struct crypto_alg *algs_list;
> - unsigned int size;
> - unsigned int registered;
> -};
> -
> -struct omap_aes_pdata {
> - struct omap_aes_algs_info *algs_info;
> - unsigned int algs_info_size;
> -
> - void (*trigger)(struct omap_aes_dev *dd, int length);
> -
> - u32 key_ofs;
> - u32 iv_ofs;
> - u32 ctrl_ofs;
> - u32 data_ofs;
> - u32 rev_ofs;
> - u32 mask_ofs;
> - u32 irq_enable_ofs;
> - u32 irq_status_ofs;
> -
> - u32 dma_enable_in;
> - u32 dma_enable_out;
> - u32 dma_start;
> -
> - u32 major_mask;
> - u32 major_shift;
> - u32 minor_mask;
> - u32 minor_shift;
> -};
> -
> -struct omap_aes_dev {
> - struct list_head list;
> - unsigned long phys_base;
> - void __iomem *io_base;
> - struct omap_aes_ctx *ctx;
> - struct device *dev;
> - unsigned long flags;
> - int err;
> -
> - spinlock_t lock;
> - struct crypto_queue queue;
> -
> - struct tasklet_struct done_task;
> - struct tasklet_struct queue_task;
> -
> - struct ablkcipher_request *req;
> -
> - /*
> - * total is used by PIO mode for book keeping so introduce
> - * variable total_save as need it to calc page_order
> - */
> - size_t total;
> - size_t total_save;
> -
> - struct scatterlist *in_sg;
> - struct scatterlist *out_sg;
> -
> - /* Buffers for copying for unaligned cases */
> - struct scatterlist in_sgl;
> - struct scatterlist out_sgl;
> - struct scatterlist *orig_out;
> - int sgs_copied;
> -
> - struct scatter_walk in_walk;
> - struct scatter_walk out_walk;
> - int dma_in;
> - struct dma_chan *dma_lch_in;
> - int dma_out;
> - struct dma_chan *dma_lch_out;
> - int in_sg_len;
> - int out_sg_len;
> - int pio_only;
> - const struct omap_aes_pdata *pdata;
> -};
> +#include "omap-aes.h"
>
> /* keep registered devices data here */
> static LIST_HEAD(dev_list);
> @@ -202,7 +52,7 @@ static DEFINE_SPINLOCK(list_lock);
> _read_ret; \
> })
> #else
> -static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
> +inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
> {
> return __raw_readl(dd->io_base + offset);
> }
> @@ -216,7 +66,7 @@ static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
> __raw_writel(value, dd->io_base + offset); \
> } while (0)
> #else
> -static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
> +inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
> u32 value)
> {
> __raw_writel(value, dd->io_base + offset);
> @@ -251,7 +101,7 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
> return 0;
> }
>
> -static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
> +int omap_aes_write_ctrl(struct omap_aes_dev *dd)
> {
> unsigned int key32;
> int i, err;
> @@ -263,7 +113,11 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
>
> key32 = dd->ctx->keylen / sizeof(u32);
>
> - /* it seems a key should always be set even if it has not changed */
> + /* RESET the key as previous HASH keys should not get affected*/
> + if (dd->flags & FLAGS_GCM)
> + for (i = 0; i < 0x40; i = i + 4)
> + omap_aes_write(dd, i, 0x0);
> +
> for (i = 0; i < key32; i++) {
> omap_aes_write(dd, AES_REG_KEY(dd, i),
> __le32_to_cpu(dd->ctx->key[i]));
> @@ -272,12 +126,20 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
> if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
> omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
>
> + if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv)
> + omap_aes_write_n(dd, AES_REG_IV(dd, 0),
> + (u32 *)dd->aead_req->iv, 4);
> +
> val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
> if (dd->flags & FLAGS_CBC)
> val |= AES_REG_CTRL_CBC;
> - if (dd->flags & FLAGS_CTR)
> +
> + if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
> val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
>
> + if (dd->flags & FLAGS_GCM)
> + val |= AES_REG_CTRL_GCM;
> +
> if (dd->flags & FLAGS_ENCRYPT)
> val |= AES_REG_CTRL_DIRECTION;
>
> @@ -308,6 +170,8 @@ static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
> {
> omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
> omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
> + if (dd->flags & FLAGS_GCM)
> + omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
>
> omap_aes_dma_trigger_omap2(dd, length);
> }
> @@ -322,7 +186,7 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd)
> omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
> }
>
> -static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
> +struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
> {
> struct omap_aes_dev *dd = NULL, *tmp;
>
> @@ -410,12 +274,11 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
> scatterwalk_done(&walk, out, 0);
> }
>
> -static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
> - struct scatterlist *in_sg, struct scatterlist *out_sg,
> - int in_sg_len, int out_sg_len)
> +static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
> + struct scatterlist *in_sg,
> + struct scatterlist *out_sg,
> + int in_sg_len, int out_sg_len)
> {
> - struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
> - struct omap_aes_dev *dd = ctx->dd;
> struct dma_async_tx_descriptor *tx_in, *tx_out;
> struct dma_slave_config cfg;
> int ret;
> @@ -476,7 +339,10 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
> return -EINVAL;
> }
>
> - tx_out->callback = omap_aes_dma_out_callback;
> + if (dd->flags & FLAGS_GCM)
> + tx_out->callback = omap_aes_gcm_dma_out_callback;
> + else
> + tx_out->callback = omap_aes_dma_out_callback;
> tx_out->callback_param = dd;
>
> dmaengine_submit(tx_in);
> @@ -491,10 +357,8 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
> return 0;
> }
>
> -static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
> +int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
> {
> - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
> - crypto_ablkcipher_reqtfm(dd->req));
> int err;
>
> pr_debug("total: %d\n", dd->total);
> @@ -515,7 +379,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
> }
> }
>
> - err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
> + err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
> dd->out_sg_len);
> if (err && !dd->pio_only) {
> dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
> @@ -537,7 +401,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
> req->base.complete(&req->base, err);
> }
>
> -static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
> +int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
> {
> int err = 0;
>
> @@ -551,7 +415,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
> return err;
> }
>
> -static int omap_aes_check_aligned(struct scatterlist *sg, int total)
> +int omap_aes_check_aligned(struct scatterlist *sg, int total)
> {
> int len = 0;
>
> @@ -594,9 +458,9 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
>
> sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
>
> - sg_init_table(&dd->in_sgl, 1);
> - sg_set_buf(&dd->in_sgl, buf_in, total);
> - dd->in_sg = &dd->in_sgl;
> + sg_init_table(dd->in_sgl, 1);
> + sg_set_buf(dd->in_sgl, buf_in, total);
> + dd->in_sg = dd->in_sgl;
>
> sg_init_table(&dd->out_sgl, 1);
> sg_set_buf(&dd->out_sgl, buf_out, total);
> @@ -665,6 +529,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
> ctx->dd = dd;
>
> err = omap_aes_write_ctrl(dd);
> +
> if (!err)
> err = omap_aes_crypt_dma_start(dd);
> if (err) {
> @@ -694,7 +559,7 @@ static void omap_aes_done_task(unsigned long data)
> }
>
> if (dd->sgs_copied) {
> - buf_in = sg_virt(&dd->in_sgl);
> + buf_in = sg_virt(dd->in_sgl);
> buf_out = sg_virt(&dd->out_sgl);
>
> sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
> @@ -811,6 +676,30 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
> return 0;
> }
>
> +static int omap_aes_gcm_cra_init(struct crypto_tfm *tfm)
> +{
> + struct omap_aes_dev *dd = NULL;
> + int err;
> +
> + /* Find AES device, currently picks the first device */
> + spin_lock_bh(&list_lock);
> + list_for_each_entry(dd, &dev_list, list) {
> + break;
> + }
> + spin_unlock_bh(&list_lock);
> +
> + err = pm_runtime_get_sync(dd->dev);
> + if (err < 0) {
> + dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
> + __func__, err);
> + return err;
> + }
> +
> + tfm->crt_aead.reqsize = sizeof(struct omap_aes_reqctx);
> +
> + return 0;
> +}
> +
> static void omap_aes_cra_exit(struct crypto_tfm *tfm)
> {
> struct omap_aes_dev *dd = NULL;
> @@ -899,7 +788,7 @@ static struct crypto_alg algs_ctr[] = {
> .encrypt = omap_aes_ctr_encrypt,
> .decrypt = omap_aes_ctr_decrypt,
> }
> -} ,
> +},
> };
>
> static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
> @@ -1179,6 +1068,7 @@ static int omap_aes_probe(struct platform_device *pdev)
>
> spin_lock_init(&dd->lock);
> crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
> + crypto_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
>
> err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
> omap_aes_get_res_pdev(dd, pdev, &res);
> diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
> new file mode 100644
> index 0000000..0863874
> --- /dev/null
> +++ b/drivers/crypto/omap-aes.h
> @@ -0,0 +1,205 @@
> +/*
> + * Cryptographic API.
> + *
> + * Support for OMAP AES HW ACCELERATOR defines
> + *
> + * Copyright (c) 2015 Texas Instruments Incorporated
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as published
> + * by the Free Software Foundation.
> + *
> + */
> +#ifndef __OMAP_AES_REGS_H__
> +#define __OMAP_AES_REGS_H__
> +
> +#define DST_MAXBURST 4
> +#define DMA_MIN (DST_MAXBURST * sizeof(u32))
> +
> +#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
> +
> +/* OMAP TRM gives bitfields as start:end, where start is the higher bit
> + number. For example 7:0 */
> +#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
> +#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
> +
> +#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
> + ((x ^ 0x01) * 0x04))
> +#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
> +
> +#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
> +#define AES_REG_CTRL_CONTEXT_READY BIT(31)
> +#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
> +#define AES_REG_CTRL_CTR_WIDTH_32 0
> +#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
> +#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
> +#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
> +#define AES_REG_CTRL_GCM GENMASK(17, 16)
> +#define AES_REG_CTRL_CTR BIT(6)
> +#define AES_REG_CTRL_CBC BIT(5)
> +#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
> +#define AES_REG_CTRL_DIRECTION BIT(2)
> +#define AES_REG_CTRL_INPUT_READY BIT(1)
> +#define AES_REG_CTRL_OUTPUT_READY BIT(0)
> +#define AES_REG_CTRL_MASK GENMASK(24, 2)
> +
> +#define AES_REG_C_LEN_0 0x54
> +#define AES_REG_C_LEN_1 0x58
> +#define AES_REG_A_LEN 0x5C
> +
> +#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
> +#define AES_REG_TAG_N(dd, x) (0x70 + ((x) * 0x04))
> +
> +#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
> +
> +#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
> +#define AES_REG_MASK_SIDLE BIT(6)
> +#define AES_REG_MASK_START BIT(5)
> +#define AES_REG_MASK_DMA_OUT_EN BIT(3)
> +#define AES_REG_MASK_DMA_IN_EN BIT(2)
> +#define AES_REG_MASK_SOFTRESET BIT(1)
> +#define AES_REG_AUTOIDLE BIT(0)
> +
> +#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
> +
> +#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
> +#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
> +#define AES_REG_IRQ_DATA_IN BIT(1)
> +#define AES_REG_IRQ_DATA_OUT BIT(2)
> +#define DEFAULT_TIMEOUT (5 * HZ)
> +
> +#define FLAGS_MODE_MASK 0x001f
> +#define FLAGS_ENCRYPT BIT(0)
> +#define FLAGS_CBC BIT(1)
> +#define FLAGS_GIV BIT(2)
> +#define FLAGS_CTR BIT(3)
> +#define FLAGS_GCM BIT(4)
> +
> +#define FLAGS_INIT BIT(5)
> +#define FLAGS_FAST BIT(6)
> +#define FLAGS_BUSY BIT(7)
> +
> +#define AES_ASSOC_DATA_COPIED BIT(0)
> +#define AES_IN_DATA_COPIED BIT(1)
> +#define AES_OUT_DATA_COPIED BIT(2)
> +
> +#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
> +
> +struct tcrypt_result {
> + struct completion completion;
> + int err;
> +};
> +
> +struct omap_aes_ctx {
> + struct omap_aes_dev *dd;
> +
> + int keylen;
> + u32 key[AES_KEYSIZE_256 / sizeof(u32)];
> + u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
> + unsigned long flags;
> +};
> +
> +struct omap_aes_reqctx {
> + unsigned long mode;
> +};
> +
> +#define OMAP_AES_QUEUE_LENGTH 1
> +#define OMAP_AES_CACHE_SIZE 0
> +
> +struct omap_aes_algs_info {
> + struct crypto_alg *algs_list;
> + unsigned int size;
> + unsigned int registered;
> +};
> +
> +struct omap_aes_pdata {
> + struct omap_aes_algs_info *algs_info;
> + unsigned int algs_info_size;
> +
> + void (*trigger)(struct omap_aes_dev *dd, int length);
> +
> + u32 key_ofs;
> + u32 iv_ofs;
> + u32 ctrl_ofs;
> + u32 data_ofs;
> + u32 rev_ofs;
> + u32 mask_ofs;
> + u32 irq_enable_ofs;
> + u32 irq_status_ofs;
> +
> + u32 dma_enable_in;
> + u32 dma_enable_out;
> + u32 dma_start;
> +
> + u32 major_mask;
> + u32 major_shift;
> + u32 minor_mask;
> + u32 minor_shift;
> +};
> +
> +struct omap_aes_dev {
> + struct list_head list;
> + unsigned long phys_base;
> + void __iomem *io_base;
> + struct omap_aes_ctx *ctx;
> + struct device *dev;
> + unsigned long flags;
> + int err;
> +
> + /* Lock to acquire omap_aes_dd */
> + spinlock_t lock;
> + struct crypto_queue queue;
> + struct crypto_queue aead_queue;
> +
> + struct tasklet_struct done_task;
> + struct tasklet_struct queue_task;
> +
> + struct ablkcipher_request *req;
> + struct aead_request *aead_req;
> +
> + /*
> + * total is used by PIO mode for book keeping so introduce
> + * variable total_save as need it to calc page_order
> + */
> + size_t total;
> + size_t total_save;
> + size_t assoc_len;
> + size_t authsize;
> +
> + struct scatterlist *in_sg;
> + struct scatterlist *assoc_sg;
> + struct scatterlist *out_sg;
> +
> + /* Buffers for copying for unaligned cases */
> + struct scatterlist in_sgl[2];
> + struct scatterlist out_sgl;
> + struct scatterlist aead_sgl[2];
> + struct scatterlist *orig_out;
> + int sgs_copied;
> +
> + struct scatter_walk in_walk;
> + struct scatter_walk out_walk;
> + int dma_in;
> + struct dma_chan *dma_lch_in;
> + int dma_out;
> + struct dma_chan *dma_lch_out;
> + int in_sg_len;
> + int out_sg_len;
> + int pio_only;
> + const struct omap_aes_pdata *pdata;
> +};
> +
> +u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset);
> +void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value);
> +struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx);
> +int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
> + unsigned int keylen);
> +int omap_aes_gcm_encrypt(struct aead_request *req);
> +int omap_aes_gcm_decrypt(struct aead_request *req);
> +int omap_aes_write_ctrl(struct omap_aes_dev *dd);
> +int omap_aes_check_aligned(struct scatterlist *sg, int total);
> +int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
> +void omap_aes_gcm_dma_out_callback(void *data);
> +int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
> +
> +#endif
> --
> 1.7.9.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
balbi
On Thu, Jul 02, 2015 at 10:48:36AM +0530, Lokesh Vutla wrote:
> Its not necessary that assoc data and plain text is passed always.
> Add these checks before processing the input.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
Why can't this be combined with the patch which added GCM support in the first
place?
> ---
> drivers/crypto/omap-aes-gcm.c | 26 ++++++++++++++++++++------
> 1 file changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
> index 1be9d91..72815af 100644
> --- a/drivers/crypto/omap-aes-gcm.c
> +++ b/drivers/crypto/omap-aes-gcm.c
> @@ -87,7 +87,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
> struct aead_request *req)
> {
> void *buf_in;
> - int alen, clen;
> + int alen, clen, nsg;
> struct crypto_aead *aead = crypto_aead_reqtfm(req);
> unsigned int authlen = crypto_aead_authsize(aead);
> u32 dec = !(dd->flags & FLAGS_ENCRYPT);
> @@ -97,12 +97,18 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
>
> dd->sgs_copied = 0;
>
> - sg_init_table(dd->in_sgl, 2);
> - buf_in = sg_virt(req->assoc);
> - sg_set_buf(dd->in_sgl, buf_in, alen);
> + nsg = 1 + !!(req->assoclen && req->cryptlen);
>
> - buf_in = sg_virt(req->src);
> - sg_set_buf(&dd->in_sgl[1], buf_in, clen);
> + sg_init_table(dd->in_sgl, nsg);
> + if (req->assoclen) {
> + buf_in = sg_virt(req->assoc);
> + sg_set_buf(dd->in_sgl, buf_in, alen);
> + }
> +
> + if (req->cryptlen) {
> + buf_in = sg_virt(req->src);
> + sg_set_buf(&dd->in_sgl[nsg - 1], buf_in, clen);
> + }
>
> dd->in_sg = dd->in_sgl;
> dd->total = clen;
> @@ -258,6 +264,8 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
> {
> struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
> struct omap_aes_reqctx *rctx = aead_request_ctx(req);
> + struct crypto_aead *aead = crypto_aead_reqtfm(req);
> + unsigned int authlen = crypto_aead_authsize(aead);
> struct omap_aes_dev *dd;
> __be32 counter = cpu_to_be32(1);
> int err;
> @@ -270,6 +278,12 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
> if (err)
> return err;
>
> + if (req->assoclen + req->cryptlen == 0) {
> + scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
> + 1);
> + return 0;
> + }
> +
> dd = omap_aes_find_dev(ctx);
> if (!dd)
> return -ENODEV;
> --
> 1.7.9.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
balbi
On Thu, Jul 02, 2015 at 10:48:37AM +0530, Lokesh Vutla wrote:
> Check if the inputs are not aligned, if not process
> the input before starting the hw acceleration.
> Similarly after completition of hw acceleration.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
And this one? Same question — can it be squashed into the patch that added GCM support?
--
balbi
On Thu, Jul 02, 2015 at 10:48:39AM +0530, Lokesh Vutla wrote:
> Add support for PIO mode for GCM mode.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
Why do you require PIO? Is there any situation where DMA can't be
used? What would that case be?
--
balbi
On Thursday 02 July 2015 01:23 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:31AM +0530, Lokesh Vutla wrote:
>> OMAP AES driver returns an error if the data is not aligned with
>> AES_BLOCK_SIZE bytes.
>> But OMAP AES hw allows data input upto 1 byte aligned, but still
>> zeros are to be appended and complete AES_BLOCK_SIZE has to be written.
>> And correct length has to be passed in LENGTH field.
>> Adding support for inputs not aligned with AES_BLOCK_SIZE.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>> ---
>> drivers/crypto/omap-aes.c | 33 ++++++++++++++++-----------------
>> 1 file changed, 16 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
>> index 9a28b7e..a923101 100644
>> --- a/drivers/crypto/omap-aes.c
>> +++ b/drivers/crypto/omap-aes.c
>> @@ -558,6 +558,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
>> {
>> int len = 0;
>>
>> + if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
>> + return -1;
>
> -EINVAL?
Okay, will update it.
Thanks and regards,
Lokesh
On Thursday 02 July 2015 01:27 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:32AM +0530, Lokesh Vutla wrote:
>> AES_CTRL_REG is used to configure AES mode. Before configuring
>> any mode we need to make sure all other modes are reset or else
>> driver will misbehave. So mask all modes before configuring
>> any AES mode.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>> ---
>> drivers/crypto/omap-aes.c | 13 +++++--------
>> 1 file changed, 5 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
>> index a923101..96fc7f7 100644
>> --- a/drivers/crypto/omap-aes.c
>> +++ b/drivers/crypto/omap-aes.c
>> @@ -63,6 +63,7 @@
>> #define AES_REG_CTRL_DIRECTION (1 << 2)
>> #define AES_REG_CTRL_INPUT_READY (1 << 1)
>> #define AES_REG_CTRL_OUTPUT_READY (1 << 0)
>> +#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
>
> you end up masking bits which aren't even defined in this driver. What
> are those bits ? Perhaps add macros for them and define
> AES_REG_CTRL_MASK by explicitly ORing those macros ? That would, at
> least, be clearer
The hardware supports ECB, CBC, CTR, CFB, F8, CBC-MAC, F9, GCM, CCM and XTS modes,
but the current driver only supports ECB, CBC and CTR.
That is why the other bit fields are not yet defined.
Defining them all is fine, but ORing them together would be very long and look a bit ugly,
so I kept it as a single mask covering all of those bits.
I'll switch it to use GENMASK() here.
Thanks and regards,
Lokesh
>
On Thursday 02 July 2015 01:28 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:33AM +0530, Lokesh Vutla wrote:
>> Add aead_request_cast() api to get pointer to aead_request
>> from cryto_async_request.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>> ---
>> include/linux/crypto.h | 6 ++++++
>> 1 file changed, 6 insertions(+)
>>
>> diff --git a/include/linux/crypto.h b/include/linux/crypto.h
>> index 10df5d2..20fac3d 100644
>> --- a/include/linux/crypto.h
>> +++ b/include/linux/crypto.h
>> @@ -1460,6 +1460,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
>> req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
>> }
>>
>> +static inline struct aead_request *aead_request_cast(
>> + struct crypto_async_request *req)
>> +{
>> + return container_of(req, struct aead_request, base);
>
> container_of() ensures type safety, this can be a macro just fine.
There are many similar static inline definitions for ablkcipher etc. in the
crypto library, so I kept this one as a static inline for uniformity across the library.
Thanks and regards,
Lokesh
>
On Thursday 02 July 2015 01:30 PM, Stephan Mueller wrote:
> Am Donnerstag, 2. Juli 2015, 10:48:38 schrieb Lokesh Vutla:
>
> Hi Lokesh,
>
>> Now the driver supports gcm mode, add omap-aes-gcm
>> algo info to omap-aes driver.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>> ---
>> drivers/crypto/omap-aes.c | 22 ++++++++++++++++++++++
>> 1 file changed, 22 insertions(+)
>>
>> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
>> index e5e9a19..11f3850 100644
>> --- a/drivers/crypto/omap-aes.c
>> +++ b/drivers/crypto/omap-aes.c
>> @@ -789,6 +789,28 @@ static struct crypto_alg algs_ctr[] = {
>> .decrypt = omap_aes_ctr_decrypt,
>> }
>> },
>> +{
>> + .cra_name = "gcm(aes)",
>> + .cra_driver_name = "gcm-aes-omap",
>> + .cra_priority = 100,
>
> Why did you choose the priority 100? The software implementations commonly use
> 100. crypto/gcm.c uses the prio of the underlying cipher. In case of ARM,
> there seem to be assembler implementations of AES which have the prio of 200
> or 300. So, such software implementation of gcm(aes) would have a higher
> precedence than your hw implementation.
Yes, you are right.
Other hw algos in omap-aes also use priority 100.
Only sw and hw implementations are enabled right now, and both are at the same priority.
And till now it has just been lucky that the hw algo gets picked.
I'll change the priority to 300 for all the modes.
Thanks for pointing it.
Regards,
Lokesh
>
> So, if a user would use gcm(aes), isn't it more likely that he gets the
> software implementation?
>
>> + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
>> + CRYPTO_ALG_KERN_DRIVER_ONLY,
>> + .cra_blocksize = AES_BLOCK_SIZE,
>> + .cra_ctxsize = sizeof(struct omap_aes_ctx),
>> + .cra_alignmask = 0xf,
>> + .cra_type = &crypto_aead_type,
>> + .cra_module = THIS_MODULE,
>> + .cra_init = omap_aes_gcm_cra_init,
>> + .cra_exit = omap_aes_cra_exit,
>> + .cra_u.aead = {
>> + .maxauthsize = AES_BLOCK_SIZE,
>> + .geniv = "eseqiv",
>> + .ivsize = AES_BLOCK_SIZE,
>> + .setkey = omap_aes_gcm_setkey,
>> + .encrypt = omap_aes_gcm_encrypt,
>> + .decrypt = omap_aes_gcm_decrypt,
>> + }
>> +},
>> };
>>
>> static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
>
>
> Ciao
> Stephan
>
Am Donnerstag, 2. Juli 2015, 15:24:58 schrieb Lokesh Vutla:
Hi Lokesh,
>>> +{
>>> + .cra_name = "gcm(aes)",
>>> + .cra_driver_name = "gcm-aes-omap",
>>> + .cra_priority = 100,
>>
>> Why did you choose the priority 100? The software implementations commonly
>> use 100. crypto/gcm.c uses the prio of the underlying cipher. In case of
>> ARM, there seem to be assembler implementations of AES which have the prio
>> of 200 or 300. So, such software implementation of gcm(aes) would have a
>> higher precedence than your hw implementation.
>
>Yes, you are right.
>Other hw algos in omap-aes also uses priority 100.
>Only sw and hw implementations are enabled right now and both are at same
>priority. And till now its lucky enough that hw algo gets picked.
Maybe those HW prios should be updated too?
Ciao
Stephan
On Thursday 02 July 2015 01:34 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:35AM +0530, Lokesh Vutla wrote:
>> OMAP AES hw supports aes gcm mode.
>
> here you refer to it as 'gcm'
Will update it in next revision.
>
>> Adding support for GCM mode in omap-aes driver.
>
> while here and in subject as 'GCM'.
>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>> ---
>> drivers/crypto/Makefile | 3 +-
>> drivers/crypto/omap-aes-gcm.c | 304 +++++++++++++++++++++++++++++++++++++++++
>
> why does this have to be a separate source file ? Patch gets really
> large with all the macro and structure definition being shuffled around.
GCM is an aead (authenticated encryption and decryption), but
other modes are just block ciphers (encryption and decryption).
The crypto library defines different structures and routines for aead and normal
block ciphers.
So I started off with a new driver, and was later able to combine the common parts.
Since GCM is a bit different from all the other modes, I left it in a new file.
If I combine it into the same file, it will be difficult to track all these functions
at once.
Thanks and regards,
Lokesh
>
>> drivers/crypto/omap-aes.c | 238 +++++++++-----------------------
>> drivers/crypto/omap-aes.h | 205 +++++++++++++++++++++++++++
>> 4 files changed, 575 insertions(+), 175 deletions(-)
>> create mode 100644 drivers/crypto/omap-aes-gcm.c
>> create mode 100644 drivers/crypto/omap-aes.h
>>
>> diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
>> index fb84be7..3afad7b 100644
>> --- a/drivers/crypto/Makefile
>> +++ b/drivers/crypto/Makefile
>> @@ -13,7 +13,8 @@ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
>> obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
>> n2_crypto-y := n2_core.o n2_asm.o
>> obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
>> -obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
>> +obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
>> +omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
>
> ... I mean, considering you unconditionally link these two together...
>
>> diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
>> new file mode 100644
>> index 0000000..1be9d91
>> --- /dev/null
>> +++ b/drivers/crypto/omap-aes-gcm.c
>> @@ -0,0 +1,304 @@
>> +/*
>> + * Cryptographic API.
>> + *
>> + * Support for OMAP AES GCM HW acceleration.
>> + *
>> + * Copyright (c) 2015 Texas Instruments Incorporated
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as published
>> + * by the Free Software Foundation.
>> + *
>> + */
>> +
>> +#include <linux/err.h>
>> +#include <linux/module.h>
>> +#include <linux/init.h>
>> +#include <linux/errno.h>
>> +#include <linux/kernel.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/scatterlist.h>
>> +#include <linux/dma-mapping.h>
>> +#include <linux/dmaengine.h>
>> +#include <linux/omap-dma.h>
>> +#include <linux/pm_runtime.h>
>> +#include <linux/of.h>
>> +#include <linux/of_device.h>
>> +#include <linux/of_address.h>
>> +#include <linux/io.h>
>> +#include <linux/crypto.h>
>> +#include <linux/interrupt.h>
>> +#include <crypto/scatterwalk.h>
>> +#include <crypto/aes.h>
>> +#include "omap-aes.h"
>> +
>> +static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
>> + struct aead_request *req);
>> +
>> +static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
>> +{
>> + struct aead_request *req = dd->aead_req;
>> +
>> + dd->flags &= ~FLAGS_BUSY;
>> + dd->in_sg = NULL;
>> + dd->out_sg = NULL;
>> +
>> + req->base.complete(&req->base, ret);
>> +}
>> +
>> +static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
>> +{
>> + u8 *tag;
>> + int alen, clen, i, ret = 0, nsg;
>> +
>> + alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
>> + clen = ALIGN(dd->total, AES_BLOCK_SIZE);
>> +
>> + nsg = 1 + !!(dd->assoc_len && dd->total);
>> +
>> + if (!dd->pio_only) {
>> + dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
>> + DMA_FROM_DEVICE);
>> + dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
>> + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
>> + DMA_FROM_DEVICE);
>> + omap_aes_crypt_dma_stop(dd);
>> + }
>> +
>> + if (dd->flags & FLAGS_ENCRYPT)
>> + scatterwalk_map_and_copy(dd->ctx->auth_tag, dd->aead_req->dst,
>> + dd->total, dd->authsize, 1);
>> +
>> + if (!(dd->flags & FLAGS_ENCRYPT)) {
>> + tag = (u8 *)dd->ctx->auth_tag;
>> + for (i = 0; i < dd->authsize; i++) {
>> + if (tag[i]) {
>> + dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
>> + ret = -EBADMSG;
>> + }
>> + }
>> + }
>> +
>> + omap_aes_gcm_finish_req(dd, ret);
>> + omap_aes_gcm_handle_queue(dd, NULL);
>> +}
>> +
>> +static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
>> + struct aead_request *req)
>> +{
>> + void *buf_in;
>> + int alen, clen;
>> + struct crypto_aead *aead = crypto_aead_reqtfm(req);
>> + unsigned int authlen = crypto_aead_authsize(aead);
>> + u32 dec = !(dd->flags & FLAGS_ENCRYPT);
>> +
>> + alen = req->assoclen;
>> + clen = req->cryptlen - (dec * authlen);
>> +
>> + dd->sgs_copied = 0;
>> +
>> + sg_init_table(dd->in_sgl, 2);
>> + buf_in = sg_virt(req->assoc);
>> + sg_set_buf(dd->in_sgl, buf_in, alen);
>> +
>> + buf_in = sg_virt(req->src);
>> + sg_set_buf(&dd->in_sgl[1], buf_in, clen);
>> +
>> + dd->in_sg = dd->in_sgl;
>> + dd->total = clen;
>> + dd->assoc_len = req->assoclen;
>> + dd->authsize = authlen;
>> + dd->out_sg = req->dst;
>> +
>> + dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, alen + clen);
>> + dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, clen);
>> +
>> + return 0;
>> +}
>> +
>> +static void tcrypt_complete(struct crypto_async_request *req, int err)
>> +{
>> + struct tcrypt_result *res = req->data;
>> +
>> + if (err == -EINPROGRESS)
>> + return;
>> +
>> + res->err = err;
>> + complete(&res->completion);
>> +}
>> +
>> +static int do_encrypt_iv(struct aead_request *req, u32 *tag)
>> +{
>> + struct scatterlist iv_sg;
>> + struct ablkcipher_request *ablk_req;
>> + struct crypto_ablkcipher *tfm;
>> + struct tcrypt_result result;
>> + struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
>> + int ret = 0;
>> +
>> + tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
>> + ablk_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
>> + if (!ablk_req) {
>> + pr_err("skcipher: Failed to allocate request\n");
>> + return -1;
>> + }
>> +
>> + init_completion(&result.completion);
>> +
>> + sg_init_one(&iv_sg, tag, AES_BLOCK_SIZE);
>> + ablkcipher_request_set_callback(ablk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
>> + tcrypt_complete, &result);
>> + ret = crypto_ablkcipher_setkey(tfm, (u8 *)ctx->key, ctx->keylen);
>
> looks like you should check result here.
>
>> + ablkcipher_request_set_crypt(ablk_req, &iv_sg, &iv_sg, AES_BLOCK_SIZE,
>> + req->iv);
>> + ret = crypto_ablkcipher_encrypt(ablk_req);
>> + switch (ret) {
>> + case 0:
>> + break;
>> + case -EINPROGRESS:
>> + case -EBUSY:
>> + ret = wait_for_completion_interruptible(&result.completion);
>> + if (!ret) {
>> + ret = result.err;
>> + if (!ret) {
>> + reinit_completion(&result.completion);
>> + break;
>> + }
>> + }
>> + /* fall through */
>> + default:
>> + pr_err("Encryptio of IV failed for GCM mode");
>
> Encryption.
>
>> + break;
>> + }
>> +
>> + ablkcipher_request_free(ablk_req);
>> + crypto_free_ablkcipher(tfm);
>> + return ret;
>> +}
>> +
>> +void omap_aes_gcm_dma_out_callback(void *data)
>> +{
>> + struct omap_aes_dev *dd = data;
>> + int i, val;
>> + u32 *auth_tag, tag[4];
>> +
>> + if (!(dd->flags & FLAGS_ENCRYPT))
>> + scatterwalk_map_and_copy(tag, dd->aead_req->src, dd->total,
>> + dd->authsize, 0);
>> +
>> + auth_tag = dd->ctx->auth_tag;
>> + for (i = 0; i < 4; i++) {
>> + val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
>> + auth_tag[i] = val ^ auth_tag[i];
>> + if (!(dd->flags & FLAGS_ENCRYPT))
>> + auth_tag[i] = auth_tag[i] ^ tag[i];
>> + }
>> +
>> + /* dma_lch_out - completed */
>> + omap_aes_gcm_done_task(dd);
>> +}
>> +
>> +static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
>> + struct aead_request *req)
>> +{
>> + struct omap_aes_ctx *ctx;
>> + struct crypto_async_request *async_req, *backlog;
>> + struct omap_aes_reqctx *rctx;
>> + unsigned long flags;
>> + int err, ret = 0;
>> +
>> + spin_lock_irqsave(&dd->lock, flags);
>> + if (req)
>> + ret = crypto_enqueue_request(&dd->aead_queue, &req->base);
>> + if (dd->flags & FLAGS_BUSY) {
>> + spin_unlock_irqrestore(&dd->lock, flags);
>> + return ret;
>> + }
>> + backlog = crypto_get_backlog(&dd->aead_queue);
>> + async_req = crypto_dequeue_request(&dd->aead_queue);
>> + if (async_req)
>> + dd->flags |= FLAGS_BUSY;
>> + spin_unlock_irqrestore(&dd->lock, flags);
>> +
>> + if (!async_req)
>> + return ret;
>> +
>> + if (backlog)
>> + backlog->complete(backlog, -EINPROGRESS);
>> +
>> + req = aead_request_cast(async_req);
>> +
>> + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
>> + rctx = aead_request_ctx(req);
>> +
>> + dd->ctx = ctx;
>> + ctx->dd = dd;
>> + dd->aead_req = req;
>> +
>> + rctx->mode &= FLAGS_MODE_MASK;
>> + dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
>> +
>> + err = omap_aes_gcm_copy_buffers(dd, req);
>> + if (err)
>> + return err;
>> +
>> + err = omap_aes_write_ctrl(dd);
>> + if (!err)
>> + err = omap_aes_crypt_dma_start(dd);
>> +
>> + if (err) {
>> + omap_aes_gcm_finish_req(dd, err);
>> + omap_aes_gcm_handle_queue(dd, NULL);
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
>> +{
>> + struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
>> + struct omap_aes_reqctx *rctx = aead_request_ctx(req);
>> + struct omap_aes_dev *dd;
>> + __be32 counter = cpu_to_be32(1);
>> + int err;
>> +
>> + memset(ctx->auth_tag, 0, sizeof(ctx->auth_tag));
>> + memcpy(req->iv + 12, &counter, 4);
>> +
>> + /* Create E(K, IV) */
>> + err = do_encrypt_iv(req, ctx->auth_tag);
>> + if (err)
>> + return err;
>> +
>> + dd = omap_aes_find_dev(ctx);
>> + if (!dd)
>> + return -ENODEV;
>> + rctx->mode = mode;
>> +
>> + return omap_aes_gcm_handle_queue(dd, req);
>> +}
>> +
>> +int omap_aes_gcm_encrypt(struct aead_request *req)
>> +{
>> + return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
>> +}
>> +
>> +int omap_aes_gcm_decrypt(struct aead_request *req)
>> +{
>> + return omap_aes_gcm_crypt(req, FLAGS_GCM);
>> +}
>> +
>> +int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
>> + unsigned int keylen)
>> +{
>> + struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
>> +
>> + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
>> + keylen != AES_KEYSIZE_256)
>> + return -EINVAL;
>> +
>> + memcpy(ctx->key, key, keylen);
>> + ctx->keylen = keylen;
>> +
>> + return 0;
>> +}
>> diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
>> index d974ab6..e5e9a19 100644
>> --- a/drivers/crypto/omap-aes.c
>> +++ b/drivers/crypto/omap-aes.c
>> @@ -36,157 +36,7 @@
>> #include <linux/interrupt.h>
>> #include <crypto/scatterwalk.h>
>> #include <crypto/aes.h>
>> -
>> -#define DST_MAXBURST 4
>> -#define DMA_MIN (DST_MAXBURST * sizeof(u32))
>> -
>> -#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
>> -
>> -/* OMAP TRM gives bitfields as start:end, where start is the higher bit
>> - number. For example 7:0 */
>> -#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
>> -#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
>> -
>> -#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
>> - ((x ^ 0x01) * 0x04))
>> -#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
>> -
>> -#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
>> -#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
>> -#define AES_REG_CTRL_CTR_WIDTH_32 0
>> -#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
>> -#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
>> -#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
>> -#define AES_REG_CTRL_CTR BIT(6)
>> -#define AES_REG_CTRL_CBC BIT(5)
>> -#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
>> -#define AES_REG_CTRL_DIRECTION BIT(2)
>> -#define AES_REG_CTRL_INPUT_READY BIT(1)
>> -#define AES_REG_CTRL_OUTPUT_READY BIT(0)
>> -#define AES_REG_CTRL_MASK GENMASK(24, 2)
>> -
>> -#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
>> -
>> -#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
>> -
>> -#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
>> -#define AES_REG_MASK_SIDLE BIT(6)
>> -#define AES_REG_MASK_START BIT(5)
>> -#define AES_REG_MASK_DMA_OUT_EN BIT(3)
>> -#define AES_REG_MASK_DMA_IN_EN BIT(2)
>> -#define AES_REG_MASK_SOFTRESET BIT(1)
>> -#define AES_REG_AUTOIDLE BIT(0)
>> -
>> -#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
>> -
>> -#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
>> -#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
>> -#define AES_REG_IRQ_DATA_IN BIT(1)
>> -#define AES_REG_IRQ_DATA_OUT BIT(2)
>> -#define DEFAULT_TIMEOUT (5*HZ)
>> -
>> -#define FLAGS_MODE_MASK 0x000f
>> -#define FLAGS_ENCRYPT BIT(0)
>> -#define FLAGS_CBC BIT(1)
>> -#define FLAGS_GIV BIT(2)
>> -#define FLAGS_CTR BIT(3)
>> -
>> -#define FLAGS_INIT BIT(4)
>> -#define FLAGS_FAST BIT(5)
>> -#define FLAGS_BUSY BIT(6)
>> -
>> -#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
>> -
>> -struct omap_aes_ctx {
>> - struct omap_aes_dev *dd;
>> -
>> - int keylen;
>> - u32 key[AES_KEYSIZE_256 / sizeof(u32)];
>> - unsigned long flags;
>> -};
>> -
>> -struct omap_aes_reqctx {
>> - unsigned long mode;
>> -};
>> -
>> -#define OMAP_AES_QUEUE_LENGTH 1
>> -#define OMAP_AES_CACHE_SIZE 0
>> -
>> -struct omap_aes_algs_info {
>> - struct crypto_alg *algs_list;
>> - unsigned int size;
>> - unsigned int registered;
>> -};
>> -
>> -struct omap_aes_pdata {
>> - struct omap_aes_algs_info *algs_info;
>> - unsigned int algs_info_size;
>> -
>> - void (*trigger)(struct omap_aes_dev *dd, int length);
>> -
>> - u32 key_ofs;
>> - u32 iv_ofs;
>> - u32 ctrl_ofs;
>> - u32 data_ofs;
>> - u32 rev_ofs;
>> - u32 mask_ofs;
>> - u32 irq_enable_ofs;
>> - u32 irq_status_ofs;
>> -
>> - u32 dma_enable_in;
>> - u32 dma_enable_out;
>> - u32 dma_start;
>> -
>> - u32 major_mask;
>> - u32 major_shift;
>> - u32 minor_mask;
>> - u32 minor_shift;
>> -};
>> -
>> -struct omap_aes_dev {
>> - struct list_head list;
>> - unsigned long phys_base;
>> - void __iomem *io_base;
>> - struct omap_aes_ctx *ctx;
>> - struct device *dev;
>> - unsigned long flags;
>> - int err;
>> -
>> - spinlock_t lock;
>> - struct crypto_queue queue;
>> -
>> - struct tasklet_struct done_task;
>> - struct tasklet_struct queue_task;
>> -
>> - struct ablkcipher_request *req;
>> -
>> - /*
>> - * total is used by PIO mode for book keeping so introduce
>> - * variable total_save as need it to calc page_order
>> - */
>> - size_t total;
>> - size_t total_save;
>> -
>> - struct scatterlist *in_sg;
>> - struct scatterlist *out_sg;
>> -
>> - /* Buffers for copying for unaligned cases */
>> - struct scatterlist in_sgl;
>> - struct scatterlist out_sgl;
>> - struct scatterlist *orig_out;
>> - int sgs_copied;
>> -
>> - struct scatter_walk in_walk;
>> - struct scatter_walk out_walk;
>> - int dma_in;
>> - struct dma_chan *dma_lch_in;
>> - int dma_out;
>> - struct dma_chan *dma_lch_out;
>> - int in_sg_len;
>> - int out_sg_len;
>> - int pio_only;
>> - const struct omap_aes_pdata *pdata;
>> -};
>> +#include "omap-aes.h"
>>
>> /* keep registered devices data here */
>> static LIST_HEAD(dev_list);
>> @@ -202,7 +52,7 @@ static DEFINE_SPINLOCK(list_lock);
>> _read_ret; \
>> })
>> #else
>> -static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
>> +inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
>> {
>> return __raw_readl(dd->io_base + offset);
>> }
>> @@ -216,7 +66,7 @@ static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
>> __raw_writel(value, dd->io_base + offset); \
>> } while (0)
>> #else
>> -static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
>> +inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
>> u32 value)
>> {
>> __raw_writel(value, dd->io_base + offset);
>> @@ -251,7 +101,7 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
>> return 0;
>> }
>>
>> -static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
>> +int omap_aes_write_ctrl(struct omap_aes_dev *dd)
>> {
>> unsigned int key32;
>> int i, err;
>> @@ -263,7 +113,11 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
>>
>> key32 = dd->ctx->keylen / sizeof(u32);
>>
>> - /* it seems a key should always be set even if it has not changed */
>> + /* RESET the key as previous HASH keys should not get affected*/
>> + if (dd->flags & FLAGS_GCM)
>> + for (i = 0; i < 0x40; i = i + 4)
>> + omap_aes_write(dd, i, 0x0);
>> +
>> for (i = 0; i < key32; i++) {
>> omap_aes_write(dd, AES_REG_KEY(dd, i),
>> __le32_to_cpu(dd->ctx->key[i]));
>> @@ -272,12 +126,20 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
>> if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
>> omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
>>
>> + if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv)
>> + omap_aes_write_n(dd, AES_REG_IV(dd, 0),
>> + (u32 *)dd->aead_req->iv, 4);
>> +
>> val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
>> if (dd->flags & FLAGS_CBC)
>> val |= AES_REG_CTRL_CBC;
>> - if (dd->flags & FLAGS_CTR)
>> +
>> + if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
>> val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
>>
>> + if (dd->flags & FLAGS_GCM)
>> + val |= AES_REG_CTRL_GCM;
>> +
>> if (dd->flags & FLAGS_ENCRYPT)
>> val |= AES_REG_CTRL_DIRECTION;
>>
>> @@ -308,6 +170,8 @@ static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
>> {
>> omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
>> omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
>> + if (dd->flags & FLAGS_GCM)
>> + omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
>>
>> omap_aes_dma_trigger_omap2(dd, length);
>> }
>> @@ -322,7 +186,7 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd)
>> omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
>> }
>>
>> -static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
>> +struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
>> {
>> struct omap_aes_dev *dd = NULL, *tmp;
>>
>> @@ -410,12 +274,11 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
>> scatterwalk_done(&walk, out, 0);
>> }
>>
>> -static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
>> - struct scatterlist *in_sg, struct scatterlist *out_sg,
>> - int in_sg_len, int out_sg_len)
>> +static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
>> + struct scatterlist *in_sg,
>> + struct scatterlist *out_sg,
>> + int in_sg_len, int out_sg_len)
>> {
>> - struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
>> - struct omap_aes_dev *dd = ctx->dd;
>> struct dma_async_tx_descriptor *tx_in, *tx_out;
>> struct dma_slave_config cfg;
>> int ret;
>> @@ -476,7 +339,10 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
>> return -EINVAL;
>> }
>>
>> - tx_out->callback = omap_aes_dma_out_callback;
>> + if (dd->flags & FLAGS_GCM)
>> + tx_out->callback = omap_aes_gcm_dma_out_callback;
>> + else
>> + tx_out->callback = omap_aes_dma_out_callback;
>> tx_out->callback_param = dd;
>>
>> dmaengine_submit(tx_in);
>> @@ -491,10 +357,8 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
>> return 0;
>> }
>>
>> -static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
>> +int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
>> {
>> - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
>> - crypto_ablkcipher_reqtfm(dd->req));
>> int err;
>>
>> pr_debug("total: %d\n", dd->total);
>> @@ -515,7 +379,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
>> }
>> }
>>
>> - err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
>> + err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
>> dd->out_sg_len);
>> if (err && !dd->pio_only) {
>> dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
>> @@ -537,7 +401,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
>> req->base.complete(&req->base, err);
>> }
>>
>> -static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
>> +int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
>> {
>> int err = 0;
>>
>> @@ -551,7 +415,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
>> return err;
>> }
>>
>> -static int omap_aes_check_aligned(struct scatterlist *sg, int total)
>> +int omap_aes_check_aligned(struct scatterlist *sg, int total)
>> {
>> int len = 0;
>>
>> @@ -594,9 +458,9 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
>>
>> sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
>>
>> - sg_init_table(&dd->in_sgl, 1);
>> - sg_set_buf(&dd->in_sgl, buf_in, total);
>> - dd->in_sg = &dd->in_sgl;
>> + sg_init_table(dd->in_sgl, 1);
>> + sg_set_buf(dd->in_sgl, buf_in, total);
>> + dd->in_sg = dd->in_sgl;
>>
>> sg_init_table(&dd->out_sgl, 1);
>> sg_set_buf(&dd->out_sgl, buf_out, total);
>> @@ -665,6 +529,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
>> ctx->dd = dd;
>>
>> err = omap_aes_write_ctrl(dd);
>> +
>> if (!err)
>> err = omap_aes_crypt_dma_start(dd);
>> if (err) {
>> @@ -694,7 +559,7 @@ static void omap_aes_done_task(unsigned long data)
>> }
>>
>> if (dd->sgs_copied) {
>> - buf_in = sg_virt(&dd->in_sgl);
>> + buf_in = sg_virt(dd->in_sgl);
>> buf_out = sg_virt(&dd->out_sgl);
>>
>> sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
>> @@ -811,6 +676,30 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
>> return 0;
>> }
>>
>> +static int omap_aes_gcm_cra_init(struct crypto_tfm *tfm)
>> +{
>> + struct omap_aes_dev *dd = NULL;
>> + int err;
>> +
>> + /* Find AES device, currently picks the first device */
>> + spin_lock_bh(&list_lock);
>> + list_for_each_entry(dd, &dev_list, list) {
>> + break;
>> + }
>> + spin_unlock_bh(&list_lock);
>> +
>> + err = pm_runtime_get_sync(dd->dev);
>> + if (err < 0) {
>> + dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
>> + __func__, err);
>> + return err;
>> + }
>> +
>> + tfm->crt_aead.reqsize = sizeof(struct omap_aes_reqctx);
>> +
>> + return 0;
>> +}
>> +
>> static void omap_aes_cra_exit(struct crypto_tfm *tfm)
>> {
>> struct omap_aes_dev *dd = NULL;
>> @@ -899,7 +788,7 @@ static struct crypto_alg algs_ctr[] = {
>> .encrypt = omap_aes_ctr_encrypt,
>> .decrypt = omap_aes_ctr_decrypt,
>> }
>> -} ,
>> +},
>> };
>>
>> static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
>> @@ -1179,6 +1068,7 @@ static int omap_aes_probe(struct platform_device *pdev)
>>
>> spin_lock_init(&dd->lock);
>> crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
>> + crypto_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
>>
>> err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
>> omap_aes_get_res_pdev(dd, pdev, &res);
>> diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
>> new file mode 100644
>> index 0000000..0863874
>> --- /dev/null
>> +++ b/drivers/crypto/omap-aes.h
>> @@ -0,0 +1,205 @@
>> +/*
>> + * Cryptographic API.
>> + *
>> + * Support for OMAP AES HW ACCELERATOR defines
>> + *
>> + * Copyright (c) 2015 Texas Instruments Incorporated
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as published
>> + * by the Free Software Foundation.
>> + *
>> + */
>> +#ifndef __OMAP_AES_REGS_H__
>> +#define __OMAP_AES_REGS_H__
>> +
>> +#define DST_MAXBURST 4
>> +#define DMA_MIN (DST_MAXBURST * sizeof(u32))
>> +
>> +#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
>> +
>> +/* OMAP TRM gives bitfields as start:end, where start is the higher bit
>> + number. For example 7:0 */
>> +#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
>> +#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
>> +
>> +#define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
>> + ((x ^ 0x01) * 0x04))
>> +#define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
>> +
>> +#define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
>> +#define AES_REG_CTRL_CONTEXT_READY BIT(31)
>> +#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
>> +#define AES_REG_CTRL_CTR_WIDTH_32 0
>> +#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
>> +#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
>> +#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
>> +#define AES_REG_CTRL_GCM GENMASK(17, 16)
>> +#define AES_REG_CTRL_CTR BIT(6)
>> +#define AES_REG_CTRL_CBC BIT(5)
>> +#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
>> +#define AES_REG_CTRL_DIRECTION BIT(2)
>> +#define AES_REG_CTRL_INPUT_READY BIT(1)
>> +#define AES_REG_CTRL_OUTPUT_READY BIT(0)
>> +#define AES_REG_CTRL_MASK GENMASK(24, 2)
>> +
>> +#define AES_REG_C_LEN_0 0x54
>> +#define AES_REG_C_LEN_1 0x58
>> +#define AES_REG_A_LEN 0x5C
>> +
>> +#define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))
>> +#define AES_REG_TAG_N(dd, x) (0x70 + ((x) * 0x04))
>> +
>> +#define AES_REG_REV(dd) ((dd)->pdata->rev_ofs)
>> +
>> +#define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs)
>> +#define AES_REG_MASK_SIDLE BIT(6)
>> +#define AES_REG_MASK_START BIT(5)
>> +#define AES_REG_MASK_DMA_OUT_EN BIT(3)
>> +#define AES_REG_MASK_DMA_IN_EN BIT(2)
>> +#define AES_REG_MASK_SOFTRESET BIT(1)
>> +#define AES_REG_AUTOIDLE BIT(0)
>> +
>> +#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
>> +
>> +#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
>> +#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
>> +#define AES_REG_IRQ_DATA_IN BIT(1)
>> +#define AES_REG_IRQ_DATA_OUT BIT(2)
>> +#define DEFAULT_TIMEOUT (5 * HZ)
>> +
>> +#define FLAGS_MODE_MASK 0x001f
>> +#define FLAGS_ENCRYPT BIT(0)
>> +#define FLAGS_CBC BIT(1)
>> +#define FLAGS_GIV BIT(2)
>> +#define FLAGS_CTR BIT(3)
>> +#define FLAGS_GCM BIT(4)
>> +
>> +#define FLAGS_INIT BIT(5)
>> +#define FLAGS_FAST BIT(6)
>> +#define FLAGS_BUSY BIT(7)
>> +
>> +#define AES_ASSOC_DATA_COPIED BIT(0)
>> +#define AES_IN_DATA_COPIED BIT(1)
>> +#define AES_OUT_DATA_COPIED BIT(2)
>> +
>> +#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
>> +
>> +struct tcrypt_result {
>> + struct completion completion;
>> + int err;
>> +};
>> +
>> +struct omap_aes_ctx {
>> + struct omap_aes_dev *dd;
>> +
>> + int keylen;
>> + u32 key[AES_KEYSIZE_256 / sizeof(u32)];
>> + u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
>> + unsigned long flags;
>> +};
>> +
>> +struct omap_aes_reqctx {
>> + unsigned long mode;
>> +};
>> +
>> +#define OMAP_AES_QUEUE_LENGTH 1
>> +#define OMAP_AES_CACHE_SIZE 0
>> +
>> +struct omap_aes_algs_info {
>> + struct crypto_alg *algs_list;
>> + unsigned int size;
>> + unsigned int registered;
>> +};
>> +
>> +struct omap_aes_pdata {
>> + struct omap_aes_algs_info *algs_info;
>> + unsigned int algs_info_size;
>> +
>> + void (*trigger)(struct omap_aes_dev *dd, int length);
>> +
>> + u32 key_ofs;
>> + u32 iv_ofs;
>> + u32 ctrl_ofs;
>> + u32 data_ofs;
>> + u32 rev_ofs;
>> + u32 mask_ofs;
>> + u32 irq_enable_ofs;
>> + u32 irq_status_ofs;
>> +
>> + u32 dma_enable_in;
>> + u32 dma_enable_out;
>> + u32 dma_start;
>> +
>> + u32 major_mask;
>> + u32 major_shift;
>> + u32 minor_mask;
>> + u32 minor_shift;
>> +};
>> +
>> +struct omap_aes_dev {
>> + struct list_head list;
>> + unsigned long phys_base;
>> + void __iomem *io_base;
>> + struct omap_aes_ctx *ctx;
>> + struct device *dev;
>> + unsigned long flags;
>> + int err;
>> +
>> + /* Lock to acquire omap_aes_dd */
>> + spinlock_t lock;
>> + struct crypto_queue queue;
>> + struct crypto_queue aead_queue;
>> +
>> + struct tasklet_struct done_task;
>> + struct tasklet_struct queue_task;
>> +
>> + struct ablkcipher_request *req;
>> + struct aead_request *aead_req;
>> +
>> + /*
>> + * total is used by PIO mode for book keeping so introduce
>> + * variable total_save as need it to calc page_order
>> + */
>> + size_t total;
>> + size_t total_save;
>> + size_t assoc_len;
>> + size_t authsize;
>> +
>> + struct scatterlist *in_sg;
>> + struct scatterlist *assoc_sg;
>> + struct scatterlist *out_sg;
>> +
>> + /* Buffers for copying for unaligned cases */
>> + struct scatterlist in_sgl[2];
>> + struct scatterlist out_sgl;
>> + struct scatterlist aead_sgl[2];
>> + struct scatterlist *orig_out;
>> + int sgs_copied;
>> +
>> + struct scatter_walk in_walk;
>> + struct scatter_walk out_walk;
>> + int dma_in;
>> + struct dma_chan *dma_lch_in;
>> + int dma_out;
>> + struct dma_chan *dma_lch_out;
>> + int in_sg_len;
>> + int out_sg_len;
>> + int pio_only;
>> + const struct omap_aes_pdata *pdata;
>> +};
>> +
>> +u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset);
>> +void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value);
>> +struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx);
>> +int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
>> + unsigned int keylen);
>> +int omap_aes_gcm_encrypt(struct aead_request *req);
>> +int omap_aes_gcm_decrypt(struct aead_request *req);
>> +int omap_aes_write_ctrl(struct omap_aes_dev *dd);
>> +int omap_aes_check_aligned(struct scatterlist *sg, int total);
>> +int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
>> +void omap_aes_gcm_dma_out_callback(void *data);
>> +int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
>> +
>> +#endif
>> --
>> 1.7.9.5
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
>> the body of a message to [email protected]
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>> Please read the FAQ at http://www.tux.org/lkml/
>
On Thursday 02 July 2015 01:34 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:36AM +0530, Lokesh Vutla wrote:
>> Its not necessary that assoc data and plain text is passed always.
>> Add these checks before processing the input.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>
> why can't this be combined with patch which added GCM in the first
> place ?
Yes, my initial patch had everything combined, but it was very big.
I tried my best to break it down into different patches in order to
help reviewers.
So, I kept the core functionality in one patch, and handled corner cases
like these in separate patches.
Thanks and regards,
Lokesh
>
>> ---
>> drivers/crypto/omap-aes-gcm.c | 26 ++++++++++++++++++++------
>> 1 file changed, 20 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
>> index 1be9d91..72815af 100644
>> --- a/drivers/crypto/omap-aes-gcm.c
>> +++ b/drivers/crypto/omap-aes-gcm.c
>> @@ -87,7 +87,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
>> struct aead_request *req)
>> {
>> void *buf_in;
>> - int alen, clen;
>> + int alen, clen, nsg;
>> struct crypto_aead *aead = crypto_aead_reqtfm(req);
>> unsigned int authlen = crypto_aead_authsize(aead);
>> u32 dec = !(dd->flags & FLAGS_ENCRYPT);
>> @@ -97,12 +97,18 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
>>
>> dd->sgs_copied = 0;
>>
>> - sg_init_table(dd->in_sgl, 2);
>> - buf_in = sg_virt(req->assoc);
>> - sg_set_buf(dd->in_sgl, buf_in, alen);
>> + nsg = 1 + !!(req->assoclen && req->cryptlen);
>>
>> - buf_in = sg_virt(req->src);
>> - sg_set_buf(&dd->in_sgl[1], buf_in, clen);
>> + sg_init_table(dd->in_sgl, nsg);
>> + if (req->assoclen) {
>> + buf_in = sg_virt(req->assoc);
>> + sg_set_buf(dd->in_sgl, buf_in, alen);
>> + }
>> +
>> + if (req->cryptlen) {
>> + buf_in = sg_virt(req->src);
>> + sg_set_buf(&dd->in_sgl[nsg - 1], buf_in, clen);
>> + }
>>
>> dd->in_sg = dd->in_sgl;
>> dd->total = clen;
>> @@ -258,6 +264,8 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
>> {
>> struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
>> struct omap_aes_reqctx *rctx = aead_request_ctx(req);
>> + struct crypto_aead *aead = crypto_aead_reqtfm(req);
>> + unsigned int authlen = crypto_aead_authsize(aead);
>> struct omap_aes_dev *dd;
>> __be32 counter = cpu_to_be32(1);
>> int err;
>> @@ -270,6 +278,12 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
>> if (err)
>> return err;
>>
>> + if (req->assoclen + req->cryptlen == 0) {
>> + scatterwalk_map_and_copy(ctx->auth_tag, req->dst, 0, authlen,
>> + 1);
>> + return 0;
>> + }
>> +
>> dd = omap_aes_find_dev(ctx);
>> if (!dd)
>> return -ENODEV;
>> --
>> 1.7.9.5
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
>> the body of a message to [email protected]
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>> Please read the FAQ at http://www.tux.org/lkml/
>
On Thursday 02 July 2015 01:36 PM, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:39AM +0530, Lokesh Vutla wrote:
>> Add support for PIO mode for GCM mode.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>
> why do you require PIO ? Is there any situation where DMA can't be
> used? What would that case be ?
I cannot think of any case where DMA can't be used.
But the current driver already supports PIO for all other AES modes.
I do not want to break that, so I added PIO support for GCM as well :)
Thanks and regards,
Lokesh
On Thu, Jul 02, 2015 at 10:48:38AM +0530, Lokesh Vutla wrote:
> Now the driver supports gcm mode, add omap-aes-gcm
> algo info to omap-aes driver.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
You're using the old AEAD interface. We are now moving to the
new AEAD interface so I will not be accepting any new implementations
using the old interface.
Please convert your driver over to the new interface.
Also please merge your GCM patches into a single patch. Splitting
out bug fixes makes no sense.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Jul 02, 2015 at 10:48:31AM +0530, Lokesh Vutla wrote:
> OMAP AES driver returns an error if the data is not aligned with
> AES_BLOCK_SIZE bytes.
> But OMAP AES hw allows data input upto 1 byte aligned, but still
> zeros are to be appended and complete AES_BLOCK_SIZE has to be written.
> And correct length has to be passed in LENGTH field.
> Adding support for inputs not aligned with AES_BLOCK_SIZE.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
Please explain the purpose of this patch. As it stands your change
log makes no sense. If you're relaxing the check for GCM support
then you should state that explicitly. Because partial blocks make
no sense otherwise.
Cheers,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Jul 02, 2015 at 10:48:33AM +0530, Lokesh Vutla wrote:
> Add aead_request_cast() api to get pointer to aead_request
> from cryto_async_request.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
> ---
> include/linux/crypto.h | 6 ++++++
> 1 file changed, 6 insertions(+)
>
> diff --git a/include/linux/crypto.h b/include/linux/crypto.h
> index 10df5d2..20fac3d 100644
> --- a/include/linux/crypto.h
> +++ b/include/linux/crypto.h
> @@ -1460,6 +1460,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
> req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
> }
>
> +static inline struct aead_request *aead_request_cast(
> + struct crypto_async_request *req)
> +{
> + return container_of(req, struct aead_request, base);
> +}
> +
This should go into include/crypto/internal/aead.h.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Jul 02, 2015 at 02:59:03AM -0500, Felipe Balbi wrote:
> On Thu, Jul 02, 2015 at 10:48:34AM +0530, Lokesh Vutla wrote:
> > Use BIT()/GENMASK() macros for all register definitions instead of
> > hand-writing bit masks.
> >
> > Signed-off-by: Lokesh Vutla <[email protected]>
> > ---
> > drivers/crypto/omap-aes.c | 36 ++++++++++++++++++------------------
> > 1 file changed, 18 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
> > index 96fc7f7..d974ab6 100644
> > --- a/drivers/crypto/omap-aes.c
> > +++ b/drivers/crypto/omap-aes.c
> > @@ -52,30 +52,30 @@
> > #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04))
> >
> > #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs)
> > -#define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7)
> > -#define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7)
> > -#define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7)
> > -#define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7)
> > -#define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7)
> > -#define AES_REG_CTRL_CTR (1 << 6)
> > -#define AES_REG_CTRL_CBC (1 << 5)
> > -#define AES_REG_CTRL_KEY_SIZE (3 << 3)
> > -#define AES_REG_CTRL_DIRECTION (1 << 2)
> > -#define AES_REG_CTRL_INPUT_READY (1 << 1)
> > -#define AES_REG_CTRL_OUTPUT_READY (1 << 0)
> > -#define AES_REG_CTRL_MASK FLD_MASK(24, 2)
> > +#define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7)
> > +#define AES_REG_CTRL_CTR_WIDTH_32 0
> > +#define AES_REG_CTRL_CTR_WIDTH_64 BIT(7)
> > +#define AES_REG_CTRL_CTR_WIDTH_96 BIT(8)
> > +#define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7)
> > +#define AES_REG_CTRL_CTR BIT(6)
> > +#define AES_REG_CTRL_CBC BIT(5)
> > +#define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3)
> > +#define AES_REG_CTRL_DIRECTION BIT(2)
> > +#define AES_REG_CTRL_INPUT_READY BIT(1)
> > +#define AES_REG_CTRL_OUTPUT_READY BIT(0)
> > +#define AES_REG_CTRL_MASK GENMASK(24, 2)
>
> this was defined a couple patches ago, why didn't you define it with
> GENMASK() to start with ?
Indeed, this should be folded into the earlier patch.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Jul 02, 2015 at 10:48:40AM +0530, Lokesh Vutla wrote:
> Adding simple speed tests for a range of block sizes for Async AEAD crypto
> algorithms.
>
> Signed-off-by: Lokesh Vutla <[email protected]>
What's wrong with the existing AEAD speed tests?
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Hi,
On Monday 06 July 2015 01:05 PM, Herbert Xu wrote:
> On Thu, Jul 02, 2015 at 10:48:38AM +0530, Lokesh Vutla wrote:
>> Now the driver supports gcm mode, add omap-aes-gcm
>> algo info to omap-aes driver.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>
> You're using the old AEAD interface. We are now moving to the
> new AEAD interface so I will not be accepting any new implementations
> using the old interface.
>
> Please convert your driver over to the new interface.
Will convert omap-aes driver to new interface and repost.
Thanks and regards,
Lokesh
>
> Also please merge your GCM patches into a single patch. Splitting
> out bug fixes makes no sense.
>
> Thanks,
>
Hi,
On Monday 06 July 2015 01:14 PM, Herbert Xu wrote:
> On Thu, Jul 02, 2015 at 10:48:40AM +0530, Lokesh Vutla wrote:
>> Adding simple speed tests for a range of block sizes for Async AEAD crypto
>> algorithms.
>>
>> Signed-off-by: Lokesh Vutla <[email protected]>
>
> What's wrong with the existing AEAD speed tests?
>
The existing AEAD speed test does not do a wait_for_completion() when
the return value is -EINPROGRESS or -EBUSY, as is done for the acipher speed tests.
Should I update the existing test case to handle this?
Correct me if I am wrong.
Thanks and regards,
Lokesh
On Mon, Jul 06, 2015 at 02:15:06PM +0530, Lokesh Vutla wrote:
>
> The existing AEAD test case does not do a wait_for_completion(), when
> the return value is EINPROGRESS or EBUSY like it is done for acipher_speed tests.
Please fix them to do the wait.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt