The hardware does not handle zero-length requests, so add a
fallback for that case.
Furthermore, the fallback will be used for all unaligned cases
the hardware cannot handle.
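The zero-length case is easy to trigger from a test module with the
standard synchronous wait pattern, for example (sketch only, error
handling and cleanup trimmed; key contents do not matter here):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 buf[AES_BLOCK_SIZE], key[AES_KEYSIZE_128], iv[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_skcipher("cbc-aes-rk", 0, 0);
	crypto_skcipher_setkey(tfm, key, sizeof(key));
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	sg_init_one(&sg, buf, sizeof(buf));
	/* cryptlen == 0 is the case the engine cannot process */
	skcipher_request_set_crypt(req, &sg, &sg, 0, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);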
Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API")
Signed-off-by: Corentin Labbe <[email protected]>
---
drivers/crypto/Kconfig | 4 +
drivers/crypto/rockchip/rk3288_crypto.h | 2 +
.../crypto/rockchip/rk3288_crypto_skcipher.c | 105 +++++++++++++++---
3 files changed, 98 insertions(+), 13 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 7b2d138bc83e..84ab14afcbd9 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -784,6 +784,10 @@ config CRYPTO_DEV_IMGTEC_HASH
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
+ depends on PM
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_DES
select CRYPTO_AES
select CRYPTO_LIB_DES
select CRYPTO_MD5
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index c919d9a43a08..8b1e15d8ddc6 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -246,10 +246,12 @@ struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
u8 iv[AES_BLOCK_SIZE];
+ struct crypto_skcipher *fallback_tfm;
};
struct rk_cipher_rctx {
u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
};
enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index bbd0bf52bf07..c6b601086c04 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -13,6 +13,71 @@
#define RK_CRYPTO_DEC BIT(0)
+static int rk_cipher_need_fallback(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct scatterlist *sgs, *sgd;
+	unsigned int todo, len;
+	unsigned int bs = crypto_skcipher_blocksize(tfm);
+
+ if (!req->cryptlen)
+ return true;
+
+ len = req->cryptlen;
+ sgs = req->src;
+ while (sgs) {
+ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
+ return true;
+ }
+ todo = min(len, sgs->length);
+ if (todo % bs) {
+ return true;
+ }
+ len -= todo;
+ sgs = sg_next(sgs);
+ }
+ len = req->cryptlen;
+ sgd = req->dst;
+ while (sgd) {
+ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
+ return true;
+ }
+ todo = min(len, sgd->length);
+ if (todo % bs) {
+ return true;
+ }
+ len -= todo;
+ sgd = sg_next(sgd);
+ }
+ sgs = req->src;
+ sgd = req->dst;
+ while (sgs && sgd) {
+ if (sgs->length != sgd->length)
+ return true;
+ sgs = sg_next(sgs);
+ sgd = sg_next(sgd);
+ }
+ return false;
+}
+
+static int rk_cipher_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (rctx->mode & RK_CRYPTO_DEC)
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ return err;
+}
+
static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
if (base->complete)
@@ -22,10 +87,10 @@ static void rk_crypto_complete(struct crypto_async_request *base, int err)
static int rk_handle_req(struct rk_crypto_info *dev,
struct skcipher_request *req)
{
- if (!IS_ALIGNED(req->cryptlen, dev->align_size))
- return -EINVAL;
- else
- return dev->enqueue(dev, &req->base);
+ if (rk_cipher_need_fallback(req))
+ return rk_cipher_fallback(req);
+
+ return dev->enqueue(dev, &req->base);
}
static int rk_aes_setkey(struct crypto_skcipher *cipher,
@@ -39,7 +104,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
return -EINVAL;
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
- return 0;
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_des_setkey(struct crypto_skcipher *cipher,
@@ -54,7 +120,8 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_tdes_setkey(struct crypto_skcipher *cipher,
@@ -69,7 +136,7 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
- return 0;
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
@@ -394,6 +461,7 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
struct rk_crypto_tmp *algt;
algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
@@ -407,6 +475,16 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
if (!ctx->dev->addr_vir)
return -ENOMEM;
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ tfm->reqsize = sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm);
+
return 0;
}
@@ -415,6 +493,7 @@ static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
free_page((unsigned long)ctx->dev->addr_vir);
+ crypto_free_skcipher(ctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
@@ -423,7 +502,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
@@ -445,7 +524,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x0f,
@@ -468,7 +547,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
@@ -490,7 +569,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
@@ -513,7 +592,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
@@ -535,7 +614,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-ede-rk",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
.base.cra_alignmask = 0x07,
--
2.35.1
On Fri, Apr 01, 2022 at 08:17:37PM +0000, Corentin Labbe wrote:
> The hardware does not handle zero-length requests, so add a
> fallback for that case.
> Furthermore, the fallback will be used for all unaligned cases
> the hardware cannot handle.
>
> Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API")
> Signed-off-by: Corentin Labbe <[email protected]>
> ---
> diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> index bbd0bf52bf07..c6b601086c04 100644
> --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> @@ -13,6 +13,71 @@
>
> #define RK_CRYPTO_DEC BIT(0)
>
> +static int rk_cipher_need_fallback(struct skcipher_request *req)
> +{
> +	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
> +	struct scatterlist *sgs, *sgd;
> +	unsigned int todo, len;
> +	unsigned int bs = crypto_skcipher_blocksize(tfm);
> +
> + if (!req->cryptlen)
> + return true;
> +
> + len = req->cryptlen;
> + sgs = req->src;
> + while (sgs) {
> + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
> + return true;
> + }
> + todo = min(len, sgs->length);
> + if (todo % bs) {
> + return true;
> + }
> + len -= todo;
> + sgs = sg_next(sgs);
> + }
> + len = req->cryptlen;
> + sgd = req->dst;
> + while (sgd) {
> + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
> + return true;
> + }
> + todo = min(len, sgd->length);
> + if (todo % bs) {
> + return true;
> + }
> + len -= todo;
> + sgd = sg_next(sgd);
> + }
> + sgs = req->src;
> + sgd = req->dst;
> + while (sgs && sgd) {
> + if (sgs->length != sgd->length)
This check still seems to be triggering the fallback when it is not
needed.
I've done some testing with fscrypt and the series is working great, but
the stats show the fallback triggering more than I'd expect. With some
extra logging here I see output like:
sgs->length=32 sgd->length=255 req->cryptlen=16
In this case sgs and sgd are both the first (and only) entries in the
list. Should this take account of req->cryptlen as well?
In fact, can't this whole function be folded into one loop over src and
dst at the same time, since all the checks must be the same? Something
like this (untested):
while (sgs && sgd) {
if (!IS_ALIGNED(sgs->offset, sizeof(u32)) ||
!IS_ALIGNED(sgd->offset, sizeof(u32)))
return true;
todo = min(len, sgs->length);
if (todo % bs)
return true;
if (sgd->length < todo)
return true;
len -= todo;
sgs = sg_next(sgs);
sgd = sg_next(sgd);
}
if (len)
return true;
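Checking sgd->length < todo instead of strict equality means a longer
dst entry (like the 255-byte one above) no longer forces the fallback,
and the final if (len) check catches a src list that does not cover the
whole cryptlen.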
> + return true;
> + sgs = sg_next(sgs);
> + sgd = sg_next(sgd);
> + }
> + return false;
> +}
On Mon, Apr 04, 2022 at 12:26:15PM +0100, John Keeping wrote:
> On Fri, Apr 01, 2022 at 08:17:37PM +0000, Corentin Labbe wrote:
> > The hardware does not handle zero-length requests, so add a
> > fallback for that case.
> > Furthermore, the fallback will be used for all unaligned cases
> > the hardware cannot handle.
> >
> > Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API")
> > Signed-off-by: Corentin Labbe <[email protected]>
> > ---
> > diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> > index bbd0bf52bf07..c6b601086c04 100644
> > --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> > +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
> > @@ -13,6 +13,71 @@
> >
> > #define RK_CRYPTO_DEC BIT(0)
> >
> > +static int rk_cipher_need_fallback(struct skcipher_request *req)
> > +{
> > +	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
> > +	struct scatterlist *sgs, *sgd;
> > +	unsigned int todo, len;
> > +	unsigned int bs = crypto_skcipher_blocksize(tfm);
> > +
> > + if (!req->cryptlen)
> > + return true;
> > +
> > + len = req->cryptlen;
> > + sgs = req->src;
> > + while (sgs) {
> > + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
> > + return true;
> > + }
> > + todo = min(len, sgs->length);
> > + if (todo % bs) {
> > + return true;
> > + }
> > + len -= todo;
> > + sgs = sg_next(sgs);
> > + }
> > + len = req->cryptlen;
> > + sgd = req->dst;
> > + while (sgd) {
> > + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
> > + return true;
> > + }
> > + todo = min(len, sgd->length);
> > + if (todo % bs) {
> > + return true;
> > + }
> > + len -= todo;
> > + sgd = sg_next(sgd);
> > + }
> > + sgs = req->src;
> > + sgd = req->dst;
> > + while (sgs && sgd) {
> > + if (sgs->length != sgd->length)
>
> This check still seems to be triggering the fallback when it is not
> needed.
>
> I've done some testing with fscrypt and the series is working great, but
> the stats show the fallback triggering more than I'd expect. With some
> extra logging here I see output like:
>
> sgs->length=32 sgd->length=255 req->cryptlen=16
>
> In this case sgs and sgd are both the first (and only) entries in the
> list. Should this take account of req->cryptlen as well?
>
> In fact, can't this whole function be folded into one loop over src and
> dst at the same time, since all the checks must be the same? Something
> like this (untested):
>
> while (sgs && sgd) {
> if (!IS_ALIGNED(sgs->offset, sizeof(u32)) ||
> !IS_ALIGNED(sgd->offset, sizeof(u32)))
> return true;
>
> todo = min(len, sgs->length);
> if (todo % bs)
> return true;
>
> if (sgd->length < todo)
> return true;
>
> len -= todo;
> sgs = sg_next(sgs);
> sgd = sg_next(sgd);
> }
>
> if (len)
> return true;
>
Thanks for this hint, I will use it.
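Probably something like this for the next version (untested, your loop
plus the zero-length check, with the tfm taken from the request):

	static int rk_cipher_need_fallback(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct scatterlist *sgs = req->src;
		struct scatterlist *sgd = req->dst;
		unsigned int todo;
		unsigned int len = req->cryptlen;
		unsigned int bs = crypto_skcipher_blocksize(tfm);

		/* the hardware does not handle zero-length requests */
		if (!len)
			return true;

		while (sgs && sgd) {
			/* the engine needs 32-bit aligned buffers */
			if (!IS_ALIGNED(sgs->offset, sizeof(u32)) ||
			    !IS_ALIGNED(sgd->offset, sizeof(u32)))
				return true;

			todo = min(len, sgs->length);
			if (todo % bs)
				return true;

			/* dst may be larger than src, but never smaller */
			if (sgd->length < todo)
				return true;

			len -= todo;
			sgs = sg_next(sgs);
			sgd = sg_next(sgd);
		}

		/* src did not cover the whole cryptlen */
		if (len)
			return true;

		return false;
	}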
Regards