Hi,
V6 only fixes a handful of static checker warnings. Tested building with
the W=1 and C=1 make options, ran a sanity test with the crypto manager
tests (including the extra tests), and did a quick trial with tcrypt.
-Tero
The child devices of sa2ul (like the RNG) have a hard dependency on their
parent; they cannot function without the parent being enabled. Add device
links for this purpose so that the dependencies are handled properly.
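As a minimal sketch (the device pointers below are hypothetical), each child
is linked to the sa2ul parent roughly as follows; DL_FLAG_AUTOPROBE_CONSUMER
makes the driver core defer probing the consumer until the supplier (the
parent) has been bound:

#include <linux/device.h>

static int sa2ul_link_child_example(struct device *child,
				    struct device *parent)
{
	struct device_link *link;

	/* the parent is the supplier, the child device is the consumer */
	link = device_link_add(child, parent, DL_FLAG_AUTOPROBE_CONSUMER);
	if (!link)
		return -ENODEV;

	return 0;
}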
Signed-off-by: Tero Kristo <[email protected]>
---
drivers/crypto/sa2ul.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index fb4c0aba9048..ebcdffcdb686 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2302,6 +2302,15 @@ static int sa_dma_init(struct sa_crypto_data *dd)
return ret;
}
+static int sa_link_child(struct device *dev, void *data)
+{
+ struct device *parent = data;
+
+ device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
+
+ return 0;
+}
+
static int sa_ul_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2352,6 +2361,8 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret)
goto release_dma;
+ device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+
return 0;
release_dma:
--
2.17.1
From: Keerthy <[email protected]>
Add the crypto accelerator node to support hardware crypto algorithms,
including SHA1, SHA256, SHA512, AES, 3DES, and the AEAD suites.
Signed-off-by: Keerthy <[email protected]>
[[email protected]: Modifications based on introduction of yaml binding]
Signed-off-by: Tero Kristo <[email protected]>
---
arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 61815228e230..4615f2009a35 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -112,6 +112,28 @@
power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>;
};
+ crypto: crypto@4E00000 {
+ compatible = "ti,am654-sa2ul";
+ reg = <0x0 0x4E00000 0x0 0x1200>;
+ power-domains = <&k3_pds 136 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x04E00000 0x00 0x04E00000 0x0 0x30000>;
+ status = "okay";
+
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
+ <&main_udmap 0x4001>;
+ dma-names = "tx", "rx1", "rx2";
+ dma-coherent;
+
+ rng: rng@4e10000 {
+ compatible = "inside-secure,safexcel-eip76";
+ reg = <0x0 0x4e10000 0x0 0x7d>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 136 1>;
+ };
+ };
+
main_pmx0: pinmux@11c000 {
compatible = "pinctrl-single";
reg = <0x0 0x11c000 0x0 0x2e4>;
--
2.17.1
From: Keerthy <[email protected]>
Add support for the sa2ul hardware AEAD algorithms
authenc(hmac(sha256),cbc(aes)) and authenc(hmac(sha1),cbc(aes)).
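For reference, a minimal sketch of exercising one of these transforms
through the generic kernel AEAD API (the key, buffer, lengths and IV below
are hypothetical; the key blob must be packed in the usual authenc() format
understood by crypto_authenc_extractkeys()):

#include <crypto/aead.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sa2ul_aead_encrypt_example(const u8 *key, unsigned int keylen,
				      u8 *buf, unsigned int assoclen,
				      unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the key blob carries both the HMAC and the AES key */
	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
	if (ret)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* buf = AAD | plaintext | room for the authentication tag */
	sg_init_one(&sg, buf, assoclen + cryptlen + SHA256_DIGEST_SIZE);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return ret;
}

Decryption is the same flow with crypto_aead_decrypt() and cryptlen
covering the ciphertext plus the tag.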
Signed-off-by: Keerthy <[email protected]>
[[email protected]: a number of bug fixes, major refactoring and cleanup of
the code]
Signed-off-by: Tero Kristo <[email protected]>
---
drivers/crypto/sa2ul.c | 538 +++++++++++++++++++++++++++++++++++++++--
drivers/crypto/sa2ul.h | 1 +
2 files changed, 518 insertions(+), 21 deletions(-)
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 761754dd2b88..fb4c0aba9048 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -17,7 +17,9 @@
#include <linux/pm_runtime.h>
#include <crypto/aes.h>
+#include <crypto/authenc.h>
#include <crypto/des.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -77,6 +79,7 @@ static struct device *sa_k3_dev;
* @iv_size: Initialization Vector size
* @akey: Authentication key
* @akey_len: Authentication key length
+ * @enc: True, if this is an encode request
*/
struct sa_cmdl_cfg {
int aalg;
@@ -85,6 +88,7 @@ struct sa_cmdl_cfg {
u8 iv_size;
const u8 *akey;
u16 akey_len;
+ bool enc;
};
/**
@@ -101,6 +105,8 @@ struct sa_cmdl_cfg {
* @mci_dec: Mode Control Instruction for Decryption
* @inv_key: Whether the encryption algorithm demands key inversion
* @ctx: Pointer to the algorithm context
+ * @keyed_mac: Whether the authentication algorithm has key
+ * @prep_iopad: Function pointer to generate intermediate ipad/opad
*/
struct algo_data {
struct sa_eng_info enc_eng;
@@ -115,6 +121,9 @@ struct algo_data {
u8 *mci_dec;
bool inv_key;
struct sa_tfm_ctx *ctx;
+ bool keyed_mac;
+ void (*prep_iopad)(struct algo_data *algo, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad);
};
/**
@@ -128,6 +137,7 @@ struct sa_alg_tmpl {
union {
struct skcipher_alg skcipher;
struct ahash_alg ahash;
+ struct aead_alg aead;
} alg;
bool registered;
};
@@ -234,6 +244,38 @@ static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
};
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for encryption
+ */
+static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for decryption
+ */
+static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
/*
* Mode Control Instructions for various Key lengths 128, 192, 256
* For ECB (Electronic Code Book) mode for encryption
@@ -313,6 +355,82 @@ static void sa_swiz_128(u8 *in, u16 len)
}
}
+/* Prepare the ipad and opad from key as per SHA algorithm step 1*/
+static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++) {
+ k_ipad[i] = key[i] ^ 0x36;
+ k_opad[i] = key[i] ^ 0x5c;
+ }
+
+ /* Instead of XOR with 0 */
+ for (; i < SHA1_BLOCK_SIZE; i++) {
+ k_ipad[i] = 0x36;
+ k_opad[i] = 0x5c;
+ }
+}
+
+static void sa_export_shash(struct shash_desc *hash, int block_size,
+ int digest_size, __be32 *out)
+{
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ } sha;
+ void *state;
+ u32 *result;
+ int i;
+
+ switch (digest_size) {
+ case SHA1_DIGEST_SIZE:
+ state = &sha.sha1;
+ result = sha.sha1.state;
+ break;
+ case SHA256_DIGEST_SIZE:
+ state = &sha.sha256;
+ result = sha.sha256.state;
+ break;
+ default:
+ dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
+ digest_size);
+ return;
+ }
+
+ crypto_shash_export(hash, state);
+
+ for (i = 0; i < digest_size >> 2; i++)
+ out[i] = cpu_to_be32(result[i]);
+}
+
+static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad)
+{
+ SHASH_DESC_ON_STACK(shash, data->ctx->shash);
+ int block_size = crypto_shash_blocksize(data->ctx->shash);
+ int digest_size = crypto_shash_digestsize(data->ctx->shash);
+ u8 k_ipad[SHA1_BLOCK_SIZE];
+ u8 k_opad[SHA1_BLOCK_SIZE];
+
+ shash->tfm = data->ctx->shash;
+
+ prepare_kiopad(k_ipad, k_opad, key, key_sz);
+
+ memzero_explicit(ipad, block_size);
+ memzero_explicit(opad, block_size);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_ipad, block_size);
+ sa_export_shash(shash, block_size, digest_size, ipad);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_opad, block_size);
+
+ sa_export_shash(shash, block_size, digest_size, opad);
+}
+
/* Derive the inverse key used in AES-CBC decryption operation */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
@@ -383,14 +501,26 @@ static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
u8 *sc_buf)
{
+ __be32 ipad[64], opad[64];
+
/* Set Authentication mode selector to hash processing */
sc_buf[0] = SA_HASH_PROCESSING;
/* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
sc_buf[1] |= ad->auth_ctrl;
- /* basic hash */
- sc_buf[1] |= SA_BASIC_HASH;
+ /* Copy the keys or ipad/opad */
+ if (ad->keyed_mac) {
+ ad->prep_iopad(ad, key, key_sz, ipad, opad);
+
+ /* Copy ipad to AuthKey */
+ memcpy(&sc_buf[32], ipad, ad->hash_size);
+ /* Copy opad to Aux-1 */
+ memcpy(&sc_buf[64], opad, ad->hash_size);
+ } else {
+ /* basic hash */
+ sc_buf[1] |= SA_BASIC_HASH;
+ }
}
static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
@@ -420,16 +550,18 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
/* Iniialize the command update structure */
memzero_explicit(upd_info, sizeof(*upd_info));
- if (cfg->enc_eng_id)
- total = SA_CMDL_HEADER_SIZE_BYTES;
-
- if (cfg->auth_eng_id)
- total = SA_CMDL_HEADER_SIZE_BYTES;
-
- if (cfg->iv_size)
- total += cfg->iv_size;
+ if (cfg->enc_eng_id && cfg->auth_eng_id) {
+ if (cfg->enc) {
+ auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ enc_next_eng = cfg->auth_eng_id;
- enc_next_eng = SA_ENG_ID_OUTPORT2;
+ if (cfg->iv_size)
+ auth_offset += cfg->iv_size;
+ } else {
+ enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ auth_next_eng = cfg->enc_eng_id;
+ }
+ }
if (cfg->enc_eng_id) {
upd_info->flags |= SA_CMDL_UPD_ENC;
@@ -450,11 +582,11 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
- enc_offset += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+ total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
} else {
cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
SA_CMDL_HEADER_SIZE_BYTES;
- enc_offset += SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
}
}
@@ -562,23 +694,28 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
int auth_sc_offset = 0;
u8 *sc_buf = ctx->sc;
u16 sc_id = ctx->sc_id;
- u8 first_engine;
+ u8 first_engine = 0;
memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
- if (ad->enc_eng.eng_id) {
- enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
- first_engine = ad->enc_eng.eng_id;
- sc_buf[1] = SA_SCCTL_FE_ENC;
- ad->hash_size = ad->iv_out_size;
- } else {
+ if (ad->auth_eng.eng_id) {
+ if (enc)
+ first_engine = ad->enc_eng.eng_id;
+ else
+ first_engine = ad->auth_eng.eng_id;
+
enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
- first_engine = ad->auth_eng.eng_id;
sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
if (!ad->hash_size)
return -EINVAL;
ad->hash_size = roundup(ad->hash_size, 8);
+
+ } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ first_engine = ad->enc_eng.eng_id;
+ sc_buf[1] = SA_SCCTL_FE_ENC;
+ ad->hash_size = ad->iv_out_size;
}
/* SCCTL Owner info: 0=host, 1=CP_ACE */
@@ -1491,6 +1628,305 @@ static void sa_sha_cra_exit(struct crypto_tfm *tfm)
crypto_free_ahash(ctx->fallback.ahash);
}
+static void sa_aead_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ unsigned int start;
+ unsigned int authsize;
+ u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
+ size_t pl, ml;
+ int i, sglen;
+ int err = 0;
+ u16 auth_len;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ req = container_of(rxd->req, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ start = req->assoclen + req->cryptlen;
+ authsize = crypto_aead_authsize(tfm);
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ for (i = 0; i < (authsize / 4); i++)
+ mdptr[i + 4] = swab32(mdptr[i + 4]);
+
+ auth_len = req->assoclen + req->cryptlen;
+ if (!rxd->enc)
+ auth_len -= authsize;
+
+ sglen = sg_nents_for_len(rxd->src, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(rxd->dst, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ if (rxd->enc) {
+ scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
+ 1);
+ } else {
+ start -= authsize;
+ scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
+ 0);
+
+ err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
+ }
+
+ kfree(rxd);
+
+ aead_request_complete(req, err);
+}
+
+static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
+ const char *fallback)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+
+ ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash)) {
+ dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
+ return PTR_ERR(ctx->shash);
+ }
+
+ ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback.aead)) {
+ dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
+ fallback);
+ return PTR_ERR(ctx->fallback.aead);
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fallback.aead));
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ return ret;
+}
+
+static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha1",
+ "authenc(hmac(sha1-ce),cbc(aes-ce))");
+}
+
+static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha256",
+ "authenc(hmac(sha256-ce),cbc(aes-ce))");
+}
+
+static void sa_exit_tfm_aead(struct crypto_aead *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_aead(ctx->fallback.aead);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+}
+
+/* AEAD algorithm configuration interface function */
+static int sa_aead_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen,
+ struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_authenc_keys keys;
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int key_idx;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ return -EINVAL;
+
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ key_idx = (keys.enckeylen >> 3) - 2;
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad->ctx = ctx;
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+ ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
+ ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
+ ad->inv_key = true;
+ ad->keyed_mac = true;
+ ad->ealg_id = SA_EALG_ID_AES_CBC;
+ ad->prep_iopad = sa_prepare_iopads;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.enc = true;
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = crypto_aead_ivsize(authenc);
+ cfg.akey = keys.authkey;
+ cfg.akey_len = keys.authkeylen;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 1, &ctx->enc.epib[1]))
+ return -EINVAL;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 0, &ctx->dec.epib[1]))
+ return -EINVAL;
+
+ cfg.enc = false;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->dec.cmdl_size = cmdl_len;
+
+ crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->fallback.aead,
+ crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+ crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+
+ return 0;
+}
+
+static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+ return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
+}
+
+static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_req sa_req = { 0 };
+ size_t auth_size, enc_size;
+
+ enc_size = req->cryptlen;
+ auth_size = req->assoclen + req->cryptlen;
+
+ if (!enc) {
+ enc_size -= crypto_aead_authsize(tfm);
+ auth_size -= crypto_aead_authsize(tfm);
+ }
+
+ if (auth_size > SA_MAX_DATA_SZ ||
+ (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct aead_request *subreq = aead_request_ctx(req);
+ int ret;
+
+ aead_request_set_tfm(subreq, ctx->fallback.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ ret = enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+ return ret;
+ }
+
+ sa_req.enc_offset = req->assoclen;
+ sa_req.enc_size = enc_size;
+ sa_req.auth_size = auth_size;
+ sa_req.size = auth_size;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_AEAD;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aead_dma_in_callback;
+ sa_req.mdata_size = 52;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+
+ return sa_run(&sa_req);
+}
+
+/* AEAD algorithm encrypt interface function */
+static int sa_aead_encrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 1);
+}
+
+/* AEAD algorithm decrypt interface function */
+static int sa_aead_decrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 0);
+}
+
static struct sa_alg_tmpl sa_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -1669,6 +2105,61 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha1),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha1,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha1_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha256),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha256,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha256_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
};
/* Register the algorithms in crypto framework */
@@ -1686,6 +2177,9 @@ static void sa_register_algos(const struct device *dev)
} else if (type == CRYPTO_ALG_TYPE_AHASH) {
alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
err = crypto_register_ahash(&sa_algs[i].alg.ahash);
+ } else if (type == CRYPTO_ALG_TYPE_AEAD) {
+ alg_name = sa_algs[i].alg.aead.base.cra_name;
+ err = crypto_register_aead(&sa_algs[i].alg.aead);
} else {
dev_err(dev,
"un-supported crypto algorithm (%d)",
@@ -1714,6 +2208,8 @@ static void sa_unregister_algos(const struct device *dev)
crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
else if (type == CRYPTO_ALG_TYPE_AHASH)
crypto_unregister_ahash(&sa_algs[i].alg.ahash);
+ else if (type == CRYPTO_ALG_TYPE_AEAD)
+ crypto_unregister_aead(&sa_algs[i].alg.aead);
sa_algs[i].registered = false;
}
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index dc5e3470c3a0..7f7e3fe60d11 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -313,6 +313,7 @@ struct sa_tfm_ctx {
union {
struct crypto_sync_skcipher *skcipher;
struct crypto_ahash *ahash;
+ struct crypto_aead *aead;
} fallback;
};
--
2.17.1
From: Keerthy <[email protected]>
Add sa2ul based hardware authentication support for sha1/sha256/sha512.
The hash update path always uses the software fallback for now, as there
is no way to fetch the partial hash state from the HW accelerator. The HW
accelerator is only used when digest is called for a data chunk of known
size.
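A minimal sketch of the one-shot digest path that does get offloaded,
through the generic ahash API (the data pointer and length below are
hypothetical):

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sa2ul_sha256_digest_example(const u8 *data, unsigned int len,
				       u8 *out /* SHA256_DIGEST_SIZE bytes */)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* single digest of a known-size chunk: this is what SA2UL handles */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}

An init/update/final sequence on the same tfm goes through the software
fallback instead, as described above.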
Signed-off-by: Keerthy <[email protected]>
[[email protected]: various bug fixes, major cleanups and refactoring of code]
Signed-off-by: Tero Kristo <[email protected]>
---
drivers/crypto/sa2ul.c | 549 ++++++++++++++++++++++++++++++++++++++++-
drivers/crypto/sa2ul.h | 24 +-
2 files changed, 560 insertions(+), 13 deletions(-)
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 860c7435fefa..761754dd2b88 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -18,8 +18,10 @@
#include <crypto/aes.h>
#include <crypto/des.h>
+#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
#include "sa2ul.h"
@@ -69,20 +71,32 @@ static struct device *sa_k3_dev;
/**
* struct sa_cmdl_cfg - Command label configuration descriptor
+ * @aalg: authentication algorithm ID
* @enc_eng_id: Encryption Engine ID supported by the SA hardware
+ * @auth_eng_id: Authentication Engine ID
* @iv_size: Initialization Vector size
+ * @akey: Authentication key
+ * @akey_len: Authentication key length
*/
struct sa_cmdl_cfg {
+ int aalg;
u8 enc_eng_id;
+ u8 auth_eng_id;
u8 iv_size;
+ const u8 *akey;
+ u16 akey_len;
};
/**
* struct algo_data - Crypto algorithm specific data
* @enc_eng: Encryption engine info structure
+ * @auth_eng: Authentication engine info structure
+ * @auth_ctrl: Authentication control word
+ * @hash_size: Size of digest
* @iv_idx: iv index in psdata
* @iv_out_size: iv out size
* @ealg_id: Encryption Algorithm ID
+ * @aalg_id: Authentication algorithm ID
* @mci_enc: Mode Control Instruction for Encryption algorithm
* @mci_dec: Mode Control Instruction for Decryption
* @inv_key: Whether the encryption algorithm demands key inversion
@@ -90,9 +104,13 @@ struct sa_cmdl_cfg {
*/
struct algo_data {
struct sa_eng_info enc_eng;
+ struct sa_eng_info auth_eng;
+ u8 auth_ctrl;
+ u8 hash_size;
u8 iv_idx;
u8 iv_out_size;
u8 ealg_id;
+ u8 aalg_id;
u8 *mci_enc;
u8 *mci_dec;
bool inv_key;
@@ -109,6 +127,7 @@ struct sa_alg_tmpl {
u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
union {
struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
} alg;
bool registered;
};
@@ -150,6 +169,9 @@ struct sa_rx_data {
* @enc_offset: offset of cipher data
* @enc_size: data to be passed to cipher engine
* @enc_iv: cipher IV
+ * @auth_offset: offset of the authentication data
+ * @auth_size: size of the authentication data
+ * @auth_iv: authentication IV
* @type: algorithm type for the request
* @cmdl: command label pointer
* @base: pointer to the base request
@@ -166,6 +188,9 @@ struct sa_req {
u8 enc_offset;
u16 enc_size;
u8 *enc_iv;
+ u8 auth_offset;
+ u16 auth_size;
+ u8 *auth_iv;
u32 type;
u32 *cmdl;
struct crypto_async_request *base;
@@ -354,6 +379,20 @@ static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
return 0;
}
+/* Set Security context for the authentication engine */
+static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 *sc_buf)
+{
+ /* Set Authentication mode selector to hash processing */
+ sc_buf[0] = SA_HASH_PROCESSING;
+ /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
+ sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
+ sc_buf[1] |= ad->auth_ctrl;
+
+ /* basic hash */
+ sc_buf[1] |= SA_BASIC_HASH;
+}
+
static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
{
int j;
@@ -369,8 +408,9 @@ static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
struct sa_cmdl_upd_info *upd_info)
{
- u8 enc_offset = 0, total = 0;
+ u8 enc_offset = 0, auth_offset = 0, total = 0;
u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
+ u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
u32 *word_ptr = (u32 *)cmdl;
int i;
@@ -380,7 +420,10 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
/* Iniialize the command update structure */
memzero_explicit(upd_info, sizeof(*upd_info));
- if (cfg->enc_eng_id != SA_ENG_ID_NONE)
+ if (cfg->enc_eng_id)
+ total = SA_CMDL_HEADER_SIZE_BYTES;
+
+ if (cfg->auth_eng_id)
total = SA_CMDL_HEADER_SIZE_BYTES;
if (cfg->iv_size)
@@ -388,7 +431,7 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
enc_next_eng = SA_ENG_ID_OUTPORT2;
- if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
+ if (cfg->enc_eng_id) {
upd_info->flags |= SA_CMDL_UPD_ENC;
upd_info->enc_size.index = enc_offset >> 2;
upd_info->enc_offset.index = upd_info->enc_size.index + 1;
@@ -415,6 +458,16 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
}
}
+ if (cfg->auth_eng_id) {
+ upd_info->flags |= SA_CMDL_UPD_AUTH;
+ upd_info->auth_size.index = auth_offset >> 2;
+ upd_info->auth_offset.index = upd_info->auth_size.index + 1;
+ cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
+ cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+
total = roundup(total, 8);
for (i = 0; i < total / 4; i++)
@@ -448,6 +501,27 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
}
}
}
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
+ cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->auth_size.index] |= req->auth_size;
+ cmdl[upd_info->auth_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->auth_offset.index] |=
+ ((u32)req->auth_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
+ sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
+ req->auth_iv,
+ (upd_info->auth_iv.size > 8));
+ }
+ if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
+ int offset = (req->auth_size & 0xF) ? 4 : 0;
+
+ memcpy(&cmdl[upd_info->aux_key_info.index],
+ &upd_info->aux_key[offset], 16);
+ }
+ }
}
/* Format SWINFO words to be sent to SA */
@@ -481,21 +555,34 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
static
int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
- u16 enc_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo)
+ u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+ struct algo_data *ad, u8 enc, u32 *swinfo)
{
int enc_sc_offset = 0;
+ int auth_sc_offset = 0;
u8 *sc_buf = ctx->sc;
u16 sc_id = ctx->sc_id;
u8 first_engine;
memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
- enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ if (ad->enc_eng.eng_id) {
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ first_engine = ad->enc_eng.eng_id;
+ sc_buf[1] = SA_SCCTL_FE_ENC;
+ ad->hash_size = ad->iv_out_size;
+ } else {
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
+ first_engine = ad->auth_eng.eng_id;
+ sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
+ if (!ad->hash_size)
+ return -EINVAL;
+ ad->hash_size = roundup(ad->hash_size, 8);
+ }
/* SCCTL Owner info: 0=host, 1=CP_ACE */
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
- /* SCCTL F/E control */
- sc_buf[1] = SA_SCCTL_FE_ENC;
memcpy(&sc_buf[2], &sc_id, 2);
sc_buf[4] = 0x0;
sc_buf[5] = PRIV_ID;
@@ -509,16 +596,19 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
return -EINVAL;
}
+ /* Prepare context for authentication engine */
+ if (ad->auth_eng.sc_size)
+ sa_set_sc_auth(ad, auth_key, auth_key_sz,
+ &sc_buf[auth_sc_offset]);
+
/* Set the ownership of context to CP_ACE */
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
/* swizzle the security context */
sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
- /* Setup SWINFO */
- first_engine = ad->enc_eng.eng_id;
sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
- SA_SW_INFO_FLAG_EVICT, ad->iv_out_size, swinfo);
+ SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
sa_dump_sc(sc_buf, ctx->sc_phys);
@@ -652,7 +742,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
return ret;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, key, keylen, ad, 1, &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
+ &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -664,7 +755,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, key, keylen, ad, 0, &ctx->dec.epib[1]))
+ if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
+ &ctx->dec.epib[1]))
goto badkey;
cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1058,6 +1150,347 @@ static int sa_decrypt(struct skcipher_request *req)
return sa_cipher_run(req, req->iv, 0);
}
+static void sa_sha_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ unsigned int authsize;
+ int i, sg_nents;
+ size_t ml, pl;
+ u32 *result;
+ __be32 *mdptr;
+
+ req = container_of(rxd->req, struct ahash_request, base);
+ tfm = crypto_ahash_reqtfm(req);
+ authsize = crypto_ahash_digestsize(tfm);
+
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ result = (u32 *)req->result;
+
+ for (i = 0; i < (authsize / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + 4]);
+
+ sg_nents = sg_nents_for_len(req->src, req->nbytes);
+ dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
+
+ kfree(rxd->split_src_sg);
+
+ kfree(rxd);
+
+ ahash_request_complete(req, 0);
+}
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int sa_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (sa_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
+ break;
+ case SHA512_DIGEST_SIZE:
+ memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sa_sha_run(struct ahash_request *req)
+{
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_req sa_req = { 0 };
+ size_t auth_len;
+
+ auth_len = req->nbytes;
+
+ if (!auth_len)
+ return zero_message_process(req);
+
+ if (auth_len > SA_MAX_DATA_SZ ||
+ (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct ahash_request *subreq = &rctx->fallback_req;
+ int ret = 0;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ crypto_ahash_init(subreq);
+
+ subreq->nbytes = auth_len;
+ subreq->src = req->src;
+ subreq->result = req->result;
+
+ ret |= crypto_ahash_update(subreq);
+
+ subreq->nbytes = 0;
+
+ ret |= crypto_ahash_final(subreq);
+
+ return ret;
+ }
+
+ sa_req.size = auth_len;
+ sa_req.auth_size = auth_len;
+ sa_req.src = req->src;
+ sa_req.dst = req->src;
+ sa_req.enc = true;
+ sa_req.type = CRYPTO_ALG_TYPE_AHASH;
+ sa_req.callback = sa_sha_dma_in_callback;
+ sa_req.mdata_size = 28;
+ sa_req.ctx = ctx;
+ sa_req.base = &req->base;
+
+ return sa_run(&sa_req);
+}
+
+static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
+{
+ int bs = crypto_shash_blocksize(ctx->shash);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+
+ memset(ctx->authkey, 0, bs);
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = 0;
+ cfg.akey = NULL;
+ cfg.akey_len = 0;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
+ &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dev_data = data;
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ if (alg_base) {
+ ctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash)) {
+ dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
+ alg_base);
+ return PTR_ERR(ctx->shash);
+ }
+ /* for fallback */
+ ctx->fallback.ahash =
+ crypto_alloc_ahash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback.ahash)) {
+ dev_err(ctx->dev_data->dev,
+ "Could not load fallback driver\n");
+ return PTR_ERR(ctx->fallback.ahash);
+ }
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sa_sha_req_ctx) +
+ crypto_ahash_reqsize(ctx->fallback.ahash));
+
+ return 0;
+}
+
+static int sa_sha_digest(struct ahash_request *req)
+{
+ return sa_sha_run(req);
+}
+
+static int sa_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
+ crypto_ahash_digestsize(tfm), (u64)rctx);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int sa_sha_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int sa_sha_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int sa_sha_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sa_sha_import(struct ahash_request *req, const void *in)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int sa_sha_export(struct ahash_request *req, void *out)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = &rctx->fallback_req;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(subreq, out);
+}
+
+static int sa_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha1");
+
+ ad.aalg_id = SA_AALG_ID_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha256_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha256");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha512");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_512;
+ ad.hash_size = SHA512_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static void sa_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
+ sa_free_ctx_info(&ctx->enc, data);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_ahash(ctx->fallback.ahash);
+}
+
static struct sa_alg_tmpl sa_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -1149,6 +1582,93 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha1_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha1_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha256_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha256_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha512_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha512_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
};
/* Register the algorithms in crypto framework */
@@ -1163,6 +1683,9 @@ static void sa_register_algos(const struct device *dev)
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
alg_name = sa_algs[i].alg.skcipher.base.cra_name;
err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
+ } else if (type == CRYPTO_ALG_TYPE_AHASH) {
+ alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
+ err = crypto_register_ahash(&sa_algs[i].alg.ahash);
} else {
dev_err(dev,
"un-supported crypto algorithm (%d)",
@@ -1189,6 +1712,8 @@ static void sa_unregister_algos(const struct device *dev)
continue;
if (type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
+ else if (type == CRYPTO_ALG_TYPE_AHASH)
+ crypto_unregister_ahash(&sa_algs[i].alg.ahash);
sa_algs[i].registered = false;
}
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index 45ba86cb5d11..dc5e3470c3a0 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -73,7 +73,6 @@ struct sa_tfm_ctx;
#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */
#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */
#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */
-#define SA_ENG_ID_NONE 0xff
/*
* Command Label Definitions
@@ -156,6 +155,13 @@ struct sa_tfm_ctx;
#define SA_ALIGN_MASK (sizeof(u32) - 1)
#define SA_ALIGNED __aligned(32)
+#define SA_AUTH_SW_CTRL_MD5 1
+#define SA_AUTH_SW_CTRL_SHA1 2
+#define SA_AUTH_SW_CTRL_SHA224 3
+#define SA_AUTH_SW_CTRL_SHA256 4
+#define SA_AUTH_SW_CTRL_SHA384 5
+#define SA_AUTH_SW_CTRL_SHA512 6
+
/* SA2UL can only handle maximum data size of 64KB */
#define SA_MAX_DATA_SZ U16_MAX
@@ -297,15 +303,31 @@ struct sa_tfm_ctx {
struct sa_crypto_data *dev_data;
struct sa_ctx_info enc;
struct sa_ctx_info dec;
+ struct sa_ctx_info auth;
int keylen;
int iv_idx;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ u8 authkey[SHA512_BLOCK_SIZE];
+ struct crypto_shash *shash;
/* for fallback */
union {
struct crypto_sync_skcipher *skcipher;
+ struct crypto_ahash *ahash;
} fallback;
};
+/**
+ * struct sa_sha_req_ctx: Structure used for sha request
+ * @dev_data: struct sa_crypto_data pointer
+ * @cmdl: Complete command label with psdata and epib included
+ * @fallback_req: SW fallback request container
+ */
+struct sa_sha_req_ctx {
+ struct sa_crypto_data *dev_data;
+ u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS];
+ struct ahash_request fallback_req;
+};
+
enum sa_submode {
SA_MODE_GEN = 0,
SA_MODE_CCM,
--
2.17.1
From: Keerthy <[email protected]>
Add the crypto accelerator node to support hardware crypto algorithms,
including SHA1, SHA256, SHA512, AES, 3DES, and the AEAD suites.
Signed-off-by: Keerthy <[email protected]>
[[email protected]: Modifications based on introduction of yaml binding]
Signed-off-by: Tero Kristo <[email protected]>
---
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 96c929da639d..df640680e564 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -268,6 +268,29 @@
};
};
+ main_crypto: crypto@4E00000 {
+ compatible = "ti,j721e-sa2ul";
+ reg = <0x0 0x4E00000 0x0 0x1200>;
+ power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x04E00000 0x00 0x04E00000 0x0 0x30000>;
+
+ status = "okay";
+
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
+ <&main_udmap 0x4001>;
+ dma-names = "tx", "rx1", "rx2";
+ dma-coherent;
+
+ rng: rng@4e10000 {
+ compatible = "inside-secure,safexcel-eip76";
+ reg = <0x0 0x4e10000 0x0 0x7d>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 264 1>;
+ };
+ };
+
main_pmx0: pinmux@11c000 {
compatible = "pinctrl-single";
/* Proxy 0 addressing */
--
2.17.1
From: Keerthy <[email protected]>
Add a basic crypto driver that currently supports AES/3DES in CBC mode
for both encryption and decryption.
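A minimal sketch of driving one of these transforms through the generic
skcipher API (the key, buffer and IV below are hypothetical; the length
must be a multiple of the cipher block size):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sa2ul_cbc_aes_encrypt_example(const u8 *key, unsigned int keylen,
					 u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/* in-place encryption; len must be a multiple of AES_BLOCK_SIZE */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return ret;
}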
Signed-off-by: Keerthy <[email protected]>
[[email protected]: major re-work to fix various bugs in the driver and to
clean up the code]
Signed-off-by: Tero Kristo <[email protected]>
---
drivers/crypto/Kconfig | 14 +
drivers/crypto/Makefile | 1 +
drivers/crypto/sa2ul.c | 1388 +++++++++++++++++++++++++++++++++++++++
drivers/crypto/sa2ul.h | 380 +++++++++++
4 files changed, 1783 insertions(+)
create mode 100644 drivers/crypto/sa2ul.c
create mode 100644 drivers/crypto/sa2ul.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 802b9ada4e9e..152a7ed538a4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -865,4 +865,18 @@ source "drivers/crypto/hisilicon/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
+config CRYPTO_DEV_SA2UL
+ tristate "Support for TI security accelerator"
+ depends on ARCH_K3 || COMPILE_TEST
+ select ARM64_CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_AES_ARM64
+ select CRYPTO_ALGAPI
+ select HW_RANDOM
+ select SG_SPLIT
+ help
+ K3 devices include a security accelerator engine that may be
+ used for crypto offload. Select this if you want to use hardware
+ acceleration for cryptographic algorithms on these devices.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 944ed7226e37..53fc115cf459 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
new file mode 100644
index 000000000000..860c7435fefa
--- /dev/null
+++ b/drivers/crypto/sa2ul.c
@@ -0,0 +1,1388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "sa2ul.h"
+
+/* Byte offset for key in encryption security context */
+#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
+/* Byte offset for Aux-1 in encryption security context */
+#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
+
+#define SA_CMDL_UPD_ENC 0x0001
+#define SA_CMDL_UPD_AUTH 0x0002
+#define SA_CMDL_UPD_ENC_IV 0x0004
+#define SA_CMDL_UPD_AUTH_IV 0x0008
+#define SA_CMDL_UPD_AUX_KEY 0x0010
+
+#define SA_AUTH_SUBKEY_LEN 16
+#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
+#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
+
+#define MODE_CONTROL_BYTES 27
+#define SA_HASH_PROCESSING 0
+#define SA_CRYPTO_PROCESSING 0
+#define SA_UPLOAD_HASH_TO_TLR BIT(6)
+
+#define SA_SW0_FLAGS_MASK 0xF0000
+#define SA_SW0_CMDL_INFO_MASK 0x1F00000
+#define SA_SW0_CMDL_PRESENT BIT(4)
+#define SA_SW0_ENG_ID_MASK 0x3E000000
+#define SA_SW0_DEST_INFO_PRESENT BIT(30)
+#define SA_SW2_EGRESS_LENGTH 0xFF000000
+#define SA_BASIC_HASH 0x10
+
+#define SHA256_DIGEST_WORDS 8
+/* Make 32-bit word from 4 bytes */
+#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
+ ((b2) << 8) | (b3))
+
+/* size of SCCTL structure in bytes */
+#define SA_SCCTL_SZ 16
+
+/* Max Authentication tag size */
+#define SA_MAX_AUTH_TAG_SZ 64
+
+#define PRIV_ID 0x1
+#define PRIV 0x1
+
+static struct device *sa_k3_dev;
+
+/**
+ * struct sa_cmdl_cfg - Command label configuration descriptor
+ * @enc_eng_id: Encryption Engine ID supported by the SA hardware
+ * @iv_size: Initialization Vector size
+ */
+struct sa_cmdl_cfg {
+ u8 enc_eng_id;
+ u8 iv_size;
+};
+
+/**
+ * struct algo_data - Crypto algorithm specific data
+ * @enc_eng: Encryption engine info structure
+ * @iv_idx: iv index in psdata
+ * @iv_out_size: iv out size
+ * @ealg_id: Encryption Algorithm ID
+ * @mci_enc: Mode Control Instruction for Encryption algorithm
+ * @mci_dec: Mode Control Instruction for Decryption
+ * @inv_key: Whether the encryption algorithm demands key inversion
+ * @ctx: Pointer to the algorithm context
+ */
+struct algo_data {
+ struct sa_eng_info enc_eng;
+ u8 iv_idx;
+ u8 iv_out_size;
+ u8 ealg_id;
+ u8 *mci_enc;
+ u8 *mci_dec;
+ bool inv_key;
+ struct sa_tfm_ctx *ctx;
+};
+
+/**
+ * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
+ * @type: Type of the crypto algorithm.
+ * @alg: Union of crypto algorithm definitions.
+ * @registered: Flag indicating if the crypto algorithm is already registered
+ */
+struct sa_alg_tmpl {
+ u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
+ union {
+ struct skcipher_alg skcipher;
+ } alg;
+ bool registered;
+};
+
+/**
+ * struct sa_rx_data: RX Packet miscellaneous data place holder
+ * @req: crypto request data pointer
+ * @ddev: pointer to the DMA device
+ * @tx_in: dma_async_tx_descriptor pointer for rx channel
+ * @split_src_sg: Set if the src sg is split and needs to be freed up
+ * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @enc: Flag indicating either encryption or decryption
+ * @enc_iv_size: Initialisation vector size
+ * @iv_idx: Initialisation vector index
+ * @rx_sg: Static scatterlist entry for overriding RX data
+ * @tx_sg: Static scatterlist entry for overriding TX data
+ * @src: Source data pointer
+ * @dst: Destination data pointer
+ */
+struct sa_rx_data {
+ void *req;
+ struct device *ddev;
+ struct dma_async_tx_descriptor *tx_in;
+ struct scatterlist *split_src_sg;
+ struct scatterlist *split_dst_sg;
+ u8 enc;
+ u8 enc_iv_size;
+ u8 iv_idx;
+ struct scatterlist rx_sg;
+ struct scatterlist tx_sg;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+};
+
+/**
+ * struct sa_req: SA request definition
+ * @dev: device for the request
+ * @size: total data to the xmitted via DMA
+ * @enc_offset: offset of cipher data
+ * @enc_size: data to be passed to cipher engine
+ * @enc_iv: cipher IV
+ * @type: algorithm type for the request
+ * @cmdl: command label pointer
+ * @base: pointer to the base request
+ * @ctx: pointer to the algorithm context data
+ * @enc: true if this is an encode request
+ * @src: source data
+ * @dst: destination data
+ * @callback: DMA callback for the request
+ * @mdata_size: metadata size passed to DMA
+ */
+struct sa_req {
+ struct device *dev;
+ u16 size;
+ u8 enc_offset;
+ u16 enc_size;
+ u8 *enc_iv;
+ u32 type;
+ u32 *cmdl;
+ struct crypto_async_request *base;
+ struct sa_tfm_ctx *ctx;
+ bool enc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ dma_async_tx_callback callback;
+ u16 mdata_size;
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for encryption
+ */
+static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for decryption
+ */
+static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
+ { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For ECB (Electronic Code Book) mode for encryption
+ */
+static u8 mci_ecb_enc_array[3][27] = {
+ { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For ECB (Electronic Code Book) mode for decryption
+ */
+static u8 mci_ecb_dec_array[3][27] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for the 3DES algorithm,
+ * for CBC (Cipher Block Chaining) and ECB modes,
+ * encryption and decryption respectively
+ */
+static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+/*
+ * Perform 16 byte or 128 bit swizzling
+ * The SA2UL expects the security context to be in little endian format
+ * and the bus width is 128 bits or 16 bytes. Hence, swap 16 bytes at
+ * a time from higher to lower address.
+ */
+static void sa_swiz_128(u8 *in, u16 len)
+{
+ u8 data[16];
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ memcpy(data, &in[i], 16);
+ for (j = 0; j < 16; j++)
+ in[i + j] = data[15 - j];
+ }
+}
+
+/* Derive the inverse key used in AES-CBC decryption operation */
+static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
+{
+ struct crypto_aes_ctx ctx;
+ int key_pos;
+
+ if (aes_expandkey(&ctx, key, key_sz)) {
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+ /* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
+ if (key_sz == AES_KEYSIZE_192) {
+ ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
+ ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
+ }
+
+ /* Based on the crypto_aes_expand_key logic */
+ switch (key_sz) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ key_pos = key_sz + 24;
+ break;
+
+ case AES_KEYSIZE_256:
+ key_pos = key_sz + 24 - 4;
+ break;
+
+ default:
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+ memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
+ return 0;
+}
+
+/* Set Security context for the encryption engine */
+static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 enc, u8 *sc_buf)
+{
+ const u8 *mci = NULL;
+
+ /* Set Encryption mode selector to crypto processing */
+ sc_buf[0] = SA_CRYPTO_PROCESSING;
+
+ if (enc)
+ mci = ad->mci_enc;
+ else
+ mci = ad->mci_dec;
+ /* Set the mode control instructions in security context */
+ if (mci)
+ memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
+
+ /* For AES-CBC decryption get the inverse key */
+ if (ad->inv_key && !enc) {
+ if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
+ return -EINVAL;
+ /* For all other cases: key is used */
+ } else {
+ memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
+ }
+
+ return 0;
+}
+
+static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
+{
+ int j;
+
+ for (j = 0; j < ((size16) ? 4 : 2); j++) {
+ *out = cpu_to_be32(*((u32 *)iv));
+ iv += 4;
+ out++;
+ }
+}
+
+/* Format general command label */
+static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ u8 enc_offset = 0, total = 0;
+ u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
+ u32 *word_ptr = (u32 *)cmdl;
+ int i;
+
+ /* Clear the command label */
+ memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
+
+ /* Initialize the command update structure */
+ memzero_explicit(upd_info, sizeof(*upd_info));
+
+ if (cfg->enc_eng_id != SA_ENG_ID_NONE)
+ total = SA_CMDL_HEADER_SIZE_BYTES;
+
+ if (cfg->iv_size)
+ total += cfg->iv_size;
+
+ enc_next_eng = SA_ENG_ID_OUTPORT2;
+
+ if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
+ upd_info->flags |= SA_CMDL_UPD_ENC;
+ upd_info->enc_size.index = enc_offset >> 2;
+ upd_info->enc_offset.index = upd_info->enc_size.index + 1;
+ /* Encryption command label */
+ cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
+
+ /* Encryption modes requiring IV */
+ if (cfg->iv_size) {
+ upd_info->flags |= SA_CMDL_UPD_ENC_IV;
+ upd_info->enc_iv.index =
+ (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
+ upd_info->enc_iv.size = cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
+ (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
+ enc_offset += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+ } else {
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ enc_offset += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+ }
+
+ total = roundup(total, 8);
+
+ for (i = 0; i < total / 4; i++)
+ word_ptr[i] = swab32(word_ptr[i]);
+
+ return total;
+}
+
+/* Update Command label */
+static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ int i = 0, j;
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
+ cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->enc_size.index] |= req->enc_size;
+ cmdl[upd_info->enc_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->enc_offset.index] |=
+ ((u32)req->enc_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
+ __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
+ u32 *enc_iv = (u32 *)req->enc_iv;
+
+ for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
+ data[j] = cpu_to_be32(*enc_iv);
+ enc_iv++;
+ }
+ }
+ }
+}
+
+/* Format SWINFO words to be sent to SA */
+static
+void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
+ u8 cmdl_present, u8 cmdl_offset, u8 flags,
+ u8 hash_size, u32 *swinfo)
+{
+ swinfo[0] = sc_id;
+ swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
+ if (likely(cmdl_present))
+ swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
+ __ffs(SA_SW0_CMDL_INFO_MASK));
+ swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
+
+ swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
+ swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
+ swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
+ swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
+}
+
+/* Dump the security context */
+static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
+{
+#ifdef DEBUG
+ dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, SA_CTX_MAX_SZ, false);
+#endif
+}
+
+static
+int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
+ u16 enc_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo)
+{
+ int enc_sc_offset = 0;
+ u8 *sc_buf = ctx->sc;
+ u16 sc_id = ctx->sc_id;
+ u8 first_engine;
+
+ memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
+
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+
+ /* SCCTL Owner info: 0=host, 1=CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
+ /* SCCTL F/E control */
+ sc_buf[1] = SA_SCCTL_FE_ENC;
+ memcpy(&sc_buf[2], &sc_id, 2);
+ sc_buf[4] = 0x0;
+ sc_buf[5] = PRIV_ID;
+ sc_buf[6] = PRIV;
+ sc_buf[7] = 0x0;
+
+ /* Prepare context for encryption engine */
+ if (ad->enc_eng.sc_size) {
+ if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
+ &sc_buf[enc_sc_offset]))
+ return -EINVAL;
+ }
+
+ /* Set the ownership of context to CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
+
+ /* swizzle the security context */
+ sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
+ /* Setup SWINFO */
+ first_engine = ad->enc_eng.eng_id;
+
+ sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
+ SA_SW_INFO_FLAG_EVICT, ad->iv_out_size, swinfo);
+
+ sa_dump_sc(sc_buf, ctx->sc_phys);
+
+ return 0;
+}
+
+/* Free the per direction context memory */
+static void sa_free_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+
+ bn = ctx->sc_id - data->sc_id_start;
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ if (ctx->sc) {
+ dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
+ ctx->sc = NULL;
+ }
+}
+
+static int sa_init_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+ int err;
+
+ spin_lock(&data->scid_lock);
+ bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
+ __set_bit(bn, data->ctx_bm);
+ data->sc_id++;
+ spin_unlock(&data->scid_lock);
+
+ ctx->sc_id = (u16)(data->sc_id_start + bn);
+
+ ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
+ if (!ctx->sc) {
+ dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
+ err = -ENOMEM;
+ goto scid_rollback;
+ }
+
+ return 0;
+
+scid_rollback:
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ return err;
+}
+
+static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+
+ crypto_free_sync_skcipher(ctx->fallback.skcipher);
+}
+
+static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ ctx->fallback.skcipher =
+ crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback.skcipher)) {
+ dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->fallback.skcipher);
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+ return 0;
+}
+
+static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen, struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int ret;
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+
+ memzero_explicit(&cfg, sizeof(cfg));
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.iv_size = crypto_skcipher_ivsize(tfm);
+
+ crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
+ tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, key, keylen, ad, 1, &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, key, keylen, ad, 0, &ctx->dec.epib[1]))
+ goto badkey;
+
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->dec.cmdl_size = cmdl_len;
+ ctx->iv_idx = ad->iv_idx;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_cbc_enc_array[key_idx];
+ ad.mci_dec = mci_cbc_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.iv_idx = 4;
+ ad.iv_out_size = 16;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_ecb_enc_array[key_idx];
+ ad.mci_dec = mci_ecb_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_ECB;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_cbc_3des_enc_array;
+ ad.mci_dec = mci_cbc_3des_dec_array;
+ ad.ealg_id = SA_EALG_ID_3DES_CBC;
+ ad.iv_idx = 6;
+ ad.iv_out_size = 8;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_ecb_3des_enc_array;
+ ad.mci_dec = mci_ecb_3des_dec_array;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static void sa_aes_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct skcipher_request *req;
+ int sglen;
+ u32 *result;
+ __be32 *mdptr;
+ size_t ml, pl;
+ int i;
+ enum dma_data_direction dir_src;
+ bool diff_dst;
+
+ req = container_of(rxd->req, struct skcipher_request, base);
+ sglen = sg_nents_for_len(req->src, req->cryptlen);
+
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ if (req->iv) {
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
+ &ml);
+ result = (u32 *)req->iv;
+
+ for (i = 0; i < (rxd->enc_iv_size / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
+ }
+
+ dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(req->dst, req->cryptlen);
+
+ dma_unmap_sg(rxd->ddev, req->dst, sglen,
+ DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ skcipher_request_complete(req, 0);
+}
+
+static void
+sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
+{
+ u32 *out, *in;
+ int i;
+
+ for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
+ *out++ = *in++;
+
+ mdptr[4] = (0xFFFF << 16);
+ for (out = &mdptr[5], in = psdata, i = 0;
+ i < pslen / sizeof(u32); i++)
+ *out++ = *in++;
+}
+
+static int sa_run(struct sa_req *req)
+{
+ struct sa_rx_data *rxd;
+ gfp_t gfp_flags;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
+ struct device *ddev;
+ struct dma_chan *dma_rx;
+ int sg_nents, src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
+ struct scatterlist *src, *dst;
+ size_t pl, ml, split_size;
+ struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
+ int ret;
+ struct dma_async_tx_descriptor *tx_out;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ rxd = kzalloc(sizeof(*rxd), gfp_flags);
+ if (!rxd)
+ return -ENOMEM;
+
+ if (req->src != req->dst) {
+ diff_dst = true;
+ dir_src = DMA_TO_DEVICE;
+ } else {
+ diff_dst = false;
+ dir_src = DMA_BIDIRECTIONAL;
+ }
+
+ /*
+ * SA2UL has an interesting feature where the receive DMA channel
+ * is selected based on the data passed to the engine. Within the
+ * transition range, there is also a space where it is impossible
+ * to determine where the data will end up, and this should be
+ * avoided. This will be handled by the SW fallback mechanism by
+ * the individual algorithm implementations.
+ */
+ if (req->size >= 256)
+ dma_rx = pdata->dma_rx2;
+ else
+ dma_rx = pdata->dma_rx1;
+
+ ddev = dma_rx->device->dev;
+
+ memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
+
+ sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
+
+ if (req->type != CRYPTO_ALG_TYPE_AHASH) {
+ if (req->enc)
+ req->type |=
+ (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
+ else
+ req->type |=
+ (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
+ }
+
+ cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
+
+ /*
+ * Map the packets, first we check if the data fits into a single
+ * sg entry and use that if possible. If it does not fit, we check
+ * if we need to do sg_split to align the scatterlist data on the
+ * actual data size being processed by the crypto engine.
+ */
+ src = req->src;
+ sg_nents = sg_nents_for_len(src, req->size);
+
+ split_size = req->size;
+
+ if (sg_nents == 1 && split_size <= req->src->length) {
+ src = &rxd->rx_sg;
+ sg_init_table(src, 1);
+ sg_set_page(src, sg_page(req->src), split_size,
+ req->src->offset);
+ src_nents = 1;
+ dma_map_sg(ddev, src, sg_nents, dir_src);
+ } else {
+ mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
+ dir_src);
+ ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
+ &src, &src_nents, gfp_flags);
+ if (ret) {
+ src_nents = sg_nents;
+ src = req->src;
+ } else {
+ rxd->split_src_sg = src;
+ }
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst = src;
+ } else {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+
+ if (dst_nents == 1 && split_size <= req->dst->length) {
+ dst = &rxd->tx_sg;
+ sg_init_table(dst, 1);
+ sg_set_page(dst, sg_page(req->dst), split_size,
+ req->dst->offset);
+ dst_nents = 1;
+ dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
+ &split_size, &dst, &dst_nents,
+ gfp_flags);
+ if (ret) {
+ dst_nents = dst_nents;
+ dst = req->dst;
+ } else {
+ rxd->split_dst_sg = dst;
+ }
+ }
+ }
+
+ if (unlikely(src_nents != sg_nents)) {
+ dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
+ ret = -EIO;
+ goto err_cleanup;
+ }
+
+ rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd->tx_in) {
+ dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ rxd->req = (void *)req->base;
+ rxd->enc = req->enc;
+ rxd->ddev = ddev;
+ rxd->src = src;
+ rxd->dst = dst;
+ rxd->iv_idx = req->ctx->iv_idx;
+ rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
+ rxd->tx_in->callback = req->callback;
+ rxd->tx_in->callback_param = rxd;
+
+ tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
+ src_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tx_out) {
+ dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /*
+ * Prepare metadata for DMA engine. This essentially describes the
+ * crypto algorithm to be used, data sizes, different keys etc.
+ */
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
+
+ sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
+ sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
+ sa_ctx->epib);
+
+ ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
+ dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
+
+ dmaengine_submit(tx_out);
+ dmaengine_submit(rxd->tx_in);
+
+ dma_async_issue_pending(dma_rx);
+ dma_async_issue_pending(pdata->dma_tx);
+
+ return -EINPROGRESS;
+
+err_cleanup:
+ dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
+ kfree(rxd->split_src_sg);
+
+ if (req->src != req->dst) {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+ dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ return ret;
+}
+
+static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
+{
+ struct sa_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct sa_req sa_req = { 0 };
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ if (req->cryptlen % alg->cra_blocksize)
+ return -EINVAL;
+
+ /* Use SW fallback if the data size is not supported */
+ if (req->cryptlen > SA_MAX_DATA_SZ ||
+ (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
+ req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
+
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ if (enc)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ sa_req.size = req->cryptlen;
+ sa_req.enc_size = req->cryptlen;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aes_dma_in_callback;
+ sa_req.mdata_size = 44;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+
+ return sa_run(&sa_req);
+}
+
+static int sa_encrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 1);
+}
+
+static int sa_decrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 0);
+}
+
+static struct sa_alg_tmpl sa_algs[] = {
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sa_aes_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sa_aes_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = sa_3des_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = sa_3des_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+};
+
+/* Register the algorithms in crypto framework */
+static void sa_register_algos(const struct device *dev)
+{
+ char *alg_name;
+ u32 type;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
+ alg_name = sa_algs[i].alg.skcipher.base.cra_name;
+ err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
+ } else {
+ dev_err(dev,
+ "un-supported crypto algorithm (%d)",
+ sa_algs[i].type);
+ continue;
+ }
+
+ if (err)
+ dev_err(dev, "Failed to register '%s'\n", alg_name);
+ else
+ sa_algs[i].registered = true;
+ }
+}
+
+/* Unregister the algorithms in crypto framework */
+static void sa_unregister_algos(const struct device *dev)
+{
+ u32 type;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (!sa_algs[i].registered)
+ continue;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+ crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
+
+ sa_algs[i].registered = false;
+ }
+}
+
+static int sa_init_mem(struct sa_crypto_data *dev_data)
+{
+ struct device *dev = &dev_data->pdev->dev;
+ /* Setup dma pool for security context buffers */
+ dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
+ SA_CTX_MAX_SZ, 64, 0);
+ if (!dev_data->sc_pool) {
+ dev_err(dev, "Failed to create dma pool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int sa_dma_init(struct sa_crypto_data *dd)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dd->dma_rx1 = NULL;
+ dd->dma_tx = NULL;
+ dd->dma_rx2 = NULL;
+
+ ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
+ if (ret)
+ return ret;
+
+ dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
+ if (IS_ERR(dd->dma_rx1)) {
+ if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
+ return PTR_ERR(dd->dma_rx1);
+ }
+
+ dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
+ if (IS_ERR(dd->dma_rx2)) {
+ dma_release_channel(dd->dma_rx1);
+ if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
+ return PTR_ERR(dd->dma_rx2);
+ }
+
+ dd->dma_tx = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_tx)) {
+ if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request tx DMA channel\n");
+ ret = PTR_ERR(dd->dma_tx);
+ goto err_dma_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(dd->dma_tx, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+
+err_dma_tx:
+ dma_release_channel(dd->dma_rx1);
+ dma_release_channel(dd->dma_rx2);
+
+ return ret;
+}
+
+static int sa_ul_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ static void __iomem *saul_base;
+ struct sa_crypto_data *dev_data;
+ u32 val;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ sa_k3_dev = dev;
+ dev_data->dev = dev;
+ dev_data->pdev = pdev;
+ platform_set_drvdata(pdev, dev_data);
+ dev_set_drvdata(sa_k3_dev, dev_data);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ sa_init_mem(dev_data);
+ ret = sa_dma_init(dev_data);
+ if (ret)
+ goto disable_pm_runtime;
+
+ spin_lock_init(&dev_data->scid_lock);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ saul_base = devm_ioremap_resource(dev, res);
+
+ dev_data->base = saul_base;
+ val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
+
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+
+ sa_register_algos(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto release_dma;
+
+ return 0;
+
+release_dma:
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+ dma_pool_destroy(dev_data->sc_pool);
+
+disable_pm_runtime:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sa_ul_remove(struct platform_device *pdev)
+{
+ struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+ dma_pool_destroy(dev_data->sc_pool);
+
+ platform_set_drvdata(pdev, NULL);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ {.compatible = "ti,j721e-sa2ul",},
+ {.compatible = "ti,am654-sa2ul",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver sa_ul_driver = {
+ .probe = sa_ul_probe,
+ .remove = sa_ul_remove,
+ .driver = {
+ .name = "saul-crypto",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(sa_ul_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
new file mode 100644
index 000000000000..45ba86cb5d11
--- /dev/null
+++ b/drivers/crypto/sa2ul.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+
+#ifndef _K3_SA2UL_
+#define _K3_SA2UL_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/hw_random.h>
+#include <crypto/aes.h>
+
+#define SA_ENGINE_ENABLE_CONTROL 0x1000
+
+struct sa_tfm_ctx;
+/*
+ * SA_ENGINE_ENABLE_CONTROL register bits
+ */
+#define SA_EEC_ENCSS_EN 0x00000001
+#define SA_EEC_AUTHSS_EN 0x00000002
+#define SA_EEC_TRNG_EN 0x00000008
+#define SA_EEC_PKA_EN 0x00000010
+#define SA_EEC_CTXCACH_EN 0x00000080
+#define SA_EEC_CPPI_PORT_IN_EN 0x00000200
+#define SA_EEC_CPPI_PORT_OUT_EN 0x00000800
+
+/*
+ * Encoding used to identify the type of crypto operation
+ * performed on the packet when the packet is returned
+ * by SA
+ */
+#define SA_REQ_SUBTYPE_ENC 0x0001
+#define SA_REQ_SUBTYPE_DEC 0x0002
+#define SA_REQ_SUBTYPE_SHIFT 16
+#define SA_REQ_SUBTYPE_MASK 0xffff
+
+/* Number of 32 bit words in EPIB */
+#define SA_DMA_NUM_EPIB_WORDS 4
+
+/* Number of 32 bit words in PS data */
+#define SA_DMA_NUM_PS_WORDS 16
+#define NKEY_SZ 3
+#define MCI_SZ 27
+
+/*
+ * Maximum number of simultaneous security contexts
+ * supported by the driver
+ */
+#define SA_MAX_NUM_CTX 512
+
+/*
+ * Assumption: CTX size is multiple of 32
+ */
+#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \
+ ((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0)
+
+#define SA_CTX_ENC_KEY_OFFSET 32
+#define SA_CTX_ENC_AUX1_OFFSET 64
+#define SA_CTX_ENC_AUX2_OFFSET 96
+#define SA_CTX_ENC_AUX3_OFFSET 112
+#define SA_CTX_ENC_AUX4_OFFSET 128
+
+/* Next Engine Select code in CP_ACE */
+#define SA_ENG_ID_EM1 2 /* Enc/Dec engine with AES/DEC core */
+#define SA_ENG_ID_EM2 3 /* Encryption/Decryption engine for pass 2 */
+#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */
+#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */
+#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */
+#define SA_ENG_ID_NONE 0xff
+
+/*
+ * Command Label Definitions
+ */
+#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */
+#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */
+/* 16-bit Length of Data to be processed */
+#define SA_CMDL_OFFSET_DATA_LEN 2
+#define SA_CMDL_OFFSET_DATA_OFFSET 4 /* Stat Data Offset */
+#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */
+#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */
+#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */
+#define SA_CMDL_OFFSET_OPTION_BYTE 8
+
+#define SA_CMDL_HEADER_SIZE_BYTES 8
+
+#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72
+#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \
+ SA_CMDL_OPTION_BYTES_MAX_SIZE)
+
+/* SWINFO word-0 flags */
+#define SA_SW_INFO_FLAG_EVICT 0x0001
+#define SA_SW_INFO_FLAG_TEAR 0x0002
+#define SA_SW_INFO_FLAG_NOPD 0x0004
+
+/*
+ * This type represents the various packet types to be processed
+ * by the PHP engine in SA.
+ * It is used to identify the corresponding PHP processing function.
+ */
+#define SA_CTX_PE_PKT_TYPE_3GPP_AIR 0 /* 3GPP Air Cipher */
+#define SA_CTX_PE_PKT_TYPE_SRTP 1 /* SRTP */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_AH 2 /* IPSec Authentication Header */
+/* IPSec Encapsulating Security Payload */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP 3
+/* Indicates that it is in data mode; it may not be used by PHP */
+#define SA_CTX_PE_PKT_TYPE_NONE 4
+#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */
+#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */
+
+#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */
+#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */
+/* Size of security context for PHP engine */
+#define SA_CTX_PHP_PE_CTX_SZ 64
+
+#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ)
+
+/*
+ * Encoding of F/E control in SCCTL
+ * Bit 0-1: Fetch PHP Bytes
+ * Bit 2-3: Fetch Encryption/Air Ciphering Bytes
+ * Bit 4-5: Fetch Authentication Bytes or Encr pass 2
+ * Bit 6-7: Evict PHP Bytes
+ *
+ * where 00 = 0 bytes
+ * 01 = 64 bytes
+ * 10 = 96 bytes
+ * 11 = 128 bytes
+ */
+#define SA_CTX_DMA_SIZE_0 0
+#define SA_CTX_DMA_SIZE_64 1
+#define SA_CTX_DMA_SIZE_96 2
+#define SA_CTX_DMA_SIZE_128 3
+
+/*
+ * Byte offset of the owner word in SCCTL
+ * in the security context
+ */
+#define SA_CTX_SCCTL_OWNER_OFFSET 0
+
+#define SA_CTX_ENC_KEY_OFFSET 32
+#define SA_CTX_ENC_AUX1_OFFSET 64
+#define SA_CTX_ENC_AUX2_OFFSET 96
+#define SA_CTX_ENC_AUX3_OFFSET 112
+#define SA_CTX_ENC_AUX4_OFFSET 128
+
+#define SA_SCCTL_FE_AUTH_ENC 0x65
+#define SA_SCCTL_FE_ENC 0x8D
+
+#define SA_ALIGN_MASK (sizeof(u32) - 1)
+#define SA_ALIGNED __aligned(32)
+
+/* SA2UL can only handle maximum data size of 64KB */
+#define SA_MAX_DATA_SZ U16_MAX
+
+/*
+ * SA2UL can provide unpredictable results with packet sizes that fall
+ * within the following range, so avoid using it.
+ */
+#define SA_UNSAFE_DATA_SZ_MIN 240
+#define SA_UNSAFE_DATA_SZ_MAX 256
+
+/**
+ * struct sa_crypto_data - Crypto driver instance data
+ * @base: Base address of the register space
+ * @pdev: Platform device pointer
+ * @sc_pool: security context pool
+ * @dev: Device pointer
+ * @scid_lock: secure context ID lock
+ * @sc_id_start: starting index for SC ID
+ * @sc_id_end: Ending index for SC ID
+ * @sc_id: Security Context ID
+ * @ctx_bm: Bitmap to keep track of Security context IDs
+ * @ctx: SA tfm context pointer
+ * @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes
+ * @dma_rx2: Pointer to DMA rx channel for sizes >= 256 Bytes
+ * @dma_tx: Pointer to DMA TX channel
+ */
+struct sa_crypto_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct dma_pool *sc_pool;
+ struct device *dev;
+ spinlock_t scid_lock; /* lock for SC-ID allocation */
+ /* Security context data */
+ u16 sc_id_start;
+ u16 sc_id_end;
+ u16 sc_id;
+ unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX,
+ BITS_PER_LONG)];
+ struct sa_tfm_ctx *ctx;
+ struct dma_chan *dma_rx1;
+ struct dma_chan *dma_rx2;
+ struct dma_chan *dma_tx;
+};
+
+/**
+ * struct sa_cmdl_param_info: Command label parameters info
+ * @index: Index of the parameter in the command label format
+ * @offset: the offset of the parameter
+ * @size: Size of the parameter
+ */
+struct sa_cmdl_param_info {
+ u16 index;
+ u16 offset;
+ u16 size;
+};
+
+/* Maximum length of Auxiliary data in 32bit words */
+#define SA_MAX_AUX_DATA_WORDS 8
+
+/**
+ * struct sa_cmdl_upd_info: Command label update info
+ * @flags: flags in command label
+ * @submode: Encryption submodes
+ * @enc_size: First pass encryption size
+ * @enc_size2: Second pass encryption size
+ * @enc_offset: Encryption payload offset in the packet
+ * @enc_iv: Encryption initialization vector for pass 1
+ * @enc_iv2: Encryption initialization vector for pass 2
+ * @aad: Associated data
+ * @payload: Payload info
+ * @auth_size: Authentication size for pass 1
+ * @auth_size2: Authentication size for pass 2
+ * @auth_offset: Authentication payload offset
+ * @auth_iv: Authentication initialization vector
+ * @aux_key_info: Authentication aux key information
+ * @aux_key: Aux key for authentication
+ */
+struct sa_cmdl_upd_info {
+ u16 flags;
+ u16 submode;
+ struct sa_cmdl_param_info enc_size;
+ struct sa_cmdl_param_info enc_size2;
+ struct sa_cmdl_param_info enc_offset;
+ struct sa_cmdl_param_info enc_iv;
+ struct sa_cmdl_param_info enc_iv2;
+ struct sa_cmdl_param_info aad;
+ struct sa_cmdl_param_info payload;
+ struct sa_cmdl_param_info auth_size;
+ struct sa_cmdl_param_info auth_size2;
+ struct sa_cmdl_param_info auth_offset;
+ struct sa_cmdl_param_info auth_iv;
+ struct sa_cmdl_param_info aux_key_info;
+ u32 aux_key[SA_MAX_AUX_DATA_WORDS];
+};
+
+/*
+ * Number of 32bit words appended after the command label
+ * in PSDATA to identify the crypto request context.
+ * word-0: Request type
+ * word-1: pointer to request
+ */
+#define SA_PSDATA_CTX_WORDS 4
+
+/* Maximum size of Command label in 32 words */
+#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS)
+
+/**
+ * struct sa_ctx_info: SA context information
+ * @sc: Pointer to security context
+ * @sc_phys: Security context physical address that is passed on to SA2UL
+ * @sc_id: Security context ID
+ * @cmdl_size: Command label size
+ * @cmdl: Command label for a particular iteration
+ * @cmdl_upd_info: structure holding command label update info
+ * @epib: Extended protocol information block words
+ */
+struct sa_ctx_info {
+ u8 *sc;
+ dma_addr_t sc_phys;
+ u16 sc_id;
+ u16 cmdl_size;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_cmdl_upd_info cmdl_upd_info;
+ /* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */
+ u32 epib[SA_DMA_NUM_EPIB_WORDS];
+};
+
+/**
+ * struct sa_tfm_ctx: TFM context structure
+ * @dev_data: struct sa_crypto_data pointer
+ * @enc: struct sa_ctx_info for encryption
+ * @dec: struct sa_ctx_info for decryption
+ * @keylen: encryption/decryption key length
+ * @iv_idx: Initialization vector index
+ * @key: encryption key
+ * @fallback: SW fallback algorithm
+ */
+struct sa_tfm_ctx {
+ struct sa_crypto_data *dev_data;
+ struct sa_ctx_info enc;
+ struct sa_ctx_info dec;
+ int keylen;
+ int iv_idx;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ /* for fallback */
+ union {
+ struct crypto_sync_skcipher *skcipher;
+ } fallback;
+};
+
+enum sa_submode {
+ SA_MODE_GEN = 0,
+ SA_MODE_CCM,
+ SA_MODE_GCM,
+ SA_MODE_GMAC
+};
+
+/* Encryption algorithms */
+enum sa_ealg_id {
+ SA_EALG_ID_NONE = 0, /* No encryption */
+ SA_EALG_ID_NULL, /* NULL encryption */
+ SA_EALG_ID_AES_CTR, /* AES Counter mode */
+ SA_EALG_ID_AES_F8, /* AES F8 mode */
+ SA_EALG_ID_AES_CBC, /* AES CBC mode */
+ SA_EALG_ID_DES_CBC, /* DES CBC mode */
+ SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */
+ SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */
+ SA_EALG_ID_GCM, /* Galois Counter mode */
+ SA_EALG_ID_AES_ECB,
+ SA_EALG_ID_LAST
+};
+
+/* Authentication algorithms */
+enum sa_aalg_id {
+ SA_AALG_ID_NONE = 0, /* No Authentication */
+ SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */
+ SA_AALG_ID_MD5, /* MD5 mode */
+ SA_AALG_ID_SHA1, /* SHA1 mode */
+ SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */
+ SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */
+ SA_AALG_ID_SHA2_512, /* 512-bit SHA2 mode */
+ SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */
+ SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */
+ SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */
+ SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */
+ SA_AALG_ID_GMAC, /* Galois Message Auth. Code mode */
+ SA_AALG_ID_CMAC, /* Cipher-based Mes. Auth. Code mode */
+ SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */
+ SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */
+};
+
+/*
+ * Mode control engine algorithms used to index the
+ * mode control instruction tables
+ */
+enum sa_eng_algo_id {
+ SA_ENG_ALGO_ECB = 0,
+ SA_ENG_ALGO_CBC,
+ SA_ENG_ALGO_CFB,
+ SA_ENG_ALGO_OFB,
+ SA_ENG_ALGO_CTR,
+ SA_ENG_ALGO_F8,
+ SA_ENG_ALGO_F8F9,
+ SA_ENG_ALGO_GCM,
+ SA_ENG_ALGO_GMAC,
+ SA_ENG_ALGO_CCM,
+ SA_ENG_ALGO_CMAC,
+ SA_ENG_ALGO_CBCMAC,
+ SA_NUM_ENG_ALGOS
+};
+
+/**
+ * struct sa_eng_info: Security accelerator engine info
+ * @eng_id: Engine ID
+ * @sc_size: security context size
+ */
+struct sa_eng_info {
+ u8 eng_id;
+ u16 sc_size;
+};
+
+#endif /* _K3_SA2UL_ */
--
2.17.1
--
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki. Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
On Mon, Jul 13, 2020 at 11:34:20AM +0300, Tero Kristo wrote:
> Hi,
>
> V6 has only a bunch of static checker warnings fixed. Tested building
> with W=1 and C=1 make options, also did a sanity test with crypto
> manager tests + extra tests, and did a quick trial with tcrypt.
Patches 1-5 applied. Thanks.
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Mon, Jul 13, 2020 at 11:34:22AM +0300, Tero Kristo wrote:
> From: Keerthy <[email protected]>
>
> Adds a basic crypto driver and currently supports AES/3DES
> in cbc mode for both encryption and decryption.
>
> Signed-off-by: Keerthy <[email protected]>
> [[email protected]: major re-work to fix various bugs in the driver and to
> cleanup the code]
> Signed-off-by: Tero Kristo <[email protected]>
> ---
> drivers/crypto/Kconfig | 14 +
> drivers/crypto/Makefile | 1 +
> drivers/crypto/sa2ul.c | 1388 +++++++++++++++++++++++++++++++++++++++
> drivers/crypto/sa2ul.h | 380 +++++++++++
> 4 files changed, 1783 insertions(+)
> create mode 100644 drivers/crypto/sa2ul.c
> create mode 100644 drivers/crypto/sa2ul.h
<snip>
> diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
> new file mode 100644
> index 000000000000..860c7435fefa
> --- /dev/null
> +++ b/drivers/crypto/sa2ul.c
> @@ -0,0 +1,1388 @@
<snip>
> +static int sa_run(struct sa_req *req)
> +{
> + struct sa_rx_data *rxd;
> + gfp_t gfp_flags;
> + u32 cmdl[SA_MAX_CMDL_WORDS];
> + struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
> + struct device *ddev;
> + struct dma_chan *dma_rx;
> + int sg_nents, src_nents, dst_nents;
> + int mapped_src_nents, mapped_dst_nents;
> + struct scatterlist *src, *dst;
> + size_t pl, ml, split_size;
> + struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
> + int ret;
> + struct dma_async_tx_descriptor *tx_out;
> + u32 *mdptr;
> + bool diff_dst;
> + enum dma_data_direction dir_src;
> +
> + gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
> + GFP_KERNEL : GFP_ATOMIC;
> +
> + rxd = kzalloc(sizeof(*rxd), gfp_flags);
> + if (!rxd)
> + return -ENOMEM;
> +
> + if (req->src != req->dst) {
> + diff_dst = true;
> + dir_src = DMA_TO_DEVICE;
> + } else {
> + diff_dst = false;
> + dir_src = DMA_BIDIRECTIONAL;
> + }
> +
> + /*
> + * SA2UL has an interesting feature where the receive DMA channel
> + * is selected based on the data passed to the engine. Within the
> + * transition range, there is also a space where it is impossible
> + * to determine where the data will end up, and this should be
> + * avoided. This will be handled by the SW fallback mechanism by
> + * the individual algorithm implementations.
> + */
> + if (req->size >= 256)
> + dma_rx = pdata->dma_rx2;
> + else
> + dma_rx = pdata->dma_rx1;
> +
> + ddev = dma_rx->device->dev;
> +
> + memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
> +
> + sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
> +
> + if (req->type != CRYPTO_ALG_TYPE_AHASH) {
> + if (req->enc)
> + req->type |=
> + (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
> + else
> + req->type |=
> + (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
> + }
> +
> + cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
> +
> + /*
> + * Map the packets, first we check if the data fits into a single
> + * sg entry and use that if possible. If it does not fit, we check
> + * if we need to do sg_split to align the scatterlist data on the
> + * actual data size being processed by the crypto engine.
> + */
> + src = req->src;
> + sg_nents = sg_nents_for_len(src, req->size);
> +
> + split_size = req->size;
> +
> + if (sg_nents == 1 && split_size <= req->src->length) {
> + src = &rxd->rx_sg;
> + sg_init_table(src, 1);
> + sg_set_page(src, sg_page(req->src), split_size,
> + req->src->offset);
> + src_nents = 1;
> + dma_map_sg(ddev, src, sg_nents, dir_src);
> + } else {
> + mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
> + dir_src);
> + ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
> + &src, &src_nents, gfp_flags);
> + if (ret) {
> + src_nents = sg_nents;
> + src = req->src;
> + } else {
> + rxd->split_src_sg = src;
> + }
> + }
> +
> + if (!diff_dst) {
> + dst_nents = src_nents;
> + dst = src;
> + } else {
> + dst_nents = sg_nents_for_len(req->dst, req->size);
> +
> + if (dst_nents == 1 && split_size <= req->dst->length) {
> + dst = &rxd->tx_sg;
> + sg_init_table(dst, 1);
> + sg_set_page(dst, sg_page(req->dst), split_size,
> + req->dst->offset);
> + dst_nents = 1;
> + dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
> + } else {
> + mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
> + DMA_FROM_DEVICE);
> + ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
> + &split_size, &dst, &dst_nents,
> + gfp_flags);
> + if (ret) {
> + dst_nents = dst_nents;
This causes a clang warning:
drivers/crypto/sa2ul.c:1152:15: warning: explicitly assigning value of
variable of type 'int' to itself [-Wself-assign]
dst_nents = dst_nents;
~~~~~~~~~ ^ ~~~~~~~~~
1 warning generated.
Was the right side supposed to be something else? Otherwise, this line
can be removed, right?
> + dst = req->dst;
> + } else {
> + rxd->split_dst_sg = dst;
> + }
> + }
> + }
> +
> + if (unlikely(src_nents != sg_nents)) {
> + dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
> + ret = -EIO;
> + goto err_cleanup;
> + }
> +
> + rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
> + DMA_DEV_TO_MEM,
> + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> + if (!rxd->tx_in) {
> + dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
> + ret = -EINVAL;
> + goto err_cleanup;
> + }
> +
> + rxd->req = (void *)req->base;
> + rxd->enc = req->enc;
> + rxd->ddev = ddev;
> + rxd->src = src;
> + rxd->dst = dst;
> + rxd->iv_idx = req->ctx->iv_idx;
> + rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
> + rxd->tx_in->callback = req->callback;
> + rxd->tx_in->callback_param = rxd;
> +
> + tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
> + src_nents, DMA_MEM_TO_DEV,
> + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> +
> + if (!tx_out) {
> + dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
> + ret = -EINVAL;
> + goto err_cleanup;
> + }
> +
> + /*
> + * Prepare metadata for DMA engine. This essentially describes the
> + * crypto algorithm to be used, data sizes, different keys etc.
> + */
> + mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
> +
> + sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
> + sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
> + sa_ctx->epib);
> +
> + ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
> + dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
> +
> + dmaengine_submit(tx_out);
> + dmaengine_submit(rxd->tx_in);
> +
> + dma_async_issue_pending(dma_rx);
> + dma_async_issue_pending(pdata->dma_tx);
> +
> + return -EINPROGRESS;
> +
> +err_cleanup:
> + dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
> + kfree(rxd->split_src_sg);
> +
> + if (req->src != req->dst) {
> + dst_nents = sg_nents_for_len(req->dst, req->size);
> + dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
> + kfree(rxd->split_dst_sg);
> + }
> +
> + kfree(rxd);
> +
> + return ret;
> +}
Cheers,
Nathan
On 22/08/2020 01:17, Nathan Chancellor wrote:
> On Mon, Jul 13, 2020 at 11:34:22AM +0300, Tero Kristo wrote:
>> From: Keerthy <[email protected]>
>>
>> Adds a basic crypto driver and currently supports AES/3DES
>> in cbc mode for both encryption and decryption.
>>
>> Signed-off-by: Keerthy <[email protected]>
>> [[email protected]: major re-work to fix various bugs in the driver and to
>> cleanup the code]
>> Signed-off-by: Tero Kristo <[email protected]>
>> ---
>> drivers/crypto/Kconfig | 14 +
>> drivers/crypto/Makefile | 1 +
>> drivers/crypto/sa2ul.c | 1388 +++++++++++++++++++++++++++++++++++++++
>> drivers/crypto/sa2ul.h | 380 +++++++++++
>> 4 files changed, 1783 insertions(+)
>> create mode 100644 drivers/crypto/sa2ul.c
>> create mode 100644 drivers/crypto/sa2ul.h
>
> <snip>
>
>> diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
>> new file mode 100644
>> index 000000000000..860c7435fefa
>> --- /dev/null
>> +++ b/drivers/crypto/sa2ul.c
>> @@ -0,0 +1,1388 @@
>
> <snip>
>
>> <snip>
>> + if (!diff_dst) {
>> + dst_nents = src_nents;
>> + dst = src;
>> + } else {
>> + dst_nents = sg_nents_for_len(req->dst, req->size);
>> +
>> + if (dst_nents == 1 && split_size <= req->dst->length) {
>> + dst = &rxd->tx_sg;
>> + sg_init_table(dst, 1);
>> + sg_set_page(dst, sg_page(req->dst), split_size,
>> + req->dst->offset);
>> + dst_nents = 1;
>> + dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
>> + } else {
>> + mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
>> + DMA_FROM_DEVICE);
>> + ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
>> + &split_size, &dst, &dst_nents,
>> + gfp_flags);
>> + if (ret) {
>> + dst_nents = dst_nents;
>
> This causes a clang warning:
>
> drivers/crypto/sa2ul.c:1152:15: warning: explicitly assigning value of
> variable of type 'int' to itself [-Wself-assign]
> dst_nents = dst_nents;
> ~~~~~~~~~ ^ ~~~~~~~~~
> 1 warning generated.
>
> Was the right side supposed to be something else? Otherwise, this line
> can be removed, right?
This is definitely a bug in the code, thanks for catching. I'll check
what this was actually supposed to be and fix... Too many iterations of
the code behind.
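
Just as a sketch of one possible cleanup here (an assumption only, not
necessarily what the final fix will look like), the self-assignment
could probably just be dropped, since dst_nents already holds the value
returned by sg_nents_for_len() at that point:

	ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
		       &split_size, &dst, &dst_nents,
		       gfp_flags);
	if (ret) {
		/*
		 * sg_split() failed: fall back to the original, unsplit
		 * destination scatterlist. dst_nents still holds the
		 * sg_nents_for_len() result, so no re-assignment is
		 * needed. (Illustrative sketch only.)
		 */
		dst = req->dst;
	} else {
		rxd->split_dst_sg = dst;
	}
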
-Tero
--
Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki. Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki