2015-09-23 11:55:36

by Corentin Labbe

Subject: [PATCH] crypto: dma_map_sg can handle chained SG


Hello

Some drivers use two dma_map_sg() paths depending on whether the SG list
is chained or not.
Since dma_map_sg() can handle both cases, this patch series cleans up all
code that references chained SG.
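
A simplified sketch of the duplicated pattern being removed (function
names vary per driver, so this is illustrative only):

	/* before: drivers special-cased chained lists */
	if (chained)
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);	/* one entry at a time */
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);

	/* after: dma_map_sg() itself walks chained lists via sg_next() */
	dma_map_sg(dev, sg, nents, dir);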

Note that I could only compile-test the sahara and caam patches.
None of them could be runtime-tested due to lack of hardware.

Regards

LABBE Corentin


2015-09-23 11:55:25

by Corentin Labbe

Subject: [PATCH 1/4] crypto: talitos: dma_map_sg can handle chained SG

The talitos driver uses two dma_map_sg() paths
depending on whether the SG list is chained or not.
Since dma_map_sg() can handle both cases, clean up the code by removing
all references to chained SG.

This removes the talitos_map_sg, talitos_unmap_sg_chain
and sg_count functions.

Signed-off-by: LABBE Corentin <[email protected]>
---
drivers/crypto/talitos.c | 104 ++++++++---------------------------------------
1 file changed, 18 insertions(+), 86 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 3b20a1b..46f531e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -857,8 +857,6 @@ badkey:
* talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @src_chained: whether src is chained or not
- * @dst_chained: whether dst is chained or not
* @icv_ool: whether ICV is out-of-line
* @iv_dma: dma address of iv for checking continuity and link table
* @dma_len: length of dma mapped link_tbl space
@@ -874,8 +872,6 @@ badkey:
struct talitos_edesc {
int src_nents;
int dst_nents;
- bool src_chained;
- bool dst_chained;
bool icv_ool;
dma_addr_t iv_dma;
int dma_len;
@@ -887,29 +883,6 @@ struct talitos_edesc {
};
};

-static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
- unsigned int nents, enum dma_data_direction dir,
- bool chained)
-{
- if (unlikely(chained))
- while (sg) {
- dma_map_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
- }
- else
- dma_map_sg(dev, sg, nents, dir);
- return nents;
-}
-
-static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
- enum dma_data_direction dir)
-{
- while (sg) {
- dma_unmap_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
- }
-}
-
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
@@ -919,24 +892,13 @@ static void talitos_sg_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;

if (src != dst) {
- if (edesc->src_chained)
- talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
- else
- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

if (dst) {
- if (edesc->dst_chained)
- talitos_unmap_sg_chain(dev, dst,
- DMA_FROM_DEVICE);
- else
- dma_unmap_sg(dev, dst, dst_nents,
- DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
}
} else
- if (edesc->src_chained)
- talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
- else
- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
@@ -1118,10 +1080,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
DMA_TO_DEVICE);

- sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
- (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- : DMA_TO_DEVICE,
- edesc->src_chained);
+ sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);

/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
@@ -1185,9 +1146,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
desc->ptr[5].j_extent = authsize;

if (areq->src != areq->dst)
- sg_count = talitos_map_sg(dev, areq->dst,
- edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE, edesc->dst_chained);
+ sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
+ DMA_FROM_DEVICE);

edesc->icv_ool = false;

@@ -1234,26 +1194,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
}

/*
- * derive number of elements in scatterlist
- */
-static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
-{
- struct scatterlist *sg = sg_list;
- int sg_nents = 0;
-
- *chained = false;
- while (nbytes > 0 && sg) {
- sg_nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = true;
- sg = sg_next(sg);
- }
-
- return sg_nents;
-}
-
-/*
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
@@ -1270,7 +1210,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
{
struct talitos_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
- bool src_chained = false, dst_chained = false;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1287,18 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

if (!dst || dst == src) {
- src_nents = sg_count(src, assoclen + cryptlen + authsize,
- &src_chained);
+ src_nents = sg_nents_for_len(src,
+ assoclen + cryptlen + authsize);
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
} else { /* dst && dst != src*/
- src_nents = sg_count(src, assoclen + cryptlen +
- (encrypt ? 0 : authsize),
- &src_chained);
+ src_nents = sg_nents_for_len(src, assoclen + cryptlen +
+ (encrypt ? 0 : authsize));
src_nents = (src_nents == 1) ? 0 : src_nents;
- dst_nents = sg_count(dst, assoclen + cryptlen +
- (encrypt ? authsize : 0),
- &dst_chained);
+ dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
+ (encrypt ? authsize : 0));
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}

@@ -1332,8 +1269,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,

edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->src_chained = src_chained;
- edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
if (dma_len)
@@ -1518,8 +1453,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
} else {
to_talitos_ptr_extent_clear(ptr, is_sec1);

- sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
- edesc->src_chained);
+ sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
@@ -1552,8 +1486,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
bool is_sec1 = has_ftr_sec1(priv);

if (dir != DMA_NONE)
- sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
- dir, edesc->dst_chained);
+ sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

to_talitos_ptr_len(ptr, len, is_sec1);

@@ -1897,12 +1830,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int nbytes_to_hash;
unsigned int to_hash_later;
unsigned int nsg;
- bool chained;

if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
sg_copy_to_buffer(areq->src,
- sg_count(areq->src, nbytes, &chained),
+ sg_nents_for_len(areq->src, nbytes),
req_ctx->buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
@@ -1935,7 +1867,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
req_ctx->psrc = areq->src;

if (to_hash_later) {
- int nents = sg_count(areq->src, nbytes, &chained);
+ int nents = sg_nents_for_len(areq->src, nbytes);
sg_pcopy_to_buffer(areq->src, nents,
req_ctx->bufnext,
to_hash_later,
--
2.4.9

2015-09-23 11:56:27

by Corentin Labbe

Subject: [PATCH 2/4] crypto: qce: dma_map_sg can handle chained SG

The qce driver uses two dma_map_sg() paths depending on whether the SG
list is chained or not.
Since dma_map_sg() can handle both cases, clean up the code by removing
all references to chained SG.

This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.

Signed-off-by: LABBE Corentin <[email protected]>
---
drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
drivers/crypto/qce/cipher.h | 4 ----
drivers/crypto/qce/dma.c | 52 -----------------------------------------
drivers/crypto/qce/dma.h | 5 ----
drivers/crypto/qce/sha.c | 14 ++++-------
drivers/crypto/qce/sha.h | 2 --
6 files changed, 15 insertions(+), 92 deletions(-)

diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ad592de..2c0d63d 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
error);

if (diff_dst)
- qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
- rctx->dst_chained);
- qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

sg_free_table(&rctx->dst_tbl);

@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

- rctx->src_nents = qce_countsg(req->src, req->nbytes,
- &rctx->src_chained);
- if (diff_dst) {
- rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
- &rctx->dst_chained);
- } else {
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (diff_dst)
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ else
rctx->dst_nents = rctx->src_nents;
- rctx->dst_chained = rctx->src_chained;
- }

rctx->dst_nents += 1;

@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;

- ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
if (ret < 0)
goto error_free;

if (diff_dst) {
- ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
- rctx->src_chained);
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
if (ret < 0)
goto error_unmap_dst;
rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
qce_dma_terminate_all(&qce->dma);
error_unmap_src:
if (diff_dst)
- qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
- rctx->src_chained);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
- qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
sg_free_table(&rctx->dst_tbl);
return ret;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index d5757cf..5c6a5f8 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
* @ivsize: IV size
* @src_nents: source entries
* @dst_nents: destination entries
- * @src_chained: is source chained
- * @dst_chained: is destination chained
* @result_sg: scatterlist used for result buffer
* @dst_tbl: destination sg table
* @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
unsigned int ivsize;
int src_nents;
int dst_nents;
- bool src_chained;
- bool dst_chained;
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 378cb76..4797e79 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
kfree(dma->result_buf);
}

-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained)
-{
- int err;
-
- if (chained) {
- while (sg) {
- err = dma_map_sg(dev, sg, 1, dir);
- if (!err)
- return -EFAULT;
- sg = sg_next(sg);
- }
- } else {
- err = dma_map_sg(dev, sg, nents, dir);
- if (!err)
- return -EFAULT;
- }
-
- return nents;
-}
-
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained)
-{
- if (chained)
- while (sg) {
- dma_unmap_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
- }
- else
- dma_unmap_sg(dev, sg, nents, dir);
-}
-
-int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
-{
- struct scatterlist *sg = sglist;
- int nents = 0;
-
- if (chained)
- *chained = false;
-
- while (nbytes > 0 && sg) {
- nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
- *chained = true;
- sg = sg_next(sg);
- }
-
- return nents;
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 65bedb8..130235d 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
dma_async_tx_callback cb, void *cb_param);
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
-int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained);
-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index be2f504..bcde9a0 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
if (error)
dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

- qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
- qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

memcpy(rctx->digest, result->auth_iv, digestsize);
if (req->result)
@@ -92,10 +91,8 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
rctx->authklen = AES_KEYSIZE_128;
}

- rctx->src_nents = qce_countsg(req->src, req->nbytes,
- &rctx->src_chained);
- ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
if (ret < 0)
return ret;

@@ -123,8 +120,7 @@ error_terminate:
error_unmap_dst:
qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
error_unmap_src:
- qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
return ret;
}

diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
index 286f0d5..236bb5e9 100644
--- a/drivers/crypto/qce/sha.h
+++ b/drivers/crypto/qce/sha.h
@@ -36,7 +36,6 @@ struct qce_sha_ctx {
* @flags: operation flags
* @src_orig: original request sg list
* @nbytes_orig: original request number of bytes
- * @src_chained: is source scatterlist chained
* @src_nents: source number of entries
* @byte_count: byte count
* @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
unsigned long flags;
struct scatterlist *src_orig;
unsigned int nbytes_orig;
- bool src_chained;
int src_nents;
__be32 byte_count[2];
u64 count;
--
2.4.9

2015-09-23 11:55:27

by Corentin Labbe

Subject: [PATCH 3/4] crypto: caam: dma_map_sg can handle chained SG

The caam driver uses two dma_map_sg() paths depending on whether the SG
list is chained or not.
Since dma_map_sg() can handle both cases, clean up the code by removing
all references to chained SG.

This removes the dma_map_sg_chained, dma_unmap_sg_chained
and __sg_count functions.

Signed-off-by: LABBE Corentin <[email protected]>
---
drivers/crypto/caam/caamalg.c | 94 +++++++++++++++-------------------------
drivers/crypto/caam/caamhash.c | 55 ++++++++---------------
drivers/crypto/caam/sg_sw_sec4.h | 72 +-----------------------------
3 files changed, 53 insertions(+), 168 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ba79d63..ad0d1ec 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1708,11 +1708,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/*
* aead_edesc - s/w-extended aead descriptor
* @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @assoc_chained: if source is chained
* @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
* @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
* @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1721,11 +1718,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
*/
struct aead_edesc {
int assoc_nents;
- bool assoc_chained;
int src_nents;
- bool src_chained;
int dst_nents;
- bool dst_chained;
dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
@@ -1736,9 +1730,7 @@ struct aead_edesc {
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
* @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
* @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
* @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1747,9 +1739,7 @@ struct aead_edesc {
*/
struct ablkcipher_edesc {
int src_nents;
- bool src_chained;
int dst_nents;
- bool dst_chained;
dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
@@ -1759,18 +1749,15 @@ struct ablkcipher_edesc {

static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
- bool src_chained, int dst_nents, bool dst_chained,
+ int dst_nents,
dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
- dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
- src_chained);
- dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
- dst_chained);
+ dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg_chained(dev, src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
+ dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
}

if (iv_dma)
@@ -1785,8 +1772,7 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->src_chained, edesc->dst_nents,
- edesc->dst_chained, 0, 0,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

@@ -1798,8 +1784,8 @@ static void ablkcipher_unmap(struct device *dev,
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->src_chained, edesc->dst_nents,
- edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

@@ -2169,22 +2155,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct aead_edesc *edesc;
int sgc;
bool all_contig = true;
- bool src_chained = false, dst_chained = false;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;

if (unlikely(req->dst != req->src)) {
- src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
- &src_chained);
+ src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
dst_nents = sg_count(req->dst,
req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)),
- &dst_chained);
+ (encrypt ? authsize : (-authsize)));
} else {
src_nents = sg_count(req->src,
req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0),
- &src_chained);
+ (encrypt ? authsize : 0));
}

/* Check if data are contiguous. */
@@ -2207,37 +2189,35 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}

if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
if (unlikely(!sgc)) {
dev_err(jrdev, "unable to map source\n");
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
} else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(jrdev, "unable to map source\n");
kfree(edesc);
return ERR_PTR(-ENOMEM);
}

- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
if (unlikely(!sgc)) {
dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
+ dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
}

edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
*all_contig_ptr = all_contig;
@@ -2467,22 +2447,21 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
bool iv_contig = false;
int sgc;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- bool src_chained = false, dst_chained = false;
int sec4_sg_index;

- src_nents = sg_count(req->src, req->nbytes, &src_chained);
+ src_nents = sg_count(req->src, req->nbytes);

if (req->dst != req->src)
- dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+ dst_nents = sg_count(req->dst, req->nbytes);

if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
} else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
}

iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
@@ -2511,9 +2490,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}

edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
@@ -2646,22 +2623,21 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
bool iv_contig = false;
int sgc;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- bool src_chained = false, dst_chained = false;
int sec4_sg_index;

- src_nents = sg_count(req->src, req->nbytes, &src_chained);
+ src_nents = sg_count(req->src, req->nbytes);

if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+ dst_nents = sg_count(req->dst, req->nbytes);

if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
} else {
- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, src_chained);
- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE, dst_chained);
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
}

/*
@@ -2690,9 +2666,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}

edesc->src_nents = src_nents;
- edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
- edesc->dst_chained = dst_chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 94433b9..9609f66 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -181,10 +181,9 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
struct scatterlist *src, int src_nents,
- struct sec4_sg_entry *sec4_sg,
- bool chained)
+ struct sec4_sg_entry *sec4_sg)
{
- dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+ dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

@@ -585,7 +584,6 @@ badkey:
* ahash_edesc - s/w-extended ahash descriptor
* @dst_dma: physical mapped address of req->result
* @sec4_sg_dma: physical mapped address of h/w link table
- * @chained: if source is chained
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg: pointer to h/w link table
@@ -594,7 +592,6 @@ badkey:
struct ahash_edesc {
dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
- bool chained;
int src_nents;
int sec4_sg_bytes;
struct sec4_sg_entry *sec4_sg;
@@ -606,8 +603,7 @@ static inline void ahash_unmap(struct device *dev,
struct ahash_request *req, int dst_len)
{
if (edesc->src_nents)
- dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
- DMA_TO_DEVICE, edesc->chained);
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

@@ -788,7 +784,6 @@ static int ahash_update_ctx(struct ahash_request *req)
dma_addr_t ptr = ctx->sh_desc_update_dma;
int src_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
- bool chained = false;
int ret = 0;
int sh_len;

@@ -797,8 +792,8 @@ static int ahash_update_ctx(struct ahash_request *req)
to_hash = in_len - *next_buflen;

if (to_hash) {
- src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
- &chained);
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@@ -816,7 +811,6 @@ static int ahash_update_ctx(struct ahash_request *req)
}

edesc->src_nents = src_nents;
- edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
@@ -833,8 +827,7 @@ static int ahash_update_ctx(struct ahash_request *req)

if (src_nents) {
src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + sec4_sg_src_index,
- chained);
+ edesc->sec4_sg + sec4_sg_src_index);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -996,11 +989,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
int src_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- bool chained = false;
int ret = 0;
int sh_len;

- src_nents = __sg_count(req->src, req->nbytes, &chained);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@@ -1018,7 +1010,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

edesc->src_nents = src_nents;
- edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
@@ -1033,7 +1024,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
last_buflen);

src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
- sec4_sg_src_index, chained);
+ sec4_sg_src_index);

edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1081,14 +1072,12 @@ static int ahash_digest(struct ahash_request *req)
int src_nents, sec4_sg_bytes;
dma_addr_t src_dma;
struct ahash_edesc *edesc;
- bool chained = false;
int ret = 0;
u32 options;
int sh_len;

- src_nents = sg_count(req->src, req->nbytes, &chained);
- dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
- chained);
+ src_nents = sg_count(req->src, req->nbytes);
+ dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

/* allocate space for base edesc and hw desc commands, link tables */
@@ -1102,7 +1091,6 @@ static int ahash_digest(struct ahash_request *req)
DESC_JOB_IO_LEN;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
- edesc->chained = chained;

sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
@@ -1228,7 +1216,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct ahash_edesc *edesc;
u32 *desc, *sh_desc = ctx->sh_desc_update_first;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
- bool chained = false;
int ret = 0;
int sh_len;

@@ -1236,8 +1223,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
to_hash = in_len - *next_buflen;

if (to_hash) {
- src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
- &chained);
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
sec4_sg_bytes = (1 + src_nents) *
sizeof(struct sec4_sg_entry);

@@ -1254,7 +1241,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}

edesc->src_nents = src_nents;
- edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
@@ -1263,7 +1249,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + 1, chained);
+ edesc->sec4_sg + 1);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -1343,11 +1329,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int sec4_sg_bytes, sec4_sg_src_index, src_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- bool chained = false;
int sh_len;
int ret = 0;

- src_nents = __sg_count(req->src, req->nbytes, &chained);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
sec4_sg_src_index = 2;
sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
sizeof(struct sec4_sg_entry);
@@ -1365,7 +1350,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

edesc->src_nents = src_nents;
- edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
@@ -1374,8 +1358,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
state->buf_dma, buflen,
last_buflen);

- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
- chained);
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1429,7 +1412,6 @@ static int ahash_update_first(struct ahash_request *req)
dma_addr_t src_dma;
u32 options;
struct ahash_edesc *edesc;
- bool chained = false;
int ret = 0;
int sh_len;

@@ -1438,10 +1420,8 @@ static int ahash_update_first(struct ahash_request *req)
to_hash = req->nbytes - *next_buflen;

if (to_hash) {
- src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
- &chained);
- dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE, chained);
+ src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+ dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

/*
@@ -1457,7 +1437,6 @@ static int ahash_update_first(struct ahash_request *req)
}

edesc->src_nents = src_nents;
- edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 18cd6d1..12ec661 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -69,81 +69,13 @@ static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
return sec4_sg_ptr - 1;
}

-/* count number of elements in scatterlist */
-static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
- bool *chained)
-{
- struct scatterlist *sg = sg_list;
- int sg_nents = 0;
-
- while (nbytes > 0) {
- sg_nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = true;
- sg = sg_next(sg);
- }
-
- return sg_nents;
-}
-
/* derive number of elements in scatterlist, but return 0 for 1 */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes,
- bool *chained)
+static inline int sg_count(struct scatterlist *sg_list, int nbytes)
{
- int sg_nents = __sg_count(sg_list, nbytes, chained);
+ int sg_nents = sg_nents_for_len(sg_list, nbytes);

if (likely(sg_nents == 1))
return 0;

return sg_nents;
}
-
-static inline void dma_unmap_sg_chained(
- struct device *dev, struct scatterlist *sg, unsigned int nents,
- enum dma_data_direction dir, bool chained)
-{
- if (unlikely(chained)) {
- int i;
- struct scatterlist *tsg = sg;
-
- /*
- * Use a local copy of the sg pointer to avoid moving the
- * head of the list pointed to by sg as we walk the list.
- */
- for (i = 0; i < nents; i++) {
- dma_unmap_sg(dev, tsg, 1, dir);
- tsg = sg_next(tsg);
- }
- } else if (nents) {
- dma_unmap_sg(dev, sg, nents, dir);
- }
-}
-
-static inline int dma_map_sg_chained(
- struct device *dev, struct scatterlist *sg, unsigned int nents,
- enum dma_data_direction dir, bool chained)
-{
- if (unlikely(chained)) {
- int i;
- struct scatterlist *tsg = sg;
-
- /*
- * Use a local copy of the sg pointer to avoid moving the
- * head of the list pointed to by sg as we walk the list.
- */
- for (i = 0; i < nents; i++) {
- if (!dma_map_sg(dev, tsg, 1, dir)) {
- dma_unmap_sg_chained(dev, sg, i, dir,
- chained);
- nents = 0;
- break;
- }
-
- tsg = sg_next(tsg);
- }
- } else
- nents = dma_map_sg(dev, sg, nents, dir);
-
- return nents;
-}
--
2.4.9

2015-09-23 11:55:28

by Corentin Labbe

Subject: [PATCH 4/4] crypto: sahara: dma_map_sg can handle chained SG

The sahara driver uses two dma_map_sg() paths depending on whether the
SG list is chained or not.
Since dma_map_sg() can handle both cases, clean up the code by removing
all references to chained SG.

This removes the sahara_sha_unmap_sg function.

Signed-off-by: LABBE Corentin <[email protected]>
---
drivers/crypto/sahara.c | 66 ++++++++++---------------------------------------
1 file changed, 13 insertions(+), 53 deletions(-)

diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index cea2411..804c0f5 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -173,7 +173,6 @@ struct sahara_aes_reqctx {
* @sg_in_idx: number of hw links
* @in_sg: scatterlist for input data
* @in_sg_chain: scatterlists for chained input data
- * @in_sg_chained: specifies if chained scatterlists are used or not
* @total: total number of bytes for transfer
* @last: is this the last block
* @first: is this the first block
@@ -191,7 +190,6 @@ struct sahara_sha_reqctx {
unsigned int sg_in_idx;
struct scatterlist *in_sg;
struct scatterlist in_sg_chain[2];
- bool in_sg_chained;
size_t total;
unsigned int last;
unsigned int first;
@@ -801,38 +799,19 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
return -EINVAL;
}

- if (rctx->in_sg_chained) {
- i = start;
- sg = dev->in_sg;
- while (sg) {
- ret = dma_map_sg(dev->device, sg, 1,
- DMA_TO_DEVICE);
- if (!ret)
- return -EFAULT;
-
- dev->hw_link[i]->len = sg->length;
- dev->hw_link[i]->p = sg->dma_address;
+ sg = dev->in_sg;
+ ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
+ if (!ret)
+ return -EFAULT;
+
+ for (i = start; i < dev->nb_in_sg + start; i++) {
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg + start - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
- i += 1;
- }
- dev->hw_link[i-1]->next = 0;
- } else {
- sg = dev->in_sg;
- ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_TO_DEVICE);
- if (!ret)
- return -EFAULT;
-
- for (i = start; i < dev->nb_in_sg + start; i++) {
- dev->hw_link[i]->len = sg->length;
- dev->hw_link[i]->p = sg->dma_address;
- if (i == (dev->nb_in_sg + start - 1)) {
- dev->hw_link[i]->next = 0;
- } else {
- dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
- sg = sg_next(sg);
- }
}
}

@@ -980,7 +959,6 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;

- rctx->in_sg_chained = true;
req->src = rctx->in_sg_chain;
/* only data from previous operation */
} else if (rctx->buf_cnt) {
@@ -991,13 +969,11 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
/* buf was copied into rembuf above */
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
rctx->total = rctx->buf_cnt;
- rctx->in_sg_chained = false;
/* no data from previous operation */
} else {
rctx->in_sg = req->src;
rctx->total = req->nbytes;
req->src = rctx->in_sg;
- rctx->in_sg_chained = false;
}

/* on next call, we only have the remaining data in the buffer */
@@ -1006,23 +982,6 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
return -EINPROGRESS;
}

-static void sahara_sha_unmap_sg(struct sahara_dev *dev,
- struct sahara_sha_reqctx *rctx)
-{
- struct scatterlist *sg;
-
- if (rctx->in_sg_chained) {
- sg = dev->in_sg;
- while (sg) {
- dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
- sg = sg_next(sg);
- }
- } else {
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_TO_DEVICE);
- }
-}
-
static int sahara_sha_process(struct ahash_request *req)
{
struct sahara_dev *dev = dev_ptr;
@@ -1062,7 +1021,8 @@ static int sahara_sha_process(struct ahash_request *req)
}

if (rctx->sg_in_idx)
- sahara_sha_unmap_sg(dev, rctx);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);

memcpy(rctx->context, dev->context_base, rctx->context_size);

--
2.4.9

2015-09-23 12:07:22

by Christophe Leroy

Subject: Re: [PATCH 1/4] crypto: talitos: dma_map_sg can handle chained SG


On 23/09/2015 13:55, LABBE Corentin wrote:
> The talitos driver uses two dma_map_sg() paths
> depending on whether the SG list is chained or not.
> Since dma_map_sg() can handle both cases, clean up the code by removing
> all references to chained SG.
>
> This removes the talitos_map_sg, talitos_unmap_sg_chain
> and sg_count functions.
>
>
Shouldn't the replacement of sg_count() with sg_nents_for_len() be part
of a second patch?

Christophe

2015-09-23 19:47:49

by Stanimir Varbanov

Subject: Re: [PATCH 2/4] crypto: qce: dma_map_sg can handle chained SG

Hi,

On 09/23/2015 02:55 PM, LABBE Corentin wrote:
> The qce driver uses two dma_map_sg() paths depending on whether the SG
> list is chained or not.
> Since dma_map_sg() can handle both cases, clean up the code by removing
> all references to chained SG.
>
> This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
>
> Signed-off-by: LABBE Corentin <[email protected]>
> ---
> drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
> drivers/crypto/qce/cipher.h | 4 ----
> drivers/crypto/qce/dma.c | 52 -----------------------------------------
> drivers/crypto/qce/dma.h | 5 ----
> drivers/crypto/qce/sha.c | 14 ++++-------
> drivers/crypto/qce/sha.h | 2 --
> 6 files changed, 15 insertions(+), 92 deletions(-)

Thanks for the patch, very nice diffstat.

I will take care of testing the patch next week.


regards,
Stan

2015-09-28 09:43:49

by Corentin Labbe

Subject: Re: [PATCH 1/4] crypto: talitos: dma_map_sg can handle chained SG

On Wed, Sep 23, 2015 at 02:07:22PM +0200, Christophe Leroy wrote:
>
> On 23/09/2015 13:55, LABBE Corentin wrote:
> > The talitos driver uses two dma_map_sg() paths
> > depending on whether the SG list is chained or not.
> > Since dma_map_sg() can handle both cases, clean up the code by removing
> > all references to chained SG.
> >
> > This removes the talitos_map_sg, talitos_unmap_sg_chain
> > and sg_count functions.
> >
> >
> Shouldn't the replacement of sg_count() with sg_nents_for_len() be part
> of a second patch?
>

No, because I also remove the chained argument of sg_count(), so splitting
this patch into a dedicated patch that removes sg_count() would not be clean.
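
To illustrate, both changes touch the same lines (a simplified sketch, not
the exact talitos code):

	/* before: sg_count() counted entries and set the chained flag */
	src_nents = sg_count(areq->src, nbytes, &chained);
	sg_count = talitos_map_sg(dev, areq->src, src_nents, dir, chained);

	/* after: only the count remains; dma_map_sg() handles chaining */
	src_nents = sg_nents_for_len(areq->src, nbytes);
	sg_count = dma_map_sg(dev, areq->src, src_nents, dir);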

Regards

2015-10-01 13:58:16

by Herbert Xu

Subject: Re: [PATCH 2/4] crypto: qce: dma_map_sg can handle chained SG

On Wed, Sep 23, 2015 at 01:55:26PM +0200, LABBE Corentin wrote:
> The qce driver uses two dma_map_sg() paths depending on whether the SG
> list is chained or not.
> Since dma_map_sg() can handle both cases, clean up the code by removing
> all references to chained SG.
>
> This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
>
> Signed-off-by: LABBE Corentin <[email protected]>

This patch doesn't compile as you missed a hunk in sha.c.

Please fix and resubmit.

Thanks!
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2015-10-01 14:01:06

by Herbert Xu

Subject: Re: [PATCH] crypto: dma_map_sg can handle chained SG

On Wed, Sep 23, 2015 at 01:55:24PM +0200, LABBE Corentin wrote:
>
> Hello
>
> Some drivers use two dma_map_sg() paths depending on whether the SG list
> is chained or not.
> Since dma_map_sg() can handle both cases, this patch series cleans up all
> code that references chained SG.
>
> Note that I could only compile-test the sahara and caam patches.
> None of them could be runtime-tested due to lack of hardware.

Patches 1,3-4 applied. Please resubmit patch 2 once you've completed
it. Thanks!
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2015-10-02 06:01:12

by Corentin Labbe

Subject: [PATCH] crypto: qce: dma_map_sg can handle chained SG

The qce driver uses two dma_map_sg() paths depending on whether the SG
list is chained or not.
Since dma_map_sg() can handle both cases, clean up the code by removing
all references to chained SG.

This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.

Signed-off-by: LABBE Corentin <[email protected]>
---
drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
drivers/crypto/qce/cipher.h | 4 ----
drivers/crypto/qce/dma.c | 52 -----------------------------------------
drivers/crypto/qce/dma.h | 5 ----
drivers/crypto/qce/sha.c | 18 ++++++--------
drivers/crypto/qce/sha.h | 2 --
6 files changed, 17 insertions(+), 94 deletions(-)

diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ad592de..2c0d63d 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -44,10 +44,8 @@ static void qce_ablkcipher_done(void *data)
error);

if (diff_dst)
- qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
- rctx->dst_chained);
- qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

sg_free_table(&rctx->dst_tbl);

@@ -80,15 +78,11 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

- rctx->src_nents = qce_countsg(req->src, req->nbytes,
- &rctx->src_chained);
- if (diff_dst) {
- rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
- &rctx->dst_chained);
- } else {
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (diff_dst)
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ else
rctx->dst_nents = rctx->src_nents;
- rctx->dst_chained = rctx->src_chained;
- }

rctx->dst_nents += 1;

@@ -116,14 +110,12 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
sg_mark_end(sg);
rctx->dst_sg = rctx->dst_tbl.sgl;

- ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
if (ret < 0)
goto error_free;

if (diff_dst) {
- ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
- rctx->src_chained);
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
if (ret < 0)
goto error_unmap_dst;
rctx->src_sg = req->src;
@@ -149,11 +141,9 @@ error_terminate:
qce_dma_terminate_all(&qce->dma);
error_unmap_src:
if (diff_dst)
- qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
- rctx->src_chained);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
- qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
- rctx->dst_chained);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
sg_free_table(&rctx->dst_tbl);
return ret;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index d5757cf..5c6a5f8 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -32,8 +32,6 @@ struct qce_cipher_ctx {
* @ivsize: IV size
* @src_nents: source entries
* @dst_nents: destination entries
- * @src_chained: is source chained
- * @dst_chained: is destination chained
* @result_sg: scatterlist used for result buffer
* @dst_tbl: destination sg table
* @dst_sg: destination sg pointer table beginning
@@ -47,8 +45,6 @@ struct qce_cipher_reqctx {
unsigned int ivsize;
int src_nents;
int dst_nents;
- bool src_chained;
- bool dst_chained;
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 378cb76..4797e79 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -54,58 +54,6 @@ void qce_dma_release(struct qce_dma_data *dma)
kfree(dma->result_buf);
}

-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained)
-{
- int err;
-
- if (chained) {
- while (sg) {
- err = dma_map_sg(dev, sg, 1, dir);
- if (!err)
- return -EFAULT;
- sg = sg_next(sg);
- }
- } else {
- err = dma_map_sg(dev, sg, nents, dir);
- if (!err)
- return -EFAULT;
- }
-
- return nents;
-}
-
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained)
-{
- if (chained)
- while (sg) {
- dma_unmap_sg(dev, sg, 1, dir);
- sg = sg_next(sg);
- }
- else
- dma_unmap_sg(dev, sg, nents, dir);
-}
-
-int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
-{
- struct scatterlist *sg = sglist;
- int nents = 0;
-
- if (chained)
- *chained = false;
-
- while (nbytes > 0 && sg) {
- nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
- *chained = true;
- sg = sg_next(sg);
- }
-
- return nents;
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 65bedb8..130235d 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -49,11 +49,6 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
dma_async_tx_callback cb, void *cb_param);
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
-int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
-void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained);
-int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, bool chained);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);

diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index be2f504..0c9973e 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
if (error)
dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

- qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
- qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

memcpy(rctx->digest, result->auth_iv, digestsize);
if (req->result)
@@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
rctx->authklen = AES_KEYSIZE_128;
}

- rctx->src_nents = qce_countsg(req->src, req->nbytes,
- &rctx->src_chained);
- ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
if (ret < 0)
return ret;

sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

- ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+ ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
if (ret < 0)
goto error_unmap_src;

@@ -121,10 +118,9 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
error_terminate:
qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
- qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+ dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
- qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
- rctx->src_chained);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
return ret;
}

diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
index 286f0d5..236bb5e9 100644
--- a/drivers/crypto/qce/sha.h
+++ b/drivers/crypto/qce/sha.h
@@ -36,7 +36,6 @@ struct qce_sha_ctx {
* @flags: operation flags
* @src_orig: original request sg list
* @nbytes_orig: original request number of bytes
- * @src_chained: is source scatterlist chained
* @src_nents: source number of entries
* @byte_count: byte count
* @count: save count in states during update, import and export
@@ -55,7 +54,6 @@ struct qce_sha_reqctx {
unsigned long flags;
struct scatterlist *src_orig;
unsigned int nbytes_orig;
- bool src_chained;
int src_nents;
__be32 byte_count[2];
u64 count;
--
2.4.9

2015-10-08 14:23:27

by Herbert Xu

Subject: Re: [PATCH] crypto: qce: dma_map_sg can handle chained SG

On Fri, Oct 02, 2015 at 08:01:02AM +0200, LABBE Corentin wrote:
> The qce driver uses two dma_map_sg() paths depending on whether the SG
> list is chained or not.
> Since dma_map_sg() can handle both cases, clean up the code by removing
> all references to chained SG.
>
> This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
>
> Signed-off-by: LABBE Corentin <[email protected]>

Patch applied. Thanks.
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2015-11-03 10:40:04

by Stanimir Varbanov

Subject: Re: [PATCH] crypto: qce: dma_map_sg can handle chained SG

Hi,

I know that this patch has been queued up, but ...

On 10/02/2015 09:01 AM, LABBE Corentin wrote:
> The qce driver uses two dma_map_sg() paths depending on whether the SG
> list is chained or not.
> Since dma_map_sg() can handle both cases, clean up the code by removing
> all references to chained SG.
>
> This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
>
> Signed-off-by: LABBE Corentin <[email protected]>
> ---
> drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
> drivers/crypto/qce/cipher.h | 4 ----
> drivers/crypto/qce/dma.c | 52 -----------------------------------------
> drivers/crypto/qce/dma.h | 5 ----
> drivers/crypto/qce/sha.c | 18 ++++++--------
> drivers/crypto/qce/sha.h | 2 --
> 6 files changed, 17 insertions(+), 94 deletions(-)
>

<snip>

> diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
> index be2f504..0c9973e 100644
> --- a/drivers/crypto/qce/sha.c
> +++ b/drivers/crypto/qce/sha.c
> @@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
> if (error)
> dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
>
> - qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
> - rctx->src_chained);
> - qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
> + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
> + dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
>
> memcpy(rctx->digest, result->auth_iv, digestsize);
> if (req->result)
> @@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
> rctx->authklen = AES_KEYSIZE_128;
> }
>
> - rctx->src_nents = qce_countsg(req->src, req->nbytes,
> - &rctx->src_chained);
> - ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
> - rctx->src_chained);
> + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);

sg_nents_for_len can return -EINVAL, and this error should be handled.
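
A minimal sketch of such a check (a hypothetical hunk, but this is the idea):

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		/* propagate -EINVAL instead of passing it to dma_map_sg() */
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}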

After adding a check for the error, I'm observing the following:

#insmod tcrypt.ko mode=403 (test_ahash_speed("sha1"))

test 21 ( 8192 byte blocks, 8192 bytes per update, 1 updates):
qcrypto 73a000.crypto: sg_nents_for_len failed (-22) (nbytes:8192,
sg_len:4096)
hashing failed ret=-22

It seems that something is wrong with the test case. Looking further into
test_hash_sg_init(), we can see:

#define TVMEMSIZE 4

sg_init_table(sg, TVMEMSIZE);
for (i = 0; i < TVMEMSIZE; i++) {
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
memset(tvmem[i], 0xff, PAGE_SIZE);
}

so we have 4 SGs with sg->length = 4096; thus, for nbytes = 8192,
sg_nents_for_len() should return 2 entries of 4096 bytes each.

What is wrong here?
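
For reference, the mainline helper looks roughly like this (a sketch from
memory; see lib/scatterlist.c for the real thing):

	int sg_nents_for_len(struct scatterlist *sg, u64 len)
	{
		int nents;
		u64 total;

		if (!len)
			return 0;

		for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
			nents++;
			total += sg->length;
			if (total >= len)
				return nents;
		}

		/* the list ran out before len bytes were found */
		return -EINVAL;
	}

so -EINVAL means the walk exhausted the list before accumulating
req->nbytes bytes, even though the table above should provide 4 x 4096.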

> + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
> if (ret < 0)
> return ret;
>
> sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
>
> - ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
> + ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
> if (ret < 0)
> goto error_unmap_src;
>

<snip>


--
regards,
Stan

2015-11-03 12:37:00

by Corentin Labbe

Subject: Re: [PATCH] crypto: qce: dma_map_sg can handle chained SG

On Tue, Nov 03, 2015 at 12:39:57PM +0200, Stanimir Varbanov wrote:
> Hi,
>
> I know that this patch has been queued up, but ...
>
> On 10/02/2015 09:01 AM, LABBE Corentin wrote:
> > The qce driver uses two dma_map_sg() paths depending on whether the SG
> > list is chained or not.
> > Since dma_map_sg() can handle both cases, clean up the code by removing
> > all references to chained SG.
> >
> > This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
> >
> > Signed-off-by: LABBE Corentin <[email protected]>
> > ---
> > drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
> > drivers/crypto/qce/cipher.h | 4 ----
> > drivers/crypto/qce/dma.c | 52 -----------------------------------------
> > drivers/crypto/qce/dma.h | 5 ----
> > drivers/crypto/qce/sha.c | 18 ++++++--------
> > drivers/crypto/qce/sha.h | 2 --
> > 6 files changed, 17 insertions(+), 94 deletions(-)
> >
>
> <snip>
>
> > diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
> > index be2f504..0c9973e 100644
> > --- a/drivers/crypto/qce/sha.c
> > +++ b/drivers/crypto/qce/sha.c
> > @@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
> > if (error)
> > dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
> >
> > - qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
> > - rctx->src_chained);
> > - qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
> > + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
> > + dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
> >
> > memcpy(rctx->digest, result->auth_iv, digestsize);
> > if (req->result)
> > @@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
> > rctx->authklen = AES_KEYSIZE_128;
> > }
> >
> > - rctx->src_nents = qce_countsg(req->src, req->nbytes,
> > - &rctx->src_chained);
> > - ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
> > - rctx->src_chained);
> > + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
>
> sg_nents_for_len can return -EINVAL, and this error should be handled.
>
> After adding a check for the error, I'm observing the following:
>
> #insmod tcrypt.ko mode=403 (test_ahash_speed("sha1"))
>
> test 21 ( 8192 byte blocks, 8192 bytes per update, 1 updates):
> qcrypto 73a000.crypto: sg_nents_for_len failed (-22) (nbytes:8192,
> sg_len:4096)
> hashing failed ret=-22
>
> It seems that something is wrong with the test case. Looking further into
> test_hash_sg_init(), we can see:
>
> #define TVMEMSIZE 4
>
> sg_init_table(sg, TVMEMSIZE);
> for (i = 0; i < TVMEMSIZE; i++) {
> sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
> memset(tvmem[i], 0xff, PAGE_SIZE);
> }
>
> so we have 4 SGs with sg->length = 4096; thus, for nbytes = 8192,
> sg_nents_for_len() should return 2 entries of 4096 bytes each.
>
> What is wrong here?
>

Hello

It is a shame that I forgot to check the return value.
Herbert, do you prefer to drop the patch series so that I can send an updated
one, or should I send a new patch series that checks the return value of
sg_nents_for_len()?

Regards

LABBE Corentin

2015-11-03 13:33:51

by Stanimir Varbanov

Subject: Re: [PATCH] crypto: qce: dma_map_sg can handle chained SG

On 11/03/2015 02:36 PM, LABBE Corentin wrote:
> On Tue, Nov 03, 2015 at 12:39:57PM +0200, Stanimir Varbanov wrote:
>> Hi,
>>
>> I know that this patch has been queued up, but ...
>>
>> On 10/02/2015 09:01 AM, LABBE Corentin wrote:
>>> The qce driver uses two dma_map_sg() paths depending on whether the SG
>>> list is chained or not.
>>> Since dma_map_sg() can handle both cases, clean up the code by removing
>>> all references to chained SG.
>>>
>>> This removes the qce_mapsg, qce_unmapsg and qce_countsg functions.
>>>
>>> Signed-off-by: LABBE Corentin <[email protected]>
>>> ---
>>> drivers/crypto/qce/ablkcipher.c | 30 ++++++++----------------
>>> drivers/crypto/qce/cipher.h | 4 ----
>>> drivers/crypto/qce/dma.c | 52 -----------------------------------------
>>> drivers/crypto/qce/dma.h | 5 ----
>>> drivers/crypto/qce/sha.c | 18 ++++++--------
>>> drivers/crypto/qce/sha.h | 2 --
>>> 6 files changed, 17 insertions(+), 94 deletions(-)
>>>
>>
>> <snip>
>>
>>> diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
>>> index be2f504..0c9973e 100644
>>> --- a/drivers/crypto/qce/sha.c
>>> +++ b/drivers/crypto/qce/sha.c
>>> @@ -51,9 +51,8 @@ static void qce_ahash_done(void *data)
>>> if (error)
>>> dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
>>>
>>> - qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
>>> - rctx->src_chained);
>>> - qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
>>> + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
>>> + dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
>>>
>>> memcpy(rctx->digest, result->auth_iv, digestsize);
>>> if (req->result)
>>> @@ -92,16 +91,14 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
>>> rctx->authklen = AES_KEYSIZE_128;
>>> }
>>>
>>> - rctx->src_nents = qce_countsg(req->src, req->nbytes,
>>> - &rctx->src_chained);
>>> - ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
>>> - rctx->src_chained);
>>> + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
>>
>> sg_nents_for_len can return -EINVAL, and this error should be handled.
>>
>> After adding a check for the error, I'm observing the following:
>>
>> #insmod tcrypt.ko mode=403 (test_ahash_speed("sha1"))
>>
>> test 21 ( 8192 byte blocks, 8192 bytes per update, 1 updates):
>> qcrypto 73a000.crypto: sg_nents_for_len failed (-22) (nbytes:8192,
>> sg_len:4096)
>> hashing failed ret=-22
>>
>> It seems that something is wrong with the test case. Looking further into
>> test_hash_sg_init(), we can see:
>>
>> #define TVMEMSIZE 4
>>
>> sg_init_table(sg, TVMEMSIZE);
>> for (i = 0; i < TVMEMSIZE; i++) {
>> sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
>> memset(tvmem[i], 0xff, PAGE_SIZE);
>> }
>>
>> so we have 4 SGs with sg->length = 4096; thus, for nbytes = 8192,
>> sg_nents_for_len() should return 2 entries of 4096 bytes each.
>>
>> What is wrong here?
>>
>
> Hello
>
> It is a shame that I forgot to check the return value.

I guess that you expected dma_map_sg() to handle the error, and in fact
it does - by throwing a BUG_ON :))

The more interesting thing to me is how the above test case is expected
to work without sg chaining.

--
regards,
Stan

2015-11-04 05:02:29

by Herbert Xu

Subject: Re: [PATCH] crypto: qce: dma_map_sg can handle chained SG

On Tue, Nov 03, 2015 at 01:36:55PM +0100, LABBE Corentin wrote:
>
> Herbert, do you prefer to drop the patch series so that I can send an updated
> one, or should I send a new patch series that checks the return value of
> sg_nents_for_len()?

Your patch has already been applied so please send any fixes on
top of it.

Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt