2007-09-29 13:34:53

by Herbert Xu

[permalink] [raw]
Subject: [CRYPTO] blkcipher: Add IV generation

Hi:

I've just checked in these patches needed for CTR to function
correctly.

[CRYPTO] blkcipher: Add IV generation

Different cipher block modes may have different requirements for IV
generation. Therefore it makes sense to move IV generation into the
crypto API instead of having the crypto users worry about it.

In particular, this would allow us to support CTR mode for IPsec as
the IV that's currently used isn't secure for it.

For CBC, I've decided to always generate a random IV rather than using
the last block of the previous encryption. The reason is that for async
CBC we'd have to do this anyway. If we did both depending on whether
the algorithm is sync or async then we'd be exposing information to the
outside world.

Signed-off-by: Herbert Xu <[email protected]>

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index d1defbb..c697525 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -22,6 +22,7 @@ config CRYPTO_DES_S390
depends on S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
+ select CRYPTO_CBC
help
This us the s390 hardware accelerated implementation of the
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -31,6 +32,7 @@ config CRYPTO_AES_S390
depends on S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
+ select CRYPTO_CBC
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197). AES uses the Rijndael
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 3660ca6..195c118 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -17,6 +17,7 @@
*/

#include <crypto/algapi.h>
+#include <crypto/cbc.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"
@@ -317,6 +318,7 @@ static struct crypto_alg cbc_aes_alg = {
.setkey = cbc_aes_set_key,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
+ .geniv = crypto_cbc_geniv,
}
}
};
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index ea22707..8b8856c 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -15,6 +15,7 @@
*/

#include <crypto/algapi.h>
+#include <crypto/cbc.h>
#include <linux/init.h>
#include <linux/module.h>

@@ -226,6 +227,7 @@ static struct crypto_alg cbc_des_alg = {
.setkey = des_setkey,
.encrypt = cbc_des_encrypt,
.decrypt = cbc_des_decrypt,
+ .geniv = crypto_cbc_geniv,
}
}
};
@@ -384,6 +386,7 @@ static struct crypto_alg cbc_des3_128_alg = {
.setkey = des3_128_setkey,
.encrypt = cbc_des3_128_encrypt,
.decrypt = cbc_des3_128_decrypt,
+ .geniv = crypto_cbc_geniv,
}
}
};
@@ -546,6 +549,7 @@ static struct crypto_alg cbc_des3_192_alg = {
.setkey = des3_192_setkey,
.encrypt = cbc_des3_192_encrypt,
.decrypt = cbc_des3_192_decrypt,
+ .geniv = crypto_cbc_geniv,
}
}
};
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 2731acb..4be3343 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -60,6 +60,10 @@ static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return cipher->setkey(tfm, key, keylen);
}

+static void nogeniv(struct crypto_ablkcipher *tfm, u8 *iv, u64 seq)
+{
+}
+
static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
@@ -78,8 +82,16 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+ crt->geniv = alg->geniv;
crt->ivsize = alg->ivsize;

+ if (!alg->geniv) {
+ if (alg->ivsize)
+ return -EINVAL;
+
+ crt->geniv = nogeniv;
+ }
+
return 0;
}

diff --git a/crypto/aead.c b/crypto/aead.c
index 84a3501..1603791 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -67,9 +67,13 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8)
return -EINVAL;

+ if (!alg->geniv)
+ return -EINVAL;
+
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+ crt->geniv = alg->geniv;
crt->ivsize = alg->ivsize;
crt->authsize = alg->authsize;

diff --git a/crypto/authenc.c b/crypto/authenc.c
index 0b29a6a..f1802f1 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -221,6 +221,13 @@ static int crypto_authenc_decrypt(struct aead_request *req)
return crypto_ablkcipher_decrypt(abreq);
}

+static void crypto_authenc_geniv(struct crypto_aead *tfm, u8 *iv, u64 seq)
+{
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_ablkcipher_geniv(ctx->enc, iv, seq);
+}
+
static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
@@ -351,6 +358,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
inst->alg.cra_aead.setkey = crypto_authenc_setkey;
inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
+ inst->alg.cra_aead.geniv = crypto_authenc_geniv;

out:
crypto_mod_put(enc);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 9c49770..1f8e9e5 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -410,6 +410,22 @@ static int async_decrypt(struct ablkcipher_request *req)
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

+static void async_geniv(struct crypto_ablkcipher *ab, u8 *iv, u64 seq)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ab);
+ struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+ return alg->geniv(__crypto_blkcipher_cast(tfm), iv, seq);
+}
+
+static void async_nogeniv(struct crypto_ablkcipher *tfm, u8 *iv, u64 seq)
+{
+}
+
+static void nogeniv(struct crypto_blkcipher *tfm, u8 *iv, u64 seq)
+{
+}
+
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
@@ -434,8 +450,16 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
crt->setkey = async_setkey;
crt->encrypt = async_encrypt;
crt->decrypt = async_decrypt;
+ crt->geniv = async_geniv;
crt->ivsize = alg->ivsize;

+ if (!alg->geniv) {
+ if (alg->ivsize)
+ return -EINVAL;
+
+ crt->geniv = async_nogeniv;
+ }
+
return 0;
}

@@ -449,6 +473,14 @@ static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+ crt->geniv = alg->geniv;
+
+ if (!alg->geniv) {
+ if (alg->ivsize)
+ return -EINVAL;
+
+ crt->geniv = nogeniv;
+ }

addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 1f2649e..e8618e9 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -11,10 +11,12 @@
*/

#include <crypto/algapi.h>
+#include <crypto/cbc.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

@@ -207,6 +209,12 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
return err;
}

+void crypto_cbc_geniv(struct crypto_blkcipher *tfm, u8 *iv, u64 seq)
+{
+ get_random_bytes(iv, crypto_blkcipher_ivsize(tfm));
+}
+EXPORT_SYMBOL_GPL(crypto_cbc_geniv);
+
static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
{
do {
@@ -314,6 +322,7 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
+ inst->alg.cra_blkcipher.geniv = crypto_cbc_geniv;

out_put_alg:
crypto_mod_put(alg);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 8bf2da8..a1f3e2e 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -148,6 +148,14 @@ static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

+static void cryptd_blkcipher_geniv(struct crypto_ablkcipher *tfm, u8 *iv,
+ u64 seq)
+{
+ struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ crypto_blkcipher_geniv(ctx->child, iv, seq);
+}
+
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -251,6 +259,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
+ inst->alg.cra_ablkcipher.geniv = cryptd_blkcipher_geniv;

out_put_alg:
crypto_mod_put(alg);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5fd6688..02f688a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -27,6 +27,7 @@ config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
select CRYPTO_BLKCIPHER
+ select CRYPTO_CBC
help
Use VIA PadLock for AES algorithm.

@@ -55,6 +56,7 @@ config CRYPTO_DEV_GEODE
depends on X86_32 && PCI
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
+ select CRYPTO_CBC
help
Say 'Y' here to use the AMD Geode LX processor on-board AES
engine for the CryptoAPI AES algorithm.
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 6a86958..7c48c18 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -13,6 +13,7 @@
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
+#include <crypto/cbc.h>

#include <asm/io.h>
#include <asm/delay.h>
@@ -294,6 +295,7 @@ static struct crypto_alg geode_cbc_alg = {
.setkey = geode_setkey,
.encrypt = geode_cbc_encrypt,
.decrypt = geode_cbc_decrypt,
+ .geniv = crypto_cbc_geniv,
.ivsize = AES_IV_LENGTH,
}
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index d4501dc..af41ec3 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -44,6 +44,7 @@
*/

#include <crypto/algapi.h>
+#include <crypto/cbc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -604,6 +605,7 @@ static struct crypto_alg cbc_aes_alg = {
.setkey = aes_set_key,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
+ .geniv = crypto_cbc_geniv,
}
}
};
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
new file mode 100644
index 0000000..6fec310
--- /dev/null
+++ b/include/crypto/cbc.h
@@ -0,0 +1,22 @@
+/*
+ * CBC: Cipher Block Chaining mode
+ *
+ * Copyright (c) 2007 Herbert Xu <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CBC_H
+#define _CRYPTO_CBC_H
+
+#include <linux/types.h>
+
+struct crypto_blkcipher;
+
+void crypto_cbc_geniv(struct crypto_blkcipher *tfm, u8 *iv, u64 seq);
+
+#endif /* _CRYPTO_CBC_H */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fc32694..8f9dd29 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -176,6 +176,7 @@ struct ablkcipher_alg {
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
+ void (*geniv)(struct crypto_ablkcipher *tfm, u8 *iv, u64 seq);

unsigned int min_keysize;
unsigned int max_keysize;
@@ -187,6 +188,7 @@ struct aead_alg {
unsigned int keylen);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
+ void (*geniv)(struct crypto_aead *tfm, u8 *iv, u64 seq);

unsigned int ivsize;
unsigned int authsize;
@@ -201,6 +203,7 @@ struct blkcipher_alg {
int (*decrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
+ void (*geniv)(struct crypto_blkcipher *tfm, u8 *iv, u64 seq);

unsigned int min_keysize;
unsigned int max_keysize;
@@ -317,6 +320,7 @@ struct ablkcipher_tfm {
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
+ void (*geniv)(struct crypto_ablkcipher *tfm, u8 *iv, u64 seq);
unsigned int ivsize;
unsigned int reqsize;
};
@@ -326,6 +330,7 @@ struct aead_tfm {
unsigned int keylen);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
+ void (*geniv)(struct crypto_aead *tfm, u8 *iv, u64 seq);
unsigned int ivsize;
unsigned int authsize;
unsigned int reqsize;
@@ -339,6 +344,7 @@ struct blkcipher_tfm {
struct scatterlist *src, unsigned int nbytes);
int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
+ void (*geniv)(struct crypto_blkcipher *tfm, u8 *iv, u64 seq);
};

struct cipher_tfm {
@@ -624,6 +630,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
return crt->decrypt(req);
}

+static inline void crypto_ablkcipher_geniv(struct crypto_ablkcipher *tfm,
+ u8 *iv, u64 seq)
+{
+ struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
+ crt->geniv(tfm, iv, seq);
+}
+
static inline unsigned int crypto_ablkcipher_reqsize(
struct crypto_ablkcipher *tfm)
{
@@ -767,6 +780,12 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}

+static inline void crypto_aead_geniv(struct crypto_aead *tfm, u8 *iv, u64 seq)
+{
+ struct aead_tfm *crt = crypto_aead_crt(tfm);
+ crt->geniv(tfm, iv, seq);
+}
+
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
return crypto_aead_crt(tfm)->reqsize;
@@ -960,6 +979,13 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

+static inline void crypto_blkcipher_geniv(struct crypto_blkcipher *tfm,
+ u8 *iv, u64 seq)
+{
+ struct blkcipher_tfm *crt = crypto_blkcipher_crt(tfm);
+ crt->geniv(tfm, iv, seq);
+}
+
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
const u8 *src, unsigned int len)
{


2007-09-29 13:36:51

by Herbert Xu

[permalink] [raw]
Subject: Re: [CRYPTO] blkcipher: Add IV generation

Hi:

[CRYPTO] blkcipher: Remove alignment restriction on block size

Previously we assumed for convenience that the block size is a multiple of
the algorithm's required alignment. With the pending addition of CTR this
will no longer be the case as the block size will be 1 due to it being a
stream cipher. However, the alignment requirement will be that of the
underlying implementation which will most likely be greater than 1.

Signed-off-by: Herbert Xu <[email protected]>

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d891f56..58cc191 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;

- if (alg->cra_alignmask & alg->cra_blocksize)
- return -EINVAL;
-
if (alg->cra_blocksize > PAGE_SIZE / 8)
return -EINVAL;

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 1f8e9e5..ea9e240 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -149,6 +149,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
unsigned int alignmask)
{
unsigned int n;
+ unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

if (walk->buffer)
goto ok;
@@ -167,8 +168,8 @@ ok:
walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
alignmask + 1);
walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
- walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
- bsize);
+ walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
+ aligned_bsize, bsize);

scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

@@ -278,7 +279,9 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
{
unsigned bs = crypto_blkcipher_blocksize(tfm);
unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
- unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
+ unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+ unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+ (alignmask + 1);
u8 *iv;

size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
@@ -287,8 +290,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
return -ENOMEM;

iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
- iv = blkcipher_get_spot(iv, bs) + bs;
- iv = blkcipher_get_spot(iv, bs) + bs;
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = blkcipher_get_spot(iv, bs) + aligned_bs;
iv = blkcipher_get_spot(iv, ivsize);

walk->iv = memcpy(iv, walk->iv, ivsize);

2007-10-02 06:00:22

by Joy Latten

[permalink] [raw]
Subject: Re: [CRYPTO] blkcipher: Add IV generation

{
@@ -434,8 +450,16 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
crt->setkey = async_setkey;
crt->encrypt = async_encrypt;
crt->decrypt = async_decrypt;
+ crt->geniv = async_geniv;
crt->ivsize = alg->ivsize;

+ if (!alg->geniv) {
+ if (alg->ivsize)
+ return -EINVAL;
+
+ crt->geniv = async_nogeniv;
+ }
+
return 0;
}

@@ -449,6 +473,14 @@ static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
+ crt->geniv = alg->geniv;
+
+ if (!alg->geniv) {
+ if (alg->ivsize)
+ return -EINVAL;
+
+ crt->geniv = nogeniv;
+ }

So, I am thinking that CTR and some of the other modes
(other than CBC) that use an IV will fail with this
change, since they currently don't have an alg->geniv.
Should they have a geniv similar to CBC's, which
generates the IV randomly?

Regards,
Joy

2007-10-02 06:18:00

by Herbert Xu

[permalink] [raw]
Subject: Re: [CRYPTO] blkcipher: Add IV generation

Joy Latten <[email protected]> wrote:
>
> So, I am thinking CTR and some of the other modes,
> other than CBC, that use an IV will fail with this
> change since they currently don't have an alg->geniv...
> should they have a geniv similar to that of CBC, which
> is gotten randomly?

The only other mode in the tree currently is ECB, which has
no IV at all. Yes, CTR should implement its own geniv that
simply uses the supplied sequence number (zero-extended or
truncated if necessary).

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2007-10-04 07:29:00

by Herbert Xu

[permalink] [raw]
Subject: Re: [CRYPTO] blkcipher: Add IV generation

On Sat, Sep 29, 2007 at 09:36:48PM +0800, Herbert Xu wrote:
>
> [CRYPTO] blkcipher: Remove alignment restriction on block size
>
> Previously we assumed for convenience that the block size is a multiple of
> the algorithm's required alignment. With the pending addition of CTR this
> will no longer be the case as the block size will be 1 due to it being a
> stream cipher. However, the alignment requirement will be that of the
> underlying implementation which will most likely be greater than 1.
>
> Signed-off-by: Herbert Xu <[email protected]>

That patch missed one spot. Here's a fix on top of it.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
319f8a1dff903a7f8b9853bd229abbc13e7fad71
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index ea9e240..1b2a14a 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -158,7 +158,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
if (walk->buffer)
goto ok;

- n = bsize * 3 - (alignmask + 1) +
+ n = aligned_bsize * 3 - (alignmask + 1) +
(alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(n, GFP_ATOMIC);
if (!walk->buffer)