This patchset adds support for IPsec extended (64-bit) sequence numbers
for ESP as defined in RFC 4303. It also adds support for anti-replay
windows bigger than 32 packets. To make use of big anti-replay windows
and extended sequence numbers, new userspace tools are needed; an
example patch for iproute2 is provided with this patchset. The code has
not had much testing yet, in particular I don't have any other
implementation of IPsec extended sequence numbers to test against. So
this is not yet ready for inclusion; I just want to get some review of
the design before I spend more time working on it.
The patchset is also available at branch net-next-esn of
git://git.kernel.org/pub/scm/linux/kernel/git/klassert/linux-2.6-stk.git
Steffen
A lot of crypto algorithms implement their own scatterlist chaining
function. So add a generic one that can be used by all algorithms that
need scatterlist chaining.
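As an illustration (not part of the patch), a caller that wants to hash
an IV buffer followed by a payload scatterlist could use the helper like
this, mirroring the users converted in the next patch (iv, ivsize and
payload are assumed to be set up by the caller):

	struct scatterlist cipher[2];
	struct page *p = sg_page(payload);
	/* Virtual address of the first payload byte, NULL if highmem. */
	u8 *vpayload = PageHighMem(p) ? NULL : page_address(p) + payload->offset;

	sg_init_table(cipher, 2);
	sg_set_buf(cipher, iv, ivsize);
	/* Merge the payload into the head entry if the IV is contiguous
	 * with it, otherwise chain the two lists; the head table has
	 * num = 2 entries.
	 */
	scatterwalk_crypto_chain(cipher, payload, vpayload == iv + ivsize, 2);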
Signed-off-by: Steffen Klassert <[email protected]>
---
include/crypto/scatterwalk.h | 15 +++++++++++++++
1 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 833d208..4fd95a3 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -68,6 +68,21 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
return (++sg)->length ? sg : (void *)sg_page(sg);
}
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+ struct scatterlist *sg,
+ int chain, int num)
+{
+ if (chain) {
+ head->length += sg->length;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ if (sg)
+ scatterwalk_sg_chain(head, num, sg);
+ else
+ sg_mark_end(head);
+}
+
static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
struct scatter_walk *walk_out)
{
--
1.7.0.4
Use scatterwalk_crypto_chain in place of the locally defined chaining functions.
Signed-off-by: Steffen Klassert <[email protected]>
---
crypto/authenc.c | 22 ++++------------------
crypto/eseqiv.c | 18 ++----------------
crypto/gcm.c | 19 ++-----------------
3 files changed, 8 insertions(+), 51 deletions(-)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a5a22cf..5ef7ba6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -107,20 +107,6 @@ badkey:
goto out;
}
-static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
- int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
int err)
{
@@ -345,7 +331,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, dst, vdst == iv + ivsize);
+ scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
dst = cipher;
cryptlen += ivsize;
}
@@ -354,7 +340,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
- authenc_chain(asg, dst, 0);
+ scatterwalk_crypto_chain(asg, dst, 0, 2);
dst = asg;
cryptlen += req->assoclen;
}
@@ -499,7 +485,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, src, vsrc == iv + ivsize);
+ scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
src = cipher;
cryptlen += ivsize;
}
@@ -508,7 +494,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
- authenc_chain(asg, src, 0);
+ scatterwalk_crypto_chain(asg, src, 0, 2);
src = asg;
cryptlen += req->assoclen;
}
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 3ca3b66..42ce9f5 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -62,20 +62,6 @@ out:
skcipher_givcrypt_complete(req, err);
}
-static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
- int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -124,13 +110,13 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
sg_init_table(reqctx->src, 2);
sg_set_buf(reqctx->src, giv, ivsize);
- eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
+ scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
dst = reqctx->src;
if (osrc != odst) {
sg_init_table(reqctx->dst, 2);
sg_set_buf(reqctx->dst, giv, ivsize);
- eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
+ scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
dst = reqctx->dst;
}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2f5fbba..1a25263 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1102,21 +1102,6 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
return crypto_aead_setauthsize(ctx->child, authsize);
}
-/* this is the same as crypto_authenc_chain */
-static void crypto_rfc4543_chain(struct scatterlist *head,
- struct scatterlist *sg, int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
int enc)
{
@@ -1154,13 +1139,13 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
sg_init_table(payload, 2);
sg_set_buf(payload, req->iv, 8);
- crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+ scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
sg_init_table(assoc, 2);
sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
req->assoc->offset);
- crypto_rfc4543_chain(assoc, payload, 0);
+ scatterwalk_crypto_chain(assoc, payload, 0, 2);
aead_request_set_tfm(subreq, ctx->child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
--
1.7.0.4
ESP with separate encryption/authentication algorithms needs special
treatment for the associated data. This patch adds a new algorithm that
handles ESP with extended sequence numbers.
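With ESN, the ICV must cover the 64-bit sequence number while only the
low-order 32 bits travel in the packet. A rough sketch of the digest
order the new algorithm implements (scatterlist names as in the patch
below):

	/*
	 * Digest input, fed in three steps by crypto_authenc_esn_ahash():
	 *
	 *   update(asg)        -> SPI || seq_no (low-order 32 bits)
	 *   update(sg)         -> IV || ciphertext
	 *   finup(assoctrail)  -> seq_hi (high-order 32 bits)
	 */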
Signed-off-by: Steffen Klassert <[email protected]>
---
crypto/Makefile | 2 +-
crypto/authencesn.c | 821 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 822 insertions(+), 1 deletions(-)
create mode 100644 crypto/authencesn.c
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de..85e50d6 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
-obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
+obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
new file mode 100644
index 0000000..71e3a2d
--- /dev/null
+++ b/crypto/authencesn.c
@@ -0,0 +1,821 @@
+/*
+ * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
+ * derived from authenc.c
+ *
+ * Copyright (C) 2010 secunet Security Networks AG
+ * Copyright (C) 2010 Steffen Klassert <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct authenc_esn_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+};
+
+struct crypto_authenc_esn_ctx {
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+};
+
+struct authenc_esn_request_ctx {
+ unsigned int cryptlen;
+ unsigned int headlen;
+ unsigned int trailen;
+ struct scatterlist *sg;
+ struct scatterlist asg[2];
+ struct scatterlist assoctrail[1];
+ struct scatterlist cipher[2];
+ crypto_completion_t complete;
+ crypto_completion_t update_complete;
+ crypto_completion_t update_complete2;
+ char tail[];
+};
+
+static void authenc_esn_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ int err = -EINVAL;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
+ CRYPTO_TFM_RES_MASK);
+
+ if (err)
+ goto out;
+
+ crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
+ CRYPTO_TFM_RES_MASK);
+
+out:
+ return err;
+
+badkey:
+ crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
+static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->assoctrail, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->assoctrail, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+
+ ahash_request_set_callback(ahreq,
+ aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->assoctrail, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->assoctrail, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
+ unsigned int flags)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
+
+ err = crypto_ahash_init(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->asg, hash, areq_ctx->headlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->assoctrail, hash,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ return hash;
+}
+
+static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
+ unsigned int flags)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
+ struct scatterlist *assoctrail = areq_ctx->assoctrail;
+ struct scatterlist *tsg;
+ unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+ unsigned int cryptlen = req->cryptlen;
+ struct page *dstp;
+ u8 *vdst;
+ u8 *hash;
+
+ dstp = sg_page(dst);
+ vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
+ dst = cipher;
+ cryptlen += ivsize;
+ }
+
+ BUG_ON(sg_is_last(assoc));
+
+ sg_init_table(asg, 1);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+
+ tsg = assoc + 1;
+
+ BUG_ON((assoc->length + tsg->length != req->assoclen));
+
+ sg_init_table(assoctrail, 1);
+ sg_set_page(assoctrail, sg_page(tsg), tsg->length, tsg->offset);
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->headlen = assoc->length;
+ areq_ctx->trailen = tsg->length;
+ areq_ctx->sg = dst;
+
+ areq_ctx->complete = authenc_esn_geniv_ahash_done;
+ areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
+ areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
+
+ hash = crypto_authenc_esn_ahash(req, flags);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ scatterwalk_map_and_copy(hash, dst, cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+ return 0;
+}
+
+
+static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
+ int err)
+{
+ struct aead_request *areq = req->data;
+
+ if (!err) {
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct ablkcipher_request *abreq = aead_request_ctx(areq);
+ u8 *iv = (u8 *)(abreq + 1) +
+ crypto_ablkcipher_reqsize(ctx->enc);
+
+ err = crypto_authenc_esn_genicv(areq, iv, 0);
+ }
+
+ authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct scatterlist *dst = req->dst;
+ unsigned int cryptlen = req->cryptlen;
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+ int err;
+
+ ablkcipher_request_set_tfm(abreq, enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ crypto_authenc_esn_encrypt_done, req);
+ ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
+
+ memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+
+ err = crypto_ablkcipher_encrypt(abreq);
+ if (err)
+ return err;
+
+ return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
+ int err)
+{
+ struct aead_request *areq = req->data;
+
+ if (!err) {
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+
+ err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
+ }
+
+ authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
+{
+ struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct aead_request *areq = &req->areq;
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+ u8 *iv = req->giv;
+ int err;
+
+ skcipher_givcrypt_set_tfm(greq, ctx->enc);
+ skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
+ crypto_authenc_esn_givencrypt_done, areq);
+ skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
+ areq->iv);
+ skcipher_givcrypt_set_giv(greq, iv, req->seq);
+
+ err = crypto_skcipher_givencrypt(greq);
+ if (err)
+ return err;
+
+ return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static int crypto_authenc_esn_verify(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ u8 *ohash;
+ u8 *ihash;
+ unsigned int authsize;
+
+ areq_ctx->complete = authenc_esn_verify_ahash_done;
+ areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+
+ ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (IS_ERR(ohash))
+ return PTR_ERR(ohash);
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ ihash = ohash + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+ return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+}
+
+static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
+ unsigned int cryptlen)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct scatterlist *src = req->src;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
+ struct scatterlist *assoctrail = areq_ctx->assoctrail;
+ struct scatterlist *tsg;
+ unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+ struct page *srcp;
+ u8 *vsrc;
+
+ srcp = sg_page(src);
+ vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
+
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
+ src = cipher;
+ cryptlen += ivsize;
+ }
+
+ BUG_ON(sg_is_last(assoc));
+
+ sg_init_table(asg, 1);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+
+ tsg = assoc + 1;
+
+ BUG_ON((assoc->length + tsg->length != req->assoclen));
+
+ sg_init_table(assoctrail, 1);
+ sg_set_page(assoctrail, sg_page(tsg), tsg->length, tsg->offset);
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->headlen = assoc->length;
+ areq_ctx->trailen = tsg->length;
+ areq_ctx->sg = src;
+
+ areq_ctx->complete = authenc_esn_verify_ahash_done;
+ areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+ areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
+
+ return crypto_authenc_esn_verify(req);
+}
+
+static int crypto_authenc_esn_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct ablkcipher_request *abreq = aead_request_ctx(req);
+ unsigned int cryptlen = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(authenc_esn);
+ u8 *iv = req->iv;
+ int err;
+
+ if (cryptlen < authsize)
+ return -EINVAL;
+ cryptlen -= authsize;
+
+ err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+ if (err)
+ return err;
+
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
+
+ return crypto_ablkcipher_decrypt(abreq);
+}
+
+static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1) +
+ crypto_ablkcipher_ivsize(enc);
+
+ tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
+
+static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_ablkcipher(ctx->enc);
+}
+
+static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_instance *inst;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
+ struct crypto_alg *enc;
+ struct authenc_esn_instance_ctx *ctx;
+ const char *enc_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ return ERR_PTR(err);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(auth))
+ return ERR_CAST(auth);
+
+ auth_base = &auth->base;
+
+ enc_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(enc_name);
+ if (IS_ERR(enc_name))
+ goto out_put_auth;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!inst)
+ goto out_put_auth;
+
+ ctx = crypto_instance_ctx(inst);
+
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+ if (err)
+ goto err_free_inst;
+
+ crypto_set_skcipher_spawn(&ctx->enc, inst);
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err)
+ goto err_drop_auth;
+
+ enc = crypto_skcipher_spawn_alg(&ctx->enc);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)", auth_base->cra_driver_name,
+ enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+ inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.cra_priority = enc->cra_priority *
+ 10 + auth_base->cra_priority;
+ inst->alg.cra_blocksize = enc->cra_blocksize;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_type = &crypto_aead_type;
+
+ inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
+
+ inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+
+ inst->alg.cra_init = crypto_authenc_esn_init_tfm;
+ inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+
+ inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
+ inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
+ inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
+ inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+
+out:
+ crypto_mod_put(auth_base);
+ return inst;
+
+err_drop_enc:
+ crypto_drop_skcipher(&ctx->enc);
+err_drop_auth:
+ crypto_drop_ahash(&ctx->auth);
+err_free_inst:
+ kfree(inst);
+out_put_auth:
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static void crypto_authenc_esn_free(struct crypto_instance *inst)
+{
+ struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+static struct crypto_template crypto_authenc_esn_tmpl = {
+ .name = "authencesn",
+ .alloc = crypto_authenc_esn_alloc,
+ .free = crypto_authenc_esn_free,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_authenc_esn_module_init(void)
+{
+ return crypto_register_template(&crypto_authenc_esn_tmpl);
+}
+
+static void __exit crypto_authenc_esn_module_exit(void)
+{
+ crypto_unregister_template(&crypto_authenc_esn_tmpl);
+}
+
+module_init(crypto_authenc_esn_module_init);
+module_exit(crypto_authenc_esn_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <[email protected]>");
+MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
--
1.7.0.4
This patch adds struct xfrm_replay_state_esn, which will be used to
support IPsec extended sequence numbers and anti-replay windows bigger
than 32 packets. It also adds a function that returns the actual size of
a struct xfrm_replay_state_esn, an xfrm netlink attribute and an xfrm
state flag for the use of extended sequence numbers.
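As a sketch of how a user of the new helper would size the structure
(hypothetical caller, assuming a 128-bit anti-replay window):

	struct xfrm_replay_state_esn *replay_esn;
	unsigned int bmp_len = 128 / (sizeof(__u32) * 8); /* 4 words */

	replay_esn = kzalloc(sizeof(*replay_esn) + bmp_len * sizeof(__u32),
			     GFP_KERNEL);
	if (!replay_esn)
		return -ENOMEM;

	replay_esn->bmp_len = bmp_len;
	replay_esn->replay_window = 128;

	/* xfrm_replay_state_esn_len(replay_esn) now returns the full
	 * size, suitable as the XFRMA_REPLAY_ESN_VAL attribute length.
	 */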
Signed-off-by: Steffen Klassert <[email protected]>
---
include/linux/xfrm.h | 12 ++++++++++++
include/net/xfrm.h | 7 +++++++
2 files changed, 19 insertions(+), 0 deletions(-)
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index b971e38..9eeefb1 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -84,6 +84,16 @@ struct xfrm_replay_state {
__u32 bitmap;
};
+struct xfrm_replay_state_esn {
+ unsigned int bmp_len;
+ __u32 oseq;
+ __u32 seq;
+ __u32 oseq_hi;
+ __u32 seq_hi;
+ __u32 replay_window;
+ __u32 bmp[0];
+};
+
struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
@@ -283,6 +293,7 @@ enum xfrm_attr_type_t {
XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -349,6 +360,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
#define XFRM_STATE_AF_UNSPEC 32
+#define XFRM_STATE_ESN 64
};
struct xfrm_usersa_id {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 54b2832..1456254 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -184,9 +184,11 @@ struct xfrm_state {
/* State for replay detection */
struct xfrm_replay_state replay;
+ struct xfrm_replay_state_esn *replay_esn;
/* Replay detection state at the time we sent the last notification */
struct xfrm_replay_state preplay;
+ struct xfrm_replay_state_esn *preplay_esn;
/* internal flag that only holds state for delayed aevent at the
* moment
@@ -1553,6 +1555,11 @@ static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
+static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
+{
+ return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
+}
+
#ifdef CONFIG_XFRM_MIGRATE
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
--
1.7.0.4
To support IPsec extended sequence numbers, we split the output
sequence numbers of xfrm_skb_cb into low-order and high-order 32 bits
and we add the high-order 32 bits to the input sequence numbers.
All users are updated accordingly.
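For illustration (not part of the patch), the full 64-bit ESN output
sequence number is recovered from the two halves like this:

	/* Hypothetical helper; seq.output.low/hi as introduced below. */
	static inline u64 xfrm_output_seq64(const struct xfrm_skb_cb *cb)
	{
		return ((u64)cb->seq.output.hi << 32) | cb->seq.output.low;
	}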
Signed-off-by: Steffen Klassert <[email protected]>
---
include/net/xfrm.h | 10 ++++++++--
net/ipv4/ah4.c | 2 +-
net/ipv4/esp4.c | 4 ++--
net/ipv6/ah6.c | 2 +-
net/ipv6/esp6.c | 4 ++--
net/xfrm/xfrm_input.c | 4 ++--
net/xfrm/xfrm_output.c | 2 +-
7 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1456254..7f196e5 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -571,8 +571,14 @@ struct xfrm_skb_cb {
/* Sequence number for replay protection. */
union {
- u64 output;
- __be32 input;
+ struct {
+ __u32 low;
+ __u32 hi;
+ } output;
+ struct {
+ __be32 low;
+ __be32 hi;
+ } input;
} seq;
};
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec..461ccac 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -205,7 +205,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->reserved = 0;
ah->spi = x->id.spi;
- ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+ ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, 0, skb->len);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1..f986aee 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -199,7 +199,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
}
esph->spi = x->id.spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
@@ -211,7 +211,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
aead_givcrypt_set_giv(req, esph->enc_data,
- XFRM_SKB_CB(skb)->seq.output);
+ XFRM_SKB_CB(skb)->seq.output.low);
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4e..8fccce7 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -409,7 +409,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->reserved = 0;
ah->spi = x->id.spi;
- ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+ ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, 0, skb->len);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ee9b93b..672cb69 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -188,7 +188,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
*skb_mac_header(skb) = IPPROTO_ESP;
esph->spi = x->id.spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
@@ -200,7 +200,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
aead_givcrypt_set_giv(req, esph->enc_data,
- XFRM_SKB_CB(skb)->seq.output);
+ XFRM_SKB_CB(skb)->seq.output.low);
ESP_SKB_CB(skb)->tmp = tmp;
err = crypto_aead_givencrypt(req);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 45f1c98..b173b7f 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -118,7 +118,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
if (encap_type < 0) {
async = 1;
x = xfrm_input_state(skb);
- seq = XFRM_SKB_CB(skb)->seq.input;
+ seq = XFRM_SKB_CB(skb)->seq.input.low;
goto resume;
}
@@ -184,7 +184,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
spin_unlock(&x->lock);
- XFRM_SKB_CB(skb)->seq.input = seq;
+ XFRM_SKB_CB(skb)->seq.input.low = seq;
nexthdr = x->type->input(x, skb);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 64f2ae1..4b63776 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -68,7 +68,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
}
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
- XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
+ XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
if (unlikely(x->replay.oseq == 0)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
x->replay.oseq--;
--
1.7.0.4
This patch adds IPsec extended sequence number support to esp4.
We use the authencesn crypto algorithm to handle ESP with separate
encryption/authentication algorithms.
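With ESN enabled, the associated data grows a second scatterlist entry;
the hunks below set up:

	/* With XFRM_STATE_ESN set:
	 *
	 *   asg[0] -> esph   (SPI + low-order 32 sequence bits, on the wire)
	 *   asg[1] -> seqhi  (high-order 32 sequence bits, not transmitted)
	 *
	 * and assoclen = sizeof(*esph) + sizeof(__be32).
	 */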
Signed-off-by: Steffen Klassert <[email protected]>
---
net/ipv4/esp4.c | 98 ++++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 80 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f986aee..1a4ac47 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -31,11 +31,14 @@ struct esp_skb_cb {
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
unsigned int len;
- len = crypto_aead_ivsize(aead);
+ len = seqhilen;
+
+ len += crypto_aead_ivsize(aead);
+
if (len) {
len += crypto_aead_alignmask(aead) &
~(crypto_tfm_ctx_alignment() - 1);
@@ -50,10 +53,15 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
return kmalloc(len, GFP_ATOMIC);
}
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+ return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+ PTR_ALIGN((u8 *)tmp + seqhilen,
+ crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
@@ -118,6 +126,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int clen;
int alen;
int nfrags;
+ int assoclen;
+ int sglists;
+ int seqhilen;
+ __be32 *seqhi;
/* skb is pure payload to encrypt */
@@ -139,14 +151,25 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
nfrags = err;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+ assoclen = sizeof(*esph);
+ sglists = 1;
+ seqhilen = 0;
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists++;
+ seqhilen += sizeof(__be32);
+ assoclen += seqhilen;
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
if (!tmp)
goto error;
- iv = esp_tmp_iv(aead, tmp);
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_givreq(aead, iv);
asg = esp_givreq_sg(aead, req);
- sg = asg + 1;
+ sg = asg + sglists;
/* Fill padding... */
tail = skb_tail_pointer(trailer);
@@ -205,11 +228,18 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
skb_to_sgvec(skb, sg,
esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
clen + alen);
- sg_init_one(asg, esph, sizeof(*esph));
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ sg_init_table(asg, 2);
+ sg_set_buf(asg, esph, sizeof(*esph));
+ *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ sg_set_buf(asg + 1, seqhi, seqhilen);
+ } else
+ sg_init_one(asg, esph, sizeof(*esph));
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
- aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+ aead_givcrypt_set_assoc(req, asg, assoclen);
aead_givcrypt_set_giv(req, esph->enc_data,
XFRM_SKB_CB(skb)->seq.output.low);
@@ -330,6 +360,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
int nfrags;
+ int assoclen;
+ int sglists;
+ int seqhilen;
+ __be32 *seqhi;
void *tmp;
u8 *iv;
struct scatterlist *sg;
@@ -346,16 +380,27 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
nfrags = err;
+ assoclen = sizeof(*esph);
+ sglists = 1;
+ seqhilen = 0;
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists++;
+ seqhilen += sizeof(__be32);
+ assoclen += seqhilen;
+ }
+
err = -ENOMEM;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
if (!tmp)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
- iv = esp_tmp_iv(aead, tmp);
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
- sg = asg + 1;
+ sg = asg + sglists;
skb->ip_summed = CHECKSUM_NONE;
@@ -366,11 +411,18 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
- sg_init_one(asg, esph, sizeof(*esph));
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ sg_init_table(asg, 2);
+ sg_set_buf(asg, esph, sizeof(*esph));
+ *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+ sg_set_buf(asg + 1, seqhi, seqhilen);
+ } else
+ sg_init_one(asg, esph, sizeof(*esph));
aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen, iv);
- aead_request_set_assoc(req, asg, sizeof(*esph));
+ aead_request_set_assoc(req, asg, assoclen);
err = crypto_aead_decrypt(req);
if (err == -EINPROGRESS)
@@ -484,10 +536,20 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
err = -ENAMETOOLONG;
- if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
- x->aalg ? x->aalg->alg_name : "digest_null",
- x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
- goto error;
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ } else {
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authenc(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ }
aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
--
1.7.0.4
This patch adds IPsec extended sequence number support to esp6.
We use the authencesn crypto algorithm to handle ESP with separate
encryption/authentication algorithms.
Signed-off-by: Steffen Klassert <[email protected]>
---
net/ipv6/esp6.c | 103 ++++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 84 insertions(+), 19 deletions(-)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 672cb69..e018dfa 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -52,16 +52,20 @@ struct esp_skb_cb {
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
- * For alignment considerations the IV is placed at the front, followed
- * by the request and finally the SG list.
+ * For alignment considerations the upper 32 bits of the sequence number are
+ * placed at the front, if present. Followed by the IV, the request and finally
+ * the SG list.
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
unsigned int len;
- len = crypto_aead_ivsize(aead);
+ len = seqhilen;
+
+ len += crypto_aead_ivsize(aead);
+
if (len) {
len += crypto_aead_alignmask(aead) &
~(crypto_tfm_ctx_alignment() - 1);
@@ -76,10 +80,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
return kmalloc(len, GFP_ATOMIC);
}
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+static inline __be32 *esp_tmp_seqhi(void *tmp)
+{
+ return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+ PTR_ALIGN((u8 *)tmp + seqhilen,
+ crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
@@ -141,8 +151,12 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
int clen;
int alen;
int nfrags;
+ int assoclen;
+ int sglists;
+ int seqhilen;
u8 *iv;
u8 *tail;
+ __be32 *seqhi;
struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */
@@ -163,14 +177,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
nfrags = err;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+ assoclen = sizeof(*esph);
+ sglists = 1;
+ seqhilen = 0;
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists++;
+ seqhilen += sizeof(__be32);
+ assoclen += seqhilen;
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
if (!tmp)
goto error;
- iv = esp_tmp_iv(aead, tmp);
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_givreq(aead, iv);
asg = esp_givreq_sg(aead, req);
- sg = asg + 1;
+ sg = asg + sglists;
/* Fill padding... */
tail = skb_tail_pointer(trailer);
@@ -194,11 +219,18 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
skb_to_sgvec(skb, sg,
esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
clen + alen);
- sg_init_one(asg, esph, sizeof(*esph));
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ sg_init_table(asg, 2);
+ sg_set_buf(asg, esph, sizeof(*esph));
+ *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ sg_set_buf(asg + 1, seqhi, seqhilen);
+ } else
+ sg_init_one(asg, esph, sizeof(*esph));
aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
- aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+ aead_givcrypt_set_assoc(req, asg, assoclen);
aead_givcrypt_set_giv(req, esph->enc_data,
XFRM_SKB_CB(skb)->seq.output.low);
@@ -276,8 +308,12 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
int nfrags;
+ int assoclen;
+ int sglists;
+ int seqhilen;
int ret = 0;
void *tmp;
+ __be32 *seqhi;
u8 *iv;
struct scatterlist *sg;
struct scatterlist *asg;
@@ -298,12 +334,24 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
}
ret = -ENOMEM;
- tmp = esp_alloc_tmp(aead, nfrags + 1);
+
+ assoclen = sizeof(*esph);
+ sglists = 1;
+ seqhilen = 0;
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists++;
+ seqhilen += sizeof(__be32);
+ assoclen += seqhilen;
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
if (!tmp)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
- iv = esp_tmp_iv(aead, tmp);
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
sg = asg + 1;
@@ -317,11 +365,18 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
- sg_init_one(asg, esph, sizeof(*esph));
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ sg_init_table(asg, 2);
+ sg_set_buf(asg, esph, sizeof(*esph));
+ *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+ sg_set_buf(asg + 1, seqhi, seqhilen);
+ } else
+ sg_init_one(asg, esph, sizeof(*esph));
aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen, iv);
- aead_request_set_assoc(req, asg, sizeof(*esph));
+ aead_request_set_assoc(req, asg, assoclen);
ret = crypto_aead_decrypt(req);
if (ret == -EINPROGRESS)
@@ -427,10 +482,20 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error;
err = -ENAMETOOLONG;
- if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
- x->aalg ? x->aalg->alg_name : "digest_null",
- x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
- goto error;
+
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ } else {
+ if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
+ "authenc(%s,%s)",
+ x->aalg ? x->aalg->alg_name : "digest_null",
+ x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+ goto error;
+ }
aead = crypto_alloc_aead(authenc_name, 0, 0);
err = PTR_ERR(aead);
--
1.7.0.4
To support multiple versions of replay detection, we move the replay
detection functions to a separate file and make them accessible
via function pointers contained in struct xfrm_replay.
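A subsequent ESN implementation then only has to provide its own set of
operations and select them in xfrm_init_replay(), e.g. (hypothetical
sketch; these functions do not exist yet):

	static struct xfrm_replay xfrm_replay_esn = {
		.advance  = xfrm_replay_advance_esn,
		.check    = xfrm_replay_check_esn,
		.notify   = xfrm_replay_notify_esn,
		.overflow = xfrm_replay_overflow_esn,
	};

	int xfrm_init_replay(struct xfrm_state *x)
	{
		if (x->props.flags & XFRM_STATE_ESN)
			x->repl = &xfrm_replay_esn;
		else
			x->repl = &xfrm_replay_legacy;

		return 0;
	}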
Signed-off-by: Steffen Klassert <[email protected]>
---
include/net/xfrm.h | 24 ++++++-
net/xfrm/Makefile | 2 +-
net/xfrm/xfrm_input.c | 5 +-
net/xfrm/xfrm_output.c | 15 +----
net/xfrm/xfrm_replay.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++
net/xfrm/xfrm_state.c | 111 +---------------------------------
net/xfrm/xfrm_user.c | 4 +-
7 files changed, 190 insertions(+), 128 deletions(-)
create mode 100644 net/xfrm/xfrm_replay.c
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7f196e5..4b1559a 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -190,6 +190,9 @@ struct xfrm_state {
struct xfrm_replay_state preplay;
struct xfrm_replay_state_esn *preplay_esn;
+ /* The functions for replay detection. */
+ struct xfrm_replay *repl;
+
/* internal flag that only holds state for delayed aevent at the
* moment
*/
@@ -259,6 +262,15 @@ struct km_event {
struct net *net;
};
+struct xfrm_replay {
+ void (*advance)(struct xfrm_state *x, __be32 net_seq);
+ int (*check)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
+ void (*notify)(struct xfrm_state *x, int event);
+ int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+};
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -682,6 +694,8 @@ extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
u32 auid, u32 ses, u32 secid);
extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
struct sk_buff *skb);
+extern void xfrm_audit_state_replay(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq);
extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
__be32 net_spi, __be32 net_seq);
@@ -714,6 +728,11 @@ static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
{
}
+static inline void xfrm_audit_state_replay(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+}
+
static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
u16 family)
{
@@ -1393,10 +1412,7 @@ extern int xfrm_state_delete(struct xfrm_state *x);
extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern int xfrm_replay_check(struct xfrm_state *x,
- struct sk_buff *skb, __be32 seq);
-extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
-extern void xfrm_replay_notify(struct xfrm_state *x, int event);
+extern int xfrm_init_replay(struct xfrm_state *x);
extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
extern int xfrm_init_state(struct xfrm_state *x);
extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index c631047..aa429ee 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
xfrm_input.o xfrm_output.o xfrm_algo.o \
- xfrm_sysctl.o
+ xfrm_sysctl.o xfrm_replay.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b173b7f..55d5f5c 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -172,7 +172,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop_unlock;
}
- if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) {
+ if (x->props.replay_window && x->repl->check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
@@ -206,8 +206,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
- if (x->props.replay_window)
- xfrm_replay_advance(x, seq);
+ x->repl->advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 4b63776..1aba03f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -67,17 +67,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
goto error;
}
- if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
- XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
- if (unlikely(x->replay.oseq == 0)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
- x->replay.oseq--;
- xfrm_audit_state_replay_overflow(x, skb);
- err = -EOVERFLOW;
- goto error;
- }
- if (xfrm_aevent_is_on(net))
- xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+ err = x->repl->overflow(x, skb);
+ if (err) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
+ goto error;
}
x->curlft.bytes += skb->len;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
new file mode 100644
index 0000000..598ca4c
--- /dev/null
+++ b/net/xfrm/xfrm_replay.c
@@ -0,0 +1,157 @@
+/*
+ * xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <net/xfrm.h>
+
+static void xfrm_replay_notify(struct xfrm_state *x, int event)
+{
+ struct km_event c;
+ /* we send notify messages in case
+ * 1. we updated one of the sequence numbers, and the seqno difference
+ * is at least x->replay_maxdiff, in this case we also update the
+ * timeout of our timer function
+ * 2. if x->replay_maxage has elapsed since last update,
+ * and there were changes
+ *
+ * The state structure must be locked!
+ */
+
+ switch (event) {
+ case XFRM_REPLAY_UPDATE:
+ if (x->replay_maxdiff &&
+ (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+ (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+ if (x->xflags & XFRM_TIME_DEFER)
+ event = XFRM_REPLAY_TIMEOUT;
+ else
+ return;
+ }
+
+ break;
+
+ case XFRM_REPLAY_TIMEOUT:
+ if (memcmp(&x->replay, &x->preplay,
+ sizeof(struct xfrm_replay_state)) == 0) {
+ x->xflags |= XFRM_TIME_DEFER;
+ return;
+ }
+
+ break;
+ }
+
+ memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
+ c.event = XFRM_MSG_NEWAE;
+ c.data.aevent = event;
+ km_state_notify(x, &c);
+
+ if (x->replay_maxage &&
+ !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+ x->xflags &= ~XFRM_TIME_DEFER;
+}
+
+static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct net *net = xs_net(x);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+ if (unlikely(x->replay.oseq == 0)) {
+ x->replay.oseq--;
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ }
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static int xfrm_replay_check(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ u32 diff;
+ u32 seq = ntohl(net_seq);
+
+ if (unlikely(seq == 0))
+ goto err;
+
+ if (likely(seq > x->replay.seq))
+ return 0;
+
+ diff = x->replay.seq - seq;
+ if (diff >= min_t(unsigned int, x->props.replay_window,
+ sizeof(x->replay.bitmap) * 8)) {
+ x->stats.replay_window++;
+ goto err;
+ }
+
+ if (x->replay.bitmap & (1U << diff)) {
+ x->stats.replay++;
+ goto err;
+ }
+ return 0;
+
+err:
+ xfrm_audit_state_replay(x, skb, net_seq);
+ return -EINVAL;
+}
+
+static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
+{
+ u32 diff;
+ u32 seq = ntohl(net_seq);
+
+ if (!x->props.replay_window)
+ return;
+
+ if (seq > x->replay.seq) {
+ diff = seq - x->replay.seq;
+ if (diff < x->props.replay_window)
+ x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
+ else
+ x->replay.bitmap = 1;
+ x->replay.seq = seq;
+ } else {
+ diff = x->replay.seq - seq;
+ x->replay.bitmap |= (1U << diff);
+ }
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
+static struct xfrm_replay xfrm_replay_legacy = {
+ .advance = xfrm_replay_advance,
+ .check = xfrm_replay_check,
+ .notify = xfrm_replay_notify,
+ .overflow = xfrm_replay_overflow,
+};
+
+int xfrm_init_replay(struct xfrm_state *x)
+{
+ x->repl = &xfrm_replay_legacy;
+
+ return 0;
+}
+EXPORT_SYMBOL(xfrm_init_replay);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index eb96ce5..a5035c0 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -42,13 +42,6 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-#ifdef CONFIG_AUDITSYSCALL
-static void xfrm_audit_state_replay(struct xfrm_state *x,
- struct sk_buff *skb, __be32 net_seq);
-#else
-#define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
-#endif /* CONFIG_AUDITSYSCALL */
-
static inline unsigned int xfrm_dst_hash(struct net *net,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
@@ -1609,54 +1602,6 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk)
}
EXPORT_SYMBOL(xfrm_state_walk_done);
-
-void xfrm_replay_notify(struct xfrm_state *x, int event)
-{
- struct km_event c;
- /* we send notify messages in case
- * 1. we updated on of the sequence numbers, and the seqno difference
- * is at least x->replay_maxdiff, in this case we also update the
- * timeout of our timer function
- * 2. if x->replay_maxage has elapsed since last update,
- * and there were changes
- *
- * The state structure must be locked!
- */
-
- switch (event) {
- case XFRM_REPLAY_UPDATE:
- if (x->replay_maxdiff &&
- (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
- (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
- if (x->xflags & XFRM_TIME_DEFER)
- event = XFRM_REPLAY_TIMEOUT;
- else
- return;
- }
-
- break;
-
- case XFRM_REPLAY_TIMEOUT:
- if ((x->replay.seq == x->preplay.seq) &&
- (x->replay.bitmap == x->preplay.bitmap) &&
- (x->replay.oseq == x->preplay.oseq)) {
- x->xflags |= XFRM_TIME_DEFER;
- return;
- }
-
- break;
- }
-
- memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
- c.event = XFRM_MSG_NEWAE;
- c.data.aevent = event;
- km_state_notify(x, &c);
-
- if (x->replay_maxage &&
- !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
- x->xflags &= ~XFRM_TIME_DEFER;
-}
-
static void xfrm_replay_timer_handler(unsigned long data)
{
struct xfrm_state *x = (struct xfrm_state*)data;
@@ -1665,7 +1610,7 @@ static void xfrm_replay_timer_handler(unsigned long data)
if (x->km.state == XFRM_STATE_VALID) {
if (xfrm_aevent_is_on(xs_net(x)))
- xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
+ x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
else
x->xflags |= XFRM_TIME_DEFER;
}
@@ -1673,57 +1618,6 @@ static void xfrm_replay_timer_handler(unsigned long data)
spin_unlock(&x->lock);
}
-int xfrm_replay_check(struct xfrm_state *x,
- struct sk_buff *skb, __be32 net_seq)
-{
- u32 diff;
- u32 seq = ntohl(net_seq);
-
- if (unlikely(seq == 0))
- goto err;
-
- if (likely(seq > x->replay.seq))
- return 0;
-
- diff = x->replay.seq - seq;
- if (diff >= min_t(unsigned int, x->props.replay_window,
- sizeof(x->replay.bitmap) * 8)) {
- x->stats.replay_window++;
- goto err;
- }
-
- if (x->replay.bitmap & (1U << diff)) {
- x->stats.replay++;
- goto err;
- }
- return 0;
-
-err:
- xfrm_audit_state_replay(x, skb, net_seq);
- return -EINVAL;
-}
-
-void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
-{
- u32 diff;
- u32 seq = ntohl(net_seq);
-
- if (seq > x->replay.seq) {
- diff = seq - x->replay.seq;
- if (diff < x->props.replay_window)
- x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
- else
- x->replay.bitmap = 1;
- x->replay.seq = seq;
- } else {
- diff = x->replay.seq - seq;
- x->replay.bitmap |= (1U << diff);
- }
-
- if (xfrm_aevent_is_on(xs_net(x)))
- xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
-}
-
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
@@ -2236,7 +2130,7 @@ void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
-static void xfrm_audit_state_replay(struct xfrm_state *x,
+void xfrm_audit_state_replay(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
struct audit_buffer *audit_buf;
@@ -2251,6 +2145,7 @@ static void xfrm_audit_state_replay(struct xfrm_state *x,
spi, spi, ntohl(net_seq));
audit_log_end(audit_buf);
}
+EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8bae6b2..77452dc 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -464,8 +464,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
x->preplay.seq = x->replay.seq+x->replay_maxdiff;
x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
- /* override default values from above */
+ if ((err = xfrm_init_replay(x)))
+ goto error;
+ /* override default values from above */
xfrm_update_ae_params(x, attrs);
return x;
--
1.7.0.4
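The include/net/xfrm.h hunk that introduces the replay function table is
not quoted in this excerpt. From the call sites (x->repl->check() and
friends) and the xfrm_replay_legacy initializer above, the ops struct
presumably looks like this sketch:

struct xfrm_replay {
	void	(*advance)(struct xfrm_state *x, __be32 net_seq);
	int	(*check)(struct xfrm_state *x,
			 struct sk_buff *skb,
			 __be32 net_seq);
	void	(*notify)(struct xfrm_state *x, int event);
	int	(*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};

Keeping the replay logic behind per-state function pointers is what lets
the following patches select the legacy, big-window or esn implementation
without touching the input/output paths again.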
As it stands, the anti-replay bitmap in struct xfrm_replay_state can
only accommodate 32 packets, even though userspace can configure
anti-replay window sizes of up to 255 packets. As a result, we reject
any packet with a sequence number that falls within the configured
window but outside the bitmap. With this patch, we represent the
anti-replay window as a bitmap of variable length, accessed via the
new struct xfrm_replay_state_esn, so there is no longer a limit on the
window size. To use the new anti-replay window implementation, new
userspace tools are required. We leave the old implementation
untouched to stay in sync with old userspace tools.
Signed-off-by: Steffen Klassert <[email protected]>
---
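For reference, the variable-length window below stores one bit per
sequence number, 32 bits per __u32 word of bmp[]. A minimal standalone
sketch of the lookup arithmetic (userspace C, hypothetical names, window
size fixed for brevity):

#include <stdio.h>

#define WINDOW 128			/* replay window in packets */

static unsigned int bmp[WINDOW / 32];	/* one bit per packet */

/* Was the packet 'diff' sequence numbers behind the newest one
 * (whose bit sits at position 'pos') already seen?
 */
static int seen(unsigned int pos, unsigned int diff)
{
	unsigned int bitnr, nr;

	if (pos >= diff)
		bitnr = (pos - diff) % WINDOW;
	else
		bitnr = WINDOW - (diff - pos);

	nr = bitnr >> 5;	/* index of the 32-bit word */
	bitnr &= 0x1F;		/* bit within that word */

	return (bmp[nr] >> bitnr) & 1;
}

int main(void)
{
	bmp[0] = 1;			/* newest packet, diff == 0 */
	printf("%d\n", seen(0, 0));	/* prints 1 */
	return 0;
}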
net/xfrm/xfrm_replay.c | 185 +++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 184 insertions(+), 1 deletions(-)
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 598ca4c..be2c9eb 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -141,6 +141,178 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
+static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ struct net *net = xs_net(x);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+ if (unlikely(replay_esn->oseq == 0)) {
+ replay_esn->oseq--;
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ }
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static int xfrm_replay_check_bmp(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ unsigned int bitnr, nr;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ u32 pos;
+ u32 seq = ntohl(net_seq);
+ u32 diff = replay_esn->seq - seq;
+
+ /* a zero-sized window would make the modulo below divide by zero */
+ if (!replay_esn->replay_window)
+ return 0;
+
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (unlikely(seq == 0))
+ goto err;
+
+ if (likely(seq > replay_esn->seq))
+ return 0;
+
+ if (diff >= replay_esn->replay_window) {
+ x->stats.replay_window++;
+ goto err;
+ }
+
+ if (pos >= diff) {
+ bitnr = (pos - diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+ } else {
+ bitnr = replay_esn->replay_window - (diff - pos);
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+ }
+ return 0;
+
+err_replay:
+ x->stats.replay++;
+err:
+ xfrm_audit_state_replay(x, skb, net_seq);
+ return -EINVAL;
+}
+
+static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
+{
+ unsigned int bitnr, nr, i;
+ u32 diff;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ u32 seq = ntohl(net_seq);
+ u32 pos;
+
+ if (!replay_esn->replay_window)
+ return;
+
+ /* safe only after the zero-window check above */
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (seq > replay_esn->seq) {
+ diff = seq - replay_esn->seq;
+
+ if (diff < replay_esn->replay_window) {
+ for (i = 1; i < diff; i++) {
+ bitnr = (pos + i) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] &= ~(1U << bitnr);
+ }
+
+ bitnr = (pos + diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ } else {
+ nr = replay_esn->replay_window >> 5;
+ for (i = 0; i <= nr; i++)
+ replay_esn->bmp[i] = 0;
+
+ bitnr = (pos + diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ }
+
+ replay_esn->seq = seq;
+ } else {
+ diff = replay_esn->seq - seq;
+
+ if (pos >= diff) {
+ bitnr = (pos - diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ } else {
+ bitnr = replay_esn->replay_window - (diff - pos);
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ }
+ }
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
+static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
+{
+ struct km_event c;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+ /* we send notify messages in case
+ * 1. we updated one of the sequence numbers, and the seqno difference
+ * is at least x->replay_maxdiff, in this case we also update the
+ * timeout of our timer function
+ * 2. if x->replay_maxage has elapsed since last update,
+ * and there were changes
+ *
+ * The state structure must be locked!
+ */
+
+ switch (event) {
+ case XFRM_REPLAY_UPDATE:
+ if (x->replay_maxdiff &&
+ (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+ (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+ if (x->xflags & XFRM_TIME_DEFER)
+ event = XFRM_REPLAY_TIMEOUT;
+ else
+ return;
+ }
+
+ break;
+
+ case XFRM_REPLAY_TIMEOUT:
+ if (memcmp(x->replay_esn, x->preplay_esn,
+ xfrm_replay_state_esn_len(replay_esn)) == 0) {
+ x->xflags |= XFRM_TIME_DEFER;
+ return;
+ }
+
+ break;
+ }
+
+ memcpy(x->preplay_esn, x->replay_esn,
+ xfrm_replay_state_esn_len(replay_esn));
+ c.event = XFRM_MSG_NEWAE;
+ c.data.aevent = event;
+ km_state_notify(x, &c);
+
+ if (x->replay_maxage &&
+ !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+ x->xflags &= ~XFRM_TIME_DEFER;
+}
+
static struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
@@ -148,9 +320,20 @@ static struct xfrm_replay xfrm_replay_legacy = {
.overflow = xfrm_replay_overflow,
};
+static struct xfrm_replay xfrm_replay_bmp = {
+ .advance = xfrm_replay_advance_bmp,
+ .check = xfrm_replay_check_bmp,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_bmp,
+};
+
int xfrm_init_replay(struct xfrm_state *x)
{
- x->repl = &xfrm_replay_legacy;
+ if (x->replay_esn)
+ x->repl = &xfrm_replay_bmp;
+ else
+ x->repl = &xfrm_replay_legacy;
+
return 0;
}
--
1.7.0.4
This patch adds support for IPsec extended sequence numbers (esn)
as defined in RFC 4303. The bits to manage the anti-replay window
are based on a patch from Alex Badea.
Signed-off-by: Steffen Klassert <[email protected]>
---
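The heart of the esn handling is the seq_hi determination in
xfrm_replay_seqhi() below, which follows the window scheme of RFC 4303
Appendix A. A standalone sketch of that logic (userspace C, hypothetical
names) for playing with the two cases:

#include <stdio.h>

/* Determine the high-order 32 bits for a received low-order sequence
 * number, given the receiver's window top and its current seq_hi.
 * Mirrors the case A/B split of RFC 4303 Appendix A.
 */
static unsigned int guess_seq_hi(unsigned int top, unsigned int seq_hi,
				 unsigned int window, unsigned int seq)
{
	unsigned int bottom = top - window + 1;

	if (top >= window - 1) {
		/* A. window lies in one seq_hi subspace */
		if (seq < bottom)
			seq_hi++;
	} else {
		/* B. window spans two subspaces */
		if (seq >= bottom)
			seq_hi--;
	}

	return seq_hi;
}

int main(void)
{
	/* top close to the 32 bit wrap, window of 64 packets: a small
	 * seq must belong to the next subspace.
	 */
	printf("%u\n", guess_seq_hi(0xfffffff0u, 7, 64, 3)); /* prints 8 */
	return 0;
}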
include/net/xfrm.h | 1 +
net/xfrm/xfrm_input.c | 4 +
net/xfrm/xfrm_replay.c | 190 +++++++++++++++++++++++++++++++++++++++++++++++-
3 files changed, 194 insertions(+), 1 deletions(-)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 4b1559a..a6ef001 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1412,6 +1412,7 @@ extern int xfrm_state_delete(struct xfrm_state *x);
extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
extern int xfrm_init_replay(struct xfrm_state *x);
extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
extern int xfrm_init_state(struct xfrm_state *x);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 55d5f5c..872065c 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -107,6 +107,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
struct net *net = dev_net(skb->dev);
int err;
__be32 seq;
+ __be32 seq_hi;
struct xfrm_state *x;
xfrm_address_t *daddr;
struct xfrm_mode *inner_mode;
@@ -184,7 +185,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
spin_unlock(&x->lock);
+ seq_hi = htonl(xfrm_replay_seqhi(x, seq));
+
XFRM_SKB_CB(skb)->seq.input.low = seq;
+ XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
nexthdr = x->type->input(x, skb);
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index be2c9eb..52433b5 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -20,6 +20,31 @@
#include <net/xfrm.h>
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
+{
+ u32 seq, seq_hi, bottom;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+
+ if (!(x->props.flags & XFRM_STATE_ESN))
+ return 0;
+
+ seq = ntohl(net_seq);
+ seq_hi = replay_esn->seq_hi;
+ bottom = replay_esn->seq - replay_esn->replay_window + 1;
+
+ if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) {
+ /* A. same subspace */
+ if (unlikely(seq < bottom))
+ seq_hi++;
+ } else {
+ /* B. window spans two subspaces */
+ if (unlikely(seq >= bottom))
+ seq_hi--;
+ }
+
+ return seq_hi;
+}
+
static void xfrm_replay_notify(struct xfrm_state *x, int event)
{
struct km_event c;
@@ -313,6 +338,160 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
x->xflags &= ~XFRM_TIME_DEFER;
}
+static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
+{
+ int err = 0;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ struct net *net = xs_net(x);
+
+ if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
+ XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+ XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi;
+
+ if (unlikely(replay_esn->oseq == 0)) {
+ XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi;
+
+ if (replay_esn->oseq_hi == 0) {
+ replay_esn->oseq--;
+ replay_esn->oseq_hi--;
+ xfrm_audit_state_replay_overflow(x, skb);
+ err = -EOVERFLOW;
+
+ return err;
+ }
+ }
+ if (xfrm_aevent_is_on(net))
+ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ return err;
+}
+
+static int xfrm_replay_check_esn(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ unsigned int bitnr, nr;
+ u32 diff;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ u32 pos;
+ u32 seq = ntohl(net_seq);
+ u32 wsize = replay_esn->replay_window;
+ u32 top = replay_esn->seq;
+ u32 bottom = top - wsize + 1;
+
+ /* a zero-sized window would make the modulo below divide by zero */
+ if (!wsize)
+ return 0;
+
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
+ (replay_esn->seq < replay_esn->replay_window - 1)))
+ goto err;
+
+ diff = top - seq;
+
+ if (likely(top >= wsize - 1)) {
+ /* A. same subspace */
+ if (likely(seq > top) || seq < bottom)
+ return 0;
+ } else {
+ /* B. window spans two subspaces */
+ if (likely(seq > top && seq < bottom))
+ return 0;
+ if (seq >= bottom)
+ diff = ~seq + top + 1;
+ }
+
+ if (diff >= replay_esn->replay_window) {
+ x->stats.replay_window++;
+ goto err;
+ }
+
+ if (pos >= diff) {
+ bitnr = (pos - diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+ } else {
+ bitnr = replay_esn->replay_window - (diff - pos);
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+ }
+ return 0;
+
+err_replay:
+ x->stats.replay++;
+err:
+ xfrm_audit_state_replay(x, skb, net_seq);
+ return -EINVAL;
+}
+
+static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+{
+ unsigned int bitnr, nr, i;
+ int wrap;
+ u32 diff, pos, seq, seq_hi;
+ struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+
+ if (!replay_esn->replay_window)
+ return;
+
+ seq = ntohl(net_seq);
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+ seq_hi = xfrm_replay_seqhi(x, net_seq);
+ wrap = seq_hi - replay_esn->seq_hi;
+
+ if ((!wrap && seq > replay_esn->seq) || wrap > 0) {
+ if (likely(!wrap))
+ diff = seq - replay_esn->seq;
+ else
+ diff = ~replay_esn->seq + seq + 1;
+
+ if (diff < replay_esn->replay_window) {
+ for (i = 1; i < diff; i++) {
+ bitnr = (pos + i) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] &= ~(1U << bitnr);
+ }
+
+ bitnr = (pos + diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ } else {
+ nr = replay_esn->replay_window >> 5;
+ for (i = 0; i <= nr; i++)
+ replay_esn->bmp[i] = 0;
+
+ bitnr = (pos + diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ }
+
+ replay_esn->seq = seq;
+
+ if (unlikely(wrap > 0))
+ replay_esn->seq_hi++;
+ } else {
+ diff = replay_esn->seq - seq;
+
+ if (pos >= diff) {
+ bitnr = (pos - diff) % replay_esn->replay_window;
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ } else {
+ bitnr = replay_esn->replay_window - (diff - pos);
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+ }
+ }
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+ xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+}
+
static struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
@@ -327,9 +506,18 @@ static struct xfrm_replay xfrm_replay_bmp = {
.overflow = xfrm_replay_overflow_bmp,
};
+static struct xfrm_replay xfrm_replay_esn = {
+ .advance = xfrm_replay_advance_esn,
+ .check = xfrm_replay_check_esn,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_esn,
+};
+
int xfrm_init_replay(struct xfrm_state *x)
{
- if (x->replay_esn)
+ if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
+ x->repl = &xfrm_replay_esn;
+ else if (x->replay_esn)
x->repl = &xfrm_replay_bmp;
else
x->repl = &xfrm_replay_legacy;
--
1.7.0.4
This patch adds a netlink based user interface to configure esn and
big anti-replay windows. The new netlink attribute XFRMA_REPLAY_ESN_VAL
is used to configure the new implementation. If the XFRM_STATE_ESN flag
is set, the configured state uses esn together with support for big
anti-replay windows. If this flag is not set, we use the new
implementation with 32 bit sequence numbers; a big anti-replay window
can still be configured in this case.
Signed-off-by: Steffen Klassert <[email protected]>
---
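The diff below sizes the variable-length state with
xfrm_replay_state_esn_len(), which is not part of the hunks quoted here.
Given the struct layout (a fixed header plus the __u32 bmp[0] flexible
array sized by replay_window), it presumably amounts to something like
this sketch; this is an assumption, not the definition from the series:

/* assumed shape of the helper, defined elsewhere in the series */
static inline unsigned int
xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
{
	/* fixed part plus one bit per window slot, rounded up to
	 * whole 32-bit bitmap words
	 */
	return sizeof(*replay_esn) +
	       ((replay_esn->replay_window + 31) / 32) * sizeof(__u32);
}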
net/xfrm/xfrm_state.c | 2 +
net/xfrm/xfrm_user.c | 99 ++++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 87 insertions(+), 14 deletions(-)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a5035c0..fc82563 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -355,6 +355,8 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
kfree(x->calg);
kfree(x->encap);
kfree(x->coaddr);
+ kfree(x->replay_esn);
+ kfree(x->preplay_esn);
if (x->inner_mode)
xfrm_put_mode(x->inner_mode);
if (x->inner_mode_iaf)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 77452dc..cf6cab2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -118,6 +118,19 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
return 0;
}
+static inline int verify_replay(struct xfrm_usersa_info *p,
+ struct nlattr **attrs)
+{
+ struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+
+ if (!rt)
+ return 0;
+
+ if (p->replay_window != 0)
+ return -EINVAL;
+
+ return 0;
+}
static int verify_newsa_info(struct xfrm_usersa_info *p,
struct nlattr **attrs)
@@ -207,6 +220,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
goto out;
if ((err = verify_sec_ctx_len(attrs)))
goto out;
+ if ((err = verify_replay(p, attrs)))
+ goto out;
err = -EINVAL;
switch (p->mode) {
@@ -337,6 +352,33 @@ static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
return 0;
}
+static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
+ struct xfrm_replay_state_esn **preplay_esn,
+ struct nlattr *rta)
+{
+ struct xfrm_replay_state_esn *p, *pp, *up;
+
+ if (!rta)
+ return 0;
+
+ up = nla_data(rta);
+
+ p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ if (!pp) {
+ kfree(p);
+ return -ENOMEM;
+ }
+
+ *replay_esn = p;
+ *preplay_esn = pp;
+
+ return 0;
+}
+
static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
int len = 0;
@@ -372,10 +414,20 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+ struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+ if (re) {
+ struct xfrm_replay_state_esn *replay_esn;
+ replay_esn = nla_data(re);
+ memcpy(x->replay_esn, replay_esn,
+ xfrm_replay_state_esn_len(replay_esn));
+ memcpy(x->preplay_esn, replay_esn,
+ xfrm_replay_state_esn_len(replay_esn));
+ }
+
if (rp) {
struct xfrm_replay_state *replay;
replay = nla_data(rp);
@@ -456,13 +508,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
goto error;
+ if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
+ attrs[XFRMA_REPLAY_ESN_VAL])))
+ goto error;
+
x->km.seq = p->seq;
x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
/* sysctl_xfrm_aevent_etime is in 100ms units */
x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
- x->preplay.bitmap = 0;
- x->preplay.seq = x->replay.seq+x->replay_maxdiff;
- x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
if ((err = xfrm_init_replay(x)))
goto error;
@@ -693,6 +746,10 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (xfrm_mark_put(skb, &x->mark))
goto nla_put_failure;
+ if (x->replay_esn)
+ NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+
if (x->security && copy_sec_ctx(x->security, skb) < 0)
goto nla_put_failure;
@@ -1560,10 +1617,14 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
-static inline size_t xfrm_aevent_msgsize(void)
+static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
+ size_t replay_size = x->replay_esn ?
+ xfrm_replay_state_esn_len(x->replay_esn) :
+ sizeof(struct xfrm_replay_state);
+
return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
- + nla_total_size(sizeof(struct xfrm_replay_state))
+ + nla_total_size(replay_size)
+ nla_total_size(sizeof(struct xfrm_lifetime_cur))
+ nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
@@ -1588,7 +1649,13 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
id->reqid = x->props.reqid;
id->flags = c->data.aevent;
- NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+ if (x->replay_esn)
+ NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn);
+ else
+ NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+
NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
if (id->flags & XFRM_AE_RTHR)
@@ -1621,16 +1688,16 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct xfrm_usersa_id *id = &p->sa_id;
- r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
- if (r_skb == NULL)
- return -ENOMEM;
-
mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
- if (x == NULL) {
- kfree_skb(r_skb);
+ if (x == NULL)
return -ESRCH;
+
+ r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
+ if (r_skb == NULL) {
+ xfrm_state_put(x);
+ return -ENOMEM;
}
/*
@@ -1662,9 +1729,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+ struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
- if (!lt && !rp)
+ if (!lt && !rp && !re)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
@@ -2124,6 +2192,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
+ [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
};
static struct xfrm_link {
@@ -2251,7 +2320,7 @@ static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
struct net *net = xs_net(x);
struct sk_buff *skb;
- skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
+ skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
@@ -2303,6 +2372,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(*x->calg));
if (x->encap)
l += nla_total_size(sizeof(*x->encap));
+ if (x->replay_esn)
+ l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
if (x->security)
l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
x->security->ctx_len);
--
1.7.0.4
Signed-off-by: Steffen Klassert <[email protected]>
---
include/linux/xfrm.h | 12 ++++++++++++
ip/ipxfrm.c | 8 +++++++-
ip/xfrm_state.c | 37 +++++++++++++++++++++++++++++--------
3 files changed, 48 insertions(+), 9 deletions(-)
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 07f2b63..dd6928d 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -84,6 +84,16 @@ struct xfrm_replay_state {
__u32 bitmap;
};
+struct xfrm_replay_state_esn {
+ unsigned int len;
+ __u32 oseq;
+ __u32 oseq_hi;
+ __u32 seq;
+ __u32 seq_hi;
+ __u32 replay_window;
+ __u32 bmp[0];
+};
+
struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len; /* in bits */
@@ -283,6 +293,7 @@ enum xfrm_attr_type_t {
XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -349,6 +360,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
#define XFRM_STATE_AF_UNSPEC 32
+#define XFRM_STATE_ESN 64
};
struct xfrm_usersa_id {
diff --git a/ip/ipxfrm.c b/ip/ipxfrm.c
index 99a6756..548e4a4 100644
--- a/ip/ipxfrm.c
+++ b/ip/ipxfrm.c
@@ -665,6 +665,12 @@ done:
void xfrm_xfrma_print(struct rtattr *tb[], __u16 family,
FILE *fp, const char *prefix)
{
+
+ if (tb[XFRMA_REPLAY_ESN_VAL]) {
+ struct rtattr *rta = tb[XFRMA_REPLAY_ESN_VAL];
+ struct xfrm_replay_state_esn *repl = (struct xfrm_replay_state_esn *) RTA_DATA(rta);
+ fprintf(fp, "\treplay-window %u\n", repl->replay_window);
+ }
if (tb[XFRMA_MARK]) {
struct rtattr *rta = tb[XFRMA_MARK];
struct xfrm_mark *m = (struct xfrm_mark *) RTA_DATA(rta);
@@ -809,7 +815,6 @@ void xfrm_state_info_print(struct xfrm_usersa_info *xsinfo,
STRBUF_CAT(buf, "\t");
fputs(buf, fp);
- fprintf(fp, "replay-window %u ", xsinfo->replay_window);
if (show_stats > 0)
fprintf(fp, "seq 0x%08u ", xsinfo->seq);
if (show_stats > 0 || xsinfo->flags) {
@@ -822,6 +827,7 @@ void xfrm_state_info_print(struct xfrm_usersa_info *xsinfo,
XFRM_FLAG_PRINT(fp, flags, XFRM_STATE_WILDRECV, "wildrecv");
XFRM_FLAG_PRINT(fp, flags, XFRM_STATE_ICMP, "icmp");
XFRM_FLAG_PRINT(fp, flags, XFRM_STATE_AF_UNSPEC, "af-unspec");
+ XFRM_FLAG_PRINT(fp, flags, XFRM_STATE_ESN, "replay-esn");
if (flags)
fprintf(fp, "%x", flags);
}
diff --git a/ip/xfrm_state.c b/ip/xfrm_state.c
index 38d4039..4c66923 100644
--- a/ip/xfrm_state.c
+++ b/ip/xfrm_state.c
@@ -50,15 +50,16 @@
#define NLMSG_BUF_SIZE 4096
#define RTA_BUF_SIZE 2048
#define XFRM_ALGO_KEY_BUF_SIZE 512
+#define XFRM_REPLAY_BMP_SIZE_U32 16
static void usage(void) __attribute__((noreturn));
static void usage(void)
{
fprintf(stderr, "Usage: ip xfrm state { add | update } ID [ XFRM_OPT ] [ mode MODE ]\n");
- fprintf(stderr, " [ reqid REQID ] [ seq SEQ ] [ replay-window SIZE ] [ flag FLAG-LIST ]\n");
+ fprintf(stderr, " [ reqid REQID ] [ seq SEQ ][ replay-window SIZE ] [ flag FLAG-LIST ]\n");
fprintf(stderr, " [ encap ENCAP ] [ sel SELECTOR ] [ replay-seq SEQ ]\n");
- fprintf(stderr, " [ replay-oseq SEQ ] [ LIMIT-LIST ]\n");
+ fprintf(stderr, " [ replay-oseq SEQ ] [ replay-seqhi SEQ ] [ replay-oseqhi SEQ ] [ LIMIT-LIST ]\n");
fprintf(stderr, "Usage: ip xfrm state allocspi ID [ mode MODE ] [ reqid REQID ] [ seq SEQ ]\n");
fprintf(stderr, " [ min SPI max SPI ]\n");
fprintf(stderr, "Usage: ip xfrm state { delete | get } ID\n");
@@ -214,6 +215,8 @@ static int xfrm_state_flag_parse(__u8 *flags, int *argcp, char ***argvp)
*flags |= XFRM_STATE_ICMP;
else if (strcmp(*argv, "af-unspec") == 0)
*flags |= XFRM_STATE_AF_UNSPEC;
+ else if (strcmp(*argv, "replay-esn") == 0)
+ *flags |= XFRM_STATE_ESN;
else {
PREV_ARG(); /* back track */
break;
@@ -239,7 +242,11 @@ static int xfrm_state_modify(int cmd, unsigned flags, int argc, char **argv)
struct xfrm_usersa_info xsinfo;
char buf[RTA_BUF_SIZE];
} req;
- struct xfrm_replay_state replay;
+ struct {
+ struct xfrm_replay_state_esn state;
+ __u32 bmp[XFRM_REPLAY_BMP_SIZE_U32];
+ } replay;
+
char *idp = NULL;
char *aeadop = NULL;
char *ealgop = NULL;
@@ -249,8 +256,11 @@ static int xfrm_state_modify(int cmd, unsigned flags, int argc, char **argv)
struct xfrm_mark mark = {0, 0};
memset(&req, 0, sizeof(req));
+
memset(&replay, 0, sizeof(replay));
+ replay.state.len = sizeof(replay);
+
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.xsinfo));
req.n.nlmsg_flags = NLM_F_REQUEST|flags;
req.n.nlmsg_type = cmd;
@@ -275,16 +285,24 @@ static int xfrm_state_modify(int cmd, unsigned flags, int argc, char **argv)
xfrm_seq_parse(&req.xsinfo.seq, &argc, &argv);
} else if (strcmp(*argv, "replay-window") == 0) {
NEXT_ARG();
- if (get_u8(&req.xsinfo.replay_window, *argv, 0))
+ if (get_u32(&replay.state.replay_window, *argv, 0))
invarg("\"replay-window\" value is invalid", *argv);
} else if (strcmp(*argv, "replay-seq") == 0) {
NEXT_ARG();
- if (get_u32(&replay.seq, *argv, 0))
+ if (get_u32(&replay.state.seq, *argv, 0))
invarg("\"replay-seq\" value is invalid", *argv);
} else if (strcmp(*argv, "replay-oseq") == 0) {
NEXT_ARG();
- if (get_u32(&replay.oseq, *argv, 0))
+ if (get_u32(&replay.state.oseq, *argv, 0))
invarg("\"replay-oseq\" value is invalid", *argv);
+ } else if (strcmp(*argv, "replay-seqhi") == 0) {
+ NEXT_ARG();
+ if (get_u32(&replay.state.seq_hi, *argv, 0))
+ invarg("\"replay-seqhi\" value is invalid", *argv);
+ } else if (strcmp(*argv, "replay-oseqhi") == 0) {
+ NEXT_ARG();
+ if (get_u32(&replay.state.oseq_hi, *argv, 0))
+ invarg("\"replay-oseqhi\" value is invalid", *argv);
} else if (strcmp(*argv, "flag") == 0) {
NEXT_ARG();
xfrm_state_flag_parse(&req.xsinfo.flags, &argc, &argv);
@@ -434,9 +452,12 @@ parse_algo:
argc--; argv++;
}
- if (replay.seq || replay.oseq)
- addattr_l(&req.n, sizeof(req.buf), XFRMA_REPLAY_VAL,
+ if (replay.state.replay_window || replay.state.seq || replay.state.oseq ||
+ replay.state.seq_hi || replay.state.oseq_hi) {
+
+ addattr_l(&req.n, sizeof(req.buf), XFRMA_REPLAY_ESN_VAL,
(void *)&replay, sizeof(replay));
+ }
if (!idp) {
fprintf(stderr, "Not enough information: \"ID\" is required\n");
--
1.7.0.4
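With the patched ip(8), an esn state with a big anti-replay window
could then be configured along these lines (illustrative invocation
only; addresses, SPI and key are made up):

ip xfrm state add src 192.0.2.1 dst 192.0.2.2 \
	proto esp spi 0x100 mode tunnel \
	flag replay-esn replay-window 128 \
	enc 'cbc(aes)' 0x0123456789abcdef0123456789abcdef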
On Mon, Nov 22, 2010 at 11:26:54AM +0100, Steffen Klassert wrote:
> Use scatterwalk_crypto_chain in favor of locally defined chaining functions.
>
> Signed-off-by: Steffen Klassert <[email protected]>
I've applied patches 1&2 since they're logically separate from the
rest of the series and make sense on their own.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Mon, Nov 22, 2010 at 11:30:14AM +0100, Steffen Klassert wrote:
>
> @@ -205,11 +228,18 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
> skb_to_sgvec(skb, sg,
> esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
> clen + alen);
> - sg_init_one(asg, esph, sizeof(*esph));
> +
> + if ((x->props.flags & XFRM_STATE_ESN)) {
> + sg_init_table(asg, 2);
> + sg_set_buf(asg, esph, sizeof(*esph));
> + *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
> + sg_set_buf(asg + 1, seqhi, seqhilen);
> + } else
> + sg_init_one(asg, esph, sizeof(*esph));
I think this is wrong for AEAD algorithms. You want the sequence
number in network byte order for them so the high bits need to be
inserted into the middle of the ESP header.
The other problem is that you're currently requiring the authencesn
user to provide two SG entries, which is fine for now. However,
since this might be exported to user-space in the future, authencesn
shouldn't really rely on that, or at least it shouldn't BUG.
So one solution is to do it based on bytes in authencesn. That is,
your associated input should always be 12 bytes long, and then you
simply construct a new SG list for your actual processing with the
middle 4 bytes taken out.
For IPsec it could just provide an SG list with three entries,
of 4 bytes each.
Of course for simplicity, you could require this to be the case in
authencesn and return -EINVAL (not BUG :) if it's not the case.
Thanks,
--
Email: Herbert Xu <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Sorry for the huge delay...
On Thu, Dec 02, 2010 at 03:29:47PM +0800, Herbert Xu wrote:
> On Mon, Nov 22, 2010 at 11:30:14AM +0100, Steffen Klassert wrote:
> >
> > @@ -205,11 +228,18 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
> > skb_to_sgvec(skb, sg,
> > esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
> > clen + alen);
> > - sg_init_one(asg, esph, sizeof(*esph));
> > +
> > + if ((x->props.flags & XFRM_STATE_ESN)) {
> > + sg_init_table(asg, 2);
> > + sg_set_buf(asg, esph, sizeof(*esph));
> > + *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
> > + sg_set_buf(asg + 1, seqhi, seqhilen);
> > + } else
> > + sg_init_one(asg, esph, sizeof(*esph));
>
> I think this is wrong for AEAD algorithms. You want the sequence
> number in network byte order for them so the high bits need to be
> inserted into the middle of the ESP header.
>
Yes, indeed.
> The other problem is that you're currently requiring the authencesn
> user to provide two SG entries which is fine for now. However,
> since this might be exported to user-space in future, authenecesn
> shouldn't really rely on that, or at least it shouldn't BUG.
>
> So one solution is to do it based on bytes in authencesn. That is,
> your associated input should always be 12 bytes long, and then you
> simply construct a new SG list for your actual processing with the
> middle 4 bytes taken out.
>
> For IPsec it could just provide an SG list with three entries,
> of 4 bytes each.
Ok, I've updated the patchset in this regard.
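With the layout you describe - 12 bytes of associated data and the
high bits in the middle of the ESP header - the esp_output() hunk would
presumably end up looking like this (a sketch of the planned update,
not the resent patch itself):

	if ((x->props.flags & XFRM_STATE_ESN)) {
		/* SPI, seq_hi and the low seq number: three SG entries
		 * of 4 bytes each, 12 bytes of associated data in total.
		 */
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));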
>
> Of course for simplicity, you could require this to be the case in
> authencesn and return -EINVAL (not BUG :) if it's not the case.
>
The BUG was a leftover from debugging the thing; when debugging, it is
sometimes better to pull the emergency brake as soon as something
unexpected happens. I've replaced the BUG with a return of -EINVAL now.
I'll resend the patchset for a second round of review.