2017-03-30 17:39:02

by David Gstir

Subject: [PATCH] fscrypt: Add support for AES-128-CBC

From: Daniel Walter <[email protected]>

fscrypt provides facilities to use different encryption algorithms which are
selectable by userspace when setting the encryption policy. Currently, only
AES-256-XTS for file contents and AES-256-CBC-CTS for file names are implemented.
This is a clear case of the kernel offering the mechanism while userspace
selects the policy, similar to what dm-crypt and ecryptfs do.

This patch adds support for using AES-128-CBC for file contents and
AES-128-CBC-CTS for file name encryption. To mitigate watermarking attacks, IVs
are generated using the ESSIV algorithm. While AES-CBC is slightly less secure
than AES-XTS, it has more widespread hardware support. In particular, crypto
accelerators on low-powered embedded devices, such as CAAM or CESA, only
support AES-128-CBC at acceptable speed. Using AES-CBC gives us acceptable
performance while still providing a moderate level of security for persistent
storage.

[david: massaged commit message]

Signed-off-by: Daniel Walter <[email protected]>
Signed-off-by: David Gstir <[email protected]>
---
fs/crypto/crypto.c | 25 ++++++++----
fs/crypto/fscrypt_private.h | 5 ++-
fs/crypto/keyinfo.c | 87 ++++++++++++++++++++++++++++++++++++------
include/linux/fscrypt_common.h | 6 ++-
include/uapi/linux/fs.h | 2 +
5 files changed, 103 insertions(+), 22 deletions(-)

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 6d6eca3..210b586 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
+#include <crypto/aes.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
@@ -147,17 +148,31 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
{
struct {
__le64 index;
- u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
- } xts_tweak;
+ u8 padding[FS_IV_SIZE - sizeof(__le64)];
+ } blk_desc;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
+ u8 iv[FS_IV_SIZE];
int res = 0;

BUG_ON(len == 0);

+ BUILD_BUG_ON(sizeof(blk_desc) != FS_IV_SIZE);
+ BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
+ blk_desc.index = cpu_to_le64(lblk_num);
+ memset(blk_desc.padding, 0, sizeof(blk_desc.padding));
+
+ if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
+ BUG_ON(ci->ci_essiv_tfm == NULL);
+ memset(iv, 0, sizeof(iv));
+ crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv, (u8 *)&blk_desc);
+ } else {
+ memcpy(iv, &blk_desc, sizeof(iv));
+ }
+
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
@@ -170,15 +185,11 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
page_crypt_complete, &ecr);

- BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
- xts_tweak.index = cpu_to_le64(lblk_num);
- memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
-
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
- skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
+ skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index e39696e..81ce9af 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -16,8 +16,10 @@
#define FS_FNAME_CRYPTO_DIGEST_SIZE 32

/* Encryption parameters */
-#define FS_XTS_TWEAK_SIZE 16
+#define FS_IV_SIZE 16
#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_128_CBC_KEY_SIZE 16
+#define FS_AES_128_CTS_KEY_SIZE 16
#define FS_AES_256_GCM_KEY_SIZE 32
#define FS_AES_256_CBC_KEY_SIZE 32
#define FS_AES_256_CTS_KEY_SIZE 32
@@ -67,6 +69,7 @@ struct fscrypt_info {
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
+ struct crypto_cipher *ci_essiv_tfm;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
};

diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 8cdfddc..cc515e9 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -10,6 +10,7 @@

#include <keys/user-type.h>
#include <linux/scatterlist.h>
+#include <linux/cryptohash.h>
#include "fscrypt_private.h"

static void derive_crypt_complete(struct crypto_async_request *req, int rc)
@@ -27,13 +28,13 @@ static void derive_crypt_complete(struct crypto_async_request *req, int rc)
* derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
* @source_key: Source key to which to apply derivation.
- * @derived_key: Derived key.
+ * @derived_key_raw: Derived raw key.
*
* Return: Zero on success; non-zero otherwise.
*/
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
- u8 source_key[FS_AES_256_XTS_KEY_SIZE],
- u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
+ struct fscrypt_key *source_key,
+ u8 derived_raw_key[FS_MAX_KEY_SIZE])
{
int res = 0;
struct skcipher_request *req = NULL;
@@ -60,10 +61,10 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
if (res < 0)
goto out;

- sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
- sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg,
- FS_AES_256_XTS_KEY_SIZE, NULL);
+ sg_init_one(&src_sg, source_key->raw, source_key->size);
+ sg_init_one(&dst_sg, derived_raw_key, source_key->size);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
+ NULL);
res = crypto_skcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
wait_for_completion(&ecr.completion);
@@ -75,9 +76,28 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
return res;
}

+static bool valid_key_size(struct fscrypt_info *ci, struct fscrypt_key *key,
+ int reg_file)
+{
+ if (reg_file) {
+ switch(ci->ci_data_mode) {
+ case FS_ENCRYPTION_MODE_AES_256_XTS:
+ return key->size >= FS_AES_256_XTS_KEY_SIZE;
+ case FS_ENCRYPTION_MODE_AES_128_CBC:
+ return key->size >= FS_AES_128_CBC_KEY_SIZE;
+ }
+ } else {
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ return key->size >= FS_AES_256_CTS_KEY_SIZE;
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+ return key->size >= FS_AES_128_CTS_KEY_SIZE;
+ }
+ return false;
+}
+
static int validate_user_key(struct fscrypt_info *crypt_info,
struct fscrypt_context *ctx, u8 *raw_key,
- const char *prefix)
+ const char *prefix, int reg_file)
{
char *description;
struct key *keyring_key;
@@ -111,14 +131,14 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
master_key = (struct fscrypt_key *)ukp->data;
BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

- if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+ if (!valid_key_size(crypt_info, master_key, reg_file)) {
printk_once(KERN_WARNING
"%s: key size incorrect: %d\n",
__func__, master_key->size);
res = -ENOKEY;
goto out;
}
- res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+ res = derive_key_aes(ctx->nonce, master_key, raw_key);
out:
up_read(&keyring_key->sem);
key_put(keyring_key);
@@ -134,6 +154,11 @@ static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
}
+ if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
+ *cipher_str_ret = "cbc(aes)";
+ *keysize_ret = FS_AES_128_CBC_KEY_SIZE;
+ return 0;
+ }
pr_warn_once("fscrypto: unsupported contents encryption mode "
"%d for inode %lu\n",
ci->ci_data_mode, inode->i_ino);
@@ -146,6 +171,11 @@ static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
return 0;
}
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_128_CTS) {
+ *cipher_str_ret = "cts(cbc(aes))";
+ *keysize_ret = FS_AES_128_CTS_KEY_SIZE;
+ return 0;
+ }
pr_warn_once("fscrypto: unsupported filenames encryption mode "
"%d for inode %lu\n",
ci->ci_filename_mode, inode->i_ino);
@@ -163,6 +193,8 @@ static void put_crypt_info(struct fscrypt_info *ci)
return;

crypto_free_skcipher(ci->ci_ctfm);
+ if (ci->ci_essiv_tfm)
+ crypto_free_cipher(ci->ci_essiv_tfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}

@@ -171,6 +203,10 @@ int fscrypt_get_encryption_info(struct inode *inode)
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
struct crypto_skcipher *ctfm;
+ struct crypto_cipher *essiv_tfm;
+ __u32 sha_ws[SHA_WORKSPACE_WORDS];
+ __u32 essiv_key[SHA_DIGEST_WORDS];
+
const char *cipher_str;
int keysize;
u8 *raw_key = NULL;
@@ -207,6 +243,10 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;

+ if (ctx.contents_encryption_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+ ctx.filenames_encryption_mode != FS_ENCRYPTION_MODE_AES_128_CTS)
+ return -EINVAL;
+
crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
return -ENOMEM;
@@ -215,6 +255,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
crypt_info->ci_ctfm = NULL;
+ crypt_info->ci_essiv_tfm = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));

@@ -231,10 +272,12 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (!raw_key)
goto out;

- res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX);
+ res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+ S_ISREG(inode->i_mode));
if (res && inode->i_sb->s_cop->key_prefix) {
int res2 = validate_user_key(crypt_info, &ctx, raw_key,
- inode->i_sb->s_cop->key_prefix);
+ inode->i_sb->s_cop->key_prefix,
+ S_ISREG(inode->i_mode));
if (res2) {
if (res2 == -ENOKEY)
res = -ENOKEY;
@@ -258,6 +301,26 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (res)
goto out;

+ if (crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
+ /* init ESSIV generator */
+ essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (!essiv_tfm || IS_ERR(essiv_tfm)) {
+ res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
+ printk(KERN_DEBUG
+ "%s: error %d (inode %u) allocating essiv tfm\n",
+ __func__, res, (unsigned) inode->i_ino);
+ goto out;
+ }
+ /* calc sha of key for essiv generation */
+ memset(sha_ws, 0, sizeof(sha_ws));
+ sha_init(essiv_key);
+ sha_transform(essiv_key, raw_key, sha_ws);
+ res = crypto_cipher_setkey(essiv_tfm, (u8 *)essiv_key, keysize);
+ if (res)
+ goto out;
+
+ crypt_info->ci_essiv_tfm = essiv_tfm;
+ }
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 10c1abf..83d32a6 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -104,12 +104,14 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)

static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
{
- return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS ||
+ mode == FS_ENCRYPTION_MODE_AES_128_CBC);
}

static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
{
- return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS ||
+ mode == FS_ENCRYPTION_MODE_AES_128_CTS);
}

static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 048a85e..231b15d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -272,6 +272,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_GCM 2
#define FS_ENCRYPTION_MODE_AES_256_CBC 3
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+#define FS_ENCRYPTION_MODE_AES_128_CBC 5
+#define FS_ENCRYPTION_MODE_AES_128_CTS 6

struct fscrypt_policy {
__u8 version;
--
2.10.2


2017-03-31 06:21:55

by Eric Biggers

Subject: Re: [PATCH] fscrypt: Add support for AES-128-CBC

[+Cc linux-fscrypt]

Hi David and Daniel,

On Thu, Mar 30, 2017 at 07:38:40PM +0200, David Gstir wrote:
> From: Daniel Walter <[email protected]>
>
> fscrypt provides facilities to use different encryption algorithms which are
> selectable by userspace when setting the encryption policy. Currently, only
> AES-256-XTS for file contents and AES-256-CBC-CTS for file names are implemented.
> Which is a clear case of kernel offers the mechanism and userspace selects a
> policy. Similar to what dm-crypt and ecryptfs have.
>
> This patch adds support for using AES-128-CBC for file contents and
> AES-128-CBC-CTS for file name encryption. To mitigate watermarking attacks, IVs
> are generated using the ESSIV algorithm. While AES-CBC is actually slightly
> less secure than AES-XTS from a security point of view, there is more
> widespread hardware support. Especially low-powered embedded devices crypto
> accelerators such as CAAM or CESA support only AES-CBC-128 with an acceptable
> speed. Using AES-CBC gives us the acceptable performance while still providing
> a moderate level of security for persistent storage.
>

Thanks for sending this! I can't object too much to adding AES-CBC-128 if you
find it useful, though of course AES-256-XTS will remain the recommendation for
general use. And I don't suppose AES-256-CBC is an option for you?

Anyway, more comments below:

> @@ -147,17 +148,31 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
> {
> struct {
> __le64 index;
> - u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
> - } xts_tweak;
> + u8 padding[FS_IV_SIZE - sizeof(__le64)];
> + } blk_desc;
> struct skcipher_request *req = NULL;
> DECLARE_FS_COMPLETION_RESULT(ecr);
> struct scatterlist dst, src;
> struct fscrypt_info *ci = inode->i_crypt_info;
> struct crypto_skcipher *tfm = ci->ci_ctfm;
> + u8 iv[FS_IV_SIZE];
> int res = 0;
>
> BUG_ON(len == 0);
>
> + BUILD_BUG_ON(sizeof(blk_desc) != FS_IV_SIZE);
> + BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
> + blk_desc.index = cpu_to_le64(lblk_num);
> + memset(blk_desc.padding, 0, sizeof(blk_desc.padding));
> +
> + if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
> + BUG_ON(ci->ci_essiv_tfm == NULL);
> + memset(iv, 0, sizeof(iv));
> + crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv, (u8 *)&blk_desc);
> + } else {
> + memcpy(iv, &blk_desc, sizeof(iv));
> + }
> +

The ESSIV cipher operation should be done in-place, so that only one IV buffer
is needed. See what dm-crypt does in crypt_iv_essiv_gen(), for example.

Also, it can just use ESSIV 'if (ci->ci_essiv_tfm != NULL)'. That would avoid
the awkward BUG_ON() and hardcoding of a specific cipher mode.
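
For illustration, a minimal sketch of that in-place, mode-agnostic variant
(reusing the blk_desc buffer from the hunk above as the IV itself; untested):

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm,
					  (u8 *)&blk_desc, (u8 *)&blk_desc);
	/*
	 * blk_desc now holds the ESSIV-generated IV; pass it directly to
	 * skcipher_request_set_crypt(), no second buffer needed.
	 */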

> @@ -27,13 +28,13 @@ static void derive_crypt_complete(struct crypto_async_request *req, int rc)
> * derive_key_aes() - Derive a key using AES-128-ECB
> * @deriving_key: Encryption key used for derivation.
> * @source_key: Source key to which to apply derivation.
> - * @derived_key: Derived key.
> + * @derived_key_raw: Derived raw key.
> *
> * Return: Zero on success; non-zero otherwise.
> */
> static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
> - u8 source_key[FS_AES_256_XTS_KEY_SIZE],
> - u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
> + struct fscrypt_key *source_key,
> + u8 derived_raw_key[FS_MAX_KEY_SIZE])

'const struct fscrypt_key *'.

> {
> int res = 0;
> struct skcipher_request *req = NULL;
> @@ -60,10 +61,10 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
> if (res < 0)
> goto out;
>
> - sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
> - sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
> - skcipher_request_set_crypt(req, &src_sg, &dst_sg,
> - FS_AES_256_XTS_KEY_SIZE, NULL);
> + sg_init_one(&src_sg, source_key->raw, source_key->size);
> + sg_init_one(&dst_sg, derived_raw_key, source_key->size);
> + skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
> + NULL);
> res = crypto_skcipher_encrypt(req);
> if (res == -EINPROGRESS || res == -EBUSY) {
> wait_for_completion(&ecr.completion);
> @@ -75,9 +76,28 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
> return res;
> }
>
> +static bool valid_key_size(struct fscrypt_info *ci, struct fscrypt_key *key,
> + int reg_file)
> +{
> + if (reg_file) {
> + switch(ci->ci_data_mode) {
> + case FS_ENCRYPTION_MODE_AES_256_XTS:
> + return key->size >= FS_AES_256_XTS_KEY_SIZE;
> + case FS_ENCRYPTION_MODE_AES_128_CBC:
> + return key->size >= FS_AES_128_CBC_KEY_SIZE;
> + }
> + } else {
> + if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
> + return key->size >= FS_AES_256_CTS_KEY_SIZE;
> + if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
> + return key->size >= FS_AES_128_CTS_KEY_SIZE;
> + }
> + return false;
> +}
> +

This is redundant with how the key size is determined in
determine_cipher_type(). How about passing the expected key size to
validate_user_key() (instead of 'reg_file') and verifying that key->size >=
keysize?

Also, it should be verified that key->size <= FS_MAX_KEY_SIZE (since that's how
large the buffer in fscrypt_key is) and key->size % AES_BLOCK_SIZE == 0 (since
derive_key_aes() assumes the key is evenly divisible into AES blocks).
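
Roughly, the combined check could look like this (sketch only, with the new
parameter named min_keysize for illustration):

	if (master_key->size < min_keysize ||
	    master_key->size > FS_MAX_KEY_SIZE ||
	    master_key->size % AES_BLOCK_SIZE != 0) {
		/* key too short, too long, or not a whole number of AES blocks */
		res = -ENOKEY;
		goto out;
	}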

> @@ -134,6 +154,11 @@ static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
> *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
> return 0;
> }
> + if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
> + *cipher_str_ret = "cbc(aes)";
> + *keysize_ret = FS_AES_128_CBC_KEY_SIZE;
> + return 0;
> + }

switch (ci->ci_data_mode) {
...
}
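
That is, fold the existing if-blocks into a single switch, roughly:

	switch (ci->ci_data_mode) {
	case FS_ENCRYPTION_MODE_AES_256_XTS:
		*cipher_str_ret = "xts(aes)";
		*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
		return 0;
	case FS_ENCRYPTION_MODE_AES_128_CBC:
		*cipher_str_ret = "cbc(aes)";
		*keysize_ret = FS_AES_128_CBC_KEY_SIZE;
		return 0;
	default:
		/* keep the existing pr_warn_once() and return -ENOKEY */
		break;
	}

and the same pattern for ci->ci_filename_mode below.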

> + if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_128_CTS) {
> + *cipher_str_ret = "cts(cbc(aes))";
> + *keysize_ret = FS_AES_128_CTS_KEY_SIZE;
> + return 0;
> + }

switch (ci->ci_filename_mode) {
...
}

> + if (ci->ci_essiv_tfm)
> + crypto_free_cipher(ci->ci_essiv_tfm);

No need to check for NULL before calling crypto_free_cipher().

> + if (ctx.contents_encryption_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
> + ctx.filenames_encryption_mode != FS_ENCRYPTION_MODE_AES_128_CTS)
> + return -EINVAL;
> +

I think for now we should only allow the two pairs:

contents_encryption_mode=FS_ENCRYPTION_MODE_AES_256_XTS
filenames_encryption_mode=FS_ENCRYPTION_MODE_AES_256_CTS

and

contents_encryption_mode=FS_ENCRYPTION_MODE_AES_128_CBC
filenames_encryption_mode=FS_ENCRYPTION_MODE_AES_128_CTS

Other combinations like AES-256-XTS paired with AES-128-CTS should be forbidden.

This also needs to be enforced in create_encryption_context_from_policy() so
that FS_IOC_SET_ENCRYPTION_POLICY fails with bad combinations.
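
One way to express that (the helper name is just a suggestion) would be a
single predicate that both fscrypt_get_encryption_info() and
create_encryption_context_from_policy() call:

	static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
						   u32 filenames_mode)
	{
		return (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
			filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) ||
		       (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
			filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS);
	}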

> + if (crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
> + /* init ESSIV generator */
> + essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
> + if (!essiv_tfm || IS_ERR(essiv_tfm)) {
> + res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
> + printk(KERN_DEBUG
> + "%s: error %d (inode %u) allocating essiv tfm\n",
> + __func__, res, (unsigned) inode->i_ino);
> + goto out;
> + }
> + /* calc sha of key for essiv generation */
> + memset(sha_ws, 0, sizeof(sha_ws));
> + sha_init(essiv_key);
> + sha_transform(essiv_key, raw_key, sha_ws);
> + res = crypto_cipher_setkey(essiv_tfm, (u8 *)essiv_key, keysize);
> + if (res)
> + goto out;
> +
> + crypt_info->ci_essiv_tfm = essiv_tfm;
> + }

I think the ESSIV hash should be SHA-256 not SHA-1. SHA-1 is becoming more and
more obsolete these days. Another issue with SHA-1 is that it only produces a
20 byte hash, which means it couldn't be used if someone ever wanted to add
AES-256-CBC as another mode.

Also, the hash should be called through the crypto API.

Also, please factor creating the essiv_tfm into its own function.
fscrypt_get_encryption_info() is long enough already.

Something else to consider (probably for the future; this doesn't necessarily
have to be done yet) is that you really only need one essiv_tfm per *key*, not
one per inode. To deduplicate them you'd need a hash table or LRU queue or
something to keep track of the keys in use.

- Eric

2017-03-31 06:36:07

by Eric Biggers

Subject: Re: [PATCH] fscrypt: Add support for AES-128-CBC

On Thu, Mar 30, 2017 at 11:21:49PM -0700, Eric Biggers wrote:
>
> Something else to consider (probably for the future; this doesn't necessarily
> have to be done yet) is that you really only need one essiv_tfm per *key*, not
> one per inode. To deduplicate them you'd need a hash table or LRU queue or
> something to keep track of the keys in use.
>

Sorry, I screwed this up. This wouldn't work because the ESSIV key is being
derived from the per-file key, not the master key.

- Eric

2017-03-31 11:12:19

by David Gstir

Subject: Re: [PATCH] fscrypt: Add support for AES-128-CBC

Hi Eric,

thanks for the feedback!

> On 31.03.2017, at 08:21, Eric Biggers <[email protected]> wrote:
>
> [+Cc linux-fscrypt]

Oh, I didn't know about that list. I think MAINTAINERS should be updated to reflect that. :)

>
> Hi David and Daniel,
>
> On Thu, Mar 30, 2017 at 07:38:40PM +0200, David Gstir wrote:
>> From: Daniel Walter <[email protected]>
>>
>> fscrypt provides facilities to use different encryption algorithms which are
>> selectable by userspace when setting the encryption policy. Currently, only
>> AES-256-XTS for file contents and AES-256-CBC-CTS for file names are implemented.
>> Which is a clear case of kernel offers the mechanism and userspace selects a
>> policy. Similar to what dm-crypt and ecryptfs have.
>>
>> This patch adds support for using AES-128-CBC for file contents and
>> AES-128-CBC-CTS for file name encryption. To mitigate watermarking attacks, IVs
>> are generated using the ESSIV algorithm. While AES-CBC is actually slightly
>> less secure than AES-XTS from a security point of view, there is more
>> widespread hardware support. Especially low-powered embedded devices crypto
>> accelerators such as CAAM or CESA support only AES-CBC-128 with an acceptable
>> speed. Using AES-CBC gives us the acceptable performance while still providing
>> a moderate level of security for persistent storage.
>>
>
> Thanks for sending this! I can't object too much to adding AES-CBC-128 if you
> find it useful, though of course AES-256-XTS will remain the recommendation for
> general use.

Yes, AES-256-XTS should definitely be the recommendation and default here!
AES-128-CBC is a last resort if XTS is not possible for whatever reason.


> And I don't suppose AES-256-CBC is an option for you?

We went for AES-128 since it has fewer rounds and yields better performance. At least on the hardware we looked at, there was quite a difference in speed between AES-128-CBC and AES-256-CBC.

Anyways, AES-256-CBC could be added with just a few lines after this patch. :)


> Anyway, more comments below:

[...]

>> + if (ctx.contents_encryption_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
>> + ctx.filenames_encryption_mode != FS_ENCRYPTION_MODE_AES_128_CTS)
>> + return -EINVAL;
>> +
>
> I think for now we should only allow the two pairs:
>
> contents_encryption_mode=FS_ENCRYPTION_MODE_AES_256_XTS
> filenames_encryption_mode=FS_ENCRYPTION_MODE_AES_256_CTS
>
> and
>
> contents_encryption_mode=FS_ENCRYPTION_MODE_AES_128_CBC
> filenames_encryption_mode=FS_ENCRYPTION_MODE_AES_128_CTS
>
> Other combinations like AES-256-XTS paired with AES-128-CTS should be forbidden.

Yes, I agree.


> This also needs to be enforced in create_encryption_context_from_policy() so
> that FS_IOC_SET_ENCRYPTION_POLICY fails with bad combinations.
>
>> + if (crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
>> + /* init ESSIV generator */
>> + essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
>> + if (!essiv_tfm || IS_ERR(essiv_tfm)) {
>> + res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
>> + printk(KERN_DEBUG
>> + "%s: error %d (inode %u) allocating essiv tfm\n",
>> + __func__, res, (unsigned) inode->i_ino);
>> + goto out;
>> + }
>> + /* calc sha of key for essiv generation */
>> + memset(sha_ws, 0, sizeof(sha_ws));
>> + sha_init(essiv_key);
>> + sha_transform(essiv_key, raw_key, sha_ws);
>> + res = crypto_cipher_setkey(essiv_tfm, (u8 *)essiv_key, keysize);
>> + if (res)
>> + goto out;
>> +
>> + crypt_info->ci_essiv_tfm = essiv_tfm;
>> + }
>
> I think the ESSIV hash should be SHA-256 not SHA-1. SHA-1 is becoming more and
> more obsolete these days. Another issue with SHA-1 is that it only produces a
> 20 byte hash, which means it couldn't be used if someone ever wanted to add
> AES-256-CBC as another mode.

Good point! We'll change this to always use sha-256.


I'll wait for some more feedback and will provide a v2 which includes all your comments.

Thanks,
David

2017-04-25 14:41:17

by David Gstir

Subject: [PATCH v2] fscrypt: Add support for AES-128-CBC

From: Daniel Walter <[email protected]>

fscrypt provides facilities to use different encryption algorithms which
are selectable by userspace when setting the encryption policy. Currently,
only AES-256-XTS for file contents and AES-256-CBC-CTS for file names are
implemented. This is a clear case of the kernel offering the mechanism while
userspace selects the policy, similar to what dm-crypt and ecryptfs do.

This patch adds support for using AES-128-CBC for file contents and
AES-128-CBC-CTS for file name encryption. To mitigate watermarking
attacks, IVs are generated using the ESSIV algorithm. While AES-CBC is
slightly less secure than AES-XTS, it has more widespread hardware support.
In particular, crypto accelerators on low-powered embedded devices, such as
CAAM or CESA, only support AES-128-CBC at acceptable speed. Using AES-CBC
gives us acceptable performance while still providing a moderate level of
security for persistent storage.

Signed-off-by: Daniel Walter <[email protected]>
[[email protected]: massaged commit message]
Signed-off-by: David Gstir <[email protected]>
---
v2: Compute ESSIV salt using SHA256 instead of SHA1 and improve style
as pointed out by Eric Biggers [1].

[1] https://lkml.kernel.org/r/20170331062149.GA32409%20()%20zzz

fs/crypto/crypto.c | 22 ++++--
fs/crypto/fscrypt_private.h | 5 +-
fs/crypto/keyinfo.c | 165 ++++++++++++++++++++++++++++++++++-------
fs/crypto/policy.c | 7 +-
include/linux/fscrypt_common.h | 13 ++--
include/uapi/linux/fs.h | 2 +
6 files changed, 169 insertions(+), 45 deletions(-)

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 6d6eca394d4d..9dd5f23a55a5 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
+#include <crypto/aes.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
@@ -147,17 +148,28 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
{
struct {
__le64 index;
- u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
- } xts_tweak;
+ u8 padding[FS_IV_SIZE - sizeof(__le64)];
+ } blk_desc;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
+ u8 *iv = (u8 *)&blk_desc;

BUG_ON(len == 0);

+ BUILD_BUG_ON(sizeof(blk_desc) != FS_IV_SIZE);
+ BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
+ blk_desc.index = cpu_to_le64(lblk_num);
+ memset(blk_desc.padding, 0, sizeof(blk_desc.padding));
+
+ if (ci->ci_essiv_tfm != NULL) {
+ memset(iv, 0, sizeof(blk_desc));
+ crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv, iv);
+ }
+
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
@@ -170,15 +182,11 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
page_crypt_complete, &ecr);

- BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
- xts_tweak.index = cpu_to_le64(lblk_num);
- memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
-
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
- skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
+ skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index e39696e64494..81ce9af449cb 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -16,8 +16,10 @@
#define FS_FNAME_CRYPTO_DIGEST_SIZE 32

/* Encryption parameters */
-#define FS_XTS_TWEAK_SIZE 16
+#define FS_IV_SIZE 16
#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_128_CBC_KEY_SIZE 16
+#define FS_AES_128_CTS_KEY_SIZE 16
#define FS_AES_256_GCM_KEY_SIZE 32
#define FS_AES_256_CBC_KEY_SIZE 32
#define FS_AES_256_CTS_KEY_SIZE 32
@@ -67,6 +69,7 @@ struct fscrypt_info {
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
+ struct crypto_cipher *ci_essiv_tfm;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
};

diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 8cdfddce2b34..590efb0a891d 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -10,6 +10,8 @@

#include <keys/user-type.h>
#include <linux/scatterlist.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
#include "fscrypt_private.h"

static void derive_crypt_complete(struct crypto_async_request *req, int rc)
@@ -27,13 +29,13 @@ static void derive_crypt_complete(struct crypto_async_request *req, int rc)
* derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
* @source_key: Source key to which to apply derivation.
- * @derived_key: Derived key.
+ * @derived_key_raw: Derived raw key.
*
* Return: Zero on success; non-zero otherwise.
*/
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
- u8 source_key[FS_AES_256_XTS_KEY_SIZE],
- u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
+ const struct fscrypt_key *source_key,
+ u8 derived_raw_key[FS_MAX_KEY_SIZE])
{
int res = 0;
struct skcipher_request *req = NULL;
@@ -60,10 +62,10 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
if (res < 0)
goto out;

- sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
- sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg,
- FS_AES_256_XTS_KEY_SIZE, NULL);
+ sg_init_one(&src_sg, source_key->raw, source_key->size);
+ sg_init_one(&dst_sg, derived_raw_key, source_key->size);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
+ NULL);
res = crypto_skcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
wait_for_completion(&ecr.completion);
@@ -77,7 +79,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],

static int validate_user_key(struct fscrypt_info *crypt_info,
struct fscrypt_context *ctx, u8 *raw_key,
- const char *prefix)
+ const char *prefix, int min_keysize)
{
char *description;
struct key *keyring_key;
@@ -111,14 +113,15 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
master_key = (struct fscrypt_key *)ukp->data;
BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

- if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+ if (master_key->size < min_keysize || master_key->size > FS_MAX_KEY_SIZE
+ || master_key->size % AES_BLOCK_SIZE != 0) {
printk_once(KERN_WARNING
"%s: key size incorrect: %d\n",
__func__, master_key->size);
res = -ENOKEY;
goto out;
}
- res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+ res = derive_key_aes(ctx->nonce, master_key, raw_key);
out:
up_read(&keyring_key->sem);
key_put(keyring_key);
@@ -129,27 +132,37 @@ static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
const char **cipher_str_ret, int *keysize_ret)
{
if (S_ISREG(inode->i_mode)) {
- if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+ switch (ci->ci_data_mode) {
+ case FS_ENCRYPTION_MODE_AES_256_XTS:
*cipher_str_ret = "xts(aes)";
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
+ case FS_ENCRYPTION_MODE_AES_128_CBC:
+ *cipher_str_ret = "cbc(aes)";
+ *keysize_ret = FS_AES_128_CBC_KEY_SIZE;
+ return 0;
+ default:
+ pr_warn_once("fscrypto: unsupported contents encryption mode %d for inode %lu\n",
+ ci->ci_data_mode, inode->i_ino);
+ return -ENOKEY;
}
- pr_warn_once("fscrypto: unsupported contents encryption mode "
- "%d for inode %lu\n",
- ci->ci_data_mode, inode->i_ino);
- return -ENOKEY;
}

if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
- if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+ switch (ci->ci_filename_mode) {
+ case FS_ENCRYPTION_MODE_AES_256_CTS:
*cipher_str_ret = "cts(cbc(aes))";
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
return 0;
+ case FS_ENCRYPTION_MODE_AES_128_CTS:
+ *cipher_str_ret = "cts(cbc(aes))";
+ *keysize_ret = FS_AES_128_CTS_KEY_SIZE;
+ return 0;
+ default:
+ pr_warn_once("fscrypto: unsupported filenames encryption mode %d for inode %lu\n",
+ ci->ci_filename_mode, inode->i_ino);
+ return -ENOKEY;
}
- pr_warn_once("fscrypto: unsupported filenames encryption mode "
- "%d for inode %lu\n",
- ci->ci_filename_mode, inode->i_ino);
- return -ENOKEY;
}

pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
@@ -163,14 +176,100 @@ static void put_crypt_info(struct fscrypt_info *ci)
return;

crypto_free_skcipher(ci->ci_ctfm);
+ crypto_free_cipher(ci->ci_essiv_tfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}

+static int derive_essiv_salt(u8 *raw_key, int keysize, u8 **salt_out,
+ unsigned int *saltsize)
+{
+ int res;
+ struct crypto_ahash *hash_tfm;
+ unsigned int digestsize;
+ u8 *salt = NULL;
+ struct scatterlist sg;
+ struct ahash_request *req = NULL;
+
+ hash_tfm = crypto_alloc_ahash("sha256", 0, 0);
+ if (IS_ERR(hash_tfm))
+ return PTR_ERR(hash_tfm);
+
+ digestsize = crypto_ahash_digestsize(hash_tfm);
+ salt = kzalloc(digestsize, GFP_NOFS);
+ if (!salt) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ req = ahash_request_alloc(hash_tfm, GFP_NOFS);
+ if (!req) {
+ kfree(salt);
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sg_init_one(&sg, raw_key, keysize);
+ ahash_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ ahash_request_set_crypt(req, &sg, salt, keysize);
+
+ res = crypto_ahash_digest(req);
+ if (res) {
+ kfree(salt);
+ goto out;
+ }
+
+ *salt_out = salt;
+ *saltsize = digestsize;
+
+out:
+ crypto_free_ahash(hash_tfm);
+ ahash_request_free(req);
+ return res;
+}
+
+static int init_essiv_generator(struct fscrypt_info *ci, u8 *raw_key,
+ int keysize)
+{
+ int res;
+ struct crypto_cipher *essiv_tfm;
+ u8 *salt = NULL;
+ unsigned int saltsize;
+
+ /* init ESSIV generator */
+ essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (!essiv_tfm || IS_ERR(essiv_tfm)) {
+ res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
+ goto err;
+ }
+
+ res = derive_essiv_salt(raw_key, keysize, &salt, &saltsize);
+ if (res)
+ goto err;
+
+ res = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+ if (res)
+ goto err;
+
+ ci->ci_essiv_tfm = essiv_tfm;
+
+ return 0;
+
+err:
+ if (essiv_tfm && !IS_ERR(essiv_tfm))
+ crypto_free_cipher(essiv_tfm);
+
+ kzfree(salt);
+ return res;
+}
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
struct crypto_skcipher *ctfm;
+
const char *cipher_str;
int keysize;
u8 *raw_key = NULL;
@@ -207,6 +306,10 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;

+ if (!fscrypt_valid_enc_modes(ctx.contents_encryption_mode,
+ ctx.filenames_encryption_mode))
+ return -EINVAL;
+
crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
return -ENOMEM;
@@ -215,6 +318,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
crypt_info->ci_ctfm = NULL;
+ crypt_info->ci_essiv_tfm = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));

@@ -231,10 +335,12 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (!raw_key)
goto out;

- res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX);
+ res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+ keysize);
if (res && inode->i_sb->s_cop->key_prefix) {
int res2 = validate_user_key(crypt_info, &ctx, raw_key,
- inode->i_sb->s_cop->key_prefix);
+ inode->i_sb->s_cop->key_prefix,
+ keysize);
if (res2) {
if (res2 == -ENOKEY)
res = -ENOKEY;
@@ -246,9 +352,9 @@ int fscrypt_get_encryption_info(struct inode *inode)
ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
if (!ctfm || IS_ERR(ctfm)) {
res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
- printk(KERN_DEBUG
- "%s: error %d (inode %u) allocating crypto tfm\n",
- __func__, res, (unsigned) inode->i_ino);
+ pr_debug(
+ "%s: error %d (inode %u) allocating crypto tfm\n",
+ __func__, res, (unsigned int) inode->i_ino);
goto out;
}
crypt_info->ci_ctfm = ctfm;
@@ -258,6 +364,15 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (res)
goto out;

+ if (crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
+ res = init_essiv_generator(crypt_info, raw_key, keysize);
+ if (res) {
+ pr_debug(
+ "%s: error %d (inode %u) allocating essiv tfm\n",
+ __func__, res, (unsigned int) inode->i_ino);
+ goto out;
+ }
+ }
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4908906d54d5..bac8009245f2 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -41,11 +41,8 @@ static int create_encryption_context_from_policy(struct inode *inode,
memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);

- if (!fscrypt_valid_contents_enc_mode(
- policy->contents_encryption_mode))
- return -EINVAL;
-
- if (!fscrypt_valid_filenames_enc_mode(
+ if (!fscrypt_valid_enc_modes(
+ policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return -EINVAL;

diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 10c1abfbac6c..a91ce99f6e19 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -102,14 +102,13 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
return false;
}

-static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
+ u32 filenames_mode)
{
- return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
-{
- return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+ return ((contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) ||
+ (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS));
}

static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 048a85e9f017..231b15d4a90e 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -272,6 +272,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_GCM 2
#define FS_ENCRYPTION_MODE_AES_256_CBC 3
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+#define FS_ENCRYPTION_MODE_AES_128_CBC 5
+#define FS_ENCRYPTION_MODE_AES_128_CTS 6

struct fscrypt_policy {
__u8 version;
--
2.12.0

2017-04-25 20:10:16

by Eric Biggers

Subject: Re: [PATCH v2] fscrypt: Add support for AES-128-CBC

Hi Daniel and David,

On Tue, Apr 25, 2017 at 04:41:00PM +0200, David Gstir wrote:
> @@ -147,17 +148,28 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
> {
> struct {
> __le64 index;
> - u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
> - } xts_tweak;
> + u8 padding[FS_IV_SIZE - sizeof(__le64)];
> + } blk_desc;
> struct skcipher_request *req = NULL;
> DECLARE_FS_COMPLETION_RESULT(ecr);
> struct scatterlist dst, src;
> struct fscrypt_info *ci = inode->i_crypt_info;
> struct crypto_skcipher *tfm = ci->ci_ctfm;
> int res = 0;
> + u8 *iv = (u8 *)&blk_desc;
>
> BUG_ON(len == 0);
>
> + BUILD_BUG_ON(sizeof(blk_desc) != FS_IV_SIZE);
> + BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
> + blk_desc.index = cpu_to_le64(lblk_num);
> + memset(blk_desc.padding, 0, sizeof(blk_desc.padding));
> +
> + if (ci->ci_essiv_tfm != NULL) {
> + memset(iv, 0, sizeof(blk_desc));
> + crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv, iv);
> + }
> +
> req = skcipher_request_alloc(tfm, gfp_flags);
> if (!req) {
> printk_ratelimited(KERN_ERR
> @@ -170,15 +182,11 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
> req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
> page_crypt_complete, &ecr);
>
> - BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
> - xts_tweak.index = cpu_to_le64(lblk_num);
> - memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
> -
> sg_init_table(&dst, 1);
> sg_set_page(&dst, dest_page, len, offs);
> sg_init_table(&src, 1);
> sg_set_page(&src, src_page, len, offs);
> - skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
> + skcipher_request_set_crypt(req, &src, &dst, len, &iv);
> if (rw == FS_DECRYPT)
> res = crypto_skcipher_decrypt(req);
> else

There are two critical bugs here. First the IV is being zeroed before being
encrypted with the ESSIV tfm, so the resulting IV will always be the same for a
given file. It should be encrypting the block number instead. Second what's
actually being passed into the crypto API is '&iv' where IV is a pointer to
something on the stack... I really doubt that does what's intended :)

Probably the cleanest way to do this correctly is to just have the struct be the
'iv', so there's no extra pointer to deal with:

	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;

	[...]

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	[...]

	skcipher_request_set_crypt(req, &src, &dst, len, &iv);

> +static int derive_essiv_salt(u8 *raw_key, int keysize, u8 **salt_out,
> + unsigned int *saltsize)
> +{
> + int res;
> + struct crypto_ahash *hash_tfm;
> + unsigned int digestsize;
> + u8 *salt = NULL;
> + struct scatterlist sg;
> + struct ahash_request *req = NULL;
> +
> + hash_tfm = crypto_alloc_ahash("sha256", 0, 0);
> + if (IS_ERR(hash_tfm))
> + return PTR_ERR(hash_tfm);
> +
> + digestsize = crypto_ahash_digestsize(hash_tfm);
> + salt = kzalloc(digestsize, GFP_NOFS);
> + if (!salt) {
> + res = -ENOMEM;
> + goto out;
> + }
> +
> + req = ahash_request_alloc(hash_tfm, GFP_NOFS);
> + if (!req) {
> + kfree(salt);
> + res = -ENOMEM;
> + goto out;
> + }
> +
> + sg_init_one(&sg, raw_key, keysize);
> + ahash_request_set_callback(req,
> + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
> + NULL, NULL);
> + ahash_request_set_crypt(req, &sg, salt, keysize);
> +
> + res = crypto_ahash_digest(req);
> + if (res) {
> + kfree(salt);
> + goto out;
> + }
> +
> + *salt_out = salt;
> + *saltsize = digestsize;
> +
> +out:
> + crypto_free_ahash(hash_tfm);
> + ahash_request_free(req);
> + return res;
> +}
> +
> +static int init_essiv_generator(struct fscrypt_info *ci, u8 *raw_key,
> + int keysize)
> +{
> + int res;
> + struct crypto_cipher *essiv_tfm;
> + u8 *salt = NULL;
> + unsigned int saltsize;
> +
> + /* init ESSIV generator */
> + essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
> + if (!essiv_tfm || IS_ERR(essiv_tfm)) {
> + res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
> + goto err;
> + }
> +
> + res = derive_essiv_salt(raw_key, keysize, &salt, &saltsize);
> + if (res)
> + goto err;
> +
> + res = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
> + if (res)
> + goto err;
> +
> + ci->ci_essiv_tfm = essiv_tfm;
> +
> + return 0;
> +
> +err:
> + if (essiv_tfm && !IS_ERR(essiv_tfm))
> + crypto_free_cipher(essiv_tfm);
> +
> + kzfree(salt);
> + return res;
> +}

There are a few issues with how the ESSIV generator is initialized:

1.) It's allocating a possibly asynchronous SHA-256 implementation but then not
handling it actually being asynchronous. I suggest using the 'shash' API
which is easier to use.
2.) No one is going to change the digest size of SHA-256; it's fixed at 32
bytes. So just #include <crypto/sha.h> and allocate a 'u8
salt[SHA256_DIGEST_SIZE];' on the stack. Make sure to do
memzero_explicit(salt, sizeof(salt)) in all cases.
3.) It's always keying the ESSIV transform using a 256-bit AES key. It's still
secure of course, but I'm not sure it's what you intended, given that it's
used in combination with AES-128. I really think that someone who's more of
an expert on ESSIV really should weigh in, but my intuition is that you
really only need to be using AES-128, using the first 'keysize' bytes of the
hash.

You also don't really need to handle freeing the essiv_tfm on errors, as long as
you assign it to the fscrypt_info first. Also crypto_alloc_cipher() returns an
ERR_PTR(), never NULL.

Also, fs/crypto/Kconfig needs a 'select CRYPTO_SHA256' now.

Here's a revised version to consider, not actually tested though:

static int derive_essiv_salt(const u8 *raw_key, int keysize,
			     u8 salt[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *hash_tfm;

	hash_tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(hash_tfm)) {
		pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld",
				    PTR_ERR(hash_tfm));
		return PTR_ERR(hash_tfm);
	} else {
		SHASH_DESC_ON_STACK(desc, hash_tfm);
		int err;

		desc->tfm = hash_tfm;
		desc->flags = 0;

		BUG_ON(crypto_shash_digestsize(hash_tfm) != SHA256_DIGEST_SIZE);

		err = crypto_shash_digest(desc, raw_key, keysize, salt);
		crypto_free_shash(hash_tfm);
		return err;
	}
}

static int init_essiv_generator(struct fscrypt_info *ci,
				const u8 *raw_key, int keysize)
{
	struct crypto_cipher *essiv_tfm;
	u8 salt[SHA256_DIGEST_SIZE];
	int err;

	if (WARN_ON_ONCE(keysize > sizeof(salt)))
		return -EINVAL;

	essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(essiv_tfm))
		return PTR_ERR(essiv_tfm);

	ci->ci_essiv_tfm = essiv_tfm;

	err = derive_essiv_salt(raw_key, keysize, salt);
	if (err)
		goto out;

	err = crypto_cipher_setkey(essiv_tfm, salt, keysize);
out:
	memzero_explicit(salt, sizeof(salt));
	return err;
}

> +
> int fscrypt_get_encryption_info(struct inode *inode)
> {
> struct fscrypt_info *crypt_info;
> struct fscrypt_context ctx;
> struct crypto_skcipher *ctfm;
> +
> const char *cipher_str;
> int keysize;
> u8 *raw_key = NULL;
> @@ -207,6 +306,10 @@ int fscrypt_get_encryption_info(struct inode *inode)
> if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
> return -EINVAL;
>
> + if (!fscrypt_valid_enc_modes(ctx.contents_encryption_mode,
> + ctx.filenames_encryption_mode))
> + return -EINVAL;
> +

Indent this properly

> crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
> if (!crypt_info)
> return -ENOMEM;
> @@ -215,6 +318,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
> crypt_info->ci_data_mode = ctx.contents_encryption_mode;
> crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
> crypt_info->ci_ctfm = NULL;
> + crypt_info->ci_essiv_tfm = NULL;
> memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
> sizeof(crypt_info->ci_master_key));
>
> @@ -231,10 +335,12 @@ int fscrypt_get_encryption_info(struct inode *inode)
> if (!raw_key)
> goto out;
>
> - res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX);
> + res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
> + keysize);
> if (res && inode->i_sb->s_cop->key_prefix) {
> int res2 = validate_user_key(crypt_info, &ctx, raw_key,
> - inode->i_sb->s_cop->key_prefix);
> + inode->i_sb->s_cop->key_prefix,
> + keysize);
> if (res2) {
> if (res2 == -ENOKEY)
> res = -ENOKEY;
> @@ -246,9 +352,9 @@ int fscrypt_get_encryption_info(struct inode *inode)
> ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
> if (!ctfm || IS_ERR(ctfm)) {
> res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
> - printk(KERN_DEBUG
> - "%s: error %d (inode %u) allocating crypto tfm\n",
> - __func__, res, (unsigned) inode->i_ino);
> + pr_debug(
> + "%s: error %d (inode %u) allocating crypto tfm\n",
> + __func__, res, (unsigned int) inode->i_ino);
> goto out;

If you're changing this line just make it print i_ino as an 'unsigned long', no
need to cast it. Same below.
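
Something like:

	pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n",
		 __func__, res, inode->i_ino);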

> }
> crypt_info->ci_ctfm = ctfm;
> @@ -258,6 +364,15 @@ int fscrypt_get_encryption_info(struct inode *inode)
> if (res)
> goto out;
>
> + if (crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
> + res = init_essiv_generator(crypt_info, raw_key, keysize);
> + if (res) {
> + pr_debug(
> + "%s: error %d (inode %u) allocating essiv tfm\n",
> + __func__, res, (unsigned int) inode->i_ino);
> + goto out;
> + }
> + }
> if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
> crypt_info = NULL;
> out:
> diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
> index 4908906d54d5..bac8009245f2 100644
> --- a/fs/crypto/policy.c
> +++ b/fs/crypto/policy.c
> @@ -41,11 +41,8 @@ static int create_encryption_context_from_policy(struct inode *inode,
> memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
> FS_KEY_DESCRIPTOR_SIZE);
>
> - if (!fscrypt_valid_contents_enc_mode(
> - policy->contents_encryption_mode))
> - return -EINVAL;
> -
> - if (!fscrypt_valid_filenames_enc_mode(
> + if (!fscrypt_valid_enc_modes(
> + policy->contents_encryption_mode,
> policy->filenames_encryption_mode))
> return -EINVAL;

Indent properly:

	if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
				     policy->filenames_encryption_mode))

- Eric

2017-04-26 06:18:58

by David Gstir

Subject: Re: [PATCH v2] fscrypt: Add support for AES-128-CBC

Hi Eric!

Thanks for the feedback!

> On 25 Apr 2017, at 22:10, Eric Biggers <[email protected]> wrote:
>
> Hi Daniel and David,
>
> On Tue, Apr 25, 2017 at 04:41:00PM +0200, David Gstir wrote:
>> @@ -147,17 +148,28 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
>> {
>> struct {
>> __le64 index;
>> - u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
>> - } xts_tweak;
>> + u8 padding[FS_IV_SIZE - sizeof(__le64)];
>> + } blk_desc;
>> struct skcipher_request *req = NULL;
>> DECLARE_FS_COMPLETION_RESULT(ecr);
>> struct scatterlist dst, src;
>> struct fscrypt_info *ci = inode->i_crypt_info;
>> struct crypto_skcipher *tfm = ci->ci_ctfm;
>> int res = 0;
>> + u8 *iv = (u8 *)&blk_desc;
>>
>> BUG_ON(len == 0);
>>
>> + BUILD_BUG_ON(sizeof(blk_desc) != FS_IV_SIZE);
>> + BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
>> + blk_desc.index = cpu_to_le64(lblk_num);
>> + memset(blk_desc.padding, 0, sizeof(blk_desc.padding));
>> +
>> + if (ci->ci_essiv_tfm != NULL) {
>> + memset(iv, 0, sizeof(blk_desc));
>> + crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv, iv);
>> + }
>> +
>> req = skcipher_request_alloc(tfm, gfp_flags);
>> if (!req) {
>> printk_ratelimited(KERN_ERR
>> @@ -170,15 +182,11 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
>> req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
>> page_crypt_complete, &ecr);
>>
>> - BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
>> - xts_tweak.index = cpu_to_le64(lblk_num);
>> - memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
>> -
>> sg_init_table(&dst, 1);
>> sg_set_page(&dst, dest_page, len, offs);
>> sg_init_table(&src, 1);
>> sg_set_page(&src, src_page, len, offs);
>> - skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
>> + skcipher_request_set_crypt(req, &src, &dst, len, &iv);
>> if (rw == FS_DECRYPT)
>> res = crypto_skcipher_decrypt(req);
>> else
>
> There are two critical bugs here. First the IV is being zeroed before being
> encrypted with the ESSIV tfm, so the resulting IV will always be the same for a
> given file. It should be encrypting the block number instead. Second what's
> actually being passed into the crypto API is '&iv' where IV is a pointer to
> something on the stack... I really doubt that does what's intended :)
>
> Probably the cleanest way to do this correctly is to just have the struct be the
> 'iv', so there's no extra pointer to deal with:
>
> struct {
> __le64 index;
> u8 padding[FS_IV_SIZE - sizeof(__le64)];
> } iv;
>
> [...]
>
> BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
> BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
> iv.index = cpu_to_le64(lblk_num);
> memset(iv.padding, 0, sizeof(iv.padding));
>
> if (ci->ci_essiv_tfm != NULL) {
> crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
> (u8 *)&iv);
> }
>
> [...]
>
> skcipher_request_set_crypt(req, &src, &dst, len, &iv);

You are totally right. Somehow I completely missed that. :-/



>
>> +static int derive_essiv_salt(u8 *raw_key, int keysize, u8 **salt_out,
>> + unsigned int *saltsize)
>> +{
>> + int res;
>> + struct crypto_ahash *hash_tfm;
>> + unsigned int digestsize;
>> + u8 *salt = NULL;
>> + struct scatterlist sg;
>> + struct ahash_request *req = NULL;
>> +
>> + hash_tfm = crypto_alloc_ahash("sha256", 0, 0);
>> + if (IS_ERR(hash_tfm))
>> + return PTR_ERR(hash_tfm);
>> +
>> + digestsize = crypto_ahash_digestsize(hash_tfm);
>> + salt = kzalloc(digestsize, GFP_NOFS);
>> + if (!salt) {
>> + res = -ENOMEM;
>> + goto out;
>> + }
>> +
>> + req = ahash_request_alloc(hash_tfm, GFP_NOFS);
>> + if (!req) {
>> + kfree(salt);
>> + res = -ENOMEM;
>> + goto out;
>> + }
>> +
>> + sg_init_one(&sg, raw_key, keysize);
>> + ahash_request_set_callback(req,
>> + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
>> + NULL, NULL);
>> + ahash_request_set_crypt(req, &sg, salt, keysize);
>> +
>> + res = crypto_ahash_digest(req);
>> + if (res) {
>> + kfree(salt);
>> + goto out;
>> + }
>> +
>> + *salt_out = salt;
>> + *saltsize = digestsize;
>> +
>> +out:
>> + crypto_free_ahash(hash_tfm);
>> + ahash_request_free(req);
>> + return res;
>> +}
>> +
>> +static int init_essiv_generator(struct fscrypt_info *ci, u8 *raw_key,
>> + int keysize)
>> +{
>> + int res;
>> + struct crypto_cipher *essiv_tfm;
>> + u8 *salt = NULL;
>> + unsigned int saltsize;
>> +
>> + /* init ESSIV generator */
>> + essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
>> + if (!essiv_tfm || IS_ERR(essiv_tfm)) {
>> + res = essiv_tfm ? PTR_ERR(essiv_tfm) : -ENOMEM;
>> + goto err;
>> + }
>> +
>> + res = derive_essiv_salt(raw_key, keysize, &salt, &saltsize);
>> + if (res)
>> + goto err;
>> +
>> + res = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
>> + if (res)
>> + goto err;
>> +
>> + ci->ci_essiv_tfm = essiv_tfm;
>> +
>> + return 0;
>> +
>> +err:
>> + if (essiv_tfm && !IS_ERR(essiv_tfm))
>> + crypto_free_cipher(essiv_tfm);
>> +
>> + kzfree(salt);
>> + return res;
>> +}
>
> There are a few issues with how the ESSIV generator is initialized:
>
> 1.) It's allocating a possibly asynchronous SHA-256 implementation but then not
> handling it actually being asynchronous. I suggest using the 'shash' API
> which is easier to use.
> 2.) No one is going to change the digest size of SHA-256; it's fixed at 32
> bytes. So just #include <crypto/sha.h> and allocate a 'u8
> salt[SHA256_DIGEST_SIZE];' on the stack. Make sure to do
> memzero_explicit(salt, sizeof(salt)) in all cases.
> 3.) It's always keying the ESSIV transform using a 256-bit AES key. It's still
> secure of course, but I'm not sure it's what you intended, given that it's
> used in combination with AES-128. I really think that someone who's more of
> an expert on ESSIV really should weigh in, but my intuition is that you
> really only need to be using AES-128, using the first 'keysize' bytes of the
> hash.

My intention is to use all 256 bits we get from the hash. Yes, this will then use
AES-256 for the IV generation, but this will still yield just a 128 bit IV for
file contents encryption since the block size of AES is the same. So this is
just a case of using AES with a 256-bit key instead of a 128-bit one to generate
the IV that is then used for the AES-128 computations.

On the other hand, as you pointed out, truncating the hash and using AES-128 *should*
suffice too. Especially since we are using AES-128 everywhere else!

I'm also no expert on ESSIV, so I'm not 100% sure if there is something we're
missing here. If using AES-128 is okay, I'll change it to truncate the hash.


> You also don't really need to handle freeing the essiv_tfm on errors, as long as
> you assign it to the fscrypt_info first. Also crypto_alloc_cipher() returns an
> ERR_PTR(), never NULL.
>
> Also, fs/crypto/Kconfig needs a 'select CRYPTO_SHA256' now.
>
> Here's a revised version to consider, not actually tested though:
>
> static int derive_essiv_salt(const u8 *raw_key, int keysize,
> u8 salt[SHA256_DIGEST_SIZE])
> {
> struct crypto_shash *hash_tfm;
>
> hash_tfm = crypto_alloc_shash("sha256", 0, 0);
> if (IS_ERR(hash_tfm)) {
> pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld",
> PTR_ERR(hash_tfm));
> return PTR_ERR(hash_tfm);
> } else {
> SHASH_DESC_ON_STACK(desc, hash_tfm);
> int err;
>
> desc->tfm = hash_tfm;
> desc->flags = 0;
>
> BUG_ON(crypto_shash_digestsize(hash_tfm) != SHA256_DIGEST_SIZE);
>
> err = crypto_shash_digest(desc, raw_key, keysize, salt);
> crypto_free_shash(hash_tfm);
> return err;
> }
> }
>
> static int init_essiv_generator(struct fscrypt_info *ci,
> const u8 *raw_key, int keysize)
> {
> struct crypto_cipher *essiv_tfm;
> u8 salt[SHA256_DIGEST_SIZE];
> int err;
>
> if (WARN_ON_ONCE(keysize > sizeof(salt)))
> return -EINVAL;
>
> essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
> if (IS_ERR(essiv_tfm))
> return PTR_ERR(essiv_tfm);
>
> ci->ci_essiv_tfm = essiv_tfm;
>
> err = derive_essiv_salt(raw_key, keysize, salt);
> if (err)
> goto out;
>
> err = crypto_cipher_setkey(essiv_tfm, salt, keysize);
> out:
> memzero_explicit(salt, sizeof(salt));
> return err;
> }

Thanks a lot for that! Using shash and SHA256_DIGEST_SIZE makes this much cleaner.
I'll redo that for v3.

One optimization Richard pointed out is that we should do the
crypto_alloc_shash("sha256", 0, 0) just once and reuse hash_tfm for all sha256 computations.
This will save us some alloc/frees in derive_essiv_salt.

Thanks!
David

2017-04-26 21:56:32

by Eric Biggers

Subject: Re: [PATCH v2] fscrypt: Add support for AES-128-CBC

Hi David,

On Wed, Apr 26, 2017 at 08:18:51AM +0200, David Gstir wrote:
> > 3.) It's always keying the ESSIV transform using a 256-bit AES key. It's still
> > secure of course, but I'm not sure it's what you intended, given that it's
> > used in combination with AES-128. I really think that someone who's more of
> > an expert on ESSIV really should weigh in, but my intuition is that you
> > really only need to be using AES-128, using the first 'keysize' bytes of the
> > hash.
>
> My intention is to use all 256 bits we get from the hash. Yes, this will then use
> AES-256 for the IV generation, but this will still yield just a 128 bit IV for
> file contents encryption since the block size of AES is the same. So this is
> just a case of using AES with a 256 bit key over a 128 bit one which is then
> used for AES-128 computations.
>
> On the other hand, as you pointed out, truncating the hash and using AES-128 *should*
> suffice too. Especially since we are using AES-128 everywhere else!
>
> I'm also no export on ESSIV, so I'm not a 100% sure if there is something we're
> missing here. If using AES-128 is okay, I'll change it to truncate the hash.
>

After thinking about it some more I'm actually slightly leaning towards AES-256,
since it matches the size of the message digest being used. I think that's how
ESSIV was designed to work, since message digests are not necessarily designed
to be truncated. Also I doubt there would be any noticeable performance
difference from using AES-256 instead of AES-128, given that it's just for the
IV generation and not for the "real" encryption.

On the other hand, the message digest *is* hard-coded to SHA-256, and the
SHA-256 specification actually states that it can be truncated, with the
collision resistance decreased in the expected way.

>
> One optimization Richard pointed out is that we should do the
> crypto_alloc_shash("sha256", 0, 0) just once and reuse hash_tfm for all sha256 computations.
> This will save us some alloc/frees in derive_essiv_salt.
>

Yes, I had the same idea too. I suggest allocating it only the first time it's
used rather than always doing it in fscrypt_init(), since not everyone will be
needing it.
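
An untested sketch of that allocate-on-first-use idea (the static tfm is
shared by all users and simply never freed; names are illustrative only):

	static struct crypto_shash *essiv_hash_tfm;

	static int derive_essiv_salt(const u8 *key, int keysize,
				     u8 salt[SHA256_DIGEST_SIZE])
	{
		struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

		/* allocate the SHA-256 transform on first use only */
		if (!tfm) {
			tfm = crypto_alloc_shash("sha256", 0, 0);
			if (IS_ERR(tfm))
				return PTR_ERR(tfm);
			/* if we raced, keep the winner and free our copy */
			if (cmpxchg(&essiv_hash_tfm, NULL, tfm) != NULL) {
				crypto_free_shash(tfm);
				tfm = essiv_hash_tfm;
			}
		}

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			return crypto_shash_digest(desc, key, keysize, salt);
		}
	}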

- Eric