2018-02-12 09:43:36

by Chandan Rajendra

Subject: [RFC PATCH V2 00/11] Ext4 encryption support for blocksize < pagesize

This patchset implements support for encryption of Ext4 filesystem
instances whose blocksize is less than the pagesize. The patchset has
been tested on both ppc64 and x86_64 machines.

Eric, fscrypt_mpage_readpages() (originally ext4_mpage_readpages())
still retains the ability to read non-encrypted file data. Please let
me know if the code should be changed so that
fscrypt_mpage_readpages() requires the file's data to be encrypted.

TODO:
F2FS and UBIFS code needs to be updated to make use of the newly added
fscrypt functions. I will do that in the next version of the patchset.

Changelog:
"RFC V1" -> "RFC V2":
1. Ext4's "encryption aware" functionality in fs/ext4/readpage.c has
been moved to fs/crypto/.
2. fscrypt functions have now been renamed to indicate that they work
on blocks rather than pages.
Eric, I have renamed completion_pages() to fscrypt_complete_pages()
rather than to fscrypt_complete_blocks(). This is because we have a
new function fscrypt_complete_block() (which operates on a single
block) and IMHO having the identifier fscrypt_complete_blocks()
which differs from it by just one letter would confuse the reader.
3. ext4_block_write_begin() now clears the BH_Uptodate flag when
decryption of a boundary block fails.
4. fscrypt_encrypt_page() (now renamed to fscrypt_encrypt_block()) has
been split into two functions: fscrypt_prep_ciphertext_page()
allocates and initializes the fscrypt context and the bounce
page, while fscrypt_encrypt_block() is limited to encrypting a
single filesystem block.
5. fscrypt_zeroout_range() has been updated to work in the blocksize <
pagesize scenario.
6. Documentation/filesystems/fscrypt.rst has been updated to indicate
encryption support for blocksize < pagesize.

Thanks to Eric Biggers for providing review comments for "RFC V1".

Chandan Rajendra (11):
ext4: Clear BH_Uptodate flag on decryption error
fs/buffer.c: Export end_buffer_async_read and create_page_buffers
fs/crypto/: Rename functions to indicate that they operate on FS
blocks
completion_pages: Decrypt all contiguous blocks in a page
ext4: Decrypt all boundary blocks when doing buffered write
ext4: Decrypt the block that needs to be partially zeroed
fscrypt_zeroout_range: Encrypt all zeroed out blocks of a page
Enable reading encrypted files in blocksize less than pagesize setup
fscrypt: Move completion_pages to crypto/readpage.c
Enable writing encrypted files in blocksize less than pagesize setup
ext4: Enable encryption for blocksize less than page size

Documentation/filesystems/fscrypt.rst | 14 +-
fs/buffer.c | 6 +-
fs/crypto/Makefile | 2 +-
fs/crypto/bio.c | 77 +++---
fs/crypto/crypto.c | 91 +++---
fs/crypto/fscrypt_private.h | 5 +-
fs/crypto/readpage.c | 506 ++++++++++++++++++++++++++++++++++
fs/ext4/Makefile | 2 +-
fs/ext4/ext4.h | 5 -
fs/ext4/inode.c | 53 +++-
fs/ext4/page-io.c | 34 ++-
fs/ext4/readpage.c | 294 --------------------
fs/ext4/super.c | 7 -
include/linux/buffer_head.h | 3 +
include/linux/fscrypt.h | 1 +
include/linux/fscrypt_notsupp.h | 23 +-
include/linux/fscrypt_supp.h | 20 +-
17 files changed, 700 insertions(+), 443 deletions(-)
create mode 100644 fs/crypto/readpage.c
delete mode 100644 fs/ext4/readpage.c

--
2.9.5


2018-02-12 09:42:33

by Chandan Rajendra

Subject: [RFC PATCH V2 01/11] ext4: Clear BH_Uptodate flag on decryption error

On an error return from fscrypt_decrypt_page(), ext4_block_write_begin()
can return with the page's buffer_head still marked with the BH_Uptodate
flag. This commit clears the BH_Uptodate flag in such cases.

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/ext4/inode.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6f6589e..e8ecf67 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1231,9 +1231,13 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
- else if (decrypt)
+ else if (decrypt) {
err = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
+ if (err)
+ clear_buffer_uptodate(*wait_bh);
+ }
+
return err;
}
#endif
--
2.9.5

2018-02-12 09:43:39

by Chandan Rajendra

Subject: [RFC PATCH V2 03/11] fs/crypto/: Rename functions to indicate that they operate on FS blocks

The functions in fs/crypto/ actually operate on FS blocks rather than
pages. Hence this commit renames the functions to reflect this fact.

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/bio.c | 8 ++++----
fs/crypto/crypto.c | 22 +++++++++++-----------
fs/crypto/fscrypt_private.h | 2 +-
fs/ext4/inode.c | 4 ++--
fs/ext4/page-io.c | 2 +-
fs/ext4/readpage.c | 2 +-
include/linux/fscrypt_notsupp.h | 4 ++--
include/linux/fscrypt_supp.h | 6 +++---
8 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 0d5e6a5..efb0734 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -40,7 +40,7 @@ static void completion_pages(struct work_struct *work)

bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_page(page->mapping->host, page,
+ int ret = fscrypt_decrypt_block(page->mapping->host, page,
PAGE_SIZE, 0, page->index);

if (ret) {
@@ -55,13 +55,13 @@ static void completion_pages(struct work_struct *work)
bio_put(bio);
}

-void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx, struct bio *bio)
{
INIT_WORK(&ctx->r.work, completion_pages);
ctx->r.bio = bio;
queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
-EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+EXPORT_SYMBOL(fscrypt_decrypt_bio_blocks);

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
@@ -105,7 +105,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}

while (len--) {
- err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
+ err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
PAGE_SIZE, 0, GFP_NOFS);
if (err)
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 732a786..24e3796 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -126,10 +126,10 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
}
EXPORT_SYMBOL(fscrypt_get_ctx);

-int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags)
+int fscrypt_do_block_crypto(const struct inode *inode, fscrypt_direction_t rw,
+ u64 lblk_num, struct page *src_page,
+ struct page *dest_page, unsigned int len,
+ unsigned int offs, gfp_t gfp_flags)
{
struct {
__le64 index;
@@ -226,7 +226,7 @@ struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
* Return: A page with the encrypted content on success. Else, an
* error value or NULL.
*/
-struct page *fscrypt_encrypt_page(const struct inode *inode,
+struct page *fscrypt_encrypt_block(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs,
@@ -241,7 +241,7 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,

if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
/* with inplace-encryption we just encrypt the page */
- err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
+ err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk_num, page,
ciphertext_page, len, offs,
gfp_flags);
if (err)
@@ -262,7 +262,7 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
goto errout;

ctx->w.control_page = page;
- err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+ err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk_num,
page, ciphertext_page, len, offs,
gfp_flags);
if (err) {
@@ -278,7 +278,7 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
fscrypt_release_ctx(ctx);
return ciphertext_page;
}
-EXPORT_SYMBOL(fscrypt_encrypt_page);
+EXPORT_SYMBOL(fscrypt_encrypt_block);

/**
* fscrypt_decrypt_page() - Decrypts a page in-place
@@ -295,16 +295,16 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
*
* Return: Zero on success, non-zero otherwise.
*/
-int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
+int fscrypt_decrypt_block(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs, u64 lblk_num)
{
if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
BUG_ON(!PageLocked(page));

- return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
+ return fscrypt_do_block_crypto(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
}
-EXPORT_SYMBOL(fscrypt_decrypt_page);
+EXPORT_SYMBOL(fscrypt_decrypt_block);

/*
* Validate dentries for encrypted directories to make sure we aren't
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index c0b4f55..9821e97 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -74,7 +74,7 @@ typedef enum {
/* crypto.c */
extern int fscrypt_initialize(unsigned int cop_flags);
extern struct workqueue_struct *fscrypt_read_workqueue;
-extern int fscrypt_do_page_crypto(const struct inode *inode,
+extern int fscrypt_do_block_crypto(const struct inode *inode,
fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page,
struct page *dest_page,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e8ecf67..69a4fd6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1232,7 +1232,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt) {
- err = fscrypt_decrypt_page(page->mapping->host, page,
+ err = fscrypt_decrypt_block(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
if (err)
clear_buffer_uptodate(*wait_bh);
@@ -4022,7 +4022,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
- WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
+ WARN_ON_ONCE(fscrypt_decrypt_block(page->mapping->host,
page, PAGE_SIZE, 0, page->index));
}
}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db75901..0a4a1e7 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -482,7 +482,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;

retry_encrypt:
- data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
+ data_page = fscrypt_encrypt_block(inode, page, PAGE_SIZE, 0,
page->index, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 9ffa6fa..8b2789f 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -77,7 +77,7 @@ static void mpage_end_io(struct bio *bio)
if (bio->bi_status) {
fscrypt_release_ctx(bio->bi_private);
} else {
- fscrypt_decrypt_bio_pages(bio->bi_private, bio);
+ fscrypt_decrypt_bio_blocks(bio->bi_private, bio);
return;
}
}
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 63e5880..d726b53 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -35,7 +35,7 @@ static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
return ERR_PTR(-EOPNOTSUPP);
}

-static inline int fscrypt_decrypt_page(const struct inode *inode,
+static inline int fscrypt_decrypt_block(const struct inode *inode,
struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
@@ -161,7 +161,7 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}

/* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
+static inline void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx,
struct bio *bio)
{
return;
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index cf9e9fc..7720e4a 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -15,10 +15,10 @@
extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+extern struct page *fscrypt_encrypt_block(const struct inode *, struct page *,
unsigned int, unsigned int,
u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+extern int fscrypt_decrypt_block(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
extern void fscrypt_restore_control_page(struct page *);

@@ -139,7 +139,7 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}

/* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
--
2.9.5

2018-02-12 09:42:37

by Chandan Rajendra

Subject: [RFC PATCH V2 02/11] fs/buffer.c: Export end_buffer_async_read and create_page_buffers

The functions end_buffer_async_read() and create_page_buffers() will be
required by the fscrypt module to decrypt encrypted blocks that have
been read from the filesystem.

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/buffer.c | 6 ++++--
include/linux/buffer_head.h | 3 +++
2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 551b781..663180d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -256,7 +256,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
* I/O completion handler for block_read_full_page() - pages
* which come unlocked at the end of I/O.
*/
-static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first;
@@ -312,6 +312,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
local_irq_restore(flags);
return;
}
+EXPORT_SYMBOL(end_buffer_async_read);

/*
* Completion handler for block_write_full_page() - pages which are unlocked
@@ -1647,7 +1648,7 @@ static inline int block_size_bits(unsigned int blocksize)
return ilog2(blocksize);
}

-static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
{
BUG_ON(!PageLocked(page));

@@ -1656,6 +1657,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
b_state);
return page_buffers(page);
}
+EXPORT_SYMBOL(create_page_buffers);

/*
* NOTE! All mapped/uptodate combinations are valid:
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f1aed01..edc0983 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -161,7 +161,10 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
+struct buffer_head *create_page_buffers(struct page *page, struct inode *inode,
+ unsigned int b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_read(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

--
2.9.5

2018-02-12 09:43:41

by Chandan Rajendra

Subject: [RFC PATCH V2 05/11] ext4: Decrypt all boundary blocks when doing buffered write

With blocksize < pagesize, ext4_block_write_begin() can have up to two
boundary blocks to decrypt. Hence this commit invokes
fscrypt_decrypt_block() once for each of those blocks.
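
As an illustration (not part of the patch), assume a 4K page size and a
1K block size, i.e. bbits == 10. For a write into page index 3 whose
boundary buffer_head sits at bh_offset() == 2048, the block number
passed to fscrypt_decrypt_block() works out as:

        page_blk_nr = 3 << (PAGE_SHIFT - bbits);   /* 3 << 2 == 12 */
        block = page_blk_nr + (2048 >> bbits);     /* 12 + 2 == 14 */

Only that 1K block (len == wait[i]->b_size, offs == bh_offset(wait[i]))
is then decrypted, rather than the whole page.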

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/ext4/inode.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 69a4fd6..180dd2d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1158,12 +1158,13 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
- sector_t block;
+ sector_t block, page_blk_nr;
int err = 0;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
+ int i;

BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_SIZE);
@@ -1224,18 +1225,29 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
/*
* If we issued read requests, let them complete.
*/
- while (wait_bh > wait) {
- wait_on_buffer(*--wait_bh);
- if (!buffer_uptodate(*wait_bh))
+ for (i = 0; &wait[i] < wait_bh; i++) {
+ wait_on_buffer(wait[i]);
+ if (!buffer_uptodate(wait[i]))
err = -EIO;
}
- if (unlikely(err))
+ if (unlikely(err)) {
page_zero_new_buffers(page, from, to);
- else if (decrypt) {
- err = fscrypt_decrypt_block(page->mapping->host, page,
- PAGE_SIZE, 0, page->index);
- if (err)
- clear_buffer_uptodate(*wait_bh);
+ } else if (decrypt) {
+ page_blk_nr = (sector_t)page->index << (PAGE_SHIFT - bbits);
+
+ for (i = 0; &wait[i] < wait_bh; i++) {
+ int err2;
+
+ block = page_blk_nr + (bh_offset(wait[i]) >> bbits);
+ err2 = fscrypt_decrypt_block(page->mapping->host, page,
+ wait[i]->b_size,
+ bh_offset(wait[i]),
+ block);
+ if (err2) {
+ clear_buffer_uptodate(wait[i]);
+ err = err2;
+ }
+ }
}

return err;
--
2.9.5

2018-02-12 09:43:02

by Chandan Rajendra

Subject: [RFC PATCH V2 09/11] fscrypt: Move completion_pages to crypto/readpage.c

completion_pages() implements the endio processing for a bio that reads
file data from the disk. Hence this commit moves the function to the
fs/crypto/readpage.c file.

This commit also makes the callback function argument to
fscrypt_decrypt_bio_blocks() mandatory.

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/bio.c | 48 ++----------------------------------------------
fs/crypto/readpage.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 48 insertions(+), 47 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 265cba3..7188495 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,50 +26,6 @@
#include <linux/namei.h>
#include "fscrypt_private.h"

-/*
- * Call fscrypt_decrypt_page on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
- struct fscrypt_ctx *ctx =
- container_of(work, struct fscrypt_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
- struct inode *inode = page->mapping->host;
- const unsigned long blocksize = inode->i_sb->s_blocksize;
- const unsigned int blkbits = inode->i_blkbits;
- u64 page_blk = page->index << (PAGE_SHIFT - blkbits);
- u64 blk = page_blk + (bv->bv_offset >> blkbits);
- int nr_blks = bv->bv_len >> blkbits;
- int ret = 0;
- int j;
-
- for (j = 0; j < nr_blks; j++, blk++) {
- ret = fscrypt_decrypt_block(page->mapping->host,
- page, blocksize,
- bv->bv_offset + (j << blkbits),
- blk);
- if (ret)
- break;
- }
-
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else {
- SetPageUptodate(page);
- }
- unlock_page(page);
- }
- fscrypt_release_ctx(ctx);
- bio_put(bio);
-}
-
bool fscrypt_bio_encrypted(struct bio *bio)
{
if (bio->bi_vcnt) {
@@ -85,8 +41,8 @@ bool fscrypt_bio_encrypted(struct bio *bio)
void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx, struct bio *bio,
void (*process_bio)(struct work_struct *))
{
- INIT_WORK(&ctx->r.work,
- process_bio ? process_bio : completion_pages);
+ BUG_ON(!process_bio);
+ INIT_WORK(&ctx->r.work, process_bio);
ctx->r.bio = bio;
queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
diff --git a/fs/crypto/readpage.c b/fs/crypto/readpage.c
index 7372173..521c221 100644
--- a/fs/crypto/readpage.c
+++ b/fs/crypto/readpage.c
@@ -29,6 +29,50 @@

#include "fscrypt_private.h"

+/*
+ * Call fscrypt_decrypt_block on every single page, reusing the encryption
+ * context.
+ */
+static void fscrypt_complete_pages(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+ struct inode *inode = page->mapping->host;
+ const unsigned long blocksize = inode->i_sb->s_blocksize;
+ const unsigned int blkbits = inode->i_blkbits;
+ u64 page_blk = page->index << (PAGE_SHIFT - blkbits);
+ u64 blk = page_blk + (bv->bv_offset >> blkbits);
+ int nr_blks = bv->bv_len >> blkbits;
+ int ret = 0;
+ int j;
+
+ for (j = 0; j < nr_blks; j++, blk++) {
+ ret = fscrypt_decrypt_block(page->mapping->host,
+ page, blocksize,
+ bv->bv_offset + (j << blkbits),
+ blk);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ fscrypt_release_ctx(ctx);
+ bio_put(bio);
+}
+
static void fscrypt_complete_block(struct work_struct *work)
{
struct fscrypt_ctx *ctx =
@@ -108,7 +152,8 @@ static void fscrypt_mpage_end_io(struct bio *bio)
if (bio->bi_status) {
fscrypt_release_ctx(bio->bi_private);
} else {
- fscrypt_decrypt_bio_blocks(bio->bi_private, bio, NULL);
+ fscrypt_decrypt_bio_blocks(bio->bi_private, bio,
+ fscrypt_complete_pages);
return;
}
}
--
2.9.5

2018-02-12 09:42:54

by Chandan Rajendra

Subject: [RFC PATCH V2 07/11] fscrypt_zeroout_range: Encrypt all zeroed out blocks of a page

For blocksize < pagesize, this commit adds code to encrypt all the
zeroed-out blocks of a page.
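
As an illustration of the batching (not part of the patch), assume 4K
pages and 1K blocks, so that:

        page_nr_blks = 1 << (PAGE_SHIFT - inode->i_blkbits);   /* == 4 */

A request to zero out len == 10 blocks is then issued as three bios
covering 4, 4 and 2 blocks, and each outer iteration encrypts
min(page_nr_blks, len) blocks of ZERO_PAGE(0) into the bounce page at
offsets 0, 1024, 2048 and 3072 before the bio is submitted.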

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/bio.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 378df08..4d0d14f 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -104,10 +104,12 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
{
struct fscrypt_ctx *ctx;
struct page *ciphertext_page = NULL;
+ unsigned int page_nr_blks;
+ unsigned int offset;
+ unsigned int page_io_len;
struct bio *bio;
int ret, err = 0;
-
- BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
+ int i;

ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
@@ -119,12 +121,23 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
goto errout;
}

- while (len--) {
- err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page,
- PAGE_SIZE, 0, GFP_NOFS);
- if (err)
- goto errout;
+ page_nr_blks = 1 << (PAGE_SHIFT - inode->i_blkbits);
+
+ while (len) {
+ page_nr_blks = min_t(unsigned int, page_nr_blks, len);
+ page_io_len = page_nr_blks << inode->i_blkbits;
+ offset = 0;
+
+ for (i = 0; i < page_nr_blks; i++) {
+ err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page,
+ inode->i_sb->s_blocksize, offset,
+ GFP_NOFS);
+ if (err)
+ goto errout;
+ lblk++;
+ offset += inode->i_sb->s_blocksize;
+ }

bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
@@ -135,9 +148,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- ret = bio_add_page(bio, ciphertext_page,
- inode->i_sb->s_blocksize, 0);
- if (ret != inode->i_sb->s_blocksize) {
+ ret = bio_add_page(bio, ciphertext_page, page_io_len, 0);
+ if (ret != page_io_len) {
/* should never happen! */
WARN_ON(1);
bio_put(bio);
@@ -150,8 +162,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
bio_put(bio);
if (err)
goto errout;
- lblk++;
- pblk++;
+ pblk += page_nr_blks;
+ len -= page_nr_blks;
}
err = 0;
errout:
--
2.9.5

2018-02-12 09:43:09

by Chandan Rajendra

Subject: [RFC PATCH V2 11/11] ext4: Enable encryption for blocksize less than page size

Now that we have all the code to support encryption in the blocksize
less than pagesize scenario, this commit removes the corresponding
conditional check in the filesystem mount code.

The commit also updates the support statement in
Documentation/filesystems/fscrypt.rst to reflect the fact that
encryption of filesystems with blocksize less than pagesize now works.

Signed-off-by: Chandan Rajendra <[email protected]>
---
Documentation/filesystems/fscrypt.rst | 14 +++++++-------
fs/ext4/super.c | 7 -------
2 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 776ddc6..2147e53 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -202,13 +202,13 @@ modes are not currently supported because of the difficulty of dealing
with ciphertext expansion.

For file contents, each filesystem block is encrypted independently.
-Currently, only the case where the filesystem block size is equal to
-the system's page size (usually 4096 bytes) is supported. With the
-XTS mode of operation (recommended), the logical block number within
-the file is used as the IV. With the CBC mode of operation (not
-recommended), ESSIV is used; specifically, the IV for CBC is the
-logical block number encrypted with AES-256, where the AES-256 key is
-the SHA-256 hash of the inode's data encryption key.
+Starting from Linux kernel 4.17, encryption of filesystems with block
+size less than the system's page size is supported. With the XTS mode of
+operation (recommended), the logical block number within the file is
+used as the IV. With the CBC mode of operation (not recommended),
+ESSIV is used; specifically, the IV for CBC is the logical block
+number encrypted with AES-256, where the AES-256 key is the SHA-256
+hash of the inode's data encryption key.

For filenames, the full filename is encrypted at once. Because of the
requirements to retain support for efficient directory lookups and
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ebb7edb..3ec04cc 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4138,13 +4138,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
}

- if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
- (blocksize != PAGE_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Unsupported blocksize for fs encryption");
- goto failed_mount_wq;
- }

2018-02-12 09:42:50

by Chandan Rajendra

Subject: [RFC PATCH V2 06/11] ext4: Decrypt the block that needs to be partially zeroed

__ext4_block_zero_page_range() decrypts the entire page. This commit
changes the code to decrypt only the block that needs to be partially
zeroed, instead of the whole page.
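
As an illustration (not part of the patch), with a 1K block size and a
partial zero starting at byte offset 2500 within a 4K page:

        round_down(2500, blocksize) == 2048

so only the single 1K block at page offset 2048 (logical block iblock)
is decrypted, instead of all four blocks of the page.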

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/ext4/inode.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 180dd2d..aac0e04 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4034,9 +4034,10 @@ static int __ext4_block_zero_page_range(handle_t *handle,
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
- BUG_ON(blocksize != PAGE_SIZE);
WARN_ON_ONCE(fscrypt_decrypt_block(page->mapping->host,
- page, PAGE_SIZE, 0, page->index));
+ page, blocksize,
+ round_down(offset, blocksize),
+ iblock));
}
}
if (ext4_should_journal_data(inode)) {
--
2.9.5

2018-02-12 09:43:44

by Chandan Rajendra

Subject: [RFC PATCH V2 08/11] Enable reading encrypted files in blocksize less than pagesize setup

This commit moves fs/ext4/readpage.c to fs/crypto/readpage.c and adds
new functionality to enable reading encrypted files in the blocksize
less than pagesize setup.

Ext4 now uses the new fscrypt API when CONFIG_EXT4_FS_ENCRYPTION is
enabled. Otherwise it invokes the generic mpage_readpage[s]() functions.
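
For reference, here is a minimal sketch (using hypothetical myfs_*
names, not part of the patch) of how a filesystem would wire its
->readpages() to the new helper, mirroring the ext4 hunks below:

        static int myfs_readpages(struct file *file,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned nr_pages)
        {
        #ifdef CONFIG_MYFS_FS_ENCRYPTION
                /*
                 * fscrypt walks the pages, maps blocks through the
                 * filesystem's get_block callback and decrypts each
                 * block once the read completes.
                 */
                return fscrypt_mpage_readpages(mapping, pages, NULL,
                                               nr_pages, myfs_get_block);
        #else
                return mpage_readpages(mapping, pages, nr_pages,
                                       myfs_get_block);
        #endif
        }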

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/Makefile | 2 +-
fs/crypto/bio.c | 18 +-
fs/crypto/fscrypt_private.h | 3 +
fs/crypto/readpage.c | 461 ++++++++++++++++++++++++++++++++++++++++
fs/ext4/Makefile | 2 +-
fs/ext4/ext4.h | 5 -
fs/ext4/inode.c | 13 +-
fs/ext4/readpage.c | 294 -------------------------
include/linux/fscrypt.h | 1 +
include/linux/fscrypt_notsupp.h | 3 +-
include/linux/fscrypt_supp.h | 9 +-
11 files changed, 502 insertions(+), 309 deletions(-)
create mode 100644 fs/crypto/readpage.c
delete mode 100644 fs/ext4/readpage.c

diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..84400e9 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o

fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
-fscrypto-$(CONFIG_BLOCK) += bio.o
+fscrypto-$(CONFIG_BLOCK) += bio.o readpage.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 4d0d14f..265cba3 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -70,9 +70,23 @@ static void completion_pages(struct work_struct *work)
bio_put(bio);
}

-void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx, struct bio *bio)
+bool fscrypt_bio_encrypted(struct bio *bio)
{
- INIT_WORK(&ctx->r.work, completion_pages);
+ if (bio->bi_vcnt) {
+ struct inode *inode = bio->bi_io_vec->bv_page->mapping->host;
+
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
+ return true;
+ }
+
+ return false;
+}
+
+void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx, struct bio *bio,
+ void (*process_bio)(struct work_struct *))
+{
+ INIT_WORK(&ctx->r.work,
+ process_bio ? process_bio : completion_pages);
ctx->r.bio = bio;
queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 9821e97..63e2b10 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -71,6 +71,9 @@ typedef enum {
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002

+/* bio.c */
+extern bool fscrypt_bio_encrypted(struct bio *bio);
+
/* crypto.c */
extern int fscrypt_initialize(unsigned int cop_flags);
extern struct workqueue_struct *fscrypt_read_workqueue;
diff --git a/fs/crypto/readpage.c b/fs/crypto/readpage.c
new file mode 100644
index 0000000..7372173
--- /dev/null
+++ b/fs/crypto/readpage.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/fs/crypto/readpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This was originally taken from fs/ext4/readpage.c which in turn was
+ * taken from fs/mpage.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/bio.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/prefetch.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/cleancache.h>
+
+#include "fscrypt_private.h"
+
+static void fscrypt_complete_block(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct buffer_head *bh;
+ struct bio *bio;
+ struct bio_vec *bv;
+ struct page *page;
+ struct inode *inode;
+ u64 blk_nr;
+ int ret;
+
+ bio = ctx->r.bio;
+ BUG_ON(bio->bi_vcnt != 1);
+
+ bv = bio->bi_io_vec;
+ page = bv->bv_page;
+ inode = page->mapping->host;
+
+ BUG_ON(bv->bv_len != i_blocksize(inode));
+
+ blk_nr = page->index << (PAGE_SHIFT - inode->i_blkbits);
+ blk_nr += bv->bv_offset >> inode->i_blkbits;
+
+ bh = ctx->r.bh;
+
+ ret = fscrypt_decrypt_block(inode, page, bv->bv_len,
+ bv->bv_offset, blk_nr);
+
+ end_buffer_async_read(bh, !ret);
+
+ fscrypt_release_ctx(ctx);
+ bio_put(bio);
+}
+
+static void fscrypt_block_end_io(struct bio *bio)
+{
+ struct buffer_head *bh;
+
+ if (fscrypt_bio_encrypted(bio)) {
+ struct fscrypt_ctx *ctx = bio->bi_private;
+
+ bh = ctx->r.bh;
+
+ if (bio->bi_status) {
+ fscrypt_release_ctx(ctx);
+ } else {
+ fscrypt_decrypt_bio_blocks(ctx, bio,
+ fscrypt_complete_block);
+ return;
+ }
+ } else {
+ bh = bio->bi_private;
+ }
+
+ end_buffer_async_read(bh, !bio->bi_status);
+}
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this? If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard. See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static void fscrypt_mpage_end_io(struct bio *bio)
+{
+ struct bio_vec *bv;
+ int i;
+
+ if (fscrypt_bio_encrypted(bio)) {
+ if (bio->bi_status) {
+ fscrypt_release_ctx(bio->bi_private);
+ } else {
+ fscrypt_decrypt_bio_blocks(bio->bi_private, bio, NULL);
+ return;
+ }
+ }
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ if (!bio->bi_status) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ unlock_page(page);
+ }
+
+ bio_put(bio);
+}
+
+static int fscrypt_block_read_full_page(struct page *page, get_block_t *get_block)
+{
+ struct inode *inode = page->mapping->host;
+ struct fscrypt_ctx *ctx;
+ struct bio *bio;
+ sector_t iblock, lblock;
+ struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ unsigned int blocksize, bbits;
+ int nr, i;
+ int fully_mapped = 1;
+ int ret;
+
+ head = create_page_buffers(page, inode, 0);
+ blocksize = head->b_size;
+ bbits = inode->i_blkbits;
+
+ iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ lblock = (i_size_read(inode)+blocksize-1) >> bbits;
+ bh = head;
+ nr = 0;
+ i = 0;
+
+ do {
+ if (buffer_uptodate(bh))
+ continue;
+
+ if (!buffer_mapped(bh)) {
+ int err = 0;
+
+ fully_mapped = 0;
+ if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
+ err = get_block(inode, iblock, bh, 0);
+ if (err)
+ SetPageError(page);
+ }
+ if (!buffer_mapped(bh)) {
+ zero_user(page, i << bbits, blocksize);
+ if (!err)
+ set_buffer_uptodate(bh);
+ continue;
+ }
+ /*
+ * get_block() might have updated the buffer
+ * synchronously
+ */
+ if (buffer_uptodate(bh))
+ continue;
+ }
+ arr[nr++] = bh;
+ } while (i++, iblock++, (bh = bh->b_this_page) != head);
+
+ if (fully_mapped)
+ SetPageMappedToDisk(page);
+
+ if (!nr) {
+ /*
+ * All buffers are uptodate - we can set the page uptodate
+ * as well. But not if ext4_get_block() returned an error.
+ */
+ if (!PageError(page))
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+ }
+
+ /* Stage two: lock the buffers */
+ for (i = 0; i < nr; i++) {
+ bh = arr[i];
+ lock_buffer(bh);
+ set_buffer_async_read(bh);
+ }
+
+ /*
+ * Stage 3: start the IO. Check for uptodateness
+ * inside the buffer lock in case another process reading
+ * the underlying blockdev brought it uptodate (the sct fix).
+ */
+ for (i = 0; i < nr; i++) {
+ ctx = NULL;
+ bh = arr[i];
+
+ if (buffer_uptodate(bh)) {
+ end_buffer_async_read(bh, 1);
+ continue;
+ }
+
+ if (IS_ENCRYPTED(inode)
+ && S_ISREG(inode->i_mode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx)) {
+ set_page_error:
+ zero_user_segment(page, bh_offset(bh),
+ blocksize);
+ end_buffer_async_read(bh, 0);
+ continue;
+ }
+ ctx->r.bh = bh;
+ }
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ goto set_page_error;
+ }
+
+ bio->bi_iter.bi_sector = bh->b_blocknr * (blocksize >> 9);
+ bio_set_dev(bio, bh->b_bdev);
+ bio->bi_write_hint = 0;
+
+ ret = bio_add_page(bio, bh->b_page, blocksize, bh_offset(bh));
+ BUG_ON(bio->bi_iter.bi_size != blocksize);
+
+ bio->bi_end_io = fscrypt_block_end_io;
+ if (ctx)
+ bio->bi_private = ctx;
+ else
+ bio->bi_private = bh;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ submit_bio(bio);
+ }
+
+ return 0;
+}
+
+int fscrypt_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages, get_block_t get_block)
+{
+ struct bio *bio = NULL;
+ sector_t last_block_in_bio = 0;
+
+ struct inode *inode = mapping->host;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ sector_t block_in_file;
+ sector_t last_block;
+ sector_t last_block_in_file;
+ sector_t blocks[MAX_BUF_PER_PAGE];
+ unsigned page_block;
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ int length;
+ unsigned relative_block = 0;
+ struct buffer_head map_bh;
+ unsigned int nblocks;
+ unsigned long first_logical_block = 0;
+
+ map_bh.b_state = 0;
+ map_bh.b_size = 0;
+
+ for (; nr_pages; nr_pages--) {
+ int fully_mapped = 1;
+ unsigned first_hole = blocks_per_page;
+
+ prefetchw(&page->flags);
+ if (pages) {
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ readahead_gfp_mask(mapping)))
+ goto next_page;
+ }
+
+ if (page_has_buffers(page))
+ goto confused;
+
+ block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+ last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+ page_block = 0;
+
+ /*
+ * Map blocks using the previous result first.
+ */
+ nblocks = map_bh.b_size >> blkbits;
+ if (buffer_mapped(&map_bh) &&
+ block_in_file > first_logical_block &&
+ block_in_file < (first_logical_block + nblocks)) {
+ unsigned map_offset = block_in_file - first_logical_block;
+ unsigned last = nblocks - map_offset;
+
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == last) {
+ /* needed? */
+ clear_buffer_mapped(&map_bh);
+ break;
+ }
+ if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh.b_blocknr + map_offset +
+ relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+
+ /*
+ * Then do more get_blocks() calls until we are
+ * done with this page.
+ */
+ while (page_block < blocks_per_page) {
+ if (block_in_file < last_block) {
+ /*
+ * map.m_lblk = block_in_file;
+ * map.m_len = last_block - block_in_file;
+ */
+ map_bh.b_state = 0;
+ map_bh.b_size = (last_block - block_in_file) << blkbits;
+
+ if (get_block(inode, block_in_file, &map_bh, 0)) {
+ set_error_page:
+ SetPageError(page);
+ zero_user_segment(page, 0,
+ PAGE_SIZE);
+ unlock_page(page);
+ goto next_page;
+ }
+ first_logical_block = block_in_file;
+ }
+
+ if (!buffer_mapped(&map_bh)) {
+ fully_mapped = 0;
+ if (first_hole == blocks_per_page)
+ first_hole = page_block;
+ page_block++;
+ block_in_file++;
+ continue;
+ }
+ if (first_hole != blocks_per_page)
+ goto confused; /* hole -> non-hole */
+
+ /* Contiguous blocks? */
+ if (page_block && blocks[page_block-1] != map_bh.b_blocknr-1)
+ goto confused;
+ nblocks = map_bh.b_size >> blkbits;
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == nblocks) {
+ /* needed? */
+ clear_buffer_mapped(&map_bh);
+ break;
+ } else if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map_bh.b_blocknr+relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+ if (first_hole != blocks_per_page) {
+ zero_user_segment(page, first_hole << blkbits,
+ PAGE_SIZE);
+ if (first_hole == 0) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ goto next_page;
+ }
+ } else if (fully_mapped) {
+ SetPageMappedToDisk(page);
+ }
+ if (fully_mapped && blocks_per_page == 1 &&
+ !PageUptodate(page) && cleancache_get_page(page) == 0) {
+ SetPageUptodate(page);
+ goto confused;
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this
+ * BIO off first?
+ */
+ if (bio && (last_block_in_bio != blocks[0] - 1)) {
+ submit_and_realloc:
+ submit_bio(bio);
+ bio = NULL;
+ }
+ if (bio == NULL) {
+ struct fscrypt_ctx *ctx = NULL;
+ if (IS_ENCRYPTED(inode) &&
+ S_ISREG(inode->i_mode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ goto set_error_page;
+ }
+ bio = bio_alloc(GFP_KERNEL,
+ min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ goto set_error_page;
+ }
+ bio_set_dev(bio, bdev);
+ bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_end_io = fscrypt_mpage_end_io;
+ bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ }
+
+ length = first_hole << blkbits;
+ if (bio_add_page(bio, page, length, 0) < length)
+ goto submit_and_realloc;
+
+ relative_block = block_in_file - first_logical_block;
+ if ((buffer_boundary(&map_bh) && relative_block == nblocks) ||
+ (first_hole != blocks_per_page)) {
+ submit_bio(bio);
+ bio = NULL;
+ } else {
+ last_block_in_bio = blocks[blocks_per_page - 1];
+ }
+ goto next_page;
+ confused:
+ if (bio) {
+ submit_bio(bio);
+ bio = NULL;
+ }
+ if (!PageUptodate(page))
+ fscrypt_block_read_full_page(page, get_block);
+ else
+ unlock_page(page);
+ next_page:
+ if (pages)
+ put_page(page);
+ }
+ BUG_ON(pages && !list_empty(pages));
+ if (bio)
+ submit_bio(bio);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_mpage_readpages);
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 8fdfcd3..7c38803 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
- mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
+ mmp.o move_extent.o namei.o page-io.o resize.o \
super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o

ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 4e091eae..98c1b83 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3053,11 +3053,6 @@ static inline void ext4_set_de_type(struct super_block *sb,
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}

-/* readpages.c */
-extern int ext4_mpage_readpages(struct address_space *mapping,
- struct list_head *pages, struct page *page,
- unsigned nr_pages);
-
/* symlink.c */
extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
extern const struct inode_operations ext4_symlink_inode_operations;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index aac0e04..66b768c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3342,7 +3342,11 @@ static int ext4_readpage(struct file *file, struct page *page)
ret = ext4_readpage_inline(inode, page);

if (ret == -EAGAIN)
- return ext4_mpage_readpages(page->mapping, NULL, page, 1);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return fscrypt_mpage_readpages(page->mapping, NULL, page, 1, ext4_get_block);
+#else
+ return mpage_readpage(page, ext4_get_block);
+#endif

return ret;
}
@@ -3356,8 +3360,11 @@ ext4_readpages(struct file *file, struct address_space *mapping,
/* If the file has inline data, no need to do readpages. */
if (ext4_has_inline_data(inode))
return 0;
-
- return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return fscrypt_mpage_readpages(mapping, pages, NULL, nr_pages, ext4_get_block);
+#else
+ return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+#endif
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
deleted file mode 100644
index 8b2789f..0000000
--- a/fs/ext4/readpage.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/ext4/readpage.c
- *
- * Copyright (C) 2002, Linus Torvalds.
- * Copyright (C) 2015, Google, Inc.
- *
- * This was originally taken from fs/mpage.c
- *
- * The intent is the ext4_mpage_readpages() function here is intended
- * to replace mpage_readpages() in the general case, not just for
- * encrypted files. It has some limitations (see below), where it
- * will fall back to read_block_full_page(), but these limitations
- * should only be hit when page_size != block_size.
- *
- * This will allow us to attach a callback function to support ext4
- * encryption.
- *
- * If anything unusual happens, such as:
- *
- * - encountering a page which has buffers
- * - encountering a page which has a non-hole after a hole
- * - encountering a page with non-contiguous blocks
- *
- * then this code just gives up and calls the buffer_head-based read function.
- * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_SIZE setups.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/kdev_t.h>
-#include <linux/gfp.h>
-#include <linux/bio.h>
-#include <linux/fs.h>
-#include <linux/buffer_head.h>
-#include <linux/blkdev.h>
-#include <linux/highmem.h>
-#include <linux/prefetch.h>
-#include <linux/mpage.h>
-#include <linux/writeback.h>
-#include <linux/backing-dev.h>
-#include <linux/pagevec.h>
-#include <linux/cleancache.h>
-
-#include "ext4.h"
-
-static inline bool ext4_bio_encrypted(struct bio *bio)
-{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- return unlikely(bio->bi_private != NULL);
-#else
- return false;
-#endif
-}
-
-/*
- * I/O completion handler for multipage BIOs.
- *
- * The mpage code never puts partial pages into a BIO (except for end-of-file).
- * If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
- *
- * Why is this? If a page's completion depends on a number of different BIOs
- * which can complete in any order (or at the same time) then determining the
- * status of that page is hard. See end_buffer_async_read() for the details.
- * There is no point in duplicating all that complexity.
- */
-static void mpage_end_io(struct bio *bio)
-{
- struct bio_vec *bv;
- int i;
-
- if (ext4_bio_encrypted(bio)) {
- if (bio->bi_status) {
- fscrypt_release_ctx(bio->bi_private);
- } else {
- fscrypt_decrypt_bio_blocks(bio->bi_private, bio);
- return;
- }
- }
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
-
- if (!bio->bi_status) {
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
- }
-
- bio_put(bio);
-}
-
-int ext4_mpage_readpages(struct address_space *mapping,
- struct list_head *pages, struct page *page,
- unsigned nr_pages)
-{
- struct bio *bio = NULL;
- sector_t last_block_in_bio = 0;
-
- struct inode *inode = mapping->host;
- const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
- const unsigned blocksize = 1 << blkbits;
- sector_t block_in_file;
- sector_t last_block;
- sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
- unsigned page_block;
- struct block_device *bdev = inode->i_sb->s_bdev;
- int length;
- unsigned relative_block = 0;
- struct ext4_map_blocks map;
-
- map.m_pblk = 0;
- map.m_lblk = 0;
- map.m_len = 0;
- map.m_flags = 0;
-
- for (; nr_pages; nr_pages--) {
- int fully_mapped = 1;
- unsigned first_hole = blocks_per_page;
-
- prefetchw(&page->flags);
- if (pages) {
- page = list_entry(pages->prev, struct page, lru);
- list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping, page->index,
- readahead_gfp_mask(mapping)))
- goto next_page;
- }
-
- if (page_has_buffers(page))
- goto confused;
-
- block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + nr_pages * blocks_per_page;
- last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
- if (last_block > last_block_in_file)
- last_block = last_block_in_file;
- page_block = 0;
-
- /*
- * Map blocks using the previous result first.
- */
- if ((map.m_flags & EXT4_MAP_MAPPED) &&
- block_in_file > map.m_lblk &&
- block_in_file < (map.m_lblk + map.m_len)) {
- unsigned map_offset = block_in_file - map.m_lblk;
- unsigned last = map.m_len - map_offset;
-
- for (relative_block = 0; ; relative_block++) {
- if (relative_block == last) {
- /* needed? */
- map.m_flags &= ~EXT4_MAP_MAPPED;
- break;
- }
- if (page_block == blocks_per_page)
- break;
- blocks[page_block] = map.m_pblk + map_offset +
- relative_block;
- page_block++;
- block_in_file++;
- }
- }
-
- /*
- * Then do more ext4_map_blocks() calls until we are
- * done with this page.
- */
- while (page_block < blocks_per_page) {
- if (block_in_file < last_block) {
- map.m_lblk = block_in_file;
- map.m_len = last_block - block_in_file;
-
- if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
- set_error_page:
- SetPageError(page);
- zero_user_segment(page, 0,
- PAGE_SIZE);
- unlock_page(page);
- goto next_page;
- }
- }
- if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
- fully_mapped = 0;
- if (first_hole == blocks_per_page)
- first_hole = page_block;
- page_block++;
- block_in_file++;
- continue;
- }
- if (first_hole != blocks_per_page)
- goto confused; /* hole -> non-hole */
-
- /* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map.m_pblk-1)
- goto confused;
- for (relative_block = 0; ; relative_block++) {
- if (relative_block == map.m_len) {
- /* needed? */
- map.m_flags &= ~EXT4_MAP_MAPPED;
- break;
- } else if (page_block == blocks_per_page)
- break;
- blocks[page_block] = map.m_pblk+relative_block;
- page_block++;
- block_in_file++;
- }
- }
- if (first_hole != blocks_per_page) {
- zero_user_segment(page, first_hole << blkbits,
- PAGE_SIZE);
- if (first_hole == 0) {
- SetPageUptodate(page);
- unlock_page(page);
- goto next_page;
- }
- } else if (fully_mapped) {
- SetPageMappedToDisk(page);
- }
- if (fully_mapped && blocks_per_page == 1 &&
- !PageUptodate(page) && cleancache_get_page(page) == 0) {
- SetPageUptodate(page);
- goto confused;
- }
-
- /*
- * This page will go to BIO. Do we need to send this
- * BIO off first?
- */
- if (bio && (last_block_in_bio != blocks[0] - 1)) {
- submit_and_realloc:
- submit_bio(bio);
- bio = NULL;
- }
- if (bio == NULL) {
- struct fscrypt_ctx *ctx = NULL;
-
- if (ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- goto set_error_page;
- }
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
- goto set_error_page;
- }
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
- bio->bi_end_io = mpage_end_io;
- bio->bi_private = ctx;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- }
-
- length = first_hole << blkbits;
- if (bio_add_page(bio, page, length, 0) < length)
- goto submit_and_realloc;
-
- if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
- (relative_block == map.m_len)) ||
- (first_hole != blocks_per_page)) {
- submit_bio(bio);
- bio = NULL;
- } else
- last_block_in_bio = blocks[blocks_per_page - 1];
- goto next_page;
- confused:
- if (bio) {
- submit_bio(bio);
- bio = NULL;
- }
- if (!PageUptodate(page))
- block_read_full_page(page, ext4_get_block);
- else
- unlock_page(page);
- next_page:
- if (pages)
- put_page(page);
- }
- BUG_ON(pages && !list_empty(pages));
- if (bio)
- submit_bio(bio);
- return 0;
-}
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 08b4b40..98c51eb 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -34,6 +34,7 @@ struct fscrypt_ctx {
} w;
struct {
struct bio *bio;
+ struct buffer_head *bh;
struct work_struct work;
} r;
struct list_head free_list; /* Free list */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index d726b53..aeb6b6d 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -162,7 +162,8 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,

/* bio.c */
static inline void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx,
- struct bio *bio)
+ struct bio *bio,
+ void (*process_bio)(struct work_struct *))
{
return;
}
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 7720e4a..b4c5231 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -139,7 +139,10 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
}

/* bio.c */
-extern void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *, struct bio *);
+
+extern void fscrypt_decrypt_bio_blocks(struct fscrypt_ctx *ctx, struct bio *bio,
+ void (*process_bio)(struct work_struct *));
+extern bool fscrypt_bio_encrypted(struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@@ -153,5 +156,7 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *new_dentry,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
-
+extern int fscrypt_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages, get_block_t get_block);
#endif /* _LINUX_FSCRYPT_SUPP_H */
--
2.9.5

2018-02-12 09:43:40

by Chandan Rajendra

Subject: [RFC PATCH V2 04/11] completion_pages: Decrypt all contiguous blocks in a page

With blocksize < pagesize, a page can contain more than one block. Hence
this commit changes completion_pages() to invoke fscrypt_decrypt_block()
for each of the contiguous blocks mapped by the page.
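
As an illustration (not part of the patch), with 1K blocks and a
bio_vec covering a full 4K page at page index 2, the loop decrypts four
blocks:

        page_blk = 2 << (PAGE_SHIFT - blkbits);   /* 2 << 2 == 8 */
        nr_blks = bv->bv_len >> blkbits;          /* 4096 >> 10 == 4 */

i.e. logical blocks 8 to 11, each decrypted in place at page offset
bv->bv_offset + (j << blkbits).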

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/bio.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index efb0734..378df08 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -40,8 +40,23 @@ static void completion_pages(struct work_struct *work)

bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_block(page->mapping->host, page,
- PAGE_SIZE, 0, page->index);
+ struct inode *inode = page->mapping->host;
+ const unsigned long blocksize = inode->i_sb->s_blocksize;
+ const unsigned int blkbits = inode->i_blkbits;
+ u64 page_blk = page->index << (PAGE_SHIFT - blkbits);
+ u64 blk = page_blk + (bv->bv_offset >> blkbits);
+ int nr_blks = bv->bv_len >> blkbits;
+ int ret = 0;
+ int j;
+
+ for (j = 0; j < nr_blks; j++, blk++) {
+ ret = fscrypt_decrypt_block(page->mapping->host,
+ page, blocksize,
+ bv->bv_offset + (j << blkbits),
+ blk);
+ if (ret)
+ break;
+ }

if (ret) {
WARN_ON_ONCE(1);
--
2.9.5

2018-02-12 09:43:46

by Chandan Rajendra

Subject: [RFC PATCH V2 10/11] Enable writing encrypted files in blocksize less than pagesize setup

This commit splits the functionality of fscrypt_encrypt_block(). The
allocation of the fscrypt context and the ciphertext (bounce) page is
moved to a new function, fscrypt_prep_ciphertext_page();
fscrypt_encrypt_block() itself is reduced to encrypting a single
filesystem block.

ext4_bio_write_page() is modified to make appropriate use of the above
two functions.
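
For reference, a minimal sketch (a hypothetical helper, not part of the
patch) showing how the two functions are meant to be used together,
based on the signatures introduced below:

        static int myfs_encrypt_page_blocks(struct inode *inode,
                                            struct page *page,
                                            struct page **data_page)
        {
                unsigned int blocksize = i_blocksize(inode);
                u64 blk_nr = (u64)page->index <<
                                (PAGE_SHIFT - inode->i_blkbits);
                unsigned int offs;
                int err;

                /* Allocate the fscrypt context and bounce page once. */
                *data_page = fscrypt_prep_ciphertext_page(inode, page,
                                                          GFP_NOFS);
                if (IS_ERR(*data_page))
                        return PTR_ERR(*data_page);

                /* Encrypt each FS block of the page into the bounce page. */
                for (offs = 0; offs < PAGE_SIZE;
                     offs += blocksize, blk_nr++) {
                        err = fscrypt_encrypt_block(inode, page, *data_page,
                                                    blocksize, offs, blk_nr,
                                                    GFP_NOFS);
                        if (err)
                                return err;
                }
                return 0;
        }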

Signed-off-by: Chandan Rajendra <[email protected]>
---
fs/crypto/crypto.c | 71 +++++++++++++++++++++--------------------
fs/ext4/page-io.c | 34 +++++++++++++-------
include/linux/fscrypt_notsupp.h | 16 +++++++---
include/linux/fscrypt_supp.h | 9 ++++--
4 files changed, 75 insertions(+), 55 deletions(-)

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 24e3796..8fb27ee 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -195,6 +195,35 @@ struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
return ctx->w.bounce_page;
}

+struct page *fscrypt_prep_ciphertext_page(struct inode *inode,
+ struct page *page,
+ gfp_t gfp_flags)
+{
+ struct page *ciphertext_page;
+ struct fscrypt_ctx *ctx;
+
+ BUG_ON(!PageLocked(page));
+
+ ctx = fscrypt_get_ctx(inode, gfp_flags);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ ctx->w.control_page = page;
+
+ ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
+ if (IS_ERR(ciphertext_page)) {
+ fscrypt_release_ctx(ctx);
+ return ERR_CAST(ciphertext_page);
+ }
+
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+
+ return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_prep_ciphertext_page);
+
/**
* fscypt_encrypt_page() - Encrypts a page
* @inode: The inode for which the encryption should take place
@@ -226,15 +255,11 @@ struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
* Return: A page with the encrypted content on success. Else, an
* error value or NULL.
*/
-struct page *fscrypt_encrypt_block(const struct inode *inode,
- struct page *page,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
-
+int fscrypt_encrypt_block(const struct inode *inode,
+ struct page *page, struct page *ciphertext_page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
{
- struct fscrypt_ctx *ctx;
- struct page *ciphertext_page = page;
int err;

BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
@@ -242,41 +267,17 @@ struct page *fscrypt_encrypt_block(const struct inode *inode,
if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
/* with inplace-encryption we just encrypt the page */
err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk_num, page,
- ciphertext_page, len, offs,
- gfp_flags);
- if (err)
- return ERR_PTR(err);
-
- return ciphertext_page;
+ page, len, offs, gfp_flags);
+ return err;
}

BUG_ON(!PageLocked(page));

- ctx = fscrypt_get_ctx(inode, gfp_flags);
- if (IS_ERR(ctx))
- return (struct page *)ctx;
-
- /* The encryption operation will require a bounce page. */
- ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
- if (IS_ERR(ciphertext_page))
- goto errout;
-
- ctx->w.control_page = page;
err = fscrypt_do_block_crypto(inode, FS_ENCRYPT, lblk_num,
page, ciphertext_page, len, offs,
gfp_flags);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- goto errout;
- }
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
+ return err;

-errout:
- fscrypt_release_ctx(ctx);
- return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_block);

diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0a4a1e7..1e869d5 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -419,9 +419,12 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
struct inode *inode = page->mapping->host;
unsigned block_start;
struct buffer_head *bh, *head;
+ u64 blk_nr;
+ gfp_t gfp_flags = GFP_NOFS;
int ret = 0;
int nr_submitted = 0;
int nr_to_submit = 0;
+ int blocksize = (1 << inode->i_blkbits);

BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
@@ -475,15 +478,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
nr_to_submit++;
} while ((bh = bh->b_this_page) != head);

- bh = head = page_buffers(page);
-
- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
- nr_to_submit) {
- gfp_t gfp_flags = GFP_NOFS;
-
- retry_encrypt:
- data_page = fscrypt_encrypt_block(inode, page, PAGE_SIZE, 0,
- page->index, gfp_flags);
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+ && nr_to_submit) {
+ retry_prep_ciphertext_page:
+ data_page = fscrypt_prep_ciphertext_page(inode, page,
+ gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
@@ -492,17 +491,28 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
congestion_wait(BLK_RW_ASYNC, HZ/50);
}
gfp_flags |= __GFP_NOFAIL;
- goto retry_encrypt;
+ goto retry_prep_ciphertext_page;
}
data_page = NULL;
goto out;
}
}

+ blk_nr = page->index << (PAGE_SHIFT - inode->i_blkbits);
+
/* Now submit buffers to write */
+ bh = head = page_buffers(page);
do {
if (!buffer_async_write(bh))
continue;
+
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ ret = fscrypt_encrypt_block(inode, page, data_page, blocksize,
+ bh_offset(bh), blk_nr, gfp_flags);
+ if (ret)
+ break;
+ }
+
ret = io_submit_add_bh(io, inode,
data_page ? data_page : page, bh);
if (ret) {
@@ -515,12 +525,12 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
}
nr_submitted++;
clear_buffer_dirty(bh);
- } while ((bh = bh->b_this_page) != head);
+ } while (++blk_nr, (bh = bh->b_this_page) != head);

/* Error stopped previous loop? Clean up buffers... */
if (ret) {
out:
- if (data_page)
+ if (data_page && bh == head)
fscrypt_restore_control_page(data_page);
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
redirty_page_for_writepage(wbc, page);
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index aeb6b6d..3b0a53b 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -26,15 +26,21 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
return;
}

-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
+static inline struct page *fscrypt_prep_ciphertext_page(struct inode *inode,
+ struct page *page,
+ gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}

+static inline int fscrypt_encrypt_block(const struct inode *inode,
+ struct page *page, struct page *ciphertext_page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_decrypt_block(const struct inode *inode,
struct page *page,
unsigned int len, unsigned int offs,
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index b4c5231..68a5e91 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -15,9 +15,12 @@
extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_block(const struct inode *, struct page *,
- unsigned int, unsigned int,
- u64, gfp_t);
+extern struct page *fscrypt_prep_ciphertext_page(struct inode *, struct page *,
+ gfp_t);
+extern int fscrypt_encrypt_block(const struct inode *inode,
+ struct page *page, struct page *ciphertext_page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags);
extern int fscrypt_decrypt_block(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
extern void fscrypt_restore_control_page(struct page *);
--
2.9.5