Hi all,

Today's linux-next merge of the btrfs tree got a conflict in:

  fs/btrfs/lzo.c

between commit:

  ccaa66c8dd27 ("Revert "btrfs: compression: drop kmap/kunmap from lzo"")

from Linus' tree and commit:

  d4088803f511 ("btrfs: subpage: make lzo_compress_pages() compatible")

from the btrfs tree.

I fixed it up (this may be completely wrong or incomplete :-( - see below)
and can carry the fix as necessary. This is now fixed as far as linux-next
is concerned, but any non trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging. You may
also want to consider cooperating with the maintainer of the conflicting
tree to minimise any particularly complex conflicts.
--
Cheers,
Stephen Rothwell
diff --cc fs/btrfs/lzo.c
index 3dbe6eb5fda7,00cffc183ec0..000000000000
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@@ -112,61 -112,126 +112,130 @@@ static inline size_t read_compress_leng
return le32_to_cpu(dlen);
}
+ /*
+ * Will do:
+ *
+ * - Write a segment header into the destination
+ * - Copy the compressed buffer into the destination
+ * - Make sure we have enough space in the last sector to fit a segment header
+ * If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
+ *
+ * Will allocate new pages when needed.
+ */
+ static int copy_compressed_data_to_page(char *compressed_data,
+ size_t compressed_size,
+ struct page **out_pages,
+ u32 *cur_out,
+ const u32 sectorsize)
+ {
+ u32 sector_bytes_left;
+ u32 orig_out;
+ struct page *cur_page;
+
+ /*
+ * We never allow a segment header crossing sector boundary, previous
+ * run should ensure we have enough space left inside the sector.
+ */
+ ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
+
+ cur_page = out_pages[*cur_out / PAGE_SIZE];
+ /* Allocate a new page */
+ if (!cur_page) {
+ cur_page = alloc_page(GFP_NOFS);
+ if (!cur_page)
+ return -ENOMEM;
+ out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ }
+
- write_compress_length(page_address(cur_page) + offset_in_page(*cur_out),
++ write_compress_length(kmap(cur_page) + offset_in_page(*cur_out),
+ compressed_size);
++ kunmap(cur_page);
+ *cur_out += LZO_LEN;
+
+ orig_out = *cur_out;
+
+ /* Copy compressed data */
+ while (*cur_out - orig_out < compressed_size) {
+ u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
+ orig_out + compressed_size - *cur_out);
+
+ cur_page = out_pages[*cur_out / PAGE_SIZE];
+ /* Allocate a new page */
+ if (!cur_page) {
+ cur_page = alloc_page(GFP_NOFS);
+ if (!cur_page)
+ return -ENOMEM;
+ out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ }
+
- memcpy(page_address(cur_page) + offset_in_page(*cur_out),
++ memcpy(kmap(cur_page) + offset_in_page(*cur_out),
+ compressed_data + *cur_out - orig_out, copy_len);
++ kunmap(cur_page);
+
+ *cur_out += copy_len;
+ }
+
+ /*
+ * Check if we can fit the next segment header into the remaining space
+ * of the sector.
+ */
+ sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
+ if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
+ return 0;
+
+ /* The remaining size is not enough, pad it with zeros */
- memset(page_address(cur_page) + offset_in_page(*cur_out), 0,
++ memset(kmap(cur_page) + offset_in_page(*cur_out), 0,
+ sector_bytes_left);
++ kunmap(cur_page);
+ *cur_out += sector_bytes_left;
+ return 0;
+ }
+
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
u64 start, struct page **pages, unsigned long *out_pages,
unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
+ struct page *page_in = NULL;
int ret = 0;
- char *data_in;
- char *cpage_out, *sizes_ptr;
- int nr_pages = 0;
- struct page *in_page = NULL;
- struct page *out_page = NULL;
- unsigned long bytes_left;
- unsigned long len = *total_out;
- unsigned long nr_dest_pages = *out_pages;
- const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
- size_t in_len;
- size_t out_len;
- char *buf;
- unsigned long tot_in = 0;
- unsigned long tot_out = 0;
- unsigned long pg_bytes_left;
- unsigned long out_offset;
- unsigned long bytes;
+ /* Points to the file offset of input data */
+ u64 cur_in = start;
+ /* Points to the current output byte */
+ u32 cur_out = 0;
+ u32 len = *total_out;
++ char *sizes_ptr;
*out_pages = 0;
*total_out = 0;
*total_in = 0;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- data_in = kmap(in_page);
-
/*
- * store the size of all chunks of compressed data in
- * the first 4 bytes
+ * Skip the header for now, we will later come back and write the total
+ * compressed size
*/
- out_page = alloc_page(GFP_NOFS);
- if (out_page == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- cpage_out = kmap(out_page);
- out_offset = LZO_LEN;
- tot_out = LZO_LEN;
- pages[0] = out_page;
- nr_pages = 1;
- pg_bytes_left = PAGE_SIZE - LZO_LEN;
-
- /* compress at most one page of data each time */
- in_len = min(len, PAGE_SIZE);
- while (tot_in < len) {
- ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
- &out_len, workspace->mem);
- if (ret != LZO_E_OK) {
- pr_debug("BTRFS: lzo in loop returned %d\n",
- ret);
+ cur_out += LZO_LEN;
+ while (cur_in < start + len) {
+ const u32 sectorsize_mask = sectorsize - 1;
+ u32 sector_off = (cur_in - start) & sectorsize_mask;
+ u32 in_len;
+ size_t out_len;
+
+ /* Get the input page first */
+ if (!page_in) {
+ page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
+ ASSERT(page_in);
+ }
+
+ /* Compress at most one sector of data each time */
+ in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
+ ASSERT(in_len);
- ret = lzo1x_1_compress(page_address(page_in) +
++ ret = lzo1x_1_compress(kmap(page_in) +
+ offset_in_page(cur_in), in_len,
+ workspace->cbuf, &out_len,
+ workspace->mem);
+ if (ret < 0) {
+ pr_debug("BTRFS: lzo in loop returned %d\n", ret);
ret = -EIO;
goto out;
}
@@@ -236,46 -252,21 +256,24 @@@
goto out;
}
- /* we're all done */
- if (tot_in >= len)
- break;
-
- if (tot_out > max_out)
- break;
-
- bytes_left = len - tot_in;
- kunmap(in_page);
- put_page(in_page);
-
- start += PAGE_SIZE;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- data_in = kmap(in_page);
- in_len = min(bytes_left, PAGE_SIZE);
- }
-
- if (tot_out >= tot_in) {
- ret = -E2BIG;
- goto out;
+ /* Check if we have reached page boundary */
+ if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
++ kunmap(page_in);
+ put_page(page_in);
+ page_in = NULL;
+ }
}
- /* store the size of all chunks of compressed data */
+ /* Store the size of all chunks of compressed data */
- write_compress_length(page_address(pages[0]), cur_out);
+ sizes_ptr = kmap_local_page(pages[0]);
- write_compress_length(sizes_ptr, tot_out);
++ write_compress_length(sizes_ptr, cur_out);
+ kunmap_local(sizes_ptr);
ret = 0;
- *total_out = tot_out;
- *total_in = tot_in;
+ *total_out = cur_out;
+ *total_in = cur_in - start;
out:
- *out_pages = nr_pages;
- if (out_page)
- kunmap(out_page);
-
- if (in_page) {
- kunmap(in_page);
- put_page(in_page);
- }
-
+ *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
return ret;
}
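
For readers following the resolution above: the conflict is essentially that
the btrfs-tree side had moved to page_address() while the revert in Linus'
tree brought back kmap()/kunmap(), so each page_address() call from the
btrfs side gets converted into a kmap()/kunmap() pair. A minimal standalone
sketch of that pattern follows; lzo_write_segment_hdr() is a made-up name
for illustration only, while write_compress_length() is the real static
helper in fs/btrfs/lzo.c and kmap()/kunmap()/offset_in_page() are the usual
kernel APIs:

#include <linux/highmem.h>	/* kmap(), kunmap() */
#include <linux/mm.h>		/* offset_in_page() */

static void lzo_write_segment_hdr(struct page *page, u32 byte_off, u32 len)
{
	/* kmap() returns a temporary kernel mapping, valid even for highmem */
	char *kaddr = kmap(page);

	/* store len as a little-endian u32 segment header at byte_off */
	write_compress_length(kaddr + offset_in_page(byte_off), len);
	kunmap(page);
}

page_address() is only guaranteed to work for lowmem pages, which is why
the revert (and hence this resolution) maps each page explicitly before
touching it.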
On Mon, Nov 01, 2021 at 10:53:41AM +1100, Stephen Rothwell wrote:
> Hi all,
>
> Today's linux-next merge of the btrfs tree got a conflict in:
>
> fs/btrfs/lzo.c
>
> between commit:
>
> ccaa66c8dd27 ("Revert "btrfs: compression: drop kmap/kunmap from lzo"")
>
> from Linus' tree and commit:
>
> d4088803f511 ("btrfs: subpage: make lzo_compress_pages() compatible")
>
> from the btrfs tree.
>
> I fixed it up (this may be completely wrong or incomplete :-( - see below)
> and can carry the fix as necessary. This is now fixed as far as linux-next
> is concerned, but any non trivial conflicts should be mentioned to your
> upstream maintainer when your tree is submitted for merging. You may
> also want to consider cooperating with the maintainer of the conflicting
> tree to minimise any particularly complex conflicts.
Thanks, it's a bit different from what I did as a proposed conflict
resolution, and Linus resolved it in yet another way. I'll refresh my
for-next branch today to minimize the conflict surface.
Hi David,
On Tue, 2 Nov 2021 11:42:44 +0100 David Sterba <[email protected]> wrote:
>
> Thanks, it's a bit different from what I did as a proposed conflict
> resolution, and Linus resolved it in yet another way. I'll refresh my
> for-next branch today to minimize the conflict surface.
Thanks. I really could only pattern-match my way to a resolution.
--
Cheers,
Stephen Rothwell