2022-03-07 01:38:24

by NeilBrown

Subject: [PATCH 09/10] MM: submit multipage write for SWP_FS_OPS swap-space

swap_writepage() is given one page at a time, but may be called repeatedly
in succession.
For block-device swapspace, the blk_plug functionality allows multiple
pages to be combined at lower layers.
That cannot be used for SWP_FS_OPS as blk_plug may not exist - it is
only available when CONFIG_BLOCK=y. Consequently all swap writes over
NFS are single-page writes.

With this patch we pass a pointer-to-pointer via the wbc so that
swap_writepage() can store state between calls - much like the pointer
passed explicitly to swap_readpage(). After swap_writepage() has been
called some number of times, the accumulated state is passed to
swap_write_unplug(), which submits the combined request.
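
To illustrate the intended calling pattern, here is a condensed sketch
of the vmscan.c change below (in practice pageout() builds a fresh wbc
for each page, and page1/page2 stand for whichever dirty swap-cache
pages reclaim has selected):

	struct swap_iocb *plug = NULL;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= SWAP_CLUSTER_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
		.for_reclaim	= 1,
		.swap_plug	= &plug,
	};

	/* Each call either appends the page to the pending swap_iocb, or
	 * submits it and starts a new one if the write is not contiguous.
	 */
	swap_writepage(page1, &wbc);
	swap_writepage(page2, &wbc);

	/* Submit whatever is still queued. */
	if (plug)
		swap_write_unplug(plug);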

Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
---
include/linux/writeback.h | 7 ++++
mm/page_io.c | 78 ++++++++++++++++++++++++++++++++-------------
mm/swap.h | 4 ++
mm/vmscan.c | 9 ++++-
4 files changed, 74 insertions(+), 24 deletions(-)

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fec248ab1fec..32b35f21cb97 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -80,6 +80,13 @@ struct writeback_control {

unsigned punt_to_cgroup:1; /* cgrp punting, see __REQ_CGROUP_PUNT */

+ /* To enable batching of swap writes to non-block-device backends,
+ * "swap_plug" can be set to point to a 'struct swap_iocb *'. When all
+ * swap writes have been submitted, if the swap_iocb is not NULL,
+ * swap_write_unplug() should be called.
+ */
+ struct swap_iocb **swap_plug;
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb; /* wb this writeback is issued under */
struct inode *inode; /* inode being written out */
diff --git a/mm/page_io.c b/mm/page_io.c
index 4e8abbfbe388..3bf6547d6789 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -308,8 +308,9 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
{
struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
struct page *page = sio->bvec[0].bv_page;
+ int p;

- if (ret != PAGE_SIZE) {
+ if (ret != PAGE_SIZE * sio->pages) {
/*
* In the case of swap-over-nfs, this can be a
* temporary failure if the system has limited
@@ -320,43 +321,63 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
* the normal direct-to-bio case as it could
* be temporary.
*/
- set_page_dirty(page);
- ClearPageReclaim(page);
pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
ret, page_file_offset(page));
+ for (p = 0; p < sio->pages; p++) {
+ page = sio->bvec[p].bv_page;
+ set_page_dirty(page);
+ ClearPageReclaim(page);
+ }
} else
- count_vm_event(PSWPOUT);
- end_page_writeback(page);
+ count_vm_events(PSWPOUT, sio->pages);
+
+ for (p = 0; p < sio->pages; p++)
+ end_page_writeback(sio->bvec[p].bv_page);
+
mempool_free(sio, sio_pool);
}

static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
- struct swap_iocb *sio;
+ struct swap_iocb *sio = NULL;
struct swap_info_struct *sis = page_swap_info(page);
struct file *swap_file = sis->swap_file;
- struct address_space *mapping = swap_file->f_mapping;
- struct iov_iter from;
- int ret;
+ loff_t pos = page_file_offset(page);

set_page_writeback(page);
unlock_page(page);
- sio = mempool_alloc(sio_pool, GFP_NOIO);
- init_sync_kiocb(&sio->iocb, swap_file);
- sio->iocb.ki_complete = sio_write_complete;
- sio->iocb.ki_pos = page_file_offset(page);
- sio->bvec[0].bv_page = page;
- sio->bvec[0].bv_len = PAGE_SIZE;
- sio->bvec[0].bv_offset = 0;
- iov_iter_bvec(&from, WRITE, &sio->bvec[0], 1, PAGE_SIZE);
- ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
- if (ret != -EIOCBQUEUED)
- sio_write_complete(&sio->iocb, ret);
- return ret;
+ if (wbc->swap_plug)
+ sio = *wbc->swap_plug;
+ if (sio) {
+ if (sio->iocb.ki_filp != swap_file ||
+ sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+ swap_write_unplug(sio);
+ sio = NULL;
+ }
+ }
+ if (!sio) {
+ sio = mempool_alloc(sio_pool, GFP_NOIO);
+ init_sync_kiocb(&sio->iocb, swap_file);
+ sio->iocb.ki_complete = sio_write_complete;
+ sio->iocb.ki_pos = pos;
+ sio->pages = 0;
+ }
+ sio->bvec[sio->pages].bv_page = page;
+ sio->bvec[sio->pages].bv_len = PAGE_SIZE;
+ sio->bvec[sio->pages].bv_offset = 0;
+ sio->pages += 1;
+ if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
+ swap_write_unplug(sio);
+ sio = NULL;
+ }
+ if (wbc->swap_plug)
+ *wbc->swap_plug = sio;
+
+ return 0;
}

int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func)
+ bio_end_io_t end_write_func)
{
struct bio *bio;
int ret;
@@ -393,6 +414,19 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
return 0;
}

+void swap_write_unplug(struct swap_iocb *sio)
+{
+ struct iov_iter from;
+ struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+ int ret;
+
+ iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages,
+ PAGE_SIZE * sio->pages);
+ ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+ if (ret != -EIOCBQUEUED)
+ sio_write_complete(&sio->iocb, ret);
+}
+
static void sio_read_complete(struct kiocb *iocb, long ret)
{
struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
diff --git a/mm/swap.h b/mm/swap.h
index 0389ab147837..a6da8f612904 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -16,6 +16,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
if (unlikely(plug))
__swap_read_unplug(plug);
}
+void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
void end_swap_bio_write(struct bio *bio);
int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -71,6 +72,9 @@ static inline int swap_readpage(struct page *page, bool do_poll,
{
return 0;
}
+static inline void swap_write_unplug(struct swap_iocb *sio)
+{
+}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ffae4ba82eae..1918650abf39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1166,7 +1166,8 @@ typedef enum {
* pageout is called by shrink_page_list() for each dirty page.
* Calls ->writepage().
*/
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping,
+ struct swap_iocb **plug)
{
/*
* If the page is dirty, only perform writeback if that write
@@ -1213,6 +1214,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
.range_start = 0,
.range_end = LLONG_MAX,
.for_reclaim = 1,
+ .swap_plug = plug,
};

SetPageReclaim(page);
@@ -1539,6 +1541,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
+ struct swap_iocb *plug = NULL;

memset(stat, 0, sizeof(*stat));
cond_resched();
@@ -1819,7 +1822,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
* starts and then write it out here.
*/
try_to_unmap_flush_dirty();
- switch (pageout(page, mapping)) {
+ switch (pageout(page, mapping, &plug)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
@@ -1973,6 +1976,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);

+ if (plug)
+ swap_write_unplug(plug);
return nr_reclaimed;
}




2022-04-26 07:52:08

by NeilBrown

Subject: Re: [PATCH 09/10] MM: submit multipage write for SWP_FS_OPS swap-space

On Mon, 18 Apr 2022, Miaohe Lin wrote:
> On 2022/3/30 7:49, NeilBrown wrote:
> > swap_writepage() is given one page at a time, but may be called repeatedly
> > in succession.
> > For block-device swapspace, the blk_plug functionality allows the
> > multiple pages to be combined together at lower layers.
> > That cannot be used for SWP_FS_OPS as blk_plug may not exist - it is
> > only available when CONFIG_BLOCK=y. Consequently all swap writes over
> > NFS are single-page writes.
> >
> > With this patch we pass a pointer-to-pointer via the wbc.
> > swap_writepage can store state between calls - much like the pointer
> > passed explicitly to swap_readpage. After calling swap_writepage() some
> > number of times, the state will be passed to swap_write_unplug() which
> > can submit the combined request.
> >
> > Reviewed-by: Christoph Hellwig <[email protected]>
> > Signed-off-by: NeilBrown <[email protected]>
> ...
> >
> > static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
> > {
> > - struct swap_iocb *sio;
> > + struct swap_iocb *sio = NULL;
> > struct swap_info_struct *sis = page_swap_info(page);
> > struct file *swap_file = sis->swap_file;
> > - struct address_space *mapping = swap_file->f_mapping;
> > - struct iov_iter from;
> > - int ret;
> > + loff_t pos = page_file_offset(page);
> >
> > set_page_writeback(page);
> > unlock_page(page);
> > - sio = mempool_alloc(sio_pool, GFP_NOIO);
> > - init_sync_kiocb(&sio->iocb, swap_file);
> > - sio->iocb.ki_complete = sio_write_complete;
> > - sio->iocb.ki_pos = page_file_offset(page);
> > - sio->bvec[0].bv_page = page;
> > - sio->bvec[0].bv_len = PAGE_SIZE;
> > - sio->bvec[0].bv_offset = 0;
> > - iov_iter_bvec(&from, WRITE, &sio->bvec[0], 1, PAGE_SIZE);
> > - ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
> > - if (ret != -EIOCBQUEUED)
> > - sio_write_complete(&sio->iocb, ret);
> > - return ret;
> > + if (wbc->swap_plug)
> > + sio = *wbc->swap_plug;
> > + if (sio) {
> > + if (sio->iocb.ki_filp != swap_file ||
> > + sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
> > + swap_write_unplug(sio);
> > + sio = NULL;
> > + }
> > + }
> > + if (!sio) {
> > + sio = mempool_alloc(sio_pool, GFP_NOIO);
> > + init_sync_kiocb(&sio->iocb, swap_file);
> > + sio->iocb.ki_complete = sio_write_complete;
> > + sio->iocb.ki_pos = pos;
> > + sio->pages = 0;
> > + }
> > + sio->bvec[sio->pages].bv_page = page;
> > + sio->bvec[sio->pages].bv_len = PAGE_SIZE;
>
> Many thanks for your patch. And sorry for the late response and the newbie question. Does swap_writepage_fs()
> support transhuge pages now? We could come across a transhuge page here, but bv_len == PAGE_SIZE and pages
> == 1 are assumed here. Do we need something like the below:
>
> sio->bvec[sio->pages].bv_len = thp_size(page);
> sio->pages += thp_nr_pages(page);

Yes, that probably makes sense. I'll have a closer look and maybe
resend later this week.
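
Off the top of my head - completely untested - I imagine the appending
step would end up looking something like:

	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;

where 'len' would be a new field in struct swap_iocb recording the
total bytes queued, as "PAGE_SIZE * sio->pages" would no longer be the
right length for the iov_iter or for the completion check in
sio_write_complete().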

Thanks,
NeilBrown


>
> Thanks! :)
>