Convert the only caller to work on folios instead of pages.
This removes the last caller of put_compound_head(), so delete it.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
include/linux/mm.h | 4 ++--
mm/gup.c | 38 ++++++++++++++++++--------------------
2 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c103c6401ecd..1ddb0a55b5ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -216,10 +216,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
-#define page_nth(head, tail) (page_to_pfn(tail) - page_to_pfn(head))
+#define folio_nth(folio, p) (page_to_pfn(p) - folio_pfn(folio))
#else
#define nth_page(page,n) ((page) + (n))
-#define page_nth(head, tail) ((tail) - (head))
+#define folio_nth(folio, p) ((p) - &(folio)->page)
#endif
/* to align the pointer to the (next) page boundary */
diff --git a/mm/gup.c b/mm/gup.c
index 0cf2d5fd8d2d..1cdd5f2887a8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -156,12 +156,6 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
folio_put_refs(folio, refs);
}
-static void put_compound_head(struct page *page, int refs, unsigned int flags)
-{
- VM_BUG_ON_PAGE(PageTail(page), page);
- gup_put_folio((struct folio *)page, refs, flags);
-}
-
/**
* try_grab_page() - elevate a page's refcount by a flag-dependent amount
*
@@ -204,20 +198,21 @@ void unpin_user_page(struct page *page)
}
EXPORT_SYMBOL(unpin_user_page);
-static inline struct page *compound_range_next(unsigned long i,
+static inline struct folio *gup_folio_range_next(unsigned long i,
unsigned long npages, struct page *start, unsigned int *ntails)
{
- struct page *next, *page;
+ struct page *next;
+ struct folio *folio;
unsigned int nr = 1;
next = nth_page(start, i);
- page = compound_head(next);
- if (PageHead(page))
+ folio = page_folio(next);
+ if (folio_test_large(folio))
nr = min_t(unsigned int, npages - i,
- compound_nr(page) - page_nth(page, next));
+ folio_nr_pages(folio) - folio_nth(folio, next));
*ntails = nr;
- return page;
+ return folio;
}
static inline struct folio *gup_folio_next(unsigned long i,
@@ -326,15 +321,18 @@ EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty)
{
- unsigned long index;
- struct page *head;
- unsigned int ntails;
+ unsigned long i;
+ struct folio *folio;
+ unsigned int nr;
- for (index = 0; index < npages; index += ntails) {
- head = compound_range_next(index, npages, page, &ntails);
- if (make_dirty && !PageDirty(head))
- set_page_dirty_lock(head);
- put_compound_head(head, ntails, FOLL_PIN);
+ for (i = 0; i < npages; i += nr) {
+ folio = gup_folio_range_next(i, npages, page, &nr);
+ if (make_dirty && !folio_test_dirty(folio)) {
+ folio_lock(folio);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ }
+ gup_put_folio(folio, nr, FOLL_PIN);
}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
--
2.33.0
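
[An aside on the folio_nth() macro added above: its second parameter is
deliberately not named "page", because the !SPARSEMEM variant's body
dereferences the struct folio member of the same name and the
preprocessor would substitute the argument there as well.

To illustrate the arithmetic, here is a minimal sketch -- the function
name walk_folio_range is made up and is not part of the patch -- that
walks a physically contiguous page range one folio at a time using only
the helpers this patch touches:

	static void walk_folio_range(struct page *start, unsigned long npages)
	{
		unsigned long i;
		unsigned int nr;

		for (i = 0; i < npages; i += nr) {
			struct page *page = nth_page(start, i);
			struct folio *folio = page_folio(page);

			/* Pages left in this folio, starting at 'page'. */
			nr = 1;
			if (folio_test_large(folio))
				nr = min_t(unsigned int, npages - i,
					   folio_nr_pages(folio) -
					   folio_nth(folio, page));

			/* ... operate on 'folio', which covers 'nr' pages ... */
		}
	}
]
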
On Mon, Jan 10, 2022 at 04:24:04AM +0000, Matthew Wilcox (Oracle) wrote:
> +static inline struct folio *gup_folio_range_next(unsigned long i,
> unsigned long npages, struct page *start, unsigned int *ntails)
> {
> - struct page *next, *page;
> + struct page *next;
> + struct folio *folio;
> unsigned int nr = 1;
>
> next = nth_page(start, i);
> + folio = page_folio(next);
Superficial nit: initializing next and folio at declaration time
would read a little better.
Otherwise looks good:
Reviewed-by: Christoph Hellwig <[email protected]>
On Mon, Jan 10, 2022 at 12:41:22AM -0800, Christoph Hellwig wrote:
> On Mon, Jan 10, 2022 at 04:24:04AM +0000, Matthew Wilcox (Oracle) wrote:
> > +static inline struct folio *gup_folio_range_next(unsigned long i,
> > unsigned long npages, struct page *start, unsigned int *ntails)
> > {
> > - struct page *next, *page;
> > + struct page *next;
> > + struct folio *folio;
> > unsigned int nr = 1;
> >
> > next = nth_page(start, i);
> > + folio = page_folio(next);
>
> Superficial nit: initializing next and folio at declaration time
> would read a little better.
Done. I've also made the same change to the previous patch:
-static inline struct page *compound_next(struct page **list,
+static inline struct folio *gup_folio_next(struct page **list,
unsigned long npages, unsigned long i, unsigned int *ntails)
{
- struct page *page;
+ struct folio *folio = page_folio(list[i]);
unsigned int nr;
- page = compound_head(list[i]);
for (nr = i + 1; nr < npages; nr++) {
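
[For the function the nit was originally about, gup_folio_range_next()
would presumably end up looking like this with both initializations
folded into the declarations -- a reconstruction from the hunks above,
not a copy of a posted revision:

	static inline struct folio *gup_folio_range_next(unsigned long i,
			unsigned long npages, struct page *start, unsigned int *ntails)
	{
		struct page *next = nth_page(start, i);
		struct folio *folio = page_folio(next);
		unsigned int nr = 1;

		if (folio_test_large(folio))
			nr = min_t(unsigned int, npages - i,
				   folio_nr_pages(folio) - folio_nth(folio, next));

		*ntails = nr;
		return folio;
	}
]
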
On 1/9/22 20:24, Matthew Wilcox (Oracle) wrote:
> Convert the only caller to work on folios instead of pages.
> This removes the last caller of put_compound_head(), so delete it.
>
> Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
> ---
> include/linux/mm.h | 4 ++--
> mm/gup.c | 38 ++++++++++++++++++--------------------
> 2 files changed, 20 insertions(+), 22 deletions(-)
>
Reviewed-by: John Hubbard <[email protected]>
thanks,
--
John Hubbard
NVIDIA