This patchset has a fixup for gup/mm, and provides the necessary arch
hooks to enable protected virtualization.
Andrew: please simply squash/fixup the first patch into the appropriate
one that is already in your tree.
v2 -> v3:
* revert some cosmetic changes to improve readability
* improve some comments
v1 -> v2:
* use put_compound_head in the first patch
* fix commit message of the second patch
* minor code cleanups
* added comments to explain why some actions are intentionally not taken
Claudio Imbrenda (2):
mm/gup: fixup for 9947ea2c1e608e32 "mm/gup: track FOLL_PIN pages"
mm/gup/writeback: add callbacks for inaccessible pages
include/linux/gfp.h | 6 ++++
mm/gup.c | 76 +++++++++++++++++++++++++++++----------------
mm/page-writeback.c | 5 +++
3 files changed, 61 insertions(+), 26 deletions(-)
--
2.24.1
In case pin fails, we need to unpin; a simple put_page will not be enough.
fixup for commit 9947ea2c1e608e32 ("mm/gup: track FOLL_PIN pages")
It can simply be squashed in.
Signed-off-by: Claudio Imbrenda <[email protected]>
---
mm/gup.c | 46 +++++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index f589299b0d4a..81a95fbe9901 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -116,6 +116,28 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
return NULL;
}
+static void put_compound_head(struct page *page, int refs, unsigned int flags)
+{
+ if (flags & FOLL_PIN) {
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
+ refs);
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_sub(page, refs);
+ else
+ refs *= GUP_PIN_COUNTING_BIAS;
+ }
+
+ VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
+ /*
+ * Calling put_page() for each ref is unnecessarily slow. Only the last
+ * ref needs a put_page().
+ */
+ if (refs > 1)
+ page_ref_sub(page, refs - 1);
+ put_page(page);
+}
+
/**
* try_grab_page() - elevate a page's refcount by a flag-dependent amount
*
@@ -2134,7 +2156,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
+ put_compound_head(head, 1, flags);
goto pte_unmap;
}
@@ -2267,28 +2289,6 @@ static int record_subpages(struct page *page, unsigned long addr,
return nr;
}
-static void put_compound_head(struct page *page, int refs, unsigned int flags)
-{
- if (flags & FOLL_PIN) {
- mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
- refs);
-
- if (hpage_pincount_available(page))
- hpage_pincount_sub(page, refs);
- else
- refs *= GUP_PIN_COUNTING_BIAS;
- }
-
- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
- /*
- * Calling put_page() for each ref is unnecessarily slow. Only the last
- * ref needs a put_page().
- */
- if (refs > 1)
- page_ref_sub(page, refs - 1);
- put_page(page);
-}
-
#ifdef CONFIG_ARCH_HAS_HUGEPD
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
--
2.24.1
On 3/4/20 5:06 AM, Claudio Imbrenda wrote:
> In case pin fails, we need to unpin, a simple put_page will not be enough
>
> fixup for commit 9947ea2c1e608e32 ("mm/gup: track FOLL_PIN pages")
>
> it can be simply squashed in
>
> Signed-off-by: Claudio Imbrenda <[email protected]>
> ---
Although it won't show up anywhere, in case this helps Andrew sort out what
has been looked at, you can add:
Reviewed-by: John Hubbard <[email protected]>
thanks,
--
John Hubbard
NVIDIA
> mm/gup.c | 46 +++++++++++++++++++++++-----------------------
> 1 file changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index f589299b0d4a..81a95fbe9901 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -116,6 +116,28 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
> return NULL;
> }
>
> +static void put_compound_head(struct page *page, int refs, unsigned int flags)
> +{
> + if (flags & FOLL_PIN) {
> + mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
> + refs);
> +
> + if (hpage_pincount_available(page))
> + hpage_pincount_sub(page, refs);
> + else
> + refs *= GUP_PIN_COUNTING_BIAS;
> + }
> +
> + VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
> + /*
> + * Calling put_page() for each ref is unnecessarily slow. Only the last
> + * ref needs a put_page().
> + */
> + if (refs > 1)
> + page_ref_sub(page, refs - 1);
> + put_page(page);
> +}
> +
> /**
> * try_grab_page() - elevate a page's refcount by a flag-dependent amount
> *
> @@ -2134,7 +2156,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
> goto pte_unmap;
>
> if (unlikely(pte_val(pte) != pte_val(*ptep))) {
> - put_page(head);
> + put_compound_head(head, 1, flags);
> goto pte_unmap;
> }
>
> @@ -2267,28 +2289,6 @@ static int record_subpages(struct page *page, unsigned long addr,
> return nr;
> }
>
> -static void put_compound_head(struct page *page, int refs, unsigned int flags)
> -{
> - if (flags & FOLL_PIN) {
> - mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
> - refs);
> -
> - if (hpage_pincount_available(page))
> - hpage_pincount_sub(page, refs);
> - else
> - refs *= GUP_PIN_COUNTING_BIAS;
> - }
> -
> - VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
> - /*
> - * Calling put_page() for each ref is unnecessarily slow. Only the last
> - * ref needs a put_page().
> - */
> - if (refs > 1)
> - page_ref_sub(page, refs - 1);
> - put_page(page);
> -}
> -
> #ifdef CONFIG_ARCH_HAS_HUGEPD
> static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
> unsigned long sz)
>