From: ZhangPeng <[email protected]>
This minor patch series removes page_rmapping() and converts its last
user, fault_dirty_shared_page(), to use a folio.
ZhangPeng (2):
mm: remove page_rmapping()
mm: use a folio in fault_dirty_shared_page()
include/linux/mm.h | 1 -
mm/memory.c | 10 +++++-----
mm/util.c | 6 ------
3 files changed, 5 insertions(+), 12 deletions(-)
--
2.25.1
From: ZhangPeng <[email protected]>
After converting the last user to folio_raw_mapping(), we can safely
remove the function.
Signed-off-by: ZhangPeng <[email protected]>
---
include/linux/mm.h | 1 -
mm/memory.c | 2 +-
mm/util.c | 6 ------
3 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9ecb8b9c07f6..8c7eba7370b2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2129,7 +2129,6 @@ static inline void *folio_address(const struct folio *folio)
return page_address(&folio->page);
}
-extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
diff --git a/mm/memory.c b/mm/memory.c
index 21fab2727209..6921df44a99f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2979,7 +2979,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
* pinned by vma->vm_file's reference. We rely on unlock_page()'s
* release semantics to prevent the compiler from undoing this copying.
*/
- mapping = page_rmapping(page);
+ mapping = folio_raw_mapping(page_folio(page));
unlock_page(page);
if (!page_mkwrite)
diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..5e9305189c3f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
}
EXPORT_SYMBOL(vcalloc);
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-void *page_rmapping(struct page *page)
-{
- return folio_raw_mapping(page_folio(page));
-}
-
struct anon_vma *folio_anon_vma(struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
--
2.25.1
From: ZhangPeng <[email protected]>
We can replace four implicit calls to compound_head() with one by using
folio.
Signed-off-by: ZhangPeng <[email protected]>
---
mm/memory.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6921df44a99f..73b03706451c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2967,20 +2967,20 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping;
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
bool dirtied;
bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
- dirtied = set_page_dirty(page);
- VM_BUG_ON_PAGE(PageAnon(page), page);
+ dirtied = folio_mark_dirty(folio);
+ VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
/*
* Take a local copy of the address_space - page.mapping may be zeroed
* by truncate after unlock_page(). The address_space itself remains
* pinned by vma->vm_file's reference. We rely on unlock_page()'s
* release semantics to prevent the compiler from undoing this copying.
*/
- mapping = folio_raw_mapping(page_folio(page));
- unlock_page(page);
+ mapping = folio_raw_mapping(folio);
+ folio_unlock(folio);
if (!page_mkwrite)
file_update_time(vma->vm_file);
--
2.25.1
On 6/30/23 5:13 AM, Peng Zhang wrote:
> From: ZhangPeng <[email protected]>
>
> We can replace four implicit calls to compound_head() with one by using
> folio.
>
> Signed-off-by: ZhangPeng <[email protected]>
> ---
> mm/memory.c | 10 +++++-----
> 1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 6921df44a99f..73b03706451c 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2967,20 +2967,20 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
> {
> struct vm_area_struct *vma = vmf->vma;
> struct address_space *mapping;
> - struct page *page = vmf->page;
> + struct folio *folio = page_folio(vmf->page);
> bool dirtied;
> bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
>
> - dirtied = set_page_dirty(page);
> - VM_BUG_ON_PAGE(PageAnon(page), page);
> + dirtied = folio_mark_dirty(folio);
> + VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
> /*
> * Take a local copy of the address_space - page.mapping may be zeroed
> * by truncate after unlock_page(). The address_space itself remains
> * pinned by vma->vm_file's reference. We rely on unlock_page()'s
> * release semantics to prevent the compiler from undoing this copying.
> */
> - mapping = folio_raw_mapping(page_folio(page));
> - unlock_page(page);
> + mapping = folio_raw_mapping(folio);
> + folio_unlock(folio);
>
> if (!page_mkwrite)
> file_update_time(vma->vm_file);
Reviewed-by: Sidhartha Kumar <[email protected]>
On 6/30/23 5:13 AM, Peng Zhang wrote:
> From: ZhangPeng <[email protected]>
>
> After converting the last user to folio_raw_mapping(), we can safely
> remove the function.
>
> Signed-off-by: ZhangPeng <[email protected]>
> ---
> include/linux/mm.h | 1 -
> mm/memory.c | 2 +-
> mm/util.c | 6 ------
> 3 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 9ecb8b9c07f6..8c7eba7370b2 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2129,7 +2129,6 @@ static inline void *folio_address(const struct folio *folio)
> return page_address(&folio->page);
> }
>
> -extern void *page_rmapping(struct page *page);
> extern pgoff_t __page_file_index(struct page *page);
>
> /*
> diff --git a/mm/memory.c b/mm/memory.c
> index 21fab2727209..6921df44a99f 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2979,7 +2979,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
> * pinned by vma->vm_file's reference. We rely on unlock_page()'s
> * release semantics to prevent the compiler from undoing this copying.
> */
> - mapping = page_rmapping(page);
> + mapping = folio_raw_mapping(page_folio(page));
> unlock_page(page);
>
> if (!page_mkwrite)
> diff --git a/mm/util.c b/mm/util.c
> index dd12b9531ac4..5e9305189c3f 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
> }
> EXPORT_SYMBOL(vcalloc);
>
> -/* Neutral page->mapping pointer to address_space or anon_vma or other */
> -void *page_rmapping(struct page *page)
> -{
> - return folio_raw_mapping(page_folio(page));
> -}
> -
> struct anon_vma *folio_anon_vma(struct folio *folio)
> {
> unsigned long mapping = (unsigned long)folio->mapping;
Reviewed-by: Sidhartha Kumar <[email protected]>
On 6/30/23 5:13 AM, Peng Zhang wrote:
> From: ZhangPeng <[email protected]>
>
> After converting the last user to folio_raw_mapping(), we can safely
> remove the function.
>
> Signed-off-by: ZhangPeng <[email protected]>
> ---
> include/linux/mm.h | 1 -
> mm/memory.c | 2 +-
> mm/util.c | 6 ------
> 3 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 9ecb8b9c07f6..8c7eba7370b2 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2129,7 +2129,6 @@ static inline void *folio_address(const struct folio *folio)
> return page_address(&folio->page);
> }
>
> -extern void *page_rmapping(struct page *page);
> extern pgoff_t __page_file_index(struct page *page);
>
> /*
> diff --git a/mm/memory.c b/mm/memory.c
> index 21fab2727209..6921df44a99f 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2979,7 +2979,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
> * pinned by vma->vm_file's reference. We rely on unlock_page()'s
> * release semantics to prevent the compiler from undoing this copying.
> */
struct page *page = vmf->page;
...
dirtied = set_page_dirty(page);
VM_BUG_ON_PAGE(PageAnon(page), page);
Can this earlier part of the function be converted to folios so a folio
can be passed directly in folio_raw_mapping()?
> - mapping = page_rmapping(page);
> + mapping = folio_raw_mapping(page_folio(page));
> unlock_page(page);
>
> if (!page_mkwrite)
> diff --git a/mm/util.c b/mm/util.c
> index dd12b9531ac4..5e9305189c3f 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
> }
> EXPORT_SYMBOL(vcalloc);
>
> -/* Neutral page->mapping pointer to address_space or anon_vma or other */
> -void *page_rmapping(struct page *page)
> -{
> - return folio_raw_mapping(page_folio(page));
> -}
> -
> struct anon_vma *folio_anon_vma(struct folio *folio)
> {
> unsigned long mapping = (unsigned long)folio->mapping;
On Fri, Jun 30, 2023 at 08:13:08PM +0800, Peng Zhang wrote:
> From: ZhangPeng <[email protected]>
>
> This minor patch series remove page_rmapping() and convert the last user
> fault_dirty_shared_page() to use a folio.
This patch series works better if you do it in the other order.
First convert the user, then remove the now-unused page_rmapping().
It's almost always a mistake to start with "I want to remove all the
users of API x". You should be thinking "Here is function y that still
uses pages; I shall convert it to use folios".
On 2023/7/1 0:03, Sidhartha Kumar wrote:
> On 6/30/23 5:13 AM, Peng Zhang wrote:
>> From: ZhangPeng <[email protected]>
>>
>> After converting the last user to folio_raw_mapping(), we can safely
>> remove the function.
>>
>> Signed-off-by: ZhangPeng <[email protected]>
>> ---
>> include/linux/mm.h | 1 -
>> mm/memory.c | 2 +-
>> mm/util.c | 6 ------
>> 3 files changed, 1 insertion(+), 8 deletions(-)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 9ecb8b9c07f6..8c7eba7370b2 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -2129,7 +2129,6 @@ static inline void *folio_address(const struct
>> folio *folio)
>> return page_address(&folio->page);
>> }
>> -extern void *page_rmapping(struct page *page);
>> extern pgoff_t __page_file_index(struct page *page);
>> /*
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 21fab2727209..6921df44a99f 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -2979,7 +2979,7 @@ static vm_fault_t
>> fault_dirty_shared_page(struct vm_fault *vmf)
>> * pinned by vma->vm_file's reference. We rely on unlock_page()'s
>> * release semantics to prevent the compiler from undoing this
>> copying.
>> */
>
>
> struct page *page = vmf->page;
>
> ...
>
> dirtied = set_page_dirty(page);
> VM_BUG_ON_PAGE(PageAnon(page), page);
>
> Can this earlier part of the function be converted to folios so a
> folio can be passed directly in folio_raw_mapping()?
Thanks for your feedback.
I converted the last user to use a folio in patch 2. I'll reverse the order of the two patches.
Best Regards,
Peng
>> - mapping = page_rmapping(page);
>> + mapping = folio_raw_mapping(page_folio(page));
>> unlock_page(page);
>> if (!page_mkwrite)
>> diff --git a/mm/util.c b/mm/util.c
>> index dd12b9531ac4..5e9305189c3f 100644
>> --- a/mm/util.c
>> +++ b/mm/util.c
>> @@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
>> }
>> EXPORT_SYMBOL(vcalloc);
>> -/* Neutral page->mapping pointer to address_space or anon_vma or
>> other */
>> -void *page_rmapping(struct page *page)
>> -{
>> - return folio_raw_mapping(page_folio(page));
>> -}
>> -
>> struct anon_vma *folio_anon_vma(struct folio *folio)
>> {
>> unsigned long mapping = (unsigned long)folio->mapping;
>
On 2023/7/1 5:39, Matthew Wilcox wrote:
> On Fri, Jun 30, 2023 at 08:13:08PM +0800, Peng Zhang wrote:
>> From: ZhangPeng <[email protected]>
>>
>> This minor patch series remove page_rmapping() and convert the last user
>> fault_dirty_shared_page() to use a folio.
> This patch series works better if you do it in the other order.
> First convert the user, then remove the now-unused page_rmapping().
>
> It's almost always a mistake to start with "I want to remove all the
> users of API x". You should be thinking "Here is function y that still
> uses pages; I shall convert it to use folios".
Thanks for your feedback.
I'll reverse the order of the two patches: first convert the user, then remove the now-unused page_rmapping().
Best Regards,
Peng