2022-04-29 15:28:22

by Miaohe Lin

[permalink] [raw]
Subject: [PATCH 9/9] mm/z3fold: fix z3fold_page_migrate races with z3fold_map

Think about the below scene:

CPU1 (z3fold_page_migrate)                CPU2 (z3fold_map)
  z3fold_page_trylock
  ...
  z3fold_page_unlock
                                          /* slots still points to old zhdr */
                                          get_z3fold_header
                                            get slots from handle
                                            get old zhdr from slots
                                            z3fold_page_trylock
                                            return *old* zhdr
  encode_handle(new_zhdr, FIRST|LAST|MIDDLE)
  put_page(page) /* zhdr is freed! */
                                          but zhdr is still used by caller!

z3fold_map can map freed z3fold page and lead to use-after-free bug.
To fix it, we add PAGE_MIGRATED to indicate that a z3fold page has been
migrated and is soon to be released, so that get_z3fold_header won't return
such a page.

Fixes: 1f862989b04a ("mm/z3fold.c: support page migration")
Signed-off-by: Miaohe Lin <[email protected]>
---
mm/z3fold.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index a7769befd74e..f41f8b0d9e9a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
NEEDS_COMPACTING,
PAGE_STALE,
PAGE_CLAIMED, /* by either reclaim or free */
+ PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
@@ -270,8 +271,13 @@ static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
locked = z3fold_page_trylock(zhdr);
read_unlock(&slots->lock);
- if (locked)
- break;
+ if (locked) {
+ struct page *page = virt_to_page(zhdr);
+
+ if (!test_bit(PAGE_MIGRATED, &page->private))
+ break;
+ z3fold_page_unlock(zhdr);
+ }
cpu_relax();
} while (true);
} else {
@@ -389,6 +395,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
clear_bit(NEEDS_COMPACTING, &page->private);
clear_bit(PAGE_STALE, &page->private);
clear_bit(PAGE_CLAIMED, &page->private);
+ clear_bit(PAGE_MIGRATED, &page->private);
if (headless)
return zhdr;

@@ -1576,7 +1583,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
new_zhdr = page_address(newpage);
memcpy(new_zhdr, zhdr, PAGE_SIZE);
newpage->private = page->private;
- page->private = 0;
+ set_bit(PAGE_MIGRATED, &page->private);
z3fold_page_unlock(zhdr);
spin_lock_init(&new_zhdr->page_lock);
INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1606,7 +1613,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa

queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

- clear_bit(PAGE_CLAIMED, &page->private);
+ /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+ page->private = 0;
put_page(page);
return 0;
}
--
2.23.0


2022-05-19 09:25:22

by Vitaly Wool

[permalink] [raw]
Subject: Re: [PATCH 9/9] mm/z3fold: fix z3fold_page_migrate races with z3fold_map

On Fri, Apr 29, 2022 at 8:40 AM Miaohe Lin <[email protected]> wrote:
>
> Think about the below scene:
>
> CPU1 (z3fold_page_migrate)                CPU2 (z3fold_map)
>   z3fold_page_trylock
>   ...
>   z3fold_page_unlock
>                                           /* slots still points to old zhdr */
>                                           get_z3fold_header
>                                             get slots from handle
>                                             get old zhdr from slots
>                                             z3fold_page_trylock
>                                             return *old* zhdr
>   encode_handle(new_zhdr, FIRST|LAST|MIDDLE)
>   put_page(page) /* zhdr is freed! */
>                                           but zhdr is still used by caller!
>
> z3fold_map can map freed z3fold page and lead to use-after-free bug.
> To fix it, we add PAGE_MIGRATED to indicate that a z3fold page has been
> migrated and is soon to be released, so that get_z3fold_header won't return
> such a page.
>
> Fixes: 1f862989b04a ("mm/z3fold.c: support page migration")
> Signed-off-by: Miaohe Lin <[email protected]>
> ---
> mm/z3fold.c | 16 ++++++++++++----
> 1 file changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index a7769befd74e..f41f8b0d9e9a 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -181,6 +181,7 @@ enum z3fold_page_flags {
> NEEDS_COMPACTING,
> PAGE_STALE,
> PAGE_CLAIMED, /* by either reclaim or free */
> + PAGE_MIGRATED, /* page is migrated and soon to be released */
> };
>
> /*
> @@ -270,8 +271,13 @@ static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
> zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
> locked = z3fold_page_trylock(zhdr);
> read_unlock(&slots->lock);
> - if (locked)
> - break;
> + if (locked) {
> + struct page *page = virt_to_page(zhdr);
> +
> + if (!test_bit(PAGE_MIGRATED, &page->private))
> + break;
> + z3fold_page_unlock(zhdr);
> + }
> cpu_relax();
> } while (true);
> } else {
> @@ -389,6 +395,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
> clear_bit(NEEDS_COMPACTING, &page->private);
> clear_bit(PAGE_STALE, &page->private);
> clear_bit(PAGE_CLAIMED, &page->private);
> + clear_bit(PAGE_MIGRATED, &page->private);
> if (headless)
> return zhdr;
>
> @@ -1576,7 +1583,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
> new_zhdr = page_address(newpage);
> memcpy(new_zhdr, zhdr, PAGE_SIZE);
> newpage->private = page->private;
> - page->private = 0;
> + set_bit(PAGE_MIGRATED, &page->private);
> z3fold_page_unlock(zhdr);
> spin_lock_init(&new_zhdr->page_lock);
> INIT_WORK(&new_zhdr->work, compact_page_work);
> @@ -1606,7 +1613,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
>
> queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
>
> - clear_bit(PAGE_CLAIMED, &page->private);
> + /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
> + page->private = 0;
> put_page(page);
> return 0;
> }

Reviewed-by: Vitaly Wool <[email protected]>
> --
> 2.23.0
>