Transparent huge pages can change page->flags (PG_compound_lock)
without taking Slab lock. So sl[auo]b need to use atomic bit
operation while changing page->flags.
Specifically, this patch fixes a race between compound_unlock and slab
functions which do page-flags updates. This can occur when
get_page/put_page is called on a page from a slab object.
Reported-by: Amey Bhide <[email protected]>
Signed-off-by: Pravin B Shelar <[email protected]>
---
include/linux/page-flags.h | 4 ++--
mm/slab.c | 4 ++--
mm/slob.c | 8 ++++----
mm/slub.c | 4 ++--
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c88d2a9..ba5b275 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -201,14 +201,14 @@ PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
TESTCLEARFLAG(Active, active)
-__PAGEFLAG(Slab, slab)
+PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked) /* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
PAGEFLAG(SavePinned, savepinned); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
-__PAGEFLAG(SlobFree, slob_free)
+PAGEFLAG(SlobFree, slob_free)
/*
* Private page markings that may be used by the filesystem that owns the page
diff --git a/mm/slab.c b/mm/slab.c
index e901a36..55e8c61 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1817,7 +1817,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
for (i = 0; i < nr_pages; i++)
- __SetPageSlab(page + i);
+ SetPageSlab(page + i);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1850,7 +1850,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
NR_SLAB_UNRECLAIMABLE, nr_freed);
while (i--) {
BUG_ON(!PageSlab(page));
- __ClearPageSlab(page);
+ ClearPageSlab(page);
page++;
}
if (current->reclaim_state)
diff --git a/mm/slob.c b/mm/slob.c
index 8105be4..7256a1a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -140,12 +140,12 @@ static inline int is_slob_page(struct slob_page *sp)
static inline void set_slob_page(struct slob_page *sp)
{
- __SetPageSlab((struct page *)sp);
+ SetPageSlab((struct page *)sp);
}
static inline void clear_slob_page(struct slob_page *sp)
{
- __ClearPageSlab((struct page *)sp);
+ ClearPageSlab((struct page *)sp);
}
static inline struct slob_page *slob_page(const void *addr)
@@ -164,13 +164,13 @@ static inline int slob_page_free(struct slob_page *sp)
static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
list_add(&sp->list, list);
- __SetPageSlobFree((struct page *)sp);
+ SetPageSlobFree((struct page *)sp);
}
static inline void clear_slob_page_free(struct slob_page *sp)
{
list_del(&sp->list);
- __ClearPageSlobFree((struct page *)sp);
+ ClearPageSlobFree((struct page *)sp);
}
#define SLOB_UNIT sizeof(slob_t)
diff --git a/mm/slub.c b/mm/slub.c
index 548bd12..0b53cb5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -362,7 +362,7 @@ static __always_inline void slab_lock(struct page *page)
static __always_inline void slab_unlock(struct page *page)
{
- __bit_spin_unlock(PG_locked, &page->flags);
+ bit_spin_unlock(PG_locked, &page->flags);
}
/* Interrupts must be disabled (for the fallback code to work right) */
@@ -1413,7 +1413,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
- __ClearPageSlab(page);
+ ClearPageSlab(page);
reset_page_mapcount(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
--
1.7.10
On Tue, 2012-05-08 at 11:55 -0700, Pravin B Shelar wrote:
> Transparent huge pages can change page->flags (PG_compound_lock)
> without taking Slab lock. So sl[auo]b need to use atomic bit
> operation while changing page->flags.
> Specificly this patch fixes race between compound_unlock and slab
> functions which does page-flags update. This can occur when
> get_page/put_page is called on page from slab object.
But should get_page()/put_page() be called on a page owned by slub?
On Tue, 8 May 2012, Eric Dumazet wrote:
> On Tue, 2012-05-08 at 11:55 -0700, Pravin B Shelar wrote:
> > Transparent huge pages can change page->flags (PG_compound_lock)
> > without taking Slab lock. So sl[auo]b need to use atomic bit
> > operation while changing page->flags.
> > Specificly this patch fixes race between compound_unlock and slab
> > functions which does page-flags update. This can occur when
> > get_page/put_page is called on page from slab object.
>
>
> But should get_page()/put_page() be called on a page own by slub ?
Can occur in slab allocators if the slab memory is used for DMA. I don't
like the performance impact of the atomics. In particular slab_unlock() in
slub is or used to be a hot path item. It is still hot on arches that do
not support this_cpu_cmpxchg_double. With the cmpxchg_double only the
debug mode is affected.
On Tue, May 8, 2012 at 12:22 PM, Christoph Lameter <[email protected]> wrote:
> On Tue, 8 May 2012, Eric Dumazet wrote:
>
>> On Tue, 2012-05-08 at 11:55 -0700, Pravin B Shelar wrote:
>> > Transparent huge pages can change page->flags (PG_compound_lock)
>> > without taking Slab lock. So sl[auo]b need to use atomic bit
>> > operation while changing page->flags.
>> > Specificly this patch fixes race between compound_unlock and slab
>> > functions which does page-flags update. This can occur when
>> > get_page/put_page is called on page from slab object.
>>
>>
>> But should get_page()/put_page() be called on a page own by slub ?
>
> Can occur in slab allocators if the slab memory is used for DMA. I dont
> like the performance impact of the atomics. In particular slab_unlock() in
> slub is or used to be a hot path item. It is still hot on arches that do
> not support this_cpu_cmpxchg_double. With the cmpxchg_double only the
> debug mode is affected.
>
I agree this would impact performance. I am not sure how else we can
fix this issue. As far as slab_unlock in hot path case is concerned,
it is more likely to corrupt page->flags in that case.
On Wed, 9 May 2012, Pravin Shelar wrote:
> On Tue, May 8, 2012 at 12:22 PM, Christoph Lameter <[email protected]> wrote:
> > On Tue, 8 May 2012, Eric Dumazet wrote:
> >
> >> On Tue, 2012-05-08 at 11:55 -0700, Pravin B Shelar wrote:
> >> > Transparent huge pages can change page->flags (PG_compound_lock)
> >> > without taking Slab lock. So sl[auo]b need to use atomic bit
> >> > operation while changing page->flags.
> >> > Specificly this patch fixes race between compound_unlock and slab
> >> > functions which does page-flags update. This can occur when
> >> > get_page/put_page is called on page from slab object.
> >>
> >>
> >> But should get_page()/put_page() be called on a page own by slub ?
> >
> > Can occur in slab allocators if the slab memory is used for DMA. I dont
> > like the performance impact of the atomics. In particular slab_unlock() in
> > slub is or used to be a hot path item. It is still hot on arches that do
> > not support this_cpu_cmpxchg_double. With the cmpxchg_double only the
> > debug mode is affected.
> >
>
> I agree this would impact performance. I am not sure how else we can
> fix this issue. As far as slab_unlock in hot path case is concerned,
> it is more likely to corrupt page->flags in that case.
Don't modify any page flags from THP logic if it's a slab page? THP cannot
break up or merge slab pages anyway.
On Wed, May 9, 2012 at 10:25 AM, Christoph Lameter <[email protected]> wrote:
> On Wed, 9 May 2012, Pravin Shelar wrote:
>
>> On Tue, May 8, 2012 at 12:22 PM, Christoph Lameter <[email protected]> wrote:
>> > On Tue, 8 May 2012, Eric Dumazet wrote:
>> >
>> >> On Tue, 2012-05-08 at 11:55 -0700, Pravin B Shelar wrote:
>> >> > Transparent huge pages can change page->flags (PG_compound_lock)
>> >> > without taking Slab lock. So sl[auo]b need to use atomic bit
>> >> > operation while changing page->flags.
>> >> > Specificly this patch fixes race between compound_unlock and slab
>> >> > functions which does page-flags update. This can occur when
>> >> > get_page/put_page is called on page from slab object.
>> >>
>> >>
>> >> But should get_page()/put_page() be called on a page own by slub ?
>> >
>> > Can occur in slab allocators if the slab memory is used for DMA. I dont
>> > like the performance impact of the atomics. In particular slab_unlock() in
>> > slub is or used to be a hot path item. It is still hot on arches that do
>> > not support this_cpu_cmpxchg_double. With the cmpxchg_double only the
>> > debug mode is affected.
>> >
>>
>> I agree this would impact performance. I am not sure how else we can
>> fix this issue. As far as slab_unlock in hot path case is concerned,
>> it is more likely to corrupt page->flags in that case.
>
> Dont modify any page flags from THP logic if its a slab page? THP cannot
> break up or merge slab pages anyways.
Good idea, I will post patch soon.
>
>