2007-01-28 14:11:40

by Peter Zijlstra

[permalink] [raw]
Subject: [PATCH] mm: remove global locks from mm/highmem.c

Eradicate global locks.

- kmap_lock is removed by extensive use of atomic_t, a new flush
scheme and modifying set_page_address to only allow NULL<->virt
transitions.

A count of 0 is an exclusive state acting as an entry lock. This is done
using inc_not_zero and cmpxchg. The restriction on changing the virtual
address closes the race with concurrent additions of the same entry (see
the illustrative sketch below the changelog).

- pool_lock is removed by using the pkmap index for the
page_address_maps.

By using the pkmap index for the hash entries, it is no longer necessary to
keep a free list.

This patch has been in -rt for a while but should also help regular
highmem machines with multiple cores/cpus.
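
For illustration only (not part of the patch): a minimal user-space C11
analogue of the counter scheme described above, where 0 is the exclusive
"entry locked" state, 1 means free, and n means n-1 users. All names here
are made up for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* one counter per map slot; 0 = exclusively owned, 1 = free, n = n-1 users */
static atomic_int slot_count = 1;

/* claim exclusive ownership of a free slot: the 1 -> 0 transition
 * (the patch does this with atomic_cmpxchg() in pkmap_try_free()) */
static bool slot_try_lock(void)
{
	int expected = 1;
	return atomic_compare_exchange_strong(&slot_count, &expected, 0);
}

/* take a reference unless the slot is exclusively owned: "inc_not_zero"
 * (the patch does this with atomic_inc_not_zero() in kmap_high()) */
static bool slot_get(void)
{
	int old = atomic_load(&slot_count);
	while (old != 0) {
		if (atomic_compare_exchange_weak(&slot_count, &old, old + 1))
			return true;
		/* on failure, old has been reloaded; loop and retry */
	}
	return false;
}

/* drop a reference; reaching 1 again means "free", reaching 0 is a bug
 * (the patch's pkmap_put() additionally wakes up waiters at that point) */
static void slot_put(void)
{
	if (atomic_fetch_sub(&slot_count, 1) - 1 == 0)
		abort();	/* mirrors the BUG() in pkmap_put() */
}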

Signed-off-by: Peter Zijlstra <[email protected]>
---
include/linux/mm.h | 32 ++-
mm/highmem.c | 433 ++++++++++++++++++++++++++++++-----------------------
2 files changed, 276 insertions(+), 189 deletions(-)

Index: linux/include/linux/mm.h
===================================================================
--- linux.orig/include/linux/mm.h
+++ linux/include/linux/mm.h
@@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
#endif

#if defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) ((page)->virtual)
-#define set_page_address(page, address) \
- do { \
- (page)->virtual = (address); \
- } while(0)
-#define page_address_init() do { } while(0)
+/*
+ * wrap page->virtual so it is safe to set/read locklessly
+ */
+#define page_address(page) \
+ ({ typeof((page)->virtual) v = (page)->virtual; \
+ smp_read_barrier_depends(); \
+ v; })
+
+static inline int set_page_address(struct page *page, void *address)
+{
+ if (address)
+ return cmpxchg(&page->virtual, NULL, address) == NULL;
+ else {
+ /*
+ * cmpxchg is a bit abused because it is not guaranteed
+ * safe wrt direct assignment on all platforms.
+ */
+ void *virt = page->virtual;
+ return cmpxchg(&page->virtual, virt, NULL) == virt;
+ }
+}
+void page_address_init(void);
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
-void set_page_address(struct page *page, void *virtual);
+int set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
-#define set_page_address(page, address) do { } while(0)
+#define set_page_address(page, address) (0)
#define page_address_init() do { } while(0)
#endif

Index: linux/mm/highmem.c
===================================================================
--- linux.orig/mm/highmem.c
+++ linux/mm/highmem.c
@@ -14,6 +14,11 @@
* based on Linus' idea.
*
* Copyright (C) 1999 Ingo Molnar <[email protected]>
+ *
+ * Largely rewritten to get rid of all global locks
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Peter Zijlstra <[email protected]>
+ *
*/

#include <linux/mm.h>
@@ -27,18 +32,14 @@
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
+
#include <asm/tlbflush.h>
+#include <asm/pgtable.h>

-/*
- * Virtual_count is not a pure "count".
- * 0 means that it is not mapped, and has not been mapped
- * since a TLB flush - it is usable.
- * 1 means that there are no users, but it has been mapped
- * since the last TLB flush - so we can't use it.
- * n means that there are (n-1) current users of it.
- */
#ifdef CONFIG_HIGHMEM

+static int __set_page_address(struct page *page, void *virtual, int pos);
+
unsigned long totalhigh_pages __read_mostly;

unsigned int nr_free_highpages (void)
@@ -52,164 +53,208 @@ unsigned int nr_free_highpages (void)
return pages;
}

-static int pkmap_count[LAST_PKMAP];
-static unsigned int last_pkmap_nr;
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
+/*
+ * count is not a pure "count".
+ * 0 means it's owned exclusively by someone
+ * 1 means it's free for use - either mapped or not.
+ * n means that there are (n-1) current users of it.
+ */
+static atomic_t pkmap_count[LAST_PKMAP];
+static atomic_t pkmap_hand;

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

-static void flush_all_zero_pkmaps(void)
+/*
+ * Try to free a given kmap slot.
+ *
+ * Returns:
+ * -1 - in use
+ * 0 - free, no TLB flush needed
+ * 1 - free, needs TLB flush
+ */
+static int pkmap_try_free(int pos)
{
- int i;
-
- flush_cache_kmaps();
+ if (atomic_cmpxchg(&pkmap_count[pos], 1, 0) != 1)
+ return -1;

- for (i = 0; i < LAST_PKMAP; i++) {
- struct page *page;
+ /*
+ * TODO: add a young bit to make it CLOCK
+ */
+ if (!pte_none(pkmap_page_table[pos])) {
+ struct page *page = pte_page(pkmap_page_table[pos]);
+ unsigned long addr = PKMAP_ADDR(pos);
+ pte_t *ptep = &pkmap_page_table[pos];
+
+ VM_BUG_ON(addr != (unsigned long)page_address(page));
+
+ if (!__set_page_address(page, NULL, pos))
+ BUG();
+ flush_kernel_dcache_page(page);
+ pte_clear(&init_mm, addr, ptep);

- /*
- * zero means we don't have anything to do,
- * >1 means that it is still in use. Only
- * a count of 1 means that it is free but
- * needs to be unmapped
- */
- if (pkmap_count[i] != 1)
- continue;
- pkmap_count[i] = 0;
+ return 1;
+ }

- /* sanity check */
- BUG_ON(pte_none(pkmap_page_table[i]));
+ return 0;
+}

- /*
- * Don't need an atomic fetch-and-clear op here;
- * no-one has the page mapped, and cannot get at
- * its virtual address (and hence PTE) without first
- * getting the kmap_lock (which is held here).
- * So no dangers, even with speculative execution.
- */
- page = pte_page(pkmap_page_table[i]);
- pte_clear(&init_mm, (unsigned long)page_address(page),
- &pkmap_page_table[i]);
+static inline void pkmap_put(atomic_t *counter)
+{
+ switch (atomic_dec_return(counter)) {
+ case 0:
+ BUG();

- set_page_address(page, NULL);
+ case 1:
+ wake_up(&pkmap_map_wait);
}
- flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

-static inline unsigned long map_new_virtual(struct page *page)
+#define TLB_BATCH 32
+
+static int pkmap_get_free(void)
{
- unsigned long vaddr;
- int count;
+ int i, pos, flush;
+ DECLARE_WAITQUEUE(wait, current);

-start:
- count = LAST_PKMAP;
- /* Find an empty entry */
- for (;;) {
- last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
- if (!last_pkmap_nr) {
- flush_all_zero_pkmaps();
- count = LAST_PKMAP;
- }
- if (!pkmap_count[last_pkmap_nr])
- break; /* Found a usable entry */
- if (--count)
- continue;
+restart:
+ for (i = 0; i < LAST_PKMAP; i++) {
+ pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;
+ flush = pkmap_try_free(pos);
+ if (flush >= 0)
+ goto got_one;
+ }
+
+ /*
+ * wait for somebody else to unmap their entries
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&pkmap_map_wait, &wait);
+ schedule();
+ remove_wait_queue(&pkmap_map_wait, &wait);
+
+ goto restart;
+
+got_one:
+ if (flush) {
+#if 0
+ flush_tlb_kernel_range(PKMAP_ADDR(pos), PKMAP_ADDR(pos+1));
+#else
+ int pos2 = (pos + 1) % LAST_PKMAP;
+ int nr;
+ int entries[TLB_BATCH];

/*
- * Sleep for somebody else to unmap their entries
+ * For those architectures that cannot help but flush the
+ * whole TLB, flush some more entries to make it worthwhile.
+ * Scan ahead of the hand to minimise search distances.
*/
- {
- DECLARE_WAITQUEUE(wait, current);
+ for (i = 0, nr = 0; i < LAST_PKMAP && nr < TLB_BATCH;
+ i++, pos2 = (pos2 + 1) % LAST_PKMAP) {

- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
- schedule();
- remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
-
- /* Somebody else might have mapped it while we slept */
- if (page_address(page))
- return (unsigned long)page_address(page);
+ flush = pkmap_try_free(pos2);
+ if (flush < 0)
+ continue;
+
+ if (!flush) {
+ atomic_t *counter = &pkmap_count[pos2];
+ VM_BUG_ON(atomic_read(counter) != 0);
+ atomic_set(counter, 2);
+ pkmap_put(counter);
+ } else
+ entries[nr++] = pos2;
+ }
+ flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));

- /* Re-start */
- goto start;
+ for (i = 0; i < nr; i++) {
+ atomic_t *counter = &pkmap_count[entries[i]];
+ VM_BUG_ON(atomic_read(counter) != 0);
+ atomic_set(counter, 2);
+ pkmap_put(counter);
}
+#endif
}
- vaddr = PKMAP_ADDR(last_pkmap_nr);
- set_pte_at(&init_mm, vaddr,
- &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+ return pos;
+}
+
+static unsigned long pkmap_insert(struct page *page)
+{
+ int pos = pkmap_get_free();
+ unsigned long vaddr = PKMAP_ADDR(pos);
+ pte_t *ptep = &pkmap_page_table[pos];
+ pte_t entry = mk_pte(page, kmap_prot);
+ atomic_t *counter = &pkmap_count[pos];
+
+ VM_BUG_ON(atomic_read(counter) != 0);

- pkmap_count[last_pkmap_nr] = 1;
- set_page_address(page, (void *)vaddr);
+ set_pte_at(&init_mm, vaddr, ptep, entry);
+ if (unlikely(!__set_page_address(page, (void *)vaddr, pos))) {
+ /*
+ * concurrent pkmap_inserts for this page -
+ * the other won the race, release this entry.
+ *
+ * we can still clear the pte without a tlb flush since
+ * it couldn't have been used yet.
+ */
+ pte_clear(&init_mm, vaddr, ptep);
+ VM_BUG_ON(atomic_read(counter) != 0);
+ atomic_set(counter, 2);
+ pkmap_put(counter);
+ vaddr = 0;
+ } else
+ atomic_set(counter, 2);

return vaddr;
}

-void fastcall *kmap_high(struct page *page)
+fastcall void *kmap_high(struct page *page)
{
unsigned long vaddr;
-
- /*
- * For highmem pages, we can't trust "virtual" until
- * after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
- */
- spin_lock(&kmap_lock);
+again:
vaddr = (unsigned long)page_address(page);
+ if (vaddr) {
+ atomic_t *counter = &pkmap_count[PKMAP_NR(vaddr)];
+ if (atomic_inc_not_zero(counter)) {
+ /*
+ * atomic_inc_not_zero implies a (memory) barrier on success
+ * so page address will be reloaded.
+ */
+ unsigned long vaddr2 = (unsigned long)page_address(page);
+ if (likely(vaddr == vaddr2))
+ return (void *)vaddr;
+
+ /*
+ * Oops, we got someone else.
+ *
+ * This can happen if we get preempted after
+ * page_address() and before atomic_inc_not_zero()
+ * and during that preemption this slot is freed and
+ * reused.
+ */
+ pkmap_put(counter);
+ goto again;
+ }
+ }
+
+ vaddr = pkmap_insert(page);
if (!vaddr)
- vaddr = map_new_virtual(page);
- pkmap_count[PKMAP_NR(vaddr)]++;
- BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
- spin_unlock(&kmap_lock);
- return (void*) vaddr;
+ goto again;
+
+ return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

-void fastcall kunmap_high(struct page *page)
+fastcall void kunmap_high(struct page *page)
{
- unsigned long vaddr;
- unsigned long nr;
- int need_wakeup;
-
- spin_lock(&kmap_lock);
- vaddr = (unsigned long)page_address(page);
+ unsigned long vaddr = (unsigned long)page_address(page);
BUG_ON(!vaddr);
- nr = PKMAP_NR(vaddr);
-
- /*
- * A count must never go down to zero
- * without a TLB flush!
- */
- need_wakeup = 0;
- switch (--pkmap_count[nr]) {
- case 0:
- BUG();
- case 1:
- /*
- * Avoid an unnecessary wake_up() function call.
- * The common case is pkmap_count[] == 1, but
- * no waiters.
- * The tasks queued in the wait-queue are guarded
- * by both the lock in the wait-queue-head and by
- * the kmap_lock. As the kmap_lock is held here,
- * no need for the wait-queue-head's lock. Simply
- * test if the queue is empty.
- */
- need_wakeup = waitqueue_active(&pkmap_map_wait);
- }
- spin_unlock(&kmap_lock);
-
- /* do wake-up, if needed, race-free outside of the spin lock */
- if (need_wakeup)
- wake_up(&pkmap_map_wait);
+ pkmap_put(&pkmap_count[PKMAP_NR(vaddr)]);
}

EXPORT_SYMBOL(kunmap_high);
+
#endif

#if defined(HASHED_PAGE_VIRTUAL)
@@ -217,19 +262,13 @@ EXPORT_SYMBOL(kunmap_high);
#define PA_HASH_ORDER 7

/*
- * Describes one page->virtual association
+ * Describes one page->virtual address association.
*/
-struct page_address_map {
+static struct page_address_map {
struct page *page;
void *virtual;
struct list_head list;
-};
-
-/*
- * page_address_map freelist, allocated from page_address_maps.
- */
-static struct list_head page_address_pool; /* freelist */
-static spinlock_t pool_lock; /* protects page_address_pool */
+} page_address_maps[LAST_PKMAP];

/*
* Hash table bucket
@@ -244,91 +283,123 @@ static struct page_address_slot *page_sl
return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

-void *page_address(struct page *page)
+static void *__page_address(struct page_address_slot *pas, struct page *page)
{
- unsigned long flags;
- void *ret;
- struct page_address_slot *pas;
-
- if (!PageHighMem(page))
- return lowmem_page_address(page);
+ void *ret = NULL;

- pas = page_slot(page);
- ret = NULL;
- spin_lock_irqsave(&pas->lock, flags);
if (!list_empty(&pas->lh)) {
struct page_address_map *pam;

list_for_each_entry(pam, &pas->lh, list) {
if (pam->page == page) {
ret = pam->virtual;
- goto done;
+ break;
}
}
}
-done:
+
+ return ret;
+}
+
+void *page_address(struct page *page)
+{
+ unsigned long flags;
+ void *ret;
+ struct page_address_slot *pas;
+
+ if (!PageHighMem(page))
+ return lowmem_page_address(page);
+
+ pas = page_slot(page);
+ spin_lock_irqsave(&pas->lock, flags);
+ ret = __page_address(pas, page);
spin_unlock_irqrestore(&pas->lock, flags);
return ret;
}

EXPORT_SYMBOL(page_address);

-void set_page_address(struct page *page, void *virtual)
+static int __set_page_address(struct page *page, void *virtual, int pos)
{
+ int ret = 0;
unsigned long flags;
struct page_address_slot *pas;
struct page_address_map *pam;

- BUG_ON(!PageHighMem(page));
+ VM_BUG_ON(!PageHighMem(page));
+ VM_BUG_ON(atomic_read(&pkmap_count[pos]) != 0);
+ VM_BUG_ON(pos < 0 || pos >= LAST_PKMAP);

pas = page_slot(page);
- if (virtual) { /* Add */
- BUG_ON(list_empty(&page_address_pool));
+ pam = &page_address_maps[pos];

- spin_lock_irqsave(&pool_lock, flags);
- pam = list_entry(page_address_pool.next,
- struct page_address_map, list);
- list_del(&pam->list);
- spin_unlock_irqrestore(&pool_lock, flags);
-
- pam->page = page;
- pam->virtual = virtual;
-
- spin_lock_irqsave(&pas->lock, flags);
- list_add_tail(&pam->list, &pas->lh);
- spin_unlock_irqrestore(&pas->lock, flags);
- } else { /* Remove */
- spin_lock_irqsave(&pas->lock, flags);
- list_for_each_entry(pam, &pas->lh, list) {
- if (pam->page == page) {
- list_del(&pam->list);
- spin_unlock_irqrestore(&pas->lock, flags);
- spin_lock_irqsave(&pool_lock, flags);
- list_add_tail(&pam->list, &page_address_pool);
- spin_unlock_irqrestore(&pool_lock, flags);
- goto done;
- }
+ spin_lock_irqsave(&pas->lock, flags);
+ if (virtual) { /* add */
+ VM_BUG_ON(!list_empty(&pam->list));
+
+ if (!__page_address(pas, page)) {
+ pam->page = page;
+ pam->virtual = virtual;
+ list_add_tail(&pam->list, &pas->lh);
+ ret = 1;
+ }
+ } else { /* remove */
+ if (!list_empty(&pam->list)) {
+ list_del_init(&pam->list);
+ ret = 1;
}
- spin_unlock_irqrestore(&pas->lock, flags);
}
-done:
- return;
+ spin_unlock_irqrestore(&pas->lock, flags);
+
+ return ret;
}

-static struct page_address_map page_address_maps[LAST_PKMAP];
+int set_page_address(struct page *page, void *virtual)
+{
+ /*
+ * set_page_address is not supposed to be called when using
+ * hashed virtual addresses.
+ */
+ BUG();
+ return 0;
+}

-void __init page_address_init(void)
+void __init __page_address_init(void)
{
int i;

- INIT_LIST_HEAD(&page_address_pool);
for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
- list_add(&page_address_maps[i].list, &page_address_pool);
+ INIT_LIST_HEAD(&page_address_maps[i].list);
+
for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
INIT_LIST_HEAD(&page_address_htable[i].lh);
spin_lock_init(&page_address_htable[i].lock);
}
- spin_lock_init(&pool_lock);
+}
+
+#elif defined (CONFIG_HIGHMEM) /* HASHED_PAGE_VIRTUAL */
+
+static int __set_page_address(struct page *page, void *virtual, int pos)
+{
+ return set_page_address(page, virtual);
}

#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
+
+#if defined(CONFIG_HIGHMEM) || defined(HASHED_PAGE_VIRTUAL)
+
+void __init page_address_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkmap_count); i++)
+ atomic_set(&pkmap_count[i], 1);
+#endif
+
+#ifdef HASHED_PAGE_VIRTUAL
+ __page_address_init();
+#endif
+}
+
+#endif



2007-01-28 14:49:36

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Sun, Jan 28, 2007 at 03:11:34PM +0100, Peter Zijlstra wrote:
> Eradicate global locks.
>
> - kmap_lock is removed by extensive use of atomic_t, a new flush
> scheme and modifying set_page_address to only allow NULL<->virt
> transitions.

What's the point for this? Extensive atomic_t use is usually much worse
than spinlocks. A spinlock region is just a single atomic instruction;
as soon as you do more than one atomic_t operation you tend to make
scalability worse. Not to mention that atomic_t operations are much worse
when you try to profile scalability issues.

What benchmark shows a problem with the current locking, and from what
caller? If in doubt, we just need to convert that caller to kmap_atomic.


> [...]
---end quoted text---

2007-01-28 15:18:44

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Christoph Hellwig <[email protected]> wrote:

> On Sun, Jan 28, 2007 at 03:11:34PM +0100, Peter Zijlstra wrote:
> > Eradicate global locks.
> >
> > - kmap_lock is removed by extensive use of atomic_t, a new flush
> > scheme and modifying set_page_address to only allow NULL<->virt
> > transitions.
>
> What's the point for this? [...]

scalability. I did lock profiling on the -rt kernel, which exposes such
things nicely. Half of the lock contention events during kernel compile
were due to kmap(). (The system had 2 GB of RAM, so 40% lowmem, 60%
highmem.)

> [...] In doubt we just need to convert that caller to kmap_atomic.

the pagecache ones cannot be converted to kmap_atomic, because we can
block while holding them. Plus kmap_atomic is quite a bit slower than
this scalable version of kmap().

Ingo

ps. please fix your mailer to not emit Mail-Followup-To headers. In Mutt
you can do this via "set followup_to=no" in your .muttrc.

2007-01-28 15:29:05

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Sun, Jan 28, 2007 at 04:17:00PM +0100, Ingo Molnar wrote:
> scalability. I did lock profiling on the -rt kernel, which exposes such
> things nicely. Half of the lock contention events during kernel compile
> were due to kmap(). (The system had 2 GB of RAM, so 40% lowmem, 60%
> highmem.)

Numbers please, and not on -rt but on mainline. Please show the profiles.

> ps. please fix your mailer to not emit Mail-Followup-To headers. In Mutt
> you can do this via "set followup_to=no" in your .muttrc.

I have told you last time that this is absolutely intentional and I won't
change it.

2007-01-28 15:50:20

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Christoph Hellwig <[email protected]> wrote:

> On Sun, Jan 28, 2007 at 04:17:00PM +0100, Ingo Molnar wrote:
> > scalability. I did lock profiling on the -rt kernel, which exposes
> > such things nicely. Half of the lock contention events during kernel
> > compile were due to kmap(). (The system had 2 GB of RAM, so 40%
> > lowmem, 60% highmem.)
>
> Numbers please, and not on -rt but on mainline. Please show the
> profiles.

i'm sorry, but do you realize that files_lock is a global lock,
triggered by /every single/ file close?

" files_lock is a global lock and we touch it for every single
sys_close() system call that the system does. "

You really don't need to be a rocket scientist to see that it's a
globally bouncing cacheline that has a major effect on certain
VFS-intense workloads. Peter has worked hard to eliminate its effects
without having to couple this to an intrusive rewrite of the TTY layer.

( really, i personally find your dismissive style appalling and i think
such a reception of a nice patchset must be humiliating to Peter. I
certainly try to avoid to be involved with any VFS internals, due to
this unwelcoming tone of discussion. Had you been around when i
started contributing to the Linux kernel i'd probably not be hacking
the kernel today. You are a good hacker but the simultaneous
collateral damage you are causing is significantly reducing the net
benefit. )

> > ps. please fix your mailer to not emit Mail-Followup-To headers. In
> > Mutt you can do this via "set followup_to=no" in your .muttrc.
>
> I have told you last time that this is absolutely intentional and I
> won't change it.

( You are messing up the reply headers: everyone is listed in the 'To:'
field for any reply to your mail, instead of being added to the Cc:
list. )

Ingo

2007-01-28 15:54:35

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Sun, Jan 28, 2007 at 04:48:06PM +0100, Ingo Molnar wrote:
> i'm sorry, but do you realize that files_lock is a global lock,
> triggered by /every single/ file close?

Please check which thread you're in before you start such lengthy rants.

2007-01-28 18:21:31

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Christoph Hellwig <[email protected]> wrote:

> On Sun, Jan 28, 2007 at 04:48:06PM +0100, Ingo Molnar wrote:
> > i'm sorry, but do you realize that files_lock is a global lock,
> > triggered by /every single/ file close?
>
> Please check which thread you're in before you start such lengthy
> rants.

my reply applies to the other thread as well; you made a similar comment
there too:

* Christoph Hellwig <[email protected]> wrote:

> On Sun, Jan 28, 2007 at 12:51:18PM +0100, Peter Zijlstra wrote:
> > This patch-set breaks up the global file_list_lock which was found
> > to be a severe contention point under basically any filesystem
> > intensive workload.
>
> Benchmarks, please. Where exactly do you see contention for this?

Ingo

2007-01-28 22:29:38

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Sun, 28 Jan 2007 15:11:34 +0100
Peter Zijlstra <[email protected]> wrote:

> Eradicate global locks.
>
> - kmap_lock is removed by extensive use of atomic_t, a new flush
> scheme and modifying set_page_address to only allow NULL<->virt
> transitions.
>
> A count of 0 is an exclusive state acting as an entry lock. This is done
> using inc_not_zero and cmpxchg. The restriction on changing the virtual
> address closes the gap with concurrent additions of the same entry.
>
> - pool_lock is removed by using the pkmap index for the
> page_address_maps.
>
> By using the pkmap index for the hash entries it is no longer needed to
> keep a free list.
>
> This patch has been in -rt for a while but should also help regular
> highmem machines with multiple cores/cpus.

I really don't recall any performance problems being reported out of that
code in recent years.

As Christoph says, it's very much preferred that code be migrated over to
kmap_atomic(). Partly because kmap() is deadlockable in situations where a
large number of threads are trying to take two kmaps at the same time and
we run out. This happened in the past, but incidences have gone away,
probably because of kmap->kmap_atomic conversions.
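
(For readers unfamiliar with the deadlock Andrew refers to, here is a
hypothetical illustration -- not taken from any in-tree caller: each thread
pins one pkmap slot and then sleeps waiting for a second one, so with enough
threads every slot ends up pinned by a waiter and nobody can proceed.)

/* hypothetical example of the nested-kmap pattern that can deadlock */
static void copy_high_to_high(struct page *dst, struct page *src)
{
	char *d = kmap(dst);	/* pins one pkmap slot ...            */
	char *s = kmap(src);	/* ... and may sleep here for another,
				 * while still holding the first      */
	memcpy(d, s, PAGE_SIZE);
	kunmap(src);
	kunmap(dst);
}

(The in-tree copy_highpage() avoids exactly this by using kmap_atomic()
for both pages.)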

From which callsite have you measured problems?

> Index: linux/include/linux/mm.h
> ===================================================================
> --- linux.orig/include/linux/mm.h
> +++ linux/include/linux/mm.h
> @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
> #endif
>
> #if defined(WANT_PAGE_VIRTUAL)
> -#define page_address(page) ((page)->virtual)
> -#define set_page_address(page, address) \
> - do { \
> - (page)->virtual = (address); \
> - } while(0)
> -#define page_address_init() do { } while(0)
> +/*
> + * wrap page->virtual so it is safe to set/read locklessly
> + */
> +#define page_address(page) \
> + ({ typeof((page)->virtual) v = (page)->virtual; \
> + smp_read_barrier_depends(); \
> + v; })
> +
> +static inline int set_page_address(struct page *page, void *address)
> +{
> + if (address)
> + return cmpxchg(&page->virtual, NULL, address) == NULL;
> + else {
> + /*
> + * cmpxchg is a bit abused because it is not guaranteed
> + * safe wrt direct assignment on all platforms.
> + */
> + void *virt = page->virtual;
> + return cmpxchg(&page->vitrual, virt, NULL) == virt;
> + }
> +}

Have you verified that all architectures which can implement
WANT_PAGE_VIRTUAL also implement cmpxchg?

Have you verified that sufficient headers are included for this to compile
correctly on all WANT_PAGE_VIRTUAL-enabling architectures on all configs?
I don't see asm/system.h being included in mm.h and if I get yet another
damned it-wont-compile patch I might do something irreversible.

> +static int pkmap_get_free(void)
> {
> - unsigned long vaddr;
> - int count;
> + int i, pos, flush;
> + DECLARE_WAITQUEUE(wait, current);
>
> -start:
> - count = LAST_PKMAP;
> - /* Find an empty entry */
> - for (;;) {
> - last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;

The old code used masking.

> - if (!last_pkmap_nr) {
> - flush_all_zero_pkmaps();
> - count = LAST_PKMAP;
> - }
> - if (!pkmap_count[last_pkmap_nr])
> - break; /* Found a usable entry */
> - if (--count)
> - continue;
> +restart:
> + for (i = 0; i < LAST_PKMAP; i++) {
> + pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;

The new code does more-expensive modulus. Necessary?

> + flush = pkmap_try_free(pos);
> + if (flush >= 0)
> + goto got_one;
> + }
> +
> + /*
> + * wait for somebody else to unmap their entries
> + */
> + __set_current_state(TASK_UNINTERRUPTIBLE);
> + add_wait_queue(&pkmap_map_wait, &wait);
> + schedule();
> + remove_wait_queue(&pkmap_map_wait, &wait);

This looks wrong. What happens if everyone else does their unmap between
the __set_current_state() and the add_wait_queue()?


2007-01-29 02:52:30

by Nick Piggin

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

Andrew Morton wrote:
> On Sun, 28 Jan 2007 15:11:34 +0100
> Peter Zijlstra <[email protected]> wrote:

>>+static inline int set_page_address(struct page *page, void *address)
>>+{
>>+ if (address)
>>+ return cmpxchg(&page->virtual, NULL, address) == NULL;
>>+ else {
>>+ /*
>>+ * cmpxchg is a bit abused because it is not guaranteed
>>+ * safe wrt direct assignment on all platforms.
>>+ */
>>+ void *virt = page->virtual;
>>+ return cmpxchg(&page->vitrual, virt, NULL) == virt;
>>+ }
>>+}
>
>
> Have you verified that all architectures which can implement
> WANT_PAGE_VIRTUAL also implement cmpxchg?

Simple: we should not implement cmpxchg in generic code. We should
be able to use atomic_long_cmpxchg for this -- it will work perfectly
regardless of what anybody else tells you.

cmpxchg is only required for when that memory location may get modified
by some other means than under your control (eg. userspace, in the case
of drm, or hardware MMU in the case of Christoph's old page fault
scalability patches).

--
SUSE Labs, Novell Inc.
Send instant messages to your online friends http://au.messenger.yahoo.com

2007-01-29 09:45:44

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:

> As Christoph says, it's very much preferred that code be migrated over to
> kmap_atomic(). Partly because kmap() is deadlockable in situations where a
> large number of threads are trying to take two kmaps at the same time and
> we run out. This happened in the past, but incidences have gone away,
> probably because of kmap->kmap_atomic conversions.

> From which callsite have you measured problems?

CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
mainline.

> > Index: linux/include/linux/mm.h
> > ===================================================================
> > --- linux.orig/include/linux/mm.h
> > +++ linux/include/linux/mm.h
> > @@ -543,23 +543,39 @@ static __always_inline void *lowmem_page
> > #endif
> >
> > #if defined(WANT_PAGE_VIRTUAL)
> > -#define page_address(page) ((page)->virtual)
> > -#define set_page_address(page, address) \
> > - do { \
> > - (page)->virtual = (address); \
> > - } while(0)
> > -#define page_address_init() do { } while(0)
> > +/*
> > + * wrap page->virtual so it is safe to set/read locklessly
> > + */
> > +#define page_address(page) \
> > + ({ typeof((page)->virtual) v = (page)->virtual; \
> > + smp_read_barrier_depends(); \
> > + v; })
> > +
> > +static inline int set_page_address(struct page *page, void *address)
> > +{
> > + if (address)
> > + return cmpxchg(&page->virtual, NULL, address) == NULL;
> > + else {
> > + /*
> > + * cmpxchg is a bit abused because it is not guaranteed
> > + * safe wrt direct assignment on all platforms.
> > + */
> > + void *virt = page->virtual;
> > + return cmpxchg(&page->vitrual, virt, NULL) == virt;
> > + }
> > +}
>
> Have you verified that all architectures which can implement
> WANT_PAGE_VIRTUAL also implement cmpxchg?

It might have been my mistake in understanding the latest cmpxchg
thread. My understanding was that since LL/SC is not exposable as a low
level primitive, all platforms should implement a cmpxchg, even though on
some it would not be safe against direct assignment.

Anyway, I'll do as Nick says and replace it with atomic_long_cmpxchg.
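
(A rough sketch of what that conversion might look like, assuming
page->virtual is changed to an atomic_long_t; illustrative only, not the
eventual patch:)

/* sketch: assumes struct page's member becomes "atomic_long_t virtual" */
static inline int set_page_address(struct page *page, void *address)
{
	unsigned long new = (unsigned long)address;
	unsigned long old;

	if (address)	/* NULL -> virt: only succeed if it was unset */
		return atomic_long_cmpxchg(&page->virtual, 0, new) == 0;

	/* virt -> NULL: clear whatever is currently there */
	old = atomic_long_read(&page->virtual);
	return atomic_long_cmpxchg(&page->virtual, old, 0) == old;
}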

> Have you verified that sufficient headers are included for this to compile
> correctly on all WANT_PAGE_VIRTUAL-enabling architectures on all configs?
> I don't see asm/system.h being included in mm.h and if I get yet another
> damned it-wont-compile patch I might do something irreversible.

Point taken.

> > +static int pkmap_get_free(void)
> > {
> > - unsigned long vaddr;
> > - int count;
> > + int i, pos, flush;
> > + DECLARE_WAITQUEUE(wait, current);
> >
> > -start:
> > - count = LAST_PKMAP;
> > - /* Find an empty entry */
> > - for (;;) {
> > - last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
>
> The old code used masking.
>
> > - if (!last_pkmap_nr) {
> > - flush_all_zero_pkmaps();
> > - count = LAST_PKMAP;
> > - }
> > - if (!pkmap_count[last_pkmap_nr])
> > - break; /* Found a usable entry */
> > - if (--count)
> > - continue;
> > +restart:
> > + for (i = 0; i < LAST_PKMAP; i++) {
> > + pos = atomic_inc_return(&pkmap_hand) % LAST_PKMAP;
>
> The new code does more-expensive modulus. Necessary?

I thought GCC would automagically use masking when presented with a
power-of-two constant. Can make it more explicit though.
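
(A small standalone illustration of the difference: LAST_PKMAP is a power of
two, but atomic_inc_return() returns a signed int, and signed % by a power
of two is not a plain AND -- the compiler has to emit fix-up code and the
result can go negative after wrap-around, while masking cannot. The value
512 below is an example only.)

#include <stdio.h>

#define LAST_PKMAP	512			/* example value only */
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)

int main(void)
{
	int hand = -3;		/* a counter that has wrapped negative */

	/* signed modulus: result keeps the sign of the dividend */
	printf("modulus: %d\n", hand % LAST_PKMAP);			/* -3  */

	/* masking the value as unsigned always lands in 0..511 */
	printf("mask:    %u\n", (unsigned int)hand & LAST_PKMAP_MASK);	/* 509 */

	return 0;
}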

> > + flush = pkmap_try_free(pos);
> > + if (flush >= 0)
> > + goto got_one;
> > + }
> > +
> > + /*
> > + * wait for somebody else to unmap their entries
> > + */
> > + __set_current_state(TASK_UNINTERRUPTIBLE);
> > + add_wait_queue(&pkmap_map_wait, &wait);
> > + schedule();
> > + remove_wait_queue(&pkmap_map_wait, &wait);
>
> This looks wrong. What happens if everyone else does their unmap between
> the __set_current_state() and the add_wait_queue()?

Eek, you are quite right.
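
(Sketch of the usual way to close that window -- not from the posted patch:
register on the wait queue before scanning, e.g. with prepare_to_wait() and
finish_wait(), so a wakeup between a failed scan and the sleep cannot be
lost. A fast path could keep one unqueued scan in front of the loop.)

static int pkmap_get_free(void)
{
	DEFINE_WAIT(wait);
	int i, pos, flush;

	for (;;) {
		/* queue ourselves first, then look for a slot */
		prepare_to_wait(&pkmap_map_wait, &wait, TASK_UNINTERRUPTIBLE);
		for (i = 0; i < LAST_PKMAP; i++) {
			pos = atomic_inc_return(&pkmap_hand) & LAST_PKMAP_MASK;
			flush = pkmap_try_free(pos);
			if (flush >= 0)
				goto got_one;
		}
		/*
		 * Any pkmap_put() that ran after prepare_to_wait() has
		 * already set us runnable, so schedule() returns promptly.
		 */
		schedule();
	}

got_one:
	finish_wait(&pkmap_map_wait, &wait);
	/* ... TLB-flush batching as in the posted patch ... */
	return pos;
}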

2007-01-29 19:09:33

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Andrew Morton <[email protected]> wrote:

> > Eradicate global locks.
> >
> > - kmap_lock is removed by extensive use of atomic_t, a new flush
> > scheme and modifying set_page_address to only allow NULL<->virt
> > transitions.

> I really don't recall any performance problems being reported out of
> that code in recent years.

well, almost nobody profiles 32-bit boxes. I personally always knew that
kmap() sucks on certain 32-bit SMP workloads (and -rt's scheduling model
makes such bottlenecks even more apparent) - but many people acted in
the belief that 64-bit is all that matters and 32-bit scalability is
obsolete. Here are the numbers that i think change the picture:

http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total
http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

For every 64-bit Fedora box there's more than seven 32-bit boxes. I
think 32-bit is going to live with us far longer than many thought, so
we might as well make it work better. Both HIGHMEM and HIGHPTE are the
default on many distro kernels, which pushes the kmap infrastructure
quite a bit.

> As Christoph says, it's very much preferred that code be migrated over
> to kmap_atomic(). Partly because kmap() is deadlockable in situations
> where a large number of threads are trying to take two kmaps at the
> same time and we run out. This happened in the past, but incidences
> have gone away, probably because of kmap->kmap_atomic conversions.

the problem is that everything that was easy to migrate was migrated off
kmap() already - and it's exactly those hard cases that cannot be
converted (like the pagecache use) which is the most frequent kmap()
users.

While "it would be nice" to eliminate kmap(), but reality is that it's
here and the patches from Peter to make it (quite a bit) more scalable
are here as well.

plus, with these fixes kmap() is actually faster than kmap_atomic().
(because kunmap_atomic() necessitates an INVLPG instruction which is
quite slow.)

Ingo

2007-01-29 19:19:18

by Hugh Dickins

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Mon, 29 Jan 2007, Ingo Molnar wrote:
>
> For every 64-bit Fedora box there's more than seven 32-bit boxes. I
> think 32-bit is going to live with us far longer than many thought, so
> we might as well make it work better. Both HIGHMEM and HIGHPTE is the
> default on many distro kernels, which pushes the kmap infrastructure
> quite a bit.

But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

Hugh

2007-01-29 19:54:53

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Hugh Dickins <[email protected]> wrote:

> > For every 64-bit Fedora box there's more than seven 32-bit boxes. I
> > think 32-bit is going to live with us far longer than many thought,
> > so we might as well make it work better. Both HIGHMEM and HIGHPTE is
> > the default on many distro kernels, which pushes the kmap
> > infrastructure quite a bit.
>
> But HIGHPTE uses kmap_atomic (in mainline: does -rt use kmap there?)

The contention i saw was on mainline and in the pagecache uses of
kmap(). With HIGHPTE i only meant that typically every available highmem
option is enabled on 32-bit distro kernel rpms, to make it work on as
wide a selection of hardware as possible. Sometimes PAE is split into a
separate rpm, but mostly there's just one 32-bit kernel.

Ingo

2007-01-29 20:08:19

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c


* Ingo Molnar <[email protected]> wrote:

> Here are the numbers that i think changes the picture:

i forgot to explain them:

current (estimated) total installed base of 32-bit (i686) Fedora:

> http://www.fedoraproject.org/awstats/stats/updates-released-fc6-i386.total

current (estimated) total installed base of 64-bit (x86_64) Fedora:

> http://www.fedoraproject.org/awstats/stats/updates-released-fc6-x86_64.total

current (estimated) total installed base of PPC(64) Fedora:

> http://www.fedoraproject.org/awstats/stats/updates-released-fc6-ppc.total

these are updated daily i think. The counters started late October 2006,
when FC6 was released.

Ingo

2007-01-30 01:31:22

by Martin Bligh

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

Peter Zijlstra wrote:
> On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
>
>> As Christoph says, it's very much preferred that code be migrated over to
>> kmap_atomic(). Partly because kmap() is deadlockable in situations where a
>> large number of threads are trying to take two kmaps at the same time and
>> we run out. This happened in the past, but incidences have gone away,
>> probably because of kmap->kmap_atomic conversions.
>
>> From which callsite have you measured problems?
>
> CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> mainline.
>

CONFIG_HIGHPTE is always horrid - we've known that for years.
Don't use it.

If that's all we're fixing here, I'd be highly suspect ...

M.

2007-01-30 01:41:45

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Mon, 29 Jan 2007 17:31:20 -0800
"Martin J. Bligh" <[email protected]> wrote:

> Peter Zijlstra wrote:
> > On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
> >
> >> As Christoph says, it's very much preferred that code be migrated over to
> >> kmap_atomic(). Partly because kmap() is deadlockable in situations where a
> >> large number of threads are trying to take two kmaps at the same time and
> >> we run out. This happened in the past, but incidences have gone away,
> >> probably because of kmap->kmap_atomic conversions.
> >
> >> From which callsite have you measured problems?
> >
> > CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> > mainline.
> >
>
> CONFIG_HIGHPTE is always horrid -we've known that for years.

We have? What's wrong with it? <looks around for bug reports>

> Don't use it.
>
> If that's all we're fixing here, I'd be highly suspect ...

highpte uses atomic kmaps - it is unrelated to this work.

2007-01-30 01:49:16

by Martin Bligh

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

Andrew Morton wrote:
> On Mon, 29 Jan 2007 17:31:20 -0800
> "Martin J. Bligh" <[email protected]> wrote:
>
>> Peter Zijlstra wrote:
>>> On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
>>>
>>>> As Christoph says, it's very much preferred that code be migrated over to
>>>> kmap_atomic(). Partly because kmap() is deadlockable in situations where a
>>>> large number of threads are trying to take two kmaps at the same time and
>>>> we run out. This happened in the past, but incidences have gone away,
>>>> probably because of kmap->kmap_atomic conversions.
>>>> From which callsite have you measured problems?
>>> CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
>>> mainline.
>>>
>> CONFIG_HIGHPTE is always horrid -we've known that for years.
>
> We have? What's wrong with it? <looks around for bug reports>

http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

July 2003.


M.

2007-01-30 02:02:50

by Nick Piggin

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

Ingo Molnar wrote:

> For every 64-bit Fedora box there's more than seven 32-bit boxes. I
> think 32-bit is going to live with us far longer than many thought, so
> we might as well make it work better. Both HIGHMEM and HIGHPTE is the
> default on many distro kernels, which pushes the kmap infrastructure
> quite a bit.

I don't think anybody would argue against numbers, but just that there
are not many big 32-bit SMPs anymore. And if Bill Irwin didn't fix the
kmap problem back then, it would be interesting to see a system and
workload where it actually is a bottleneck.

Not that I'm against any patch to improve scalability, if it doesn't
hurt single-threaded performance ;)

> the problem is that everything that was easy to migrate was migrated off
> kmap() already - and it's exactly those hard cases that cannot be
> converted (like the pagecache use) which is the most frequent kmap()
> users.

Which pagecache use? file_read_actor()?

--
SUSE Labs, Novell Inc.
Send instant messages to your online friends http://au.messenger.yahoo.com

2007-01-30 02:16:33

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Mon, 29 Jan 2007 17:49:14 -0800
"Martin J. Bligh" <[email protected]> wrote:

> Andrew Morton wrote:
> > On Mon, 29 Jan 2007 17:31:20 -0800
> > "Martin J. Bligh" <[email protected]> wrote:
> >
> >> Peter Zijlstra wrote:
> >>> On Sun, 2007-01-28 at 14:29 -0800, Andrew Morton wrote:
> >>>
> >>>> As Christoph says, it's very much preferred that code be migrated over to
> >>>> kmap_atomic(). Partly because kmap() is deadlockable in situations where a
> >>>> large number of threads are trying to take two kmaps at the same time and
> >>>> we run out. This happened in the past, but incidences have gone away,
> >>>> probably because of kmap->kmap_atomic conversions.
> >>>> From which callsite have you measured problems?
> >>> CONFIG_HIGHPTE code in -rt was horrid. I'll do some measurements on
> >>> mainline.
> >>>
> >> CONFIG_HIGHPTE is always horrid -we've known that for years.
> >
> > We have? What's wrong with it? <looks around for bug reports>
>
> http://www.ussg.iu.edu/hypermail/linux/kernel/0307.0/0463.html

2% overhead for a pte-intensive workload for unknown reasons four years
ago. Sort of a mini-horrid, no?

We still don't know what is the source of kmap() activity which
necessitated this patch btw. AFAIK the busiest source is ext2 directories,
but perhaps NFS under certain conditions?

<looks at xfs_iozero>

->prepare_write no longer requires that the caller kmap the page.

2007-01-31 00:45:21

by David Chinner

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> We still don't know what is the source of kmap() activity which
> necessitated this patch btw. AFAIK the busiest source is ext2 directories,
> but perhaps NFS under certain conditions?
>
> <looks at xfs_iozero>
>
> ->prepare_write no longer requires that the caller kmap the page.

Agreed, but don't we (xfs_iozero) have to map it first to zero it?

I think what you are saying here, Andrew, is that we can
do something like:

page = grab_cache_page
->prepare_write(page)
kaddr = kmap_atomic(page, KM_USER0)
memset(kaddr+offset, 0, bytes)
flush_dcache_page(page)
kunmap_atomic(kaddr, KM_USER0)
->commit_write(page)

to avoid using kmap() altogether?

Cheers,

Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
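
Spelled out as C, the sequence above might look roughly like the sketch
below: a hypothetical helper (not the actual xfs_iozero), with error
handling trimmed and the ->prepare_write()/->commit_write() plumbing
assumed to do the buffer mapping:

/*
 * Hypothetical helper: zero a sub-page range without kmap(), letting
 * ->prepare_write() map buffers and using a short-lived atomic kmap
 * for the memset. Assumes linux/pagemap.h and linux/highmem.h.
 */
static int zero_partial_page(struct address_space *mapping, pgoff_t index,
			     unsigned offset, unsigned bytes)
{
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct page *page;
	void *kaddr;
	int status;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	status = a_ops->prepare_write(NULL, page, offset, offset + bytes);
	if (status == 0) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, 0, bytes);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);

		status = a_ops->commit_write(NULL, page, offset,
					     offset + bytes);
	}

	unlock_page(page);
	page_cache_release(page);
	return status;
}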

2007-01-31 01:11:50

by Andrew Morton

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Wed, 31 Jan 2007 11:44:36 +1100
David Chinner <[email protected]> wrote:

> On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> > We still don't know what is the source of kmap() activity which
> > necessitated this patch btw. AFAIK the busiest source is ext2 directories,
> > but perhaps NFS under certain conditions?
> >
> > <looks at xfs_iozero>
> >
> > ->prepare_write no longer requires that the caller kmap the page.
>
> Agreed, but don't we (xfs_iozero) have to map it first to zero it?
>
> I think what you are saying here, Andrew, is that we can
> do something like:
>
> page = grab_cache_page
> ->prepare_write(page)
> kaddr = kmap_atomic(page, KM_USER0)
> memset(kaddr+offset, 0, bytes)
> flush_dcache_page(page)
> kunmap_atomic(kaddr, KM_USER0)
> ->commit_write(page)
>
> to avoid using kmap() altogether?
>

Yup. Even better, use clear_highpage().
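
clear_highpage() keeps the mapping entirely inside an atomic kmap, so the
caller never touches kmap() at all. Roughly, paraphrased from
include/linux/highmem.h of this vintage rather than quoted verbatim:

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);		/* architecture-optimized full-page clear */
	kunmap_atomic(kaddr, KM_USER0);
}

It only handles whole pages, though, which is where the partial-page
helper in the next reply comes in.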

2007-01-31 03:23:03

by David Chinner

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Tue, Jan 30, 2007 at 05:11:32PM -0800, Andrew Morton wrote:
> On Wed, 31 Jan 2007 11:44:36 +1100
> David Chinner <[email protected]> wrote:
>
> > On Mon, Jan 29, 2007 at 06:15:57PM -0800, Andrew Morton wrote:
> > > We still don't know what is the source of kmap() activity which
> > > necessitated this patch btw. AFAIK the busiest source is ext2 directories,
> > > but perhaps NFS under certain conditions?
> > >
> > > <looks at xfs_iozero>
> > >
> > > ->prepare_write no longer requires that the caller kmap the page.
> >
> > Agreed, but don't we (xfs_iozero) have to map it first to zero it?
> >
> > I think what you are saying here, Andrew, is that we can
> > do something like:
> >
> > page = grab_cache_page
> > ->prepare_write(page)
> > kaddr = kmap_atomic(page, KM_USER0)
> > memset(kaddr+offset, 0, bytes)
> > flush_dcache_page(page)
> > kunmap_atomic(kaddr, KM_USER0)
> > ->commit_write(page)
> >
> > to avoid using kmap() altogether?
>
> Yup. Even better, use clear_highpage().

For even more goodness, clearmem_highpage_flush() does exactly
the right thing for partial page zeroing ;)

Thanks, Andrew, I've added a patch to my QA tree with this mod.

Cheers,

Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
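
The partial-page helper being referred to here (memclear_highpage_flush()
in the tree; the naming mixup gets sorted out further down the thread)
bundles the same four-step kmap_atomic/memset/flush/kunmap_atomic sequence,
again paraphrased rather than quoted verbatim:

static inline void memclear_highpage_flush(struct page *page,
					   unsigned int offset,
					   unsigned int size)
{
	void *kaddr;

	BUG_ON(offset + size > PAGE_SIZE);

	kaddr = kmap_atomic(page, KM_USER0);
	memset((char *)kaddr + offset, 0, size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}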

2007-02-02 12:05:31

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > Yup. Even better, use clear_highpage().
>
> For even more goodness, clearmem_highpage_flush() does exactly
> the right thing for partial page zeroing ;)

Note that there are tons of places in buffer.c that could use
clearmem_highpage_flush(). See the so far untested patch below:


Index: linux-2.6/fs/buffer.c
===================================================================
--- linux-2.6.orig/fs/buffer.c 2007-02-02 12:53:51.000000000 +0100
+++ linux-2.6/fs/buffer.c 2007-02-02 12:59:42.000000000 +0100
@@ -1858,13 +1858,8 @@
if (block_start >= to)
break;
if (buffer_new(bh)) {
- void *kaddr;
-
clear_buffer_new(bh);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+block_start, 0, bh->b_size);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, block_start, bh->b_size);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
@@ -1952,10 +1947,8 @@
SetPageError(page);
}
if (!buffer_mapped(bh)) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + i * blocksize, 0, blocksize);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, i * blocksize,
+ blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@@ -2098,7 +2091,6 @@
long status;
unsigned zerofrom;
unsigned blocksize = 1 << inode->i_blkbits;
- void *kaddr;

while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
status = -ENOMEM;
@@ -2120,10 +2112,8 @@
PAGE_CACHE_SIZE, get_block);
if (status)
goto out_unmap;
- kaddr = kmap_atomic(new_page, KM_USER0);
- memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
- flush_dcache_page(new_page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(new_page, zerofrom,
+ PAGE_CACHE_SIZE - zerofrom);
generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
unlock_page(new_page);
page_cache_release(new_page);
@@ -2150,10 +2140,7 @@
if (status)
goto out1;
if (zerofrom < offset) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+zerofrom, 0, offset-zerofrom);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, zerofrom, offset - zerofrom);
__block_commit_write(inode, page, zerofrom, offset);
}
return 0;
@@ -2368,10 +2355,7 @@
* Error recovery is pretty slack. Clear the page and mark it dirty
* so we'll later zero out any blocks which _were_ allocated.
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
set_page_dirty(page);
return ret;
@@ -2405,7 +2389,6 @@
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;
int ret;

/* Is the page fully inside i_size? */
@@ -2436,10 +2419,7 @@
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
@@ -2460,7 +2440,6 @@
unsigned to;
struct page *page;
const struct address_space_operations *a_ops = mapping->a_ops;
- char *kaddr;
int ret = 0;

if ((offset & (blocksize - 1)) == 0)
@@ -2474,10 +2453,7 @@
to = (offset + blocksize) & ~(blocksize - 1);
ret = a_ops->prepare_write(NULL, page, offset, to);
if (ret == 0) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
set_page_dirty(page);
}
unlock_page(page);
@@ -2498,7 +2474,6 @@
struct inode *inode = mapping->host;
struct page *page;
struct buffer_head *bh;
- void *kaddr;
int err;

blocksize = 1 << inode->i_blkbits;
@@ -2552,11 +2527,7 @@
goto unlock;
}

- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
-
+ memclear_highpage_flush(page, offset, length);
mark_buffer_dirty(bh);
err = 0;

@@ -2577,7 +2548,6 @@
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;

/* Is the page fully inside i_size? */
if (page->index < end_index)
@@ -2603,10 +2573,7 @@
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
return __block_write_full_page(inode, page, get_block, wbc);
}

2007-02-02 19:37:51

by Matt Mackall

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > Yup. Even better, use clear_highpage().
> >
> > For even more goodness, clearmem_highpage_flush() does exactly
> > the right thing for partial page zeroing ;)
>
> Note that there are tons of places in buffer.c that could use
> clearmem_highpage_flush(). See the so far untested patch below:
>

You probably need s/memclear/clearmem/g..

> Index: linux-2.6/fs/buffer.c
> ===================================================================
> --- linux-2.6.orig/fs/buffer.c 2007-02-02 12:53:51.000000000 +0100
> +++ linux-2.6/fs/buffer.c 2007-02-02 12:59:42.000000000 +0100
> @@ -1858,13 +1858,8 @@
> if (block_start >= to)
> break;
> if (buffer_new(bh)) {
> - void *kaddr;
> -
> clear_buffer_new(bh);
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr+block_start, 0, bh->b_size);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, block_start, bh->b_size);
> set_buffer_uptodate(bh);
> mark_buffer_dirty(bh);
> }
> @@ -1952,10 +1947,8 @@
> SetPageError(page);
> }
> if (!buffer_mapped(bh)) {
> - void *kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + i * blocksize, 0, blocksize);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, i * blocksize,
> + blocksize);
> if (!err)
> set_buffer_uptodate(bh);
> continue;
> @@ -2098,7 +2091,6 @@
> long status;
> unsigned zerofrom;
> unsigned blocksize = 1 << inode->i_blkbits;
> - void *kaddr;
>
> while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
> status = -ENOMEM;
> @@ -2120,10 +2112,8 @@
> PAGE_CACHE_SIZE, get_block);
> if (status)
> goto out_unmap;
> - kaddr = kmap_atomic(new_page, KM_USER0);
> - memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
> - flush_dcache_page(new_page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(new_page, zerofrom,
> + PAGE_CACHE_SIZE - zerofrom);
> generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
> unlock_page(new_page);
> page_cache_release(new_page);
> @@ -2150,10 +2140,7 @@
> if (status)
> goto out1;
> if (zerofrom < offset) {
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr+zerofrom, 0, offset-zerofrom);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, zerofrom, offset - zerofrom);
> __block_commit_write(inode, page, zerofrom, offset);
> }
> return 0;
> @@ -2368,10 +2355,7 @@
> * Error recovery is pretty slack. Clear the page and mark it dirty
> * so we'll later zero out any blocks which _were_ allocated.
> */
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr, 0, PAGE_CACHE_SIZE);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
> SetPageUptodate(page);
> set_page_dirty(page);
> return ret;
> @@ -2405,7 +2389,6 @@
> loff_t i_size = i_size_read(inode);
> const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
> unsigned offset;
> - void *kaddr;
> int ret;
>
> /* Is the page fully inside i_size? */
> @@ -2436,10 +2419,7 @@
> * the page size, the remaining memory is zeroed when mapped, and
> * writes to that region are not written out to the file."
> */
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
> out:
> ret = mpage_writepage(page, get_block, wbc);
> if (ret == -EAGAIN)
> @@ -2460,7 +2440,6 @@
> unsigned to;
> struct page *page;
> const struct address_space_operations *a_ops = mapping->a_ops;
> - char *kaddr;
> int ret = 0;
>
> if ((offset & (blocksize - 1)) == 0)
> @@ -2474,10 +2453,7 @@
> to = (offset + blocksize) & ~(blocksize - 1);
> ret = a_ops->prepare_write(NULL, page, offset, to);
> if (ret == 0) {
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
> set_page_dirty(page);
> }
> unlock_page(page);
> @@ -2498,7 +2474,6 @@
> struct inode *inode = mapping->host;
> struct page *page;
> struct buffer_head *bh;
> - void *kaddr;
> int err;
>
> blocksize = 1 << inode->i_blkbits;
> @@ -2552,11 +2527,7 @@
> goto unlock;
> }
>
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + offset, 0, length);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> -
> + memclear_highpage_flush(page, offset, length);
> mark_buffer_dirty(bh);
> err = 0;
>
> @@ -2577,7 +2548,6 @@
> loff_t i_size = i_size_read(inode);
> const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
> unsigned offset;
> - void *kaddr;
>
> /* Is the page fully inside i_size? */
> if (page->index < end_index)
> @@ -2603,10 +2573,7 @@
> * the page size, the remaining memory is zeroed when mapped, and
> * writes to that region are not written out to the file."
> */
> - kaddr = kmap_atomic(page, KM_USER0);
> - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
> - flush_dcache_page(page);
> - kunmap_atomic(kaddr, KM_USER0);
> + memclear_highpage_flush(page, offset, PAGE_CACHE_SIZE - offset);
> return __block_write_full_page(inode, page, get_block, wbc);
> }
>

--
Mathematics is the supreme nostalgia of our time.

2007-02-02 23:14:53

by David Chinner

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > Yup. Even better, use clear_highpage().
> >
> > For even more goodness, clearmem_highpage_flush() does exactly
> > the right thing for partial page zeroing ;)
>
> Note that there are tons of places in buffer.c that could use
> clearmem_highpage_flush(). See the so far untested patch below:

Runs through XFSQA just fine. Looks good to me, Christoph.

Cheers,

Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group

2007-02-02 23:17:30

by David Chinner

[permalink] [raw]
Subject: Re: [PATCH] mm: remove global locks from mm/highmem.c

On Fri, Feb 02, 2007 at 01:24:40PM -0600, Matt Mackall wrote:
> On Fri, Feb 02, 2007 at 12:05:11PM +0000, Christoph Hellwig wrote:
> > On Wed, Jan 31, 2007 at 02:22:24PM +1100, David Chinner wrote:
> > > > Yup. Even better, use clear_highpage().
> > >
> > > For even more goodness, clearmem_highpage_flush() does exactly
> > > the right thing for partial page zeroing ;)
> >
> > Note that there are tons of places in buffer.c that could use
> > clearmem_highpage_flush(). See the so far untested patch below:
> >
>
> You probably need s/memclear/clearmem/g..

Not needed - as usual, the code is right and the comments
are wrong. ;)

Cheers,

Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group