2010-04-09 06:08:06

by Yinghai Lu

Subject: [PATCH -v12 00/39] use lmb with x86

The new lmb code can be used to replace early_res in x86.

Suggested by: David, Ben, and Thomas

-v6: change the sequence as requested by Thomas
-v7: separate the changes into more patches
-v8: add boundary checking to make sure we do not free partial pages
-v9: use lmb_debug to control the print-out of reserve_lmb;
     add e820 cleanup, and make e820 __initdata
-v10: use lmb.rmo_size and ARCH_DISCARD_LMB, according to Michael;
      rename to lmb_find_area/reserve_lmb_area/free_lmb_area,
      according to Michael;
      update find_lmb_area to use __lmb_alloc_base, according to Ben
-v11: move find_lmb_area_size back to x86;
      x86 keeps its own find_lmb_area, which can be disabled by
      ARCH_LMB_FIND_AREA, because _lmb_find_base behaves differently
      from x86's old one (one searches from high to low, the other
      from low to high) and needs more testing;
      tested on x86 32-bit/64-bit, NUMA/non-NUMA, nobootmem/bootmem
-v12: refresh the series against current tip;
      separate out nobootmem.c, so some #ifdefs can be removed;
      still keep CONFIG_NO_BOOTMEM in the x86 .c files, usable as tags
      so other lmb users can refer to them when adopting NO_BOOTMEM

find_lmb_area is still kept; those find_lmb_area calls may be replaced
with lmb_alloc later, if everything goes fine.

Please put them into tip and -next for more test coverage.

Thanks

Yinghai


2010-04-09 06:07:24

by Yinghai Lu

Subject: [PATCH 22/39] x86, lmb: Add lmb_find_area_size()

The size is returned according to the free range found.
It will be used to find free ranges for early_memtest and the memory
corruption check.

Do not mix it into mm/lmb.c yet.
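
For reference, a sketch of the intended call pattern, modeled on the
do_one_pass() conversion in arch/x86/mm/memtest.c later in this series
(start_phys/end_phys are illustrative):

	u64 addr = start_phys, size = 0;

	while (addr < end_phys) {
		/* next free range at or after addr, or -1ULL if none */
		addr = lmb_find_area_size(addr, &size, 1);
		if (addr >= end_phys)
			break;
		if (addr + size > end_phys)
			size = end_phys - addr;

		/* ... scan/test the free range [addr, addr + size) ... */

		addr += size;
	}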

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/mm/Makefile | 2 +
arch/x86/mm/lmb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 90 insertions(+), 0 deletions(-)
create mode 100644 arch/x86/mm/lmb.c

diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index a4c7683..8ab0505 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
obj-$(CONFIG_K8_NUMA) += k8topology_64.o
obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o

+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
obj-$(CONFIG_MEMTEST) += memtest.o
diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c
new file mode 100644
index 0000000..3229e9e
--- /dev/null
+++ b/arch/x86/mm/lmb.c
@@ -0,0 +1,88 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/range.h>
+
+/* Check for already reserved areas */
+static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+{
+ int i;
+ u64 addr = *addrp, last;
+ u64 size = *sizep;
+ bool changed = false;
+again:
+ last = addr + size;
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ if (last > r->base && addr < r->base) {
+ size = r->base - addr;
+ changed = true;
+ goto again;
+ }
+ if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+ addr = round_up(r->base + r->size, align);
+ size = last - addr;
+ changed = true;
+ goto again;
+ }
+ if (last <= (r->base + r->size) && addr >= r->base) {
+ (*sizep)++;
+ return false;
+ }
+ }
+ if (changed) {
+ *addrp = addr;
+ *sizep = size;
+ }
+ return changed;
+}
+
+static u64 __init __lmb_find_area_size(u64 ei_start, u64 ei_last, u64 start,
+ u64 *sizep, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ *sizep = ei_last - addr;
+ while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
+ ;
+ last = addr + *sizep;
+ if (last > ei_last)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
+
+/*
+ * Find next free range after *start
+ */
+u64 __init lmb_find_area_size(u64 start, u64 *sizep, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = __lmb_find_area_size(ei_start, ei_last, start,
+ sizep, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+
+ return -1ULL;
+}
+
--
1.6.4.2

2010-04-09 06:07:16

by Yinghai Lu

Subject: [PATCH 15/39] lmb: Add lmb_find_area_node()

It can be used to find a place for NODE_DATA for NUMA.

Make sure early_node_map[] is filled before it is called; otherwise it
will fall back to lmb_find_area() over the node's range.
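
An illustrative call site (the bounds and the reserve step are sketched,
not taken from this patch):

	u64 nd_pa;

	/* start/end would normally be the node's own address range */
	nd_pa = lmb_find_area_node(nid, start, end,
				   sizeof(pg_data_t), PAGE_SIZE);
	if (nd_pa == -1ULL)
		panic("Cannot find memory for NODE_DATA %d\n", nid);

	/* the find does not reserve; the caller does that itself */
	lmb_reserve_area(nd_pa, nd_pa + sizeof(pg_data_t), "NODE_DATA");
	NODE_DATA(nid) = __va(nd_pa);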

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 1 +
mm/lmb.c | 15 +++++++++++++++
2 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 5dbc4ef..4078825 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -91,6 +91,7 @@ void lmb_add_memory(u64 start, u64 end);
u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
+u64 lmb_find_area_node(int nid, u64 start, u64 end, u64 size, u64 align);

void lmb_to_bootmem(u64 start, u64 end);
struct range;
diff --git a/mm/lmb.c b/mm/lmb.c
index cf0f1c9..d3a58fb 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -791,6 +791,21 @@ u64 __init __weak lmb_find_area(u64 start, u64 end, u64 size, u64 align)
return -1ULL;
}
/*
+ * Need to call this function after lmb_register_active_regions,
+ * so early_node_map[] is filled already.
+ */
+u64 __init lmb_find_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* Fallback, should already have start end within node range */
+ return lmb_find_area(start, end, size, align);
+}
+
+/*
* Finds an active region in the address range from start_pfn to last_pfn and
* returns its range in ei_startpfn and ei_endpfn for the lmb entry.
*/
--
1.6.4.2

2010-04-09 06:07:29

by Yinghai Lu

Subject: [PATCH 25/39] x86: Replace e820_/_early string with lmb_

1. Include linux/lmb.h directly, so the e820.h references can be reduced
   later.
2. This patch was done mostly with sed scripts. The renames are:
   find_e820_area               -> lmb_find_area
   find_e820_area_size          -> lmb_find_area_size
   reserve_early                -> lmb_reserve_area
   free_early                   -> lmb_free_area
   e820_register_active_regions -> lmb_register_active_regions
   e820_hole_size               -> lmb_hole_size

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/include/asm/efi.h | 2 +-
arch/x86/kernel/acpi/sleep.c | 5 +++--
arch/x86/kernel/apic/numaq_32.c | 3 ++-
arch/x86/kernel/efi.c | 5 +++--
arch/x86/kernel/head32.c | 4 ++--
arch/x86/kernel/head64.c | 4 ++--
arch/x86/kernel/setup.c | 25 ++++++++++++-------------
arch/x86/kernel/trampoline.c | 6 +++---
arch/x86/mm/init.c | 5 +++--
arch/x86/mm/init_32.c | 10 ++++++----
arch/x86/mm/init_64.c | 9 +++++----
arch/x86/mm/k8topology_64.c | 4 +++-
arch/x86/mm/memtest.c | 7 +++----
arch/x86/mm/numa_32.c | 17 +++++++++--------
arch/x86/mm/numa_64.c | 32 ++++++++++++++++----------------
arch/x86/mm/srat_32.c | 3 ++-
arch/x86/mm/srat_64.c | 9 +++++----
arch/x86/xen/mmu.c | 5 +++--
arch/x86/xen/setup.c | 3 ++-
mm/bootmem.c | 4 ++--
20 files changed, 87 insertions(+), 75 deletions(-)

diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8406ed7..06703f3 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
#endif /* CONFIG_X86_32 */

extern int add_efi_memmap;
-extern void efi_reserve_early(void);
+extern void efi_lmb_reserve_area(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);

diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index f996103..4502470 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -7,6 +7,7 @@

#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <asm/segment.h>
@@ -133,7 +134,7 @@ void __init acpi_reserve_wakeup_memory(void)
return;
}

- mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
+ mem = lmb_find_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);

if (mem == -1L) {
printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
@@ -141,7 +142,7 @@ void __init acpi_reserve_wakeup_memory(void)
}
acpi_realmode = (unsigned long) phys_to_virt(mem);
acpi_wakeup_address = mem;
- reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
+ lmb_reserve_area(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
}


diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 3e28401..c71e494 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
@@ -88,7 +89,7 @@ static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
node_end_pfn[node] =
MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);

- e820_register_active_regions(node, node_start_pfn[node],
+ lmb_register_active_regions(node, node_start_pfn[node],
node_end_pfn[node]);

memory_present(node, node_start_pfn[node], node_end_pfn[node]);
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 299f03f..bb31919 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
@@ -275,7 +276,7 @@ static void __init do_add_efi_memmap(void)
sanitize_e820_map();
}

-void __init efi_reserve_early(void)
+void __init efi_lmb_reserve_area(void)
{
unsigned long pmap;

@@ -290,7 +291,7 @@ void __init efi_reserve_early(void)
boot_params.efi_info.efi_memdesc_size;
memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
- reserve_early(pmap, pmap + memmap.nr_map * memmap.desc_size,
+ lmb_reserve_area(pmap, pmap + memmap.nr_map * memmap.desc_size,
"EFI memmap");
}

diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index ab3e366..ecd12a9 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -42,7 +42,7 @@ void __init i386_start_kernel(void)
lmb_reserve_area(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif

- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ lmb_reserve_area(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -51,7 +51,7 @@ void __init i386_start_kernel(void)
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
+ lmb_reserve_area(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 89dd2de..4063134 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -101,7 +101,7 @@ void __init x86_64_start_reservations(char *real_mode_data)

copy_bootdata(__va(real_mode_data));

- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ lmb_reserve_area(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");

#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -110,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
+ lmb_reserve_area(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 61ff9de..4d2a984 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -302,7 +302,7 @@ static inline void init_gbpages(void)
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
- reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK");
+ lmb_reserve_area(__pa(_brk_start), __pa(_brk_end), "BRK");

/* Mark brk area as locked down and no longer taking any
new allocations */
@@ -324,7 +324,7 @@ static void __init relocate_initrd(void)
char *p, *q;

/* We need to move the initrd down into lowmem */
- ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
+ ramdisk_here = lmb_find_area(0, end_of_lowmem, area_size,
PAGE_SIZE);

if (ramdisk_here == -1ULL)
@@ -333,8 +333,7 @@ static void __init relocate_initrd(void)

/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- reserve_early(ramdisk_here, ramdisk_here + area_size,
- "NEW RAMDISK");
+ lmb_reserve_area(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -390,7 +389,7 @@ static void __init reserve_initrd(void)
initrd_start = 0;

if (ramdisk_size >= (end_of_lowmem>>1)) {
- free_early(ramdisk_image, ramdisk_end);
+ lmb_free_area(ramdisk_image, ramdisk_end);
printk(KERN_ERR "initrd too large to handle, "
"disabling initrd\n");
return;
@@ -413,7 +412,7 @@ static void __init reserve_initrd(void)

relocate_initrd();

- free_early(ramdisk_image, ramdisk_end);
+ lmb_free_area(ramdisk_image, ramdisk_end);
}
#else
static void __init reserve_initrd(void)
@@ -469,7 +468,7 @@ static void __init e820_reserve_setup_data(void)
e820_print_map("reserve setup_data");
}

-static void __init reserve_early_setup_data(void)
+static void __init lmb_reserve_area_setup_data(void)
{
struct setup_data *data;
u64 pa_data;
@@ -481,7 +480,7 @@ static void __init reserve_early_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
sprintf(buf, "setup data %x", data->type);
- reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+ lmb_reserve_area(pa_data, pa_data+sizeof(*data)+data->len, buf);
pa_data = data->next;
early_iounmap(data, sizeof(*data));
}
@@ -519,7 +518,7 @@ static void __init reserve_crashkernel(void)
if (crash_base <= 0) {
const unsigned long long alignment = 16<<20; /* 16M */

- crash_base = find_e820_area(alignment, ULONG_MAX, crash_size,
+ crash_base = lmb_find_area(alignment, ULONG_MAX, crash_size,
alignment);
if (crash_base == -1ULL) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
@@ -528,14 +527,14 @@ static void __init reserve_crashkernel(void)
} else {
unsigned long long start;

- start = find_e820_area(crash_base, ULONG_MAX, crash_size,
+ start = lmb_find_area(crash_base, ULONG_MAX, crash_size,
1<<20);
if (start != crash_base) {
pr_info("crashkernel reservation failed - memory is in use.\n");
return;
}
}
- reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL");
+ lmb_reserve_area(crash_base, crash_base + crash_size, "CRASH KERNEL");

printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",
@@ -774,7 +773,7 @@ void __init setup_arch(char **cmdline_p)
#endif
4)) {
efi_enabled = 1;
- efi_reserve_early();
+ efi_lmb_reserve_area();
}
#endif

@@ -834,7 +833,7 @@ void __init setup_arch(char **cmdline_p)
vmi_activate();

/* after early param, so could get panic from serial */
- reserve_early_setup_data();
+ lmb_reserve_area_setup_data();

if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef6..1192dcb 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,7 +1,7 @@
#include <linux/io.h>
+#include <linux/lmb.h>

#include <asm/trampoline.h>
-#include <asm/e820.h>

#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
#define __trampinit
@@ -19,12 +19,12 @@ void __init reserve_trampoline_memory(void)
unsigned long mem;

/* Has to be in very low memory so we can execute real-mode AP code. */
- mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+ mem = lmb_find_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
if (mem == -1L)
panic("Cannot allocate trampoline\n");

trampoline_base = __va(mem);
- reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
+ lmb_reserve_area(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
}

/*
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index b278535..94f7cb9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -2,6 +2,7 @@
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
+#include <linux/lmb.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
@@ -75,7 +76,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
#else
start = 0x8000;
#endif
- e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+ e820_table_start = lmb_find_area(start, max_pfn_mapped<<PAGE_SHIFT,
tables, PAGE_SIZE);
if (e820_table_start == -1UL)
panic("Cannot find space for the kernel page tables");
@@ -299,7 +300,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
__flush_tlb_all();

if (!after_bootmem && e820_table_end > e820_table_start)
- reserve_early(e820_table_start << PAGE_SHIFT,
+ lmb_reserve_area(e820_table_start << PAGE_SHIFT,
e820_table_end << PAGE_SHIFT, "PGTABLE");

if (!after_bootmem)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 90e0545..c01c711 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -25,6 +25,7 @@
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
@@ -712,14 +713,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
highstart_pfn = highend_pfn = max_pfn;
if (max_pfn > max_low_pfn)
highstart_pfn = max_low_pfn;
- e820_register_active_regions(0, 0, highend_pfn);
+ lmb_register_active_regions(0, 0, highend_pfn);
sparse_memory_present_with_active_regions(0);
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
- e820_register_active_regions(0, 0, max_low_pfn);
+ lmb_register_active_regions(0, 0, max_low_pfn);
sparse_memory_present_with_active_regions(0);
num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
@@ -781,11 +782,11 @@ void __init setup_bootmem_allocator(void)
* Initialize the boot-time allocator (with low memory only):
*/
bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
+ bootmap = lmb_find_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
PAGE_SIZE);
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
- reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+ lmb_reserve_area(bootmap, bootmap + bootmap_size, "BOOTMAP");
#endif

printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
@@ -1069,3 +1070,4 @@ void mark_rodata_ro(void)
#endif
}
#endif
+
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 634fa08..0d2252c 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -21,6 +21,7 @@
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
@@ -577,18 +578,18 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
unsigned long bootmap_size, bootmap;

bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
- bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+ bootmap = lmb_find_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
PAGE_SIZE);
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
- reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+ lmb_reserve_area(bootmap, bootmap + bootmap_size, "BOOTMAP");
/* don't touch min_low_pfn */
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
0, end_pfn);
- e820_register_active_regions(0, start_pfn, end_pfn);
+ lmb_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
#else
- e820_register_active_regions(0, start_pfn, end_pfn);
+ lmb_register_active_regions(0, start_pfn, end_pfn);
#endif
}
#endif
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 970ed57..d7d031b 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -11,6 +11,8 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nodemask.h>
+#include <linux/lmb.h>
+
#include <asm/io.h>
#include <linux/pci_ids.h>
#include <linux/acpi.h>
@@ -222,7 +224,7 @@ int __init k8_scan_nodes(void)
for_each_node_mask(i, node_possible_map) {
int j;

- e820_register_active_regions(i,
+ lmb_register_active_regions(i,
nodes[i].start >> PAGE_SHIFT,
nodes[i].end >> PAGE_SHIFT);
for (j = apicid_base; j < cores + apicid_base; j++)
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 18d244f..0e4a006 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -6,8 +6,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pfn.h>
-
-#include <asm/e820.h>
+#include <linux/lmb.h>

static u64 patterns[] __initdata = {
0,
@@ -35,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
(unsigned long long) pattern,
(unsigned long long) start_bad,
(unsigned long long) end_bad);
- reserve_early(start_bad, end_bad, "BAD RAM");
+ lmb_reserve_area(start_bad, end_bad, "BAD RAM");
}

static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -74,7 +73,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
u64 size = 0;

while (start < end) {
- start = find_e820_area_size(start, &size, 1);
+ start = lmb_find_area_size(start, &size, 1);

/* done ? */
if (start >= end)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 809baaa..d8d655f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,6 +24,7 @@

#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
@@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void)

node_start_pfn[0] = 0;
node_end_pfn[0] = max_pfn;
- e820_register_active_regions(0, 0, max_pfn);
+ lmb_register_active_regions(0, 0, max_pfn);
memory_present(0, 0, max_pfn);
node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

@@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid)
NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
else {
unsigned long pgdat_phys;
- pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+ pgdat_phys = lmb_find_area(min_low_pfn<<PAGE_SHIFT,
max_pfn_mapped<<PAGE_SHIFT,
sizeof(pg_data_t),
PAGE_SIZE);
NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
memset(buf, 0, sizeof(buf));
sprintf(buf, "NODE_DATA %d", nid);
- reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
+ lmb_reserve_area(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
}
printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
nid, (unsigned long)NODE_DATA(nid));
@@ -291,7 +292,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
PTRS_PER_PTE);
node_kva_target <<= PAGE_SHIFT;
do {
- node_kva_final = find_e820_area(node_kva_target,
+ node_kva_final = lmb_find_area(node_kva_target,
((u64)node_end_pfn[nid])<<PAGE_SHIFT,
((u64)size)<<PAGE_SHIFT,
LARGE_PAGE_BYTES);
@@ -318,9 +319,9 @@ static __init unsigned long calculate_numa_remap_pages(void)
* but we could have some hole in high memory, and it will only
* check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
* to use it as free.
- * So reserve_early here, hope we don't run out of that array
+ * So lmb_reserve_area here, hope we don't run out of that array
*/
- reserve_early(node_kva_final,
+ lmb_reserve_area(node_kva_final,
node_kva_final+(((u64)size)<<PAGE_SHIFT),
"KVA RAM");

@@ -367,7 +368,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,

kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
do {
- kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+ kva_start_pfn = lmb_find_area(kva_target_pfn<<PAGE_SHIFT,
max_low_pfn<<PAGE_SHIFT,
kva_pages<<PAGE_SHIFT,
PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
@@ -382,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
printk(KERN_INFO "max_pfn = %lx\n", max_pfn);

/* avoid clash with initrd */
- reserve_early(kva_start_pfn<<PAGE_SHIFT,
+ lmb_reserve_area(kva_start_pfn<<PAGE_SHIFT,
(kva_start_pfn + kva_pages)<<PAGE_SHIFT,
"KVA PG");
#ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 6e0f896..18d2296 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -90,7 +90,7 @@ static int __init allocate_cachealigned_memnodemap(void)

addr = 0x8000;
nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
- nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
+ nodemap_addr = lmb_find_area(addr, max_pfn<<PAGE_SHIFT,
nodemap_size, L1_CACHE_BYTES);
if (nodemap_addr == -1UL) {
printk(KERN_ERR
@@ -99,7 +99,7 @@ static int __init allocate_cachealigned_memnodemap(void)
return -1;
}
memnodemap = phys_to_virt(nodemap_addr);
- reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
+ lmb_reserve_area(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
nodemap_addr, nodemap_addr + nodemap_size);
@@ -230,7 +230,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
if (node_data[nodeid] == NULL)
return;
nodedata_phys = __pa(node_data[nodeid]);
- reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
+ lmb_reserve_area(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
nodedata_phys + pgdat_size - 1);
nid = phys_to_nid(nodedata_phys);
@@ -249,7 +249,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
* Find a place for the bootmem map
* nodedata_phys could be on other nodes by alloc_bootmem,
* so need to sure bootmap_start not to be small, otherwise
- * early_node_mem will get that with find_e820_area instead
+ * early_node_mem will get that with lmb_find_area instead
* of alloc_bootmem, that could clash with reserved range
*/
bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
@@ -261,12 +261,12 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
bootmap = early_node_mem(nodeid, bootmap_start, end,
bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
if (bootmap == NULL) {
- free_early(nodedata_phys, nodedata_phys + pgdat_size);
+ lmb_free_area(nodedata_phys, nodedata_phys + pgdat_size);
node_data[nodeid] = NULL;
return;
}
bootmap_start = __pa(bootmap);
- reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+ lmb_reserve_area(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
"BOOTMAP");

bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
@@ -420,7 +420,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
nr_nodes = MAX_NUMNODES;
}

- size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
+ size = (max_addr - addr - lmb_hole_size(addr, max_addr)) / nr_nodes;
/*
* Calculate the number of big nodes that can be allocated as a result
* of consolidating the remainder.
@@ -456,7 +456,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
* non-reserved memory is less than the per-node size.
*/
while (end - physnodes[i].start -
- e820_hole_size(physnodes[i].start, end) < size) {
+ lmb_hole_size(physnodes[i].start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > physnodes[i].end) {
end = physnodes[i].end;
@@ -470,7 +470,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ lmb_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;

/*
@@ -479,7 +479,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
* physical node.
*/
if (physnodes[i].end - end -
- e820_hole_size(end, physnodes[i].end) < size)
+ lmb_hole_size(end, physnodes[i].end) < size)
end = physnodes[i].end;

/*
@@ -507,7 +507,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
u64 end = start + size;

- while (end - start - e820_hole_size(start, end) < size) {
+ while (end - start - lmb_hole_size(start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > max_addr) {
end = max_addr;
@@ -536,7 +536,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
* creates a uniform distribution of node sizes across the entire
* machine (but not necessarily over physical nodes).
*/
- min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) /
+ min_size = (max_addr - addr - lmb_hole_size(addr, max_addr)) /
MAX_NUMNODES;
min_size = max(min_size, FAKE_NODE_MIN_SIZE);
if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
@@ -569,7 +569,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ lmb_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;

/*
@@ -578,7 +578,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
* physical node.
*/
if (physnodes[i].end - end -
- e820_hole_size(end, physnodes[i].end) < size)
+ lmb_hole_size(end, physnodes[i].end) < size)
end = physnodes[i].end;

/*
@@ -642,7 +642,7 @@ static int __init numa_emulation(unsigned long start_pfn,
*/
remove_all_active_ranges();
for_each_node_mask(i, node_possible_map) {
- e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+ lmb_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
nodes[i].end >> PAGE_SHIFT);
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
}
@@ -695,7 +695,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
node_set(0, node_possible_map);
for (i = 0; i < nr_cpu_ids; i++)
numa_set_node(i, 0);
- e820_register_active_regions(0, start_pfn, last_pfn);
+ lmb_register_active_regions(0, start_pfn, last_pfn);
setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 9324f13..68dd606 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -25,6 +25,7 @@
*/
#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/nodemask.h>
@@ -264,7 +265,7 @@ int __init get_memcfg_from_srat(void)
if (node_read_chunk(chunk->nid, chunk))
continue;

- e820_register_active_regions(chunk->nid, chunk->start_pfn,
+ lmb_register_active_regions(chunk->nid, chunk->start_pfn,
min(chunk->end_pfn, max_pfn));
}
/* for out of order entries in SRAT */
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c6876..84e11b9 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
@@ -98,7 +99,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
unsigned long phys;

length = slit->header.length;
- phys = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, length,
+ phys = lmb_find_area(0, max_pfn_mapped<<PAGE_SHIFT, length,
PAGE_SIZE);

if (phys == -1L)
@@ -106,7 +107,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)

acpi_slit = __va(phys);
memcpy(acpi_slit, slit, length);
- reserve_early(phys, phys + length, "ACPI SLIT");
+ lmb_reserve_area(phys, phys + length, "ACPI SLIT");
}

/* Callback for Proximity Domain -> x2APIC mapping */
@@ -324,7 +325,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
pxmram = 0;
}

- e820ram = max_pfn - (e820_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
+ e820ram = max_pfn - (lmb_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
printk(KERN_ERR
@@ -373,7 +374,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
}

for_each_node_mask(i, nodes_parsed)
- e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+ lmb_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
nodes[i].end >> PAGE_SHIFT);
/* for out of order entries in SRAT */
sort_node_map();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 914f046..28185a8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -44,6 +44,7 @@
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/gfp.h>
+#include <linux/lmb.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -1735,7 +1736,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
__xen_write_cr3(true, __pa(pgd));
xen_mc_issue(PARAVIRT_LAZY_CPU);

- reserve_early(__pa(xen_start_info->pt_base),
+ lmb_reserve_area(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
@@ -1773,7 +1774,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,

pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

- reserve_early(__pa(xen_start_info->pt_base),
+ lmb_reserve_area(__pa(xen_start_info->pt_base),
__pa(xen_start_info->pt_base +
xen_start_info->nr_pt_frames * PAGE_SIZE),
"XEN PAGETABLES");
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3f2c411..d2954eb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
+#include <linux/lmb.h>

#include <asm/elf.h>
#include <asm/vdso.h>
@@ -59,7 +60,7 @@ char * __init xen_memory_setup(void)
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
- reserve_early(__pa(xen_start_info->mfn_list),
+ lmb_reserve_area(__pa(xen_start_info->mfn_list),
__pa(xen_start_info->pt_base),
"XEN START INFO");

diff --git a/mm/bootmem.c b/mm/bootmem.c
index dac3f56..2a4c8b5 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -435,7 +435,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
- free_early(physaddr, physaddr + size);
+ lmb_free_area(physaddr, physaddr + size);
#else
unsigned long start, end;

@@ -460,7 +460,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
- free_early(addr, addr + size);
+ lmb_free_area(addr, addr + size);
#else
unsigned long start, end;

--
1.6.4.2

2010-04-09 06:07:33

by Yinghai Lu

Subject: [PATCH 16/39] lmb: Add lmb_free_memory_size()

It returns the free memory size in the specified range.

We cannot use memory_size - reserved_size here, because some reserved
areas may not be within the scope of lmb.memory.region.

Instead, subtract lmb.reserved.region from lmb.memory.region to get the
free range array, then sum the sizes of all free ranges.
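
An illustrative use (the 4G limit is just an example):

	/* how much free memory is left below 4G? */
	u64 free_low = lmb_free_memory_size(0, 1ULL << 32);

	printk(KERN_INFO "free below 4G: %lluMB\n",
	       (unsigned long long)(free_low >> 20));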

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 1 +
mm/lmb.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 4078825..b874dc0 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -100,6 +100,7 @@ int get_free_all_memory_range(struct range **rangep, int nodeid);
void lmb_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 lmb_hole_size(u64 start, u64 end);
+u64 lmb_free_memory_size(u64 addr, u64 limit);

#include <asm/lmb.h>

diff --git a/mm/lmb.c b/mm/lmb.c
index d3a58fb..dccd539 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -746,6 +746,57 @@ void __init lmb_to_bootmem(u64 start, u64 end)
}
#endif

+u64 __init lmb_free_memory_size(u64 addr, u64 limit)
+{
+ int i, count;
+ struct range *range;
+ int nr_range;
+ u64 final_start, final_end;
+ u64 free_size;
+
+ count = lmb.reserved.cnt * 2;
+
+ range = find_range_array(count);
+ nr_range = 0;
+
+ addr = PFN_UP(addr);
+ limit = PFN_DOWN(limit);
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ struct lmb_property *r = &lmb.memory.region[i];
+
+ final_start = PFN_UP(r->base);
+ final_end = PFN_DOWN(r->base + r->size);
+ if (final_start >= final_end)
+ continue;
+ if (final_start >= limit || final_end <= addr)
+ continue;
+
+ nr_range = add_range(range, count, nr_range, final_start, final_end);
+ }
+ subtract_range(range, count, 0, addr);
+ subtract_range(range, count, limit, -1ULL);
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
+ if (final_start >= final_end)
+ continue;
+ if (final_start >= limit || final_end <= addr)
+ continue;
+
+ subtract_range(range, count, final_start, final_end);
+ }
+ nr_range = clean_sort_range(range, count);
+
+ free_size = 0;
+ for (i = 0; i < nr_range; i++)
+ free_size += range[i].end - range[i].start;
+
+ return free_size << PAGE_SHIFT;
+}
+
u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align)
{
--
1.6.4.2

2010-04-09 06:07:38

by Yinghai Lu

Subject: [PATCH 18/39] lmb: Add lmb_reserve_area_overlap_ok()

Some areas from firmware could be reserved several times, from different
callers.

If these areas overlap, we may end up with overlapping entries in
lmb.reserved.

Free the area first, before reserving it again.
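
An illustrative caller (the names are made up; the point is that a range
a previous caller may already have reserved can be passed safely):

	/* firmware-described table, possibly reserved once already */
	lmb_reserve_area_overlap_ok(table_pa, table_pa + table_len,
				    "FW TABLE");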

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 1 +
mm/lmb.c | 27 +++++++++++++++++++++++++--
2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 8332934..3c8095f 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -86,6 +86,7 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
}

void lmb_reserve_area(u64 start, u64 end, char *name);
+void lmb_reserve_area_overlap_ok(u64 start, u64 end, char *name);
void lmb_free_area(u64 start, u64 end);
void lmb_add_memory(u64 start, u64 end);
u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
diff --git a/mm/lmb.c b/mm/lmb.c
index feb3dfa..34fc030 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -605,6 +605,12 @@ void __init lmb_add_memory(u64 start, u64 end)
__check_and_double_region_array(&lmb.memory, &lmb_memory_region[0]);
}

+static void __init __lmb_reserve_area(u64 start, u64 end, char *name)
+{
+ lmb_add_region(&lmb.reserved, start, end - start);
+ __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
+}
+
void __init lmb_reserve_area(u64 start, u64 end, char *name)
{
if (start == end)
@@ -613,8 +619,25 @@ void __init lmb_reserve_area(u64 start, u64 end, char *name)
if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end))
return;

- lmb_add_region(&lmb.reserved, start, end - start);
- __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
+ __lmb_reserve_area(start, end, name);
+}
+
+/*
+ * Can be used to avoid having overlapping entries in lmb.reserved.region.
+ * There is no need to use it with areas from lmb_find_area();
+ * only use it for areas hidden by firmware.
+ */
+void __init lmb_reserve_area_overlap_ok(u64 start, u64 end, char *name)
+{
+ if (start == end)
+ return;
+
+ if (WARN_ONCE(start > end, "lmb_reserve_area_overlap_ok: wrong range [%#llx, %#llx]\n", start, end))
+ return;
+
+ /* Free that region at first */
+ lmb_free(start, end - start);
+ __lmb_reserve_area(start, end, name);
}

void __init lmb_free_area(u64 start, u64 end)
--
1.6.4.2

2010-04-09 06:08:23

by Yinghai Lu

Subject: [PATCH 20/39] lmb: Add ARCH_DISCARD_LMB to put lmb code to .init

So those lmb bits can be released after the kernel has booted.

Arch code can define ARCH_DISCARD_LMB in asm/lmb.h; __init_lmb then
becomes __init, and __initdata_lmb becomes __initdata.

x86 code will use that.

-v2: use ARCH_DISCARD_LMB according to Michael Ellerman
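
A sketch of the per-arch opt-in (the guard name is illustrative; the
actual x86 change is a separate patch in this series):

	/* arch/x86/include/asm/lmb.h */
	#ifndef _ASM_X86_LMB_H
	#define _ASM_X86_LMB_H

	/* discard lmb code and data after boot on this architecture */
	#define ARCH_DISCARD_LMB

	#endif

linux/lmb.h includes asm/lmb.h before testing ARCH_DISCARD_LMB, so the
define takes effect for the annotations above.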

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 8 +++++++
mm/lmb.c | 54 ++++++++++++++++++++++++++++++--------------------
2 files changed, 40 insertions(+), 22 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 3c8095f..f615626 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -106,6 +106,14 @@ u64 lmb_memory_size(u64 addr, u64 limit);

#include <asm/lmb.h>

+#ifdef ARCH_DISCARD_LMB
+#define __init_lmb __init
+#define __initdata_lmb __initdata
+#else
+#define __init_lmb
+#define __initdata_lmb
+#endif
+
#endif /* CONFIG_HAVE_LMB */

#endif /* __KERNEL__ */
diff --git a/mm/lmb.c b/mm/lmb.c
index dfbf660..ab3d85f 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -21,11 +21,11 @@

#define LMB_ALLOC_ANYWHERE 0

-struct lmb lmb;
-static struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
-static struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
+struct lmb lmb __initdata_lmb;
+static struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1] __initdata_lmb;
+static struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1] __initdata_lmb;

-static int lmb_debug;
+static int lmb_debug __initdata_lmb;

static int __init early_lmb(char *p)
{
@@ -35,7 +35,7 @@ static int __init early_lmb(char *p)
}
early_param("lmb", early_lmb);

-static void lmb_dump(struct lmb_region *region, char *name)
+static void __init_lmb lmb_dump(struct lmb_region *region, char *name)
{
unsigned long long base, size;
int i;
@@ -51,7 +51,7 @@ static void lmb_dump(struct lmb_region *region, char *name)
}
}

-void lmb_dump_all(void)
+void __init_lmb lmb_dump_all(void)
{
if (!lmb_debug)
return;
@@ -64,13 +64,13 @@ void lmb_dump_all(void)
lmb_dump(&lmb.reserved, "reserved");
}

-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+static unsigned long __init_lmb lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
u64 size2)
{
return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+static long __init_lmb lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
if (base2 == base1 + size1)
return 1;
@@ -80,7 +80,7 @@ static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
return 0;
}

-static long lmb_regions_adjacent(struct lmb_region *rgn,
+static long __init_lmb lmb_regions_adjacent(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
u64 base1 = rgn->region[r1].base;
@@ -91,7 +91,7 @@ static long lmb_regions_adjacent(struct lmb_region *rgn,
return lmb_addrs_adjacent(base1, size1, base2, size2);
}

-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void __init_lmb lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
unsigned long i;

@@ -103,7 +103,7 @@ static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
}

/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
+static void __init_lmb lmb_coalesce_regions(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
rgn->region[r1].size += rgn->region[r2].size;
@@ -140,7 +140,7 @@ void __init lmb_analyze(void)
lmb.memory.size += lmb.memory.region[i].size;
}

-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long __init_lmb lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long coalesced = 0;
long adjacent, i;
@@ -204,7 +204,7 @@ static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
return 0;
}

-long lmb_add(u64 base, u64 size)
+long __init_lmb lmb_add(u64 base, u64 size)
{
struct lmb_region *_rgn = &lmb.memory;

@@ -216,7 +216,7 @@ long lmb_add(u64 base, u64 size)

}

-static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
+static long __init_lmb __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
{
u64 rgnbegin, rgnend;
u64 end = base + size;
@@ -264,7 +264,7 @@ static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
return lmb_add_region(rgn, end, rgnend - end);
}

-long lmb_remove(u64 base, u64 size)
+long __init_lmb lmb_remove(u64 base, u64 size)
{
return __lmb_remove(&lmb.memory, base, size);
}
@@ -283,7 +283,7 @@ long __init lmb_reserve(u64 base, u64 size)
return lmb_add_region(_rgn, base, size);
}

-long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+long __init_lmb lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long i;

@@ -297,12 +297,12 @@ long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
return (i < rgn->cnt) ? i : -1;
}

-static u64 lmb_align_down(u64 addr, u64 size)
+static u64 __init_lmb lmb_align_down(u64 addr, u64 size)
{
return addr & ~(size - 1);
}

-static u64 lmb_align_up(u64 addr, u64 size)
+static u64 __init_lmb lmb_align_up(u64 addr, u64 size)
{
return (addr + (size - 1)) & ~(size - 1);
}
@@ -462,7 +462,7 @@ u64 __init lmb_phys_mem_size(void)
return lmb.memory.size;
}

-u64 lmb_end_of_DRAM(void)
+u64 __init_lmb lmb_end_of_DRAM(void)
{
int idx = lmb.memory.cnt - 1;

@@ -526,7 +526,7 @@ int __init lmb_is_reserved(u64 addr)
return 0;
}

-int lmb_is_region_reserved(u64 base, u64 size)
+int __init_lmb lmb_is_region_reserved(u64 base, u64 size)
{
return lmb_overlaps_region(&lmb.reserved, base, size);
}
@@ -535,7 +535,7 @@ int lmb_is_region_reserved(u64 base, u64 size)
* Given a <base, len>, find which memory regions belong to this range.
* Adjust the request and return a contiguous chunk.
*/
-int lmb_find(struct lmb_property *res)
+int __init_lmb lmb_find(struct lmb_property *res)
{
int i;
u64 rstart, rend;
@@ -689,10 +689,11 @@ static void __init subtract_lmb_reserved(struct range *range, int az)
int i, count;
u64 final_start, final_end;

+#ifdef ARCH_DISCARD_LMB
/* Take out region array itself at first*/
if (lmb.reserved.region != lmb_reserved_region)
lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
-
+#endif
count = lmb.reserved.cnt;

if (lmb_debug)
@@ -708,9 +709,11 @@ static void __init subtract_lmb_reserved(struct range *range, int az)
continue;
subtract_range(range, az, final_start, final_end);
}
+#ifdef ARCH_DISCARD_LMB
/* Put region array back ? */
if (lmb.reserved.region != lmb_reserved_region)
lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+#endif
}

int __init get_free_all_memory_range(struct range **rangep, int nodeid)
@@ -735,6 +738,7 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
subtract_lmb_reserved(range, count);
nr_range = clean_sort_range(range, count);

+#ifdef ARCH_DISCARD_LMB
/* Need to clear it ? */
if (nodeid == MAX_NUMNODES) {
memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
@@ -742,6 +746,7 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
lmb.reserved.nr_regions = 0;
lmb.reserved.cnt = 0;
}
+#endif

*rangep = range;
return nr_range;
@@ -752,9 +757,11 @@ void __init lmb_to_bootmem(u64 start, u64 end)
int i, count;
u64 final_start, final_end;

+#ifdef ARCH_DISCARD_LMB
/* Take out region array itself */
if (lmb.reserved.region != lmb_reserved_region)
lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+#endif

count = lmb.reserved.cnt;
if (lmb_debug)
@@ -774,11 +781,14 @@ void __init lmb_to_bootmem(u64 start, u64 end)
pr_cont(" ==> [%010llx - %010llx]\n", final_start, final_end);
reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT);
}
+
+#ifdef ARCH_DISCARD_LMB
/* Clear them to avoid misusing ? */
memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
lmb.reserved.region = NULL;
lmb.reserved.nr_regions = 0;
lmb.reserved.cnt = 0;
+#endif
}
#endif

--
1.6.4.2

2010-04-09 06:08:38

by Yinghai Lu

Subject: [PATCH 09/39] bootmem, x86: Add weak version of reserve_bootmem_generic

It will be used by the lmb_to_bootmem conversion.

It is a wrapper for reserve_bootmem(), and x86 64-bit uses a special one.

Also clean up that version for x86_64: we don't need to take care of the
NUMA path there, bootmem can handle it now.
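
Schematically, the weak-default pattern this sets up (a restatement of
the diff below, with the x86_64 body elided):

	/* mm/bootmem.c: generic default, overridable at link time */
	int __weak __init reserve_bootmem_generic(unsigned long phys,
						  unsigned long len, int flags)
	{
		return reserve_bootmem(phys, len, flags);
	}

	/* arch/x86/mm/init_64.c: the strong definition wins over the
	 * weak one, adding pfn-range checks and dma_reserve accounting */
	int __init reserve_bootmem_generic(unsigned long phys,
					   unsigned long len, int flags)
	{
		/* ... */
	}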

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/mm/init_32.c | 6 ------
arch/x86/mm/init_64.c | 20 ++------------------
mm/bootmem.c | 6 ++++++
3 files changed, 8 insertions(+), 24 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bca7909..90e0545 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1069,9 +1069,3 @@ void mark_rodata_ro(void)
#endif
}
#endif
-
-int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
- int flags)
-{
- return reserve_bootmem(phys, len, flags);
-}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ee41bba..634fa08 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -799,13 +799,10 @@ void mark_rodata_ro(void)

#endif

+#ifndef CONFIG_NO_BOOTMEM
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
int flags)
{
-#ifdef CONFIG_NUMA
- int nid, next_nid;
- int ret;
-#endif
unsigned long pfn = phys >> PAGE_SHIFT;

if (pfn >= max_pfn) {
@@ -821,21 +818,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
return -EFAULT;
}

- /* Should check here against the e820 map to avoid double free */
-#ifdef CONFIG_NUMA
- nid = phys_to_nid(phys);
- next_nid = phys_to_nid(phys + len - 1);
- if (nid == next_nid)
- ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
- else
- ret = reserve_bootmem(phys, len, flags);
-
- if (ret != 0)
- return ret;
-
-#else
reserve_bootmem(phys, len, flags);
-#endif

if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
dma_reserve += len / PAGE_SIZE;
@@ -844,6 +827,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,

return 0;
}
+#endif

int kern_addr_valid(unsigned long addr)
{
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 58c66cc..ee31b95 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -526,6 +526,12 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
}

#ifndef CONFIG_NO_BOOTMEM
+int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
+ int flags)
+{
+ return reserve_bootmem(phys, len, flags);
+}
+
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
--
1.6.4.2

2010-04-09 06:08:51

by Yinghai Lu

Subject: [PATCH 10/39] lmb: Add lmb_to_bootmem()

lmb_to_bootmem() will reserve the lmb.reserved.region entries with
bootmem, after bootmem is set up.

We can later use it with all arches that support lmb.
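
An illustrative call site (the range shown is an assumption, not taken
from this patch):

	/* after init_bootmem() has set up the bitmap, hand every lmb
	 * reservation below max_low_pfn over to bootmem */
	lmb_to_bootmem(0, max_low_pfn << PAGE_SHIFT);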

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 2 ++
mm/lmb.c | 32 ++++++++++++++++++++++++++++++++
2 files changed, 34 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 598662f..1e236d1 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -91,6 +91,8 @@ u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);

+void lmb_to_bootmem(u64 start, u64 end);
+
#include <asm/lmb.h>

#endif /* __KERNEL__ */
diff --git a/mm/lmb.c b/mm/lmb.c
index a514d41..ee3d945 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -630,6 +630,38 @@ void __init lmb_free_area(u64 start, u64 end)
__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
}

+#ifndef CONFIG_NO_BOOTMEM
+void __init lmb_to_bootmem(u64 start, u64 end)
+{
+ int i, count;
+ u64 final_start, final_end;
+
+ /* Take out region array itself */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+
+ count = lmb.reserved.cnt;
+ pr_info("(%d early reservations) ==> bootmem [%010llx - %010llx]\n", count, start, end);
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ pr_info(" #%d [%010llx - %010llx] ", i, r->base, r->base + r->size);
+ final_start = max(start, r->base);
+ final_end = min(end, r->base + r->size);
+ if (final_start >= final_end) {
+ pr_cont("\n");
+ continue;
+ }
+ pr_cont(" ==> [%010llx - %010llx]\n", final_start, final_end);
+ reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT);
+ }
+ /* Clear them to avoid misusing ? */
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+ lmb.reserved.region = NULL;
+ lmb.reserved.nr_regions = 0;
+ lmb.reserved.cnt = 0;
+}
+#endif
+
u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align)
{
--
1.6.4.2

2010-04-09 06:09:14

by Yinghai Lu

Subject: [PATCH 05/39] lmb: Separate region array from lmb_region struct

lmb_init() will connect them back up.
Add nr_regions to struct lmb_region to track the region array size.

That way we can later install a dynamically allocated region array via
that pointer.
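
This is what the pointer indirection enables; a rough sketch of the
later "double the array" step (__check_and_double_region_array() in this
series -- the details here are illustrative, including the error
handling):

	static void __init grow_region_array(struct lmb_region *type)
	{
		u64 bytes = 2 * type->nr_regions * sizeof(struct lmb_property);
		u64 pa = lmb_find_area(0, -1ULL, bytes, PAGE_SIZE);
		struct lmb_property *new;

		if (pa == -1ULL)
			panic("cannot grow lmb region array");

		new = __va(pa);
		memcpy(new, type->region,
		       type->nr_regions * sizeof(struct lmb_property));
		type->region = new;		/* install the bigger array */
		type->nr_regions *= 2;
		/* the new array itself must also be reserved */
	}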

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 3 ++-
mm/lmb.c | 9 ++++++++-
2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index f3d1433..e14ea8d 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long nr_regions;
};

struct lmb {
diff --git a/mm/lmb.c b/mm/lmb.c
index b1fc526..65b62dc 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0

struct lmb lmb;
+static struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+static struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];

static int lmb_debug;

@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,

void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.memory.nr_regions = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.nr_regions = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)

if (coalesced)
return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
+ if (rgn->cnt > rgn->nr_regions)
return -1;

/* Couldn't coalesce the LMB, so add it to the sorted table. */
--
1.6.4.2

2010-04-09 06:09:18

by Yinghai Lu

Subject: [PATCH 39/39] x86: have nobootmem version setup_bootmem_allocator()

This reduces the number of #ifdefs in init_32.c from three to one.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/mm/init_32.c | 15 ++++++++++-----
1 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c01c711..dfdd035 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -771,11 +771,9 @@ static unsigned long __init setup_node_bootmem(int nodeid,

return bootmap + bootmap_size;
}
-#endif

void __init setup_bootmem_allocator(void)
{
-#ifndef CONFIG_NO_BOOTMEM
int nodeid;
unsigned long bootmap_size, bootmap;
/*
@@ -787,13 +785,11 @@ void __init setup_bootmem_allocator(void)
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
lmb_reserve_area(bootmap, bootmap + bootmap_size, "BOOTMAP");
-#endif

printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

-#ifndef CONFIG_NO_BOOTMEM
for_each_online_node(nodeid) {
unsigned long start_pfn, end_pfn;

@@ -811,10 +807,19 @@ void __init setup_bootmem_allocator(void)
bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
bootmap);
}
-#endif

after_bootmem = 1;
}
+#else
+void __init setup_bootmem_allocator(void)
+{
+ printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
+ max_pfn_mapped<<PAGE_SHIFT);
+ printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
+
+ after_bootmem = 1;
+}
+#endif

/*
* paging_init() sets up the page tables - note that the first 8MB are
--
1.6.4.2

2010-04-09 06:09:28

by Yinghai Lu

Subject: [PATCH 33/39] x86: Change e820_any_mapped() to __init

We don't need to expose e820_any_mapped() anymore

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/kernel/e820.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index be6e1e6..471784b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -47,9 +47,10 @@ EXPORT_SYMBOL(pci_mem_start);
/*
* This function checks if any part of the range <start,end> is mapped
* with type.
+ * phys_pud_init() is using it and is _meminit, but we have !after_bootmem
+ * so could use refok here
*/
-int
-e820_any_mapped(u64 start, u64 end, unsigned type)
+int __init_refok e820_any_mapped(u64 start, u64 end, unsigned type)
{
int i;

@@ -64,7 +65,6 @@ e820_any_mapped(u64 start, u64 end, unsigned type)
}
return 0;
}
-EXPORT_SYMBOL_GPL(e820_any_mapped);

/*
* This function checks if the entire range <start,end> is mapped with type.
--
1.6.4.2

2010-04-09 06:09:39

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 38/39] lmb: move __alloc_memory_core_early() to nobootmem.c

This lets us remove an #ifdef from mm/page_alloc.c and make the function static.

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/mm.h | 2 --
mm/nobootmem.c | 21 +++++++++++++++++++++
mm/page_alloc.c | 24 ------------------------
3 files changed, 21 insertions(+), 26 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7774e1d..2a14361 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1161,8 +1161,6 @@ int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit);
-void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
- u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index abaec96..8c26d02 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -40,6 +40,27 @@ unsigned long max_pfn;
unsigned long saved_max_pfn;
#endif

+static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ void *ptr;
+
+ u64 addr;
+
+ if (limit > lmb.default_alloc_limit)
+ limit = lmb.default_alloc_limit;
+
+ addr = find_memory_core_early(nid, size, align, goal, limit);
+
+ if (addr == -1ULL)
+ return NULL;
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ lmb_reserve_area(addr, addr + size, "BOOTMEM");
+ return ptr;
+}
+
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 233c403..faa749c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3437,30 +3437,6 @@ int __init add_from_early_node_map(struct range *range, int az,
return nr_range;
}

-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
-{
- void *ptr;
-
- u64 addr;
-
- if (limit > lmb.default_alloc_limit)
- limit = lmb.default_alloc_limit;
-
- addr = find_memory_core_early(nid, size, align, goal, limit);
-
- if (addr == -1ULL)
- return NULL;
-
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- lmb_reserve_area(addr, addr + size, "BOOTMEM");
- return ptr;
-}
-#endif
-
-
void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
int i;
--
1.6.4.2

2010-04-09 06:09:42

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 32/39] x86: Add get_centaur_ram_top()

So we can avoid accessing e820.map[] directly.

Later, e820 can be made static and __initdata.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/include/asm/e820.h | 9 ++++++
arch/x86/kernel/cpu/centaur.c | 53 +-------------------------------------
arch/x86/kernel/e820.c | 56 +++++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/setup.c | 2 +
4 files changed, 69 insertions(+), 51 deletions(-)

diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 334281f..cd7de51 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -76,6 +76,15 @@ struct e820map {
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;

+#if defined(CONFIG_X86_OOSTORE) && defined(CONFIG_CPU_SUP_CENTAUR)
+extern int centaur_ram_top;
+void get_centaur_ram_top(void);
+#else
+static inline void get_centaur_ram_top(void)
+{
+}
+#endif
+
extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e58d978..bb49358 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -37,63 +37,14 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
}

-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 __cpuinit ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophe already
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
+int __cpuinitdata centaur_ram_top;

/*
* Compute a set of MCR's to give maximum coverage
*/
static int __cpuinit centaur_mcr_compute(int nr, int key)
{
- u32 mem = ramtop();
+ u32 mem = centaur_ram_top;
u32 root = power2(mem);
u32 base = root;
u32 top = root;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 57c938a..be6e1e6 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1131,6 +1131,62 @@ void __init setup_memory_map(void)
e820_print_map(who);
}

+#if defined(CONFIG_X86_OOSTORE) && defined(CONFIG_CPU_SUP_CENTAUR)
+/*
+ * Figure what we can cover with MCR's
+ *
+ * Shortcut: We know you can't put 4Gig of RAM on a winchip
+ */
+void __init get_centaur_ram_top(void)
+{
+ u32 clip = 0xFFFFFFFFUL;
+ u32 top = 0;
+ int i;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
+ return;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+
+ if (e820.map[i].addr > 0xFFFFFFFFUL)
+ continue;
+ /*
+ * Don't MCR over reserved space. Ignore the ISA hole
+ * we frob around that catastrophe already
+ */
+ if (e820.map[i].type == E820_RESERVED) {
+ if (e820.map[i].addr >= 0x100000UL &&
+ e820.map[i].addr < clip)
+ clip = e820.map[i].addr;
+ continue;
+ }
+ start = e820.map[i].addr;
+ end = e820.map[i].addr + e820.map[i].size;
+ if (start >= end)
+ continue;
+ if (end > top)
+ top = end;
+ }
+ /*
+ * Everything below 'top' should be RAM except for the ISA hole.
+ * Because of the limited MCR's we want to map NV/ACPI into our
+ * MCR range for gunk in RAM
+ *
+ * Clip might cause us to MCR insufficient RAM but that is an
+ * acceptable failure mode and should only bite obscure boxes with
+ * a VESA hole at 15Mb
+ *
+ * The second case Clip sometimes kicks in is when the EBDA is marked
+ * as reserved. Again we fail safe with reasonable results
+ */
+ if (top > clip)
+ top = clip;
+
+ centaur_ram_top = top;
+}
+#endif
+
void __init init_lmb_memory(void)
{
lmb_init();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 403b15c..a7ed663 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -893,6 +893,8 @@ void __init setup_arch(char **cmdline_p)
if (mtrr_trim_uncached_memory(max_pfn))
max_pfn = e820_end_of_ram_pfn();

+ get_centaur_ram_top();
+
#ifdef CONFIG_X86_32
/* max_low_pfn get updated here */
find_low_pfn_range();
--
1.6.4.2

2010-04-09 06:09:45

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 31/39] x86: Use walk_system_ram_range() instead of e820_any_mapped() in the AGP path

Move aperture_valid() back into the .c files.

The early path still uses e820_any_mapped(), while the AGP driver switches
to walk_system_ram_range().

So later we can make e820_any_mapped() __init.

Signed-off-by: Yinghai Lu <[email protected]>
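
For reference, walk_system_ram_range() calls the supplied callback once per
contiguous System RAM chunk in the pfn range, stopping when the callback
returns nonzero. A small sketch beyond the aperture check (function names
are illustrative):

	/* Count the RAM pages that fall inside [start, start + size) */
	static int count_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
	{
		unsigned long *total = arg;

		*total += nr_pages;
		return 0;		/* 0 means: keep walking */
	}

	static unsigned long ram_pages_in(u64 start, u64 size)
	{
		unsigned long total = 0;

		walk_system_ram_range(start >> PAGE_SHIFT, size >> PAGE_SHIFT,
				      &total, count_ram);
		return total;
	}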
---
arch/x86/include/asm/gart.h | 22 ----------------------
arch/x86/kernel/aperture_64.c | 22 ++++++++++++++++++++++
drivers/char/agp/amd64-agp.c | 39 ++++++++++++++++++++++++++++++++++++++-
3 files changed, 60 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f..2b63a91 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -74,26 +74,4 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}

-static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
-{
- if (!aper_base)
- return 0;
-
- if (aper_base + aper_size > 0x100000000ULL) {
- printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
- return 0;
- }
- if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
- printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
- return 0;
- }
- if (aper_size < min_size) {
- printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
- aper_size>>20, min_size>>20);
- return 0;
- }
-
- return 1;
-}
-
#endif /* _ASM_X86_GART_H */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997..f6e6270 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -145,6 +145,28 @@ static u32 __init find_cap(int bus, int slot, int func, int cap)
return 0;
}

+static int __init aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Read a standard AGPv3 bridge header */
static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
{
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead..85cabd0 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -14,7 +14,6 @@
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
-#include <asm/e820.h>
#include <asm/k8.h>
#include <asm/gart.h>
#include "agp.h"
@@ -231,6 +230,44 @@ static const struct agp_bridge_driver amd_8151_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};

+static int __devinit
+__is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
+{
+ return 1;
+}
+
+static int __devinit any_ram_in_range(u64 base, u64 size)
+{
+ unsigned long pfn, nr_pages;
+
+ pfn = base >> PAGE_SHIFT;
+ nr_pages = size >> PAGE_SHIFT;
+
+ return walk_system_ram_range(pfn, nr_pages, NULL, __is_ram) == 1;
+}
+
+static int __devinit aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (any_ram_in_range(aper_base, aper_size)) {
+ printk(KERN_INFO "Aperture pointing to E820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Some basic sanity checks for the aperture. */
static int __devinit agp_aperture_valid(u64 aper, u32 size)
{
--
1.6.4.2

2010-04-09 06:09:33

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 34/39] x86: Use walk_system_ram_range() instead of referencing e820.map directly for tboot

So we can make e820 __initdata.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/kernel/tboot.c | 22 +++++++++-------------
1 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index cc2c604..cf27d64 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -170,34 +170,30 @@ static void tboot_create_trampoline(void)

#ifdef CONFIG_ACPI_SLEEP

-static void add_mac_region(phys_addr_t start, unsigned long size)
+static int
+add_mac_region(unsigned long start_pfn, unsigned long nr_pages, void *arg)
{
+ u64 start = start_pfn;
+ u64 size = nr_pages;
struct tboot_mac_region *mr;
- phys_addr_t end = start + size;

if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS)
panic("tboot: Too many MAC regions\n");

if (start && size) {
mr = &tboot->mac_regions[tboot->num_mac_regions++];
- mr->start = round_down(start, PAGE_SIZE);
- mr->size = round_up(end, PAGE_SIZE) - mr->start;
+ mr->start = start << PAGE_SHIFT;
+ mr->size = (u32) (size << PAGE_SHIFT);
}
+
+ return 0;
}

static int tboot_setup_sleep(void)
{
- int i;
-
tboot->num_mac_regions = 0;

- for (i = 0; i < e820.nr_map; i++) {
- if ((e820.map[i].type != E820_RAM)
- && (e820.map[i].type != E820_RESERVED_KERN))
- continue;
-
- add_mac_region(e820.map[i].addr, e820.map[i].size);
- }
+ walk_system_ram_range(0, max_pfn, NULL, add_mac_region);

tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;

--
1.6.4.2

2010-04-09 06:10:49

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 35/39] x86: make e820 to be __initdata

Finally there are no users left after the init boot stage, so we can free it to save some bytes.

Signed-off-by: Yinghai Lu <[email protected]>
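
For reference, __initdata places the object in the .init.data section, which
is discarded once boot completes, so only __init (or audited __init_refok)
code may touch it afterwards. A minimal sketch of the annotation:

	static struct e820map __initdata e820;	/* freed with init memory */

	void __init use_e820_early(void)
	{
		printk(KERN_INFO "e820 entries: %d\n", e820.nr_map);
	}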
---
arch/x86/include/asm/e820.h | 2 --
arch/x86/kernel/e820.c | 2 +-
2 files changed, 1 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cd7de51..f2ab72e 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -73,8 +73,6 @@ struct e820map {
#define BIOS_END 0x00100000

#ifdef __KERNEL__
-/* see comment in arch/x86/kernel/e820.c */
-extern struct e820map e820;

#if defined(CONFIG_X86_OOSTORE) && defined(CONFIG_CPU_SUP_CENTAUR)
extern int centaur_ram_top;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 471784b..2c1260f 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -35,7 +35,7 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
-struct e820map e820;
+static struct e820map __initdata e820;
static struct e820map __initdata e820_saved;

/* For PCI or other memory-mapped resources */
--
1.6.4.2

2010-04-09 06:10:53

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 29/39] x86, lmb: Use lmb_memory_size()/lmb_free_memory_size() to get correct dma_reserve

lmb_memory_size() returns the amount of memory covered by lmb.memory.region in a range.
lmb_free_memory_size() returns how much of that memory is still free.

The difference gives us the exact reserved size within the specified range.

Set the size right after initmem_init(), because the later bootmem APIs will
allocate from above 16M (except for some fallbacks).

Later, after bootmem is removed, this could be called just before paging_init().

Signed-off-by: Yinghai Lu <[email protected]>
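
A worked example of the arithmetic (the numbers are illustrative): if
lmb.memory covers 4096 pages below MAX_DMA_PFN and 3840 of them are still
free, then 256 pages are already reserved, and that is what
set_dma_reserve() receives:

	mem_size_pfn  = lmb_memory_size(0, MAX_DMA_PFN << PAGE_SHIFT)
				>> PAGE_SHIFT;		/* e.g. 4096 */
	free_size_pfn = lmb_free_memory_size(0, MAX_DMA_PFN << PAGE_SHIFT)
				>> PAGE_SHIFT;		/* e.g. 3840 */
	set_dma_reserve(mem_size_pfn - free_size_pfn);	/* 256 pages */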
---
arch/x86/include/asm/e820.h | 2 ++
arch/x86/kernel/e820.c | 17 +++++++++++++++++
arch/x86/kernel/setup.c | 1 +
arch/x86/mm/init_64.c | 7 -------
4 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index de6cd06..334281f 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -117,6 +117,8 @@ extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);

void init_lmb_memory(void);
void fill_lmb_memory(void);
+void find_lmb_dma_reserve(void);
+
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e0ba87d..57c938a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1151,3 +1151,20 @@ void __init fill_lmb_memory(void)
lmb_analyze();
lmb_dump_all();
}
+
+void __init find_lmb_dma_reserve(void)
+{
+#ifdef CONFIG_X86_64
+ u64 free_size_pfn;
+ u64 mem_size_pfn;
+ /*
+ * need to find out used area below MAX_DMA_PFN
+ * need to use lmb to get free size in [0, MAX_DMA_PFN]
+ * at first, and assume boot_mem will not take below MAX_DMA_PFN
+ */
+ mem_size_pfn = lmb_memory_size(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+ free_size_pfn = lmb_free_memory_size(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
+ set_dma_reserve(mem_size_pfn - free_size_pfn);
+#endif
+}
+
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 4d2a984..403b15c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1001,6 +1001,7 @@ void __init setup_arch(char **cmdline_p)
#endif

initmem_init(0, max_pfn, acpi, k8);
+ find_lmb_dma_reserve();
#ifndef CONFIG_NO_BOOTMEM
lmb_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0d2252c..37c7a82 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -53,8 +53,6 @@
#include <asm/init.h>
#include <linux/bootmem.h>

-static unsigned long dma_reserve __initdata;
-
static int __init parse_direct_gbpages_off(char *arg)
{
direct_gbpages = 0;
@@ -821,11 +819,6 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,

reserve_bootmem(phys, len, flags);

- if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
- dma_reserve += len / PAGE_SIZE;
- set_dma_reserve(dma_reserve);
- }
-
return 0;
}
#endif
--
1.6.4.2

2010-04-09 06:11:20

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 27/39] x86, lmb: turn off ARCH_LMB_FIND_AREA

32-bit can now use the generic __find_lmb_area(), so the arch-specific one can be turned off.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/Kconfig | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da9040b..bbe4e99 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -584,7 +584,7 @@ config PARAVIRT_DEBUG
a paravirt_op is missing when it is called.

config ARCH_LMB_FIND_AREA
- default y
+ default n
bool "Use x86 own lmb_find_area()"
---help---
Use lmb_find_area() version instead of generic version, it get free
--
1.6.4.2

2010-04-09 06:11:35

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 28/39] x86: Remove unused early_res code

Also remove the functions in e820.c that are no longer used.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/include/asm/e820.h | 14 -
arch/x86/kernel/e820.c | 42 ---
include/linux/early_res.h | 23 --
kernel/early_res.c | 584 -------------------------------------------
4 files changed, 0 insertions(+), 663 deletions(-)
delete mode 100644 include/linux/early_res.h
delete mode 100644 kernel/early_res.c

diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 396c849..de6cd06 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -111,32 +111,18 @@ static inline void early_memtest(unsigned long start, unsigned long end)
}
#endif

-extern unsigned long end_user_pfn;
-
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
-
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);

void init_lmb_memory(void);
void fill_lmb_memory(void);
-
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);

-void reserve_early(u64 start, u64 end, char *name);
-void free_early(u64 start, u64 end);
-
/*
* Returns true iff the specified range [s,e) is completely contained inside
* the ISA region.
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 3fa3c0a..e0ba87d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -744,22 +744,6 @@ core_initcall(e820_mark_nvs_memory);
#endif

/*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
-{
- return lmb_find_area(start, end, size, align);
-}
-
-/*
- * Find next free range after *start
- */
-u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
-{
- return lmb_find_area_size(start, sizep, align);
-}
-
-/*
* pre allocated 4k and reserved it in lmb and e820_saved
*/
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
@@ -851,32 +835,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}

-/* Walk the e820 map and register active regions within a node */
-void __init e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- lmb_register_active_regions(nid, start_pfn, last_pfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init e820_hole_size(u64 start, u64 end)
-{
- return lmb_hole_size(start, end);
-}
-
-void reserve_early(u64 start, u64 end, char *name)
-{
- lmb_reserve_area(start, end, name);
-}
-void free_early(u64 start, u64 end)
-{
- lmb_free_area(start, end);
-}
-
static void early_panic(char *msg)
{
early_printk(msg);
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
deleted file mode 100644
index 29c09f5..0000000
--- a/include/linux/early_res.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _LINUX_EARLY_RES_H
-#define _LINUX_EARLY_RES_H
-#ifdef __KERNEL__
-
-extern void reserve_early(u64 start, u64 end, char *name);
-extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
-extern void free_early(u64 start, u64 end);
-void free_early_partial(u64 start, u64 end);
-extern void early_res_to_bootmem(u64 start, u64 end);
-
-void reserve_early_without_check(u64 start, u64 end, char *name);
-u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align);
-u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
- u64 *sizep, u64 align);
-u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
-u64 get_max_mapped(void);
-#include <linux/range.h>
-int get_free_all_memory_range(struct range **rangep, int nodeid);
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_EARLY_RES_H */
diff --git a/kernel/early_res.c b/kernel/early_res.c
deleted file mode 100644
index 31aa933..0000000
--- a/kernel/early_res.c
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * early_res, could be used to replace bootmem
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/early_res.h>
-
-/*
- * Early reserved memory areas.
- */
-/*
- * need to make sure this one is bigger enough before
- * find_fw_memmap_area could be used
- */
-#define MAX_EARLY_RES_X 32
-
-struct early_res {
- u64 start, end;
- char name[15];
- char overlap_ok;
-};
-static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
-
-static int max_early_res __initdata = MAX_EARLY_RES_X;
-static struct early_res *early_res __initdata = &early_res_x[0];
-static int early_res_count __initdata;
-
-static int __init find_overlapped_early(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
-
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- r = &early_res[i];
- if (end > r->start && start < r->end)
- break;
- }
-
- return i;
-}
-
-/*
- * Drop the i-th range from the early reservation map,
- * by copying any higher ranges down one over it, and
- * clearing what had been the last slot.
- */
-static void __init drop_range(int i)
-{
- int j;
-
- for (j = i + 1; j < max_early_res && early_res[j].end; j++)
- ;
-
- memmove(&early_res[i], &early_res[i + 1],
- (j - 1 - i) * sizeof(struct early_res));
-
- early_res[j - 1].end = 0;
- early_res_count--;
-}
-
-static void __init drop_range_partial(int i, u64 start, u64 end)
-{
- u64 common_start, common_end;
- u64 old_start, old_end;
-
- old_start = early_res[i].start;
- old_end = early_res[i].end;
- common_start = max(old_start, start);
- common_end = min(old_end, end);
-
- /* no overlap ? */
- if (common_start >= common_end)
- return;
-
- if (old_start < common_start) {
- /* make head segment */
- early_res[i].end = common_start;
- if (old_end > common_end) {
- char name[15];
-
- /*
- * Save a local copy of the name, since the
- * early_res array could get resized inside
- * reserve_early_without_check() ->
- * __check_and_double_early_res(), which would
- * make the current name pointer invalid.
- */
- strncpy(name, early_res[i].name,
- sizeof(early_res[i].name) - 1);
- /* add another for left over on tail */
- reserve_early_without_check(common_end, old_end, name);
- }
- return;
- } else {
- if (old_end > common_end) {
- /* reuse the entry for tail left */
- early_res[i].start = common_end;
- return;
- }
- /* all covered */
- drop_range(i);
- }
-}
-
-/*
- * Split any existing ranges that:
- * 1) are marked 'overlap_ok', and
- * 2) overlap with the stated range [start, end)
- * into whatever portion (if any) of the existing range is entirely
- * below or entirely above the stated range. Drop the portion
- * of the existing range that overlaps with the stated range,
- * which will allow the caller of this routine to then add that
- * stated range without conflicting with any existing range.
- */
-static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
- u64 lower_start, lower_end;
- u64 upper_start, upper_end;
- char name[15];
-
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- r = &early_res[i];
-
- /* Continue past non-overlapping ranges */
- if (end <= r->start || start >= r->end)
- continue;
-
- /*
- * Leave non-ok overlaps as is; let caller
- * panic "Overlapping early reservations"
- * when it hits this overlap.
- */
- if (!r->overlap_ok)
- return;
-
- /*
- * We have an ok overlap. We will drop it from the early
- * reservation map, and add back in any non-overlapping
- * portions (lower or upper) as separate, overlap_ok,
- * non-overlapping ranges.
- */
-
- /* 1. Note any non-overlapping (lower or upper) ranges. */
- strncpy(name, r->name, sizeof(name) - 1);
-
- lower_start = lower_end = 0;
- upper_start = upper_end = 0;
- if (r->start < start) {
- lower_start = r->start;
- lower_end = start;
- }
- if (r->end > end) {
- upper_start = end;
- upper_end = r->end;
- }
-
- /* 2. Drop the original ok overlapping range */
- drop_range(i);
-
- i--; /* resume for-loop on copied down entry */
-
- /* 3. Add back in any non-overlapping ranges. */
- if (lower_end)
- reserve_early_overlap_ok(lower_start, lower_end, name);
- if (upper_end)
- reserve_early_overlap_ok(upper_start, upper_end, name);
- }
-}
-
-static void __init __reserve_early(u64 start, u64 end, char *name,
- int overlap_ok)
-{
- int i;
- struct early_res *r;
-
- i = find_overlapped_early(start, end);
- if (i >= max_early_res)
- panic("Too many early reservations");
- r = &early_res[i];
- if (r->end)
- panic("Overlapping early reservations "
- "%llx-%llx %s to %llx-%llx %s\n",
- start, end - 1, name ? name : "", r->start,
- r->end - 1, r->name);
- r->start = start;
- r->end = end;
- r->overlap_ok = overlap_ok;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
- early_res_count++;
-}
-
-/*
- * A few early reservtations come here.
- *
- * The 'overlap_ok' in the name of this routine does -not- mean it
- * is ok for these reservations to overlap an earlier reservation.
- * Rather it means that it is ok for subsequent reservations to
- * overlap this one.
- *
- * Use this entry point to reserve early ranges when you are doing
- * so out of "Paranoia", reserving perhaps more memory than you need,
- * just in case, and don't mind a subsequent overlapping reservation
- * that is known to be needed.
- *
- * The drop_overlaps_that_are_ok() call here isn't really needed.
- * It would be needed if we had two colliding 'overlap_ok'
- * reservations, so that the second such would not panic on the
- * overlap with the first. We don't have any such as of this
- * writing, but might as well tolerate such if it happens in
- * the future.
- */
-void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
-{
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 1);
-}
-
-static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
-{
- u64 start, end, size, mem;
- struct early_res *new;
-
- /* do we have enough slots left ? */
- if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
- return;
-
- /* double it */
- mem = -1ULL;
- size = sizeof(struct early_res) * max_early_res * 2;
- if (early_res == early_res_x)
- start = 0;
- else
- start = early_res[0].end;
- end = ex_start;
- if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
- if (mem == -1ULL) {
- start = ex_end;
- end = get_max_mapped();
- if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
- }
- if (mem == -1ULL)
- panic("can not find more space for early_res array");
-
- new = __va(mem);
- /* save the first one for own */
- new[0].start = mem;
- new[0].end = mem + size;
- new[0].overlap_ok = 0;
- /* copy old to new */
- if (early_res == early_res_x) {
- memcpy(&new[1], &early_res[0],
- sizeof(struct early_res) * max_early_res);
- memset(&new[max_early_res+1], 0,
- sizeof(struct early_res) * (max_early_res - 1));
- early_res_count++;
- } else {
- memcpy(&new[1], &early_res[1],
- sizeof(struct early_res) * (max_early_res - 1));
- memset(&new[max_early_res], 0,
- sizeof(struct early_res) * max_early_res);
- }
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = new;
- max_early_res *= 2;
- printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
- max_early_res, mem, mem + size - 1);
-}
-
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
-void __init reserve_early(u64 start, u64 end, char *name)
-{
- if (start >= end)
- return;
-
- __check_and_double_early_res(start, end);
-
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
-}
-
-void __init reserve_early_without_check(u64 start, u64 end, char *name)
-{
- struct early_res *r;
-
- if (start >= end)
- return;
-
- __check_and_double_early_res(start, end);
-
- r = &early_res[early_res_count];
-
- r->start = start;
- r->end = end;
- r->overlap_ok = 0;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
- early_res_count++;
-}
-
-void __init free_early(u64 start, u64 end)
-{
- struct early_res *r;
- int i;
-
- i = find_overlapped_early(start, end);
- r = &early_res[i];
- if (i >= max_early_res || r->end != end || r->start != start)
- panic("free_early on not reserved area: %llx-%llx!",
- start, end - 1);
-
- drop_range(i);
-}
-
-void __init free_early_partial(u64 start, u64 end)
-{
- struct early_res *r;
- int i;
-
- if (start == end)
- return;
-
- if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
- return;
-
-try_next:
- i = find_overlapped_early(start, end);
- if (i >= max_early_res)
- return;
-
- r = &early_res[i];
- /* hole ? */
- if (r->end >= end && r->start <= start) {
- drop_range_partial(i, start, end);
- return;
- }
-
- drop_range_partial(i, start, end);
- goto try_next;
-}
-
-#ifdef CONFIG_NO_BOOTMEM
-static void __init subtract_early_res(struct range *range, int az)
-{
- int i, count;
- u64 final_start, final_end;
- int idx = 0;
-
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
-
-#define DEBUG_PRINT_EARLY_RES 1
-
-#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO "Subtract (%d early reservations)\n", count);
-#endif
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
-#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
- r->start, r->end, r->name);
-#endif
- final_start = PFN_DOWN(r->start);
- final_end = PFN_UP(r->end);
- if (final_start >= final_end)
- continue;
- subtract_range(range, az, final_start, final_end);
- }
-
-}
-
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
-{
- int i, count;
- u64 start = 0, end;
- u64 size;
- u64 mem;
- struct range *range;
- int nr_range;
-
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- count *= 2;
-
- size = sizeof(struct range) * count;
- end = get_max_mapped();
-#ifdef MAX_DMA32_PFN
- if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
- start = MAX_DMA32_PFN << PAGE_SHIFT;
-#endif
- mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
- if (mem == -1ULL)
- panic("can not find more space for range free");
-
- range = __va(mem);
- /* use early_node_map[] and early_res to get range array at first */
- memset(range, 0, size);
- nr_range = 0;
-
- /* need to go over early_node_map to find out good range for node */
- nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-#ifdef CONFIG_X86_32
- subtract_range(range, count, max_low_pfn, -1ULL);
-#endif
- subtract_early_res(range, count);
- nr_range = clean_sort_range(range, count);
-
- /* need to clear it ? */
- if (nodeid == MAX_NUMNODES) {
- memset(&early_res[0], 0,
- sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- }
-
- *rangep = range;
- return nr_range;
-}
-#else
-void __init early_res_to_bootmem(u64 start, u64 end)
-{
- int i, count;
- u64 final_start, final_end;
- int idx = 0;
-
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
-
- printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count - idx, max_early_res, start, end);
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
- if (final_start >= final_end) {
- printk(KERN_CONT "\n");
- continue;
- }
- printk(KERN_CONT " ==> [%010llx - %010llx]\n",
- final_start, final_end);
- reserve_bootmem_generic(final_start, final_end - final_start,
- BOOTMEM_DEFAULT);
- }
- /* clear them */
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- early_res_count = 0;
-}
-#endif
-
-/* Check for already reserved areas */
-static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
-{
- int i;
- u64 addr = *addrp;
- int changed = 0;
- struct early_res *r;
-again:
- i = find_overlapped_early(addr, addr + size);
- r = &early_res[i];
- if (i < max_early_res && r->end) {
- *addrp = addr = round_up(r->end, align);
- changed = 1;
- goto again;
- }
- return changed;
-}
-
-/* Check for already reserved areas */
-static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
-{
- int i;
- u64 addr = *addrp, last;
- u64 size = *sizep;
- int changed = 0;
-again:
- last = addr + size;
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
- changed = 1;
- goto again;
- }
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
- size = last - addr;
- changed = 1;
- goto again;
- }
- if (last <= r->end && addr >= r->start) {
- (*sizep)++;
- return 0;
- }
- }
- if (changed) {
- *addrp = addr;
- *sizep = size;
- }
- return changed;
-}
-
-/*
- * Find a free area with specified alignment in a specific range.
- * only with the area.between start to end is active range from early_node_map
- * so they are good as RAM
- */
-u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align)
-{
- u64 addr, last;
-
- addr = round_up(ei_start, align);
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- goto out;
- while (bad_addr(&addr, size, align) && addr+size <= ei_last)
- ;
- last = addr + size;
- if (last > ei_last)
- goto out;
- if (last > end)
- goto out;
-
- return addr;
-
-out:
- return -1ULL;
-}
-
-u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
- u64 *sizep, u64 align)
-{
- u64 addr, last;
-
- addr = round_up(ei_start, align);
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- goto out;
- *sizep = ei_last - addr;
- while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
- ;
- last = addr + *sizep;
- if (last > ei_last)
- goto out;
-
- return addr;
-
-out:
- return -1ULL;
-}
--
1.6.4.2

2010-04-09 06:11:50

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 30/39] x86: put 64 bit numa node memmap above 16M

Do not use the hard-coded 0x8000 value anymore.

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/mm/numa_64.c | 2 +-
arch/x86/mm/srat_64.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 18d2296..b8438ac 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -88,7 +88,7 @@ static int __init allocate_cachealigned_memnodemap(void)
if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
return 0;

- addr = 0x8000;
+ addr = __pa(MAX_DMA_ADDRESS);
nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
nodemap_addr = lmb_find_area(addr, max_pfn<<PAGE_SHIFT,
nodemap_size, L1_CACHE_BYTES);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 84e11b9..416b665 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -99,8 +99,8 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
unsigned long phys;

length = slit->header.length;
- phys = lmb_find_area(0, max_pfn_mapped<<PAGE_SHIFT, length,
- PAGE_SIZE);
+ phys = lmb_find_area(__pa(MAX_DMA_ADDRESS), max_pfn_mapped<<PAGE_SHIFT,
+ length, PAGE_SIZE);

if (phys == -1L)
panic(" Can not save slit!\n");
--
1.6.4.2

2010-04-09 06:09:07

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 12/39] lmb: Add lmb_register_active_regions() and lmb_hole_size()

lmb_register_active_regions() will be used to fill early_node_map[];
the result is the intersection of lmb.memory.region and the NUMA data.

lmb_hole_size() will be used to find the hole size within lmb.memory.region
for a specified range.

Signed-off-by: Yinghai Lu <[email protected]>
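
A small usage sketch of the two helpers (the pfn bounds are illustrative):

	unsigned long start_pfn = 0, last_pfn = max_pfn;	/* node 0 span */

	/* Feed early_node_map[] with every lmb.memory range that
	 * intersects node 0's pfn span. */
	lmb_register_active_regions(0, start_pfn, last_pfn);

	/* Bytes in [0, 4GB) not covered by lmb.memory */
	u64 hole = lmb_hole_size(0, 1ULL << 32);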
---
include/linux/lmb.h | 4 +++
mm/lmb.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 72 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 2ee2cc1..cf8f7ca 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -95,6 +95,10 @@ void lmb_to_bootmem(u64 start, u64 end);
struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);

+void lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn);
+u64 lmb_hole_size(u64 start, u64 end);
+
#include <asm/lmb.h>

#endif /* __KERNEL__ */
diff --git a/mm/lmb.c b/mm/lmb.c
index f11df14..cf0f1c9 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -790,3 +790,71 @@ u64 __init __weak lmb_find_area(u64 start, u64 end, u64 size, u64 align)
}
return -1ULL;
}
+/*
+ * Finds an active region in the address range from start_pfn to last_pfn and
+ * returns its range in ei_startpfn and ei_endpfn for the lmb entry.
+ */
+static int __init lmb_find_active_region(const struct lmb_property *ei,
+ unsigned long start_pfn,
+ unsigned long last_pfn,
+ unsigned long *ei_startpfn,
+ unsigned long *ei_endpfn)
+{
+ u64 align = PAGE_SIZE;
+
+ *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
+ *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
+
+ /* Skip map entries smaller than a page */
+ if (*ei_startpfn >= *ei_endpfn)
+ return 0;
+
+ /* Skip if map is outside the node */
+ if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
+ return 0;
+
+ /* Check for overlaps */
+ if (*ei_startpfn < start_pfn)
+ *ei_startpfn = start_pfn;
+ if (*ei_endpfn > last_pfn)
+ *ei_endpfn = last_pfn;
+
+ return 1;
+}
+
+/* Walk the lmb.memory map and register active regions within a node */
+void __init lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn)
+{
+ unsigned long ei_startpfn;
+ unsigned long ei_endpfn;
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++)
+ if (lmb_find_active_region(&lmb.memory.region[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ add_active_range(nid, ei_startpfn, ei_endpfn);
+}
+
+/*
+ * Find the hole size (in bytes) in the memory range.
+ * @start: starting address of the memory range to scan
+ * @end: ending address of the memory range to scan
+ */
+u64 __init lmb_hole_size(u64 start, u64 end)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long last_pfn = end >> PAGE_SHIFT;
+ unsigned long ei_startpfn, ei_endpfn, ram = 0;
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ if (lmb_find_active_region(&lmb.memory.region[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ ram += ei_endpfn - ei_startpfn;
+ }
+ return end - start - ((u64)ram << PAGE_SHIFT);
+}
+
--
1.6.4.2

2010-04-09 06:11:53

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 36/39] bootmem: Add nobootmem.c to reduce the #ifdef

Introduce nobootmem.c to hold the wrappers for CONFIG_NO_BOOTMEM=y.

This removes the related #ifdefs from bootmem.c.

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/Makefile | 8 +-
mm/bootmem.c | 151 +----------------------
mm/nobootmem.c | 389 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 397 insertions(+), 151 deletions(-)
create mode 100644 mm/nobootmem.c

diff --git a/mm/Makefile b/mm/Makefile
index 52492f9..2ab3039 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -7,12 +7,18 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
vmalloc.o pagewalk.o

-obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
+obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o mmu_context.o \
$(mmu-y)
+ifdef CONFIG_NO_BOOTMEM
+ obj-y += nobootmem.o
+else
+ obj-y += bootmem.o
+endif
+
obj-y += init-mm.o

obj-$(CONFIG_HAVE_LMB) += lmb.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 2a4c8b5..2741c34 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -35,7 +35,6 @@ unsigned long max_pfn;
unsigned long saved_max_pfn;
#endif

-#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -146,7 +145,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
min_low_pfn = start;
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-#endif
+
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting address of the range
@@ -171,53 +170,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
}
}

-#ifdef CONFIG_NO_BOOTMEM
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
- int i;
- unsigned long start_aligned, end_aligned;
- int order = ilog2(BITS_PER_LONG);
-
- start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
- end_aligned = end & ~(BITS_PER_LONG - 1);
-
- if (end_aligned <= start_aligned) {
- for (i = start; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-
- return;
- }
-
- for (i = start; i < start_aligned; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-
- for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
- __free_pages_bootmem(pfn_to_page(i), order);
-
- for (i = end_aligned; i < end; i++)
- __free_pages_bootmem(pfn_to_page(i), 0);
-}
-
-unsigned long __init free_all_memory_core_early(int nodeid)
-{
- int i;
- u64 start, end;
- unsigned long count = 0;
- struct range *range = NULL;
- int nr_range;
-
- nr_range = get_free_all_memory_range(&range, nodeid);
-
- for (i = 0; i < nr_range; i++) {
- start = range[i].start;
- end = range[i].end;
- count += end - start;
- __free_pages_memory(start, end);
- }
-
- return count;
-}
-#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
int aligned;
@@ -278,7 +230,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)

return count;
}
-#endif

/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -289,12 +240,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
-#ifdef CONFIG_NO_BOOTMEM
- /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
- return 0;
-#else
return free_all_bootmem_core(pgdat->bdata);
-#endif
}

/**
@@ -304,16 +250,6 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
-#ifdef CONFIG_NO_BOOTMEM
- /*
- * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
- * because in some case like Node0 doesnt have RAM installed
- * low ram will be on Node1
- * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
- * will be used instead of only Node0 related
- */
- return free_all_memory_core_early(MAX_NUMNODES);
-#else
unsigned long total_pages = 0;
bootmem_data_t *bdata;

@@ -321,10 +257,8 @@ unsigned long __init free_all_bootmem(void)
total_pages += free_all_bootmem_core(bdata);

return total_pages;
-#endif
}

-#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
unsigned long sidx, unsigned long eidx)
{
@@ -419,7 +353,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
}
BUG();
}
-#endif

/**
* free_bootmem_node - mark a page range as usable
@@ -434,9 +367,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
-#ifdef CONFIG_NO_BOOTMEM
- lmb_free_area(physaddr, physaddr + size);
-#else
unsigned long start, end;

kmemleak_free_part(__va(physaddr), size);
@@ -445,7 +375,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
end = PFN_DOWN(physaddr + size);

mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-#endif
}

/**
@@ -459,9 +388,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
-#ifdef CONFIG_NO_BOOTMEM
- lmb_free_area(addr, addr + size);
-#else
unsigned long start, end;

kmemleak_free_part(__va(addr), size);
@@ -470,7 +396,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
end = PFN_DOWN(addr + size);

mark_bootmem(start, end, 0, 0);
-#endif
}

/**
@@ -487,17 +412,12 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
-#ifdef CONFIG_NO_BOOTMEM
- panic("no bootmem");
- return 0;
-#else
unsigned long start, end;

start = PFN_DOWN(physaddr);
end = PFN_UP(physaddr + size);

return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-#endif
}

/**
@@ -513,20 +433,14 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
-#ifdef CONFIG_NO_BOOTMEM
- panic("no bootmem");
- return 0;
-#else
unsigned long start, end;

start = PFN_DOWN(addr);
end = PFN_UP(addr + size);

return mark_bootmem(start, end, 1, flags);
-#endif
}

-#ifndef CONFIG_NO_BOOTMEM
int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
int flags)
{
@@ -683,33 +597,12 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
#endif
return NULL;
}
-#endif

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
-#ifdef CONFIG_NO_BOOTMEM
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
- ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
-
- if (ptr)
- return ptr;
-
- if (goal != 0) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-#else
bootmem_data_t *bdata;
void *region;

@@ -735,7 +628,6 @@ restart:
}

return NULL;
-#endif
}

/**
@@ -756,10 +648,6 @@ void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
{
unsigned long limit = 0;

-#ifdef CONFIG_NO_BOOTMEM
- limit = -1UL;
-#endif
-
return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

@@ -796,14 +684,9 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
{
unsigned long limit = 0;

-#ifdef CONFIG_NO_BOOTMEM
- limit = -1UL;
-#endif
-
return ___alloc_bootmem(size, align, goal, limit);
}

-#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
@@ -820,7 +703,6 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,

return ___alloc_bootmem(size, align, goal, limit);
}
-#endif

/**
* __alloc_bootmem_node - allocate boot memory from a specific node
@@ -843,12 +725,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

-#ifdef CONFIG_NO_BOOTMEM
- return __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
-#else
return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
-#endif
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -869,13 +746,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long new_goal;

new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- new_goal, -1ULL);
-#else
ptr = alloc_bootmem_core(pgdat->bdata, size, align,
new_goal, 0);
-#endif
if (ptr)
return ptr;
}
@@ -896,16 +768,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
-#ifdef CONFIG_NO_BOOTMEM
- unsigned long pfn, goal, limit;
-
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
- return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
- SMP_CACHE_BYTES, goal, limit);
-#else
bootmem_data_t *bdata;
unsigned long pfn, goal, limit;

@@ -915,7 +777,6 @@ void * __init alloc_bootmem_section(unsigned long size,
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
-#endif
}
#endif

@@ -927,16 +788,11 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

-#ifdef CONFIG_NO_BOOTMEM
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
-#else
ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
if (ptr)
return ptr;

ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
-#endif
if (ptr)
return ptr;

@@ -987,11 +843,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

-#ifdef CONFIG_NO_BOOTMEM
- return __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
-#else
return ___alloc_bootmem_node(pgdat->bdata, size, align,
goal, ARCH_LOW_ADDRESS_LIMIT);
-#endif
}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
new file mode 100644
index 0000000..283673e
--- /dev/null
+++ b/mm/nobootmem.c
@@ -0,0 +1,389 @@
+/*
+ * bootmem - A boot-time physical memory allocator and configurator
+ *
+ * Copyright (C) 1999 Ingo Molnar
+ * 1999 Kanoj Sarcar, SGI
+ * 2008 Johannes Weiner
+ *
+ * Access to this subsystem has to be serialized externally (which is true
+ * for the boot process anyway).
+ */
+#include <linux/init.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/kmemleak.h>
+#include <linux/range.h>
+#include <linux/lmb.h>
+
+#include <asm/bug.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+#include "internal.h"
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * If we have booted due to a crash, max_pfn will be a very low value. We need
+ * to know the amount of memory that the previous kernel used.
+ */
+unsigned long saved_max_pfn;
+#endif
+
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system. Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+ unsigned long cursor, end;
+
+ kmemleak_free_part(__va(addr), size);
+
+ cursor = PFN_UP(addr);
+ end = PFN_DOWN(addr + size);
+
+ for (; cursor < end; cursor++) {
+ __free_pages_bootmem(pfn_to_page(cursor), 0);
+ totalram_pages++;
+ }
+}
+
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int i;
+ unsigned long start_aligned, end_aligned;
+ int order = ilog2(BITS_PER_LONG);
+
+ start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+ end_aligned = end & ~(BITS_PER_LONG - 1);
+
+ if (end_aligned <= start_aligned) {
+ for (i = start; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ return;
+ }
+
+ for (i = start; i < start_aligned; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+
+ for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+ __free_pages_bootmem(pfn_to_page(i), order);
+
+ for (i = end_aligned; i < end; i++)
+ __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+ int i;
+ u64 start, end;
+ unsigned long count = 0;
+ struct range *range = NULL;
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
+
+ return count;
+}
+
+/**
+ * free_all_bootmem_node - release a node's free pages to the buddy allocator
+ * @pgdat: node to be released
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
+{
+ register_page_bootmem_info_node(pgdat);
+
+ /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ return 0;
+}
+
+/**
+ * free_all_bootmem - release free pages to the buddy allocator
+ *
+ * Returns the number of pages actually released.
+ */
+unsigned long __init free_all_bootmem(void)
+{
+ /*
+ * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
+ * because in some case like Node0 doesnt have RAM installed
+ * low ram will be on Node1
+ * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
+ * will be used instead of only Node0 related
+ */
+ return free_all_memory_core_early(MAX_NUMNODES);
+}
+
+/**
+ * free_bootmem_node - mark a page range as usable
+ * @pgdat: node the range resides on
+ * @physaddr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must reside completely on the specified node.
+ */
+void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
+ unsigned long size)
+{
+ lmb_free_area(physaddr, physaddr + size);
+}
+
+/**
+ * free_bootmem - mark a page range as usable
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * Partial pages will be considered reserved and left as they are.
+ *
+ * The range must be contiguous but may span node boundaries.
+ */
+void __init free_bootmem(unsigned long addr, unsigned long size)
+{
+ lmb_free_area(addr, addr + size);
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+ ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+ if (ptr)
+ return ptr;
+
+ if (goal != 0) {
+ goal = 0;
+ goto restart;
+ }
+
+ return NULL;
+}
+
+/**
+ * __alloc_bootmem_nopanic - allocate boot memory without panicking
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * Returns NULL on failure.
+ */
+void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ unsigned long limit = -1UL;
+
+ return ___alloc_bootmem_nopanic(size, align, goal, limit);
+}
+
+static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
+{
+ void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
+
+ if (mem)
+ return mem;
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
+}
+
+/**
+ * __alloc_bootmem - allocate boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ unsigned long limit = -1UL;
+
+ return ___alloc_bootmem(size, align, goal, limit);
+}
+
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+ unsigned long end_pfn;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ /* update the goal according to MAX_DMA32_PFN */
+ end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+ if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+ (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+ void *ptr;
+ unsigned long new_goal;
+
+ new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ new_goal, -1ULL);
+ if (ptr)
+ return ptr;
+ }
+#endif
+
+ return __alloc_bootmem_node(pgdat, size, align, goal);
+
+}
+
+#ifdef CONFIG_SPARSEMEM
+/**
+ * alloc_bootmem_section - allocate boot memory from a specific section
+ * @size: size of the request in bytes
+ * @section_nr: sparse map section to allocate from
+ *
+ * Return NULL on failure.
+ */
+void * __init alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr)
+{
+ unsigned long pfn, goal, limit;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+ return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+ SMP_CACHE_BYTES, goal, limit);
+}
+#endif
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ void *ptr;
+
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, -1ULL);
+ if (ptr)
+ return ptr;
+
+ return __alloc_bootmem_nopanic(size, align, goal);
+}
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+/**
+ * __alloc_bootmem_low - allocate low boot memory
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may happen on any node in the system.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
+}
+
+/**
+ * __alloc_bootmem_low_node - allocate low boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return __alloc_memory_core_early(pgdat->node_id, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
+}
--
1.6.4.2
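
For orientation, the nobootmem free path in the file above boils down to
this call-graph sketch (names as in the code, flow simplified):

	free_all_bootmem()
	  free_all_memory_core_early(MAX_NUMNODES)
	    get_free_all_memory_range(&range, nodeid)	/* active ranges - lmb.reserved */
	    __free_pages_memory(start, end)		/* hand the PFNs to the buddy */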

2010-04-09 06:12:21

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 37/39] mm: move contig_page_data define to bootmem.c/nobootmem.c

This lets us remove the #ifdef block in mm/page_alloc.c.

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/bootmem.c | 7 +++++++
mm/nobootmem.c | 5 +++++
mm/page_alloc.c | 9 ---------
3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/mm/bootmem.c b/mm/bootmem.c
index 2741c34..ff55ad7 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -23,6 +23,13 @@

#include "internal.h"

+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data = {
+ .bdata = &bootmem_node_data[0]
+ };
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 283673e..abaec96 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -23,6 +23,11 @@

#include "internal.h"

+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 256aed0..233c403 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4496,15 +4496,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
dma_reserve = new_dma_reserve;
}

-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
-#ifndef CONFIG_NO_BOOTMEM
- .bdata = &bootmem_node_data[0]
-#endif
- };
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
void __init free_area_init(unsigned long *zones_size)
{
free_area_init_node(0, zones_size,
--
1.6.4.2

2010-04-09 06:07:19

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 17/39] lmb: Add lmb_memory_size()

It returns the memory size in the specified range, according to
lmb.memory.region.

Share some code with lmb_free_memory_size() by passing a get_free flag to
__lmb_memory_size().
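
For illustration, a minimal sketch of how a caller could use the pair of
helpers to compare total vs. free memory below 4G (the caller and its
printout are hypothetical, not part of this patch):

	u64 limit = 1ULL << 32;				/* only look below 4G */
	u64 total = lmb_memory_size(0, limit);		/* RAM in range */
	u64 free = lmb_free_memory_size(0, limit);	/* RAM minus lmb.reserved */

	pr_info("lmb: %lluMB total, %lluMB free below 4G\n",
		(unsigned long long)(total >> 20),
		(unsigned long long)(free >> 20));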

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 1 +
mm/lmb.c | 18 +++++++++++++++++-
2 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index b874dc0..8332934 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -101,6 +101,7 @@ void lmb_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 lmb_hole_size(u64 start, u64 end);
u64 lmb_free_memory_size(u64 addr, u64 limit);
+u64 lmb_memory_size(u64 addr, u64 limit);

#include <asm/lmb.h>

diff --git a/mm/lmb.c b/mm/lmb.c
index dccd539..feb3dfa 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -746,7 +746,7 @@ void __init lmb_to_bootmem(u64 start, u64 end)
}
#endif

-u64 __init lmb_free_memory_size(u64 addr, u64 limit)
+static u64 __init __lmb_memory_size(u64 addr, u64 limit, bool get_free)
{
int i, count;
struct range *range;
@@ -776,6 +776,10 @@ u64 __init lmb_free_memory_size(u64 addr, u64 limit)
}
subtract_range(range, count, 0, addr);
subtract_range(range, count, limit, -1ULL);
+
+ /* Subtract the lmb.reserved.region entries in the range? */
+ if (!get_free)
+ goto sort_and_count_them;
for (i = 0; i < lmb.reserved.cnt; i++) {
struct lmb_property *r = &lmb.reserved.region[i];

@@ -788,6 +792,8 @@ u64 __init lmb_free_memory_size(u64 addr, u64 limit)

subtract_range(range, count, final_start, final_end);
}
+
+sort_and_count_them:
nr_range = clean_sort_range(range, count);

free_size = 0;
@@ -797,6 +803,16 @@ u64 __init lmb_free_memory_size(u64 addr, u64 limit)
return free_size << PAGE_SHIFT;
}

+u64 __init lmb_free_memory_size(u64 addr, u64 limit)
+{
+ return __lmb_memory_size(addr, limit, true);
+}
+
+u64 __init lmb_memory_size(u64 addr, u64 limit)
+{
+ return __lmb_memory_size(addr, limit, false);
+}
+
u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align)
{
--
1.6.4.2

2010-04-09 06:12:42

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 14/39] lmb: Add find_memory_core_early()

Use __lmb_find_area() to find a free range inside the node ranges
recorded in early_node_map[].

It will be used by lmb_find_area_node().

lmb_find_area_node() will be used to find the right buffer for NODE_DATA().
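
As a rough sketch of the intended caller (lmb_find_area_node() itself is
added later in the series; this is only the shape, not the final code):

	u64 __init lmb_find_area_node(int nid, u64 start, u64 end,
				      u64 size, u64 align)
	{
		u64 addr;

		/* prefer memory inside the node's early_node_map[] ranges */
		addr = find_memory_core_early(nid, size, align, start, end);
		if (addr != -1ULL)
			return addr;

		/* fall back to anywhere in the allowed range */
		return lmb_find_area(start, end, size, align);
	}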

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/mm.h | 2 ++
mm/page_alloc.c | 29 +++++++++++++++++++++++++++++
2 files changed, 31 insertions(+), 0 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb19bb9..7774e1d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1159,6 +1159,8 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit);
void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d03c946..12a74ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -21,6 +21,7 @@
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
@@ -3393,6 +3394,34 @@ void __init free_bootmem_with_active_regions(int nid,
}
}

+#ifdef CONFIG_HAVE_LMB
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ int i;
+
+ /* Need to go over early_node_map to find out good range for node */
+ for_each_active_range_index_in_nid(i, nid) {
+ u64 addr;
+ u64 ei_start, ei_last;
+
+ ei_last = early_node_map[i].end_pfn;
+ ei_last <<= PAGE_SHIFT;
+ ei_start = early_node_map[i].start_pfn;
+ ei_start <<= PAGE_SHIFT;
+ addr = __lmb_find_area(ei_start, ei_last,
+ goal, limit, size, align);
+
+ if (addr == -1ULL)
+ continue;
+
+ return addr;
+ }
+
+ return -1ULL;
+}
+#endif
+
int __init add_from_early_node_map(struct range *range, int az,
int nr_range, int nid)
{
--
1.6.4.2

2010-04-09 06:13:02

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 04/39] lmb: Move lmb.c to mm/

lmb.c is memory related, so move it to mm/, as suggested by Ingo.

Signed-off-by: Yinghai Lu <[email protected]>
---
lib/Makefile | 2 --
mm/Makefile | 2 ++
{lib => mm}/lmb.c | 0
3 files changed, 2 insertions(+), 2 deletions(-)
rename {lib => mm}/lmb.c (100%)

diff --git a/lib/Makefile b/lib/Makefile
index 2e152ae..a463a4d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -85,8 +85,6 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o

lib-$(CONFIG_GENERIC_BUG) += bug.o

-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o

obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/mm/Makefile b/mm/Makefile
index 6c2a73a..52492f9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -15,6 +15,8 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
$(mmu-y)
obj-y += init-mm.o

+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
diff --git a/lib/lmb.c b/mm/lmb.c
similarity index 100%
rename from lib/lmb.c
rename to mm/lmb.c
--
1.6.4.2

2010-04-09 06:13:23

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 23/39] x86, lmb: Add x86 version of __lmb_find_area()

The generic version goes from high to low, and it seems it can not find
an area that is compact enough.

The x86 version goes from goal up to limit, just like the way we used
for early_res.

Use ARCH_LMB_FIND_AREA to select between them.

32 bit has to use CONFIG_ARCH_LMB_FIND_AREA=y, because in the nobootmem
config some alloc_bootmem callers hard-code -1ULL as the limit.
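
To illustrate the difference in search direction, a toy layout (addresses
made up for the example): RAM covering [1M, 16M) with a reserved block at
[4M, 5M), asked for 1M with 4K alignment:

	addr = __lmb_find_area(0x100000, 0x1000000,	/* ei_start, ei_last */
			       0x100000, 0x1000000,	/* start, end */
			       0x100000, 0x1000);	/* size, align */
	/*
	 * x86 version (bottom-up):    addr == 0x100000 (packed low)
	 * generic version (top-down): addr == 0xf00000 (just below 16M)
	 */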

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/Kconfig | 8 +++++
arch/x86/mm/lmb.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 86 insertions(+), 0 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d3b7bb3..7415db5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -585,6 +585,14 @@ config PARAVIRT_DEBUG
Enable to debug paravirt_ops internals. Specifically, BUG if
a paravirt_op is missing when it is called.

+config ARCH_LMB_FIND_AREA
+ default y
+ bool "Use x86 own lmb_find_area()"
+ ---help---
+ Use the x86 version of lmb_find_area() instead of the generic one.
+ It gets a free area going up from low addresses, while the
+ generic one goes down from the limit.
+
config NO_BOOTMEM
default y
bool "Disable Bootmem code"
diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c
index 3229e9e..302a205 100644
--- a/arch/x86/mm/lmb.c
+++ b/arch/x86/mm/lmb.c
@@ -86,3 +86,81 @@ u64 __init lmb_find_area_size(u64 start, u64 *sizep, u64 align)
return -1ULL;
}

+#ifdef CONFIG_ARCH_LMB_FIND_AREA
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+ int i;
+ struct lmb_property *r;
+
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ r = &lmb.reserved.region[i];
+ if (end > r->base && start < (r->base + r->size))
+ break;
+ }
+
+ return i;
+}
+
+/* Check for already reserved areas */
+static inline bool __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+ int i;
+ u64 addr = *addrp;
+ bool changed = false;
+ struct lmb_property *r;
+again:
+ i = find_overlapped_early(addr, addr + size);
+ r = &lmb.reserved.region[i];
+ if (i < lmb.reserved.cnt && r->size) {
+ *addrp = addr = round_up(r->base + r->size, align);
+ changed = true;
+ goto again;
+ }
+ return changed;
+}
+
+u64 __init __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 addr, last;
+
+ addr = round_up(ei_start, align);
+ if (addr < start)
+ addr = round_up(start, align);
+ if (addr >= ei_last)
+ goto out;
+ while (bad_addr(&addr, size, align) && addr+size <= ei_last)
+ ;
+ last = addr + size;
+ if (last > ei_last)
+ goto out;
+ if (last > end)
+ goto out;
+
+ return addr;
+
+out:
+ return -1ULL;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init lmb_find_area(u64 start, u64 end, u64 size, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = __lmb_find_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+ return -1ULL;
+}
+#endif
--
1.6.4.2

2010-04-09 06:13:29

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 26/39] nobootmem: use lmb.default_alloc_limit in alloc_bootmem path

The generic version of __lmb_find_area() goes from high to low, and on
32 bit the active regions do include high pages.

We need to cap the limit with lmb.default_alloc_limit, aka get_max_mapped(),
since memory above max_pfn_mapped is not mapped yet and must not be handed
out this early.

With this patch, x86 32 bit can use the generic version of __lmb_find_area().

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/page_alloc.c | 3 +++
1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79bd44b..256aed0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3445,6 +3445,9 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,

u64 addr;

+ if (limit > lmb.default_alloc_limit)
+ limit = lmb.default_alloc_limit;
+
addr = find_memory_core_early(nid, size, align, goal, limit);

if (addr == -1ULL)
--
1.6.4.2

2010-04-09 06:13:34

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 07/39] lmb: Add lmb_find_area()

It tries to find an area with the given size/align in the specified
range (start, end).

lmb_find_area() will honor goal/limit.

This also makes it easier for x86 to use lmb:
x86 early_res uses a find/reserve pattern instead of alloc.

When we need a temporary buffer for a range array etc. during range work,
using lmb_alloc() would require extra fix-up code for that buffer, because
it would already be in lmb.reserved, plus an extra lmb_free() call at the
end.

-v2: Change name to lmb_find_area() according to Michael Ellerman
-v3: Add generic weak version __lmb_find_area()
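
A minimal sketch of the find-only pattern this enables for a temporary
buffer (the caller and panic message are illustrative; default_alloc_limit
is the field added elsewhere in this series):

	/* find only -- nothing is added to lmb.reserved */
	u64 mem = lmb_find_area(0, lmb.default_alloc_limit,
				count * sizeof(struct range),
				sizeof(struct range));
	if (mem == -1ULL)
		panic("can not find space for temporary range array");
	range = __va(mem);
	/* ... use it, then just stop referencing it: no lmb_free() needed */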

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 4 ++++
mm/lmb.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index e14ea8d..4cf2f3b 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -83,6 +83,10 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
lmb_size_pages(type, region_nr);
}

+u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align);
+u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
+
#include <asm/lmb.h>

#endif /* __KERNEL__ */
diff --git a/mm/lmb.c b/mm/lmb.c
index 392d805..7010212 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -11,9 +11,13 @@
*/

#include <linux/kernel.h>
+#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/range.h>

#define LMB_ALLOC_ANYWHERE 0

@@ -559,3 +563,48 @@ int lmb_find(struct lmb_property *res)
}
return -1;
}
+
+u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 final_start, final_end;
+ u64 mem;
+
+ final_start = max(ei_start, start);
+ final_end = min(ei_last, end);
+
+ if (final_start >= final_end)
+ return -1ULL;
+
+ mem = __lmb_find_base(size, align, final_end);
+
+ if (mem == -1ULL)
+ return -1ULL;
+
+ lmb_free(mem, size);
+ if (mem >= final_start)
+ return mem;
+
+ return -1ULL;
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init __weak lmb_find_area(u64 start, u64 end, u64 size, u64 align)
+{
+ int i;
+
+ for (i = lmb.memory.cnt - 1; i >= 0; i--) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = __lmb_find_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+ return -1ULL;
+}
--
1.6.4.2

2010-04-09 06:13:38

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 24/39] x86: Use lmb to replace early_res

1. Replace find_e820_area with lmb_find_area.
2. Replace reserve_early with lmb_reserve_area.
3. Replace free_early with lmb_free_area.
4. NO_BOOTMEM will switch to lmb too.
5. Keep the _e820/_early wrappers in this patch; a following patch will
replace them all.
6. Because lmb_free_area supports partial free, we can remove some
special-case handling.
7. Make sure lmb_find_area() is only called after fill_lmb_memory(),
so move some calls later in setup.c::setup_arch()
-- corruption_check and mptable_update

-v2: Move reserve_brk() early, before fill_lmb_memory(), to avoid an
overlap between brk and lmb_find_area(): when we have more than 128
RAM entries in the E820 tables, fill_lmb_memory() can use
lmb_find_area() to find a new place for the lmb.memory.region array.
We also don't need extend_brk() after fill_lmb_memory(), so moving
reserve_brk() before it is safe.
-v3: Move find_smp_config early, to make sure lmb_find_area() does not
pick a wrong place if the BIOS does not put the mptable in the right
place.
-v4: Treat RESERVED_KERN as RAM in lmb.memory; those ranges are already
in lmb.reserved anyway.
Use __NOT_KEEP_LMB to make sure lmb related code can be freed later.
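
The resulting early boot order on x86 looks roughly like this (simplified
from the setup_arch()/head*.c changes below, not the literal code):

	init_lmb_memory();			/* head32.c / head64.c */
	reserve_early(_text, __bss_stop, "TEXT DATA BSS");
	...
	reserve_brk();				/* conclude brk first */
	lmb.default_alloc_limit = get_max_mapped();
	fill_lmb_memory();			/* e820 RAM -> lmb.memory */
	early_reserve_e820_mpc_new();		/* lmb_find_area() is safe now */
	setup_bios_corruption_check();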

Suggested-by: David S. Miller <[email protected]>
Suggested-by: Benjamin Herrenschmidt <[email protected]>
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/Kconfig | 9 +--
arch/x86/include/asm/e820.h | 15 +++--
arch/x86/include/asm/lmb.h | 12 +++
arch/x86/kernel/check.c | 14 ++--
arch/x86/kernel/e820.c | 147 ++++++++++-----------------------------
arch/x86/kernel/head.c | 3 +-
arch/x86/kernel/head32.c | 6 +-
arch/x86/kernel/head64.c | 3 +
arch/x86/kernel/mpparse.c | 5 +-
arch/x86/kernel/setup.c | 46 +++++++++----
arch/x86/kernel/setup_percpu.c | 6 --
arch/x86/mm/numa_64.c | 5 +-
kernel/Makefile | 1 -
mm/bootmem.c | 1 +
mm/page_alloc.c | 35 ++-------
mm/sparse-vmemmap.c | 11 ---
16 files changed, 127 insertions(+), 192 deletions(-)
create mode 100644 arch/x86/include/asm/lmb.h

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7415db5..da9040b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
+ select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
@@ -192,9 +193,6 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

-config HAVE_EARLY_RES
- def_bool y
-
config HAVE_INTEL_TXT
def_bool y
depends on EXPERIMENTAL && DMAR && ACPI
@@ -597,14 +595,13 @@ config NO_BOOTMEM
default y
bool "Disable Bootmem code"
---help---
- Use early_res directly instead of bootmem before slab is ready.
+ Use lmb directly instead of bootmem before slab is ready.
- allocator (buddy) [generic]
- early allocator (bootmem) [generic]
- - very early allocator (reserve_early*()) [x86]
+ - very early allocator (lmb) [some generic]
- very very early allocator (early brk model) [x86]
So reduce one layer between early allocator to final allocator

-
config MEMTEST
bool "Memtest"
---help---
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0457c49..396c849 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -116,24 +116,27 @@ extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>

extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn);
extern void e820_register_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern u64 e820_hole_size(u64 start, u64 end);
+
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+
+void init_lmb_memory(void);
+void fill_lmb_memory(void);
+
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);

+void reserve_early(u64 start, u64 end, char *name);
+void free_early(u64 start, u64 end);
+
/*
* Returns true iff the specified range [s,e) is completely contained inside
* the ISA region.
diff --git a/arch/x86/include/asm/lmb.h b/arch/x86/include/asm/lmb.h
new file mode 100644
index 0000000..e43c57a
--- /dev/null
+++ b/arch/x86/include/asm/lmb.h
@@ -0,0 +1,12 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+u64 lmb_find_area_size(u64 start, u64 *sizep, u64 align);
+
+#define LMB_DBG(fmt...) printk(fmt)
+
+#define LMB_REAL_LIMIT 0
+
+#define ARCH_DISCARD_LMB
+
+#endif
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index fc999e6..fcb3f11 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -2,7 +2,8 @@
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
-#include <asm/e820.h>
+#include <linux/lmb.h>
+
#include <asm/proto.h>

/*
@@ -18,10 +19,12 @@ static int __read_mostly memory_corruption_check = -1;
static unsigned __read_mostly corruption_check_size = 64*1024;
static unsigned __read_mostly corruption_check_period = 60; /* seconds */

-static struct e820entry scan_areas[MAX_SCAN_AREAS];
+static struct scan_area {
+ u64 addr;
+ u64 size;
+} scan_areas[MAX_SCAN_AREAS];
static int num_scan_areas;

-
static __init int set_corruption_check(char *arg)
{
char *end;
@@ -81,7 +84,7 @@ void __init setup_bios_corruption_check(void)

while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
u64 size;
- addr = find_e820_area_size(addr, &size, PAGE_SIZE);
+ addr = lmb_find_area_size(addr, &size, PAGE_SIZE);

if (!(addr + 1))
break;
@@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(void)
if ((addr + size) > corruption_check_size)
size = corruption_check_size - addr;

- e820_update_range(addr, size, E820_RAM, E820_RESERVED);
+ lmb_reserve_area(addr, addr + size, "SCAN RAM");
scan_areas[num_scan_areas].addr = addr;
scan_areas[num_scan_areas].size = size;
num_scan_areas++;
@@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(void)

printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
num_scan_areas);
- update_e820();
}


diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 73dc6a7..3fa3c0a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
+#include <linux/lmb.h>

#include <asm/e820.h>
#include <asm/proto.h>
@@ -747,69 +748,19 @@ core_initcall(e820_mark_nvs_memory);
*/
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr;
- u64 ei_start, ei_last;
-
- if (ei->type != E820_RAM)
- continue;
-
- ei_last = ei->addr + ei->size;
- ei_start = ei->addr;
- addr = find_early_area(ei_start, ei_last, start, end,
- size, align);
-
- if (addr != -1ULL)
- return addr;
- }
- return -1ULL;
-}
-
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
- return find_e820_area(start, end, size, align);
+ return lmb_find_area(start, end, size, align);
}

-u64 __init get_max_mapped(void)
-{
- u64 end = max_pfn_mapped;
-
- end <<= PAGE_SHIFT;
-
- return end;
-}
/*
* Find next free range after *start
*/
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr;
- u64 ei_start, ei_last;
-
- if (ei->type != E820_RAM)
- continue;
-
- ei_last = ei->addr + ei->size;
- ei_start = ei->addr;
- addr = find_early_area_size(ei_start, ei_last, start,
- sizep, align);
-
- if (addr != -1ULL)
- return addr;
- }
-
- return -1ULL;
+ return lmb_find_area_size(start, sizep, align);
}

/*
- * pre allocated 4k and reserved it in e820
+ * preallocate 4k and reserve it in lmb and e820_saved
*/
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
@@ -818,7 +769,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
u64 start;

for (start = startt; ; start += size) {
- start = find_e820_area_size(start, &size, align);
+ start = lmb_find_area_size(start, &size, align);
if (!(start + 1))
return 0;
if (size >= sizet)
@@ -835,10 +786,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
addr = round_down(start + size - sizet, align);
if (addr < start)
return 0;
- e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+ lmb_reserve_area(addr, addr + sizet, "new next");
e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820 for early_reserve_e820\n");
- update_e820();
+ printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
update_e820_saved();

return addr;
@@ -900,52 +850,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
{
return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn)
-{
- u64 align = PAGE_SIZE;
-
- *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
- *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
- /* Skip map entries smaller than a page */
- if (*ei_startpfn >= *ei_endpfn)
- return 0;
-
- /* Skip if map is outside the node */
- if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
- *ei_startpfn >= last_pfn)
- return 0;
-
- /* Check for overlaps */
- if (*ei_startpfn < start_pfn)
- *ei_startpfn = start_pfn;
- if (*ei_endpfn > last_pfn)
- *ei_endpfn = last_pfn;
-
- return 1;
-}

/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn)
{
- unsigned long ei_startpfn;
- unsigned long ei_endpfn;
- int i;
-
- for (i = 0; i < e820.nr_map; i++)
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- add_active_range(nid, ei_startpfn, ei_endpfn);
+ lmb_register_active_regions(nid, start_pfn, last_pfn);
}

/*
@@ -955,18 +865,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn,
*/
u64 __init e820_hole_size(u64 start, u64 end)
{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long last_pfn = end >> PAGE_SHIFT;
- unsigned long ei_startpfn, ei_endpfn, ram = 0;
- int i;
+ return lmb_hole_size(start, end);
+}

- for (i = 0; i < e820.nr_map; i++) {
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- ram += ei_endpfn - ei_startpfn;
- }
- return end - start - ((u64)ram << PAGE_SHIFT);
+void reserve_early(u64 start, u64 end, char *name)
+{
+ lmb_reserve_area(start, end, name);
+}
+void free_early(u64 start, u64 end)
+{
+ lmb_free_area(start, end);
}

static void early_panic(char *msg)
@@ -1264,3 +1172,24 @@ void __init setup_memory_map(void)
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
+
+void __init init_lmb_memory(void)
+{
+ lmb_init();
+}
+
+void __init fill_lmb_memory(void)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
+ continue;
+ lmb_add_memory(ei->addr, ei->addr + ei->size);
+ }
+
+ lmb_analyze();
+ lmb_dump_all();
+}
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 3e66bd3..5802287 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -1,5 +1,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/lmb.h>

#include <asm/setup.h>
#include <asm/bios_ebda.h>
@@ -51,5 +52,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;

/* reserve all memory between lowmem and the 1MB mark */
- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
+ lmb_reserve_area_overlap_ok(lowmem, 0x100000, "BIOS reserved");
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index b2e2460..ab3e366 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>
+#include <linux/lmb.h>

#include <asm/setup.h>
#include <asm/sections.h>
@@ -30,14 +31,15 @@ static void __init i386_default_early_setup(void)

void __init i386_start_kernel(void)
{
+ init_lmb_memory();
+
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
- "EX TRAMPOLINE");
+ lmb_reserve_area(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif

reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7147143..89dd2de 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -12,6 +12,7 @@
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
+#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/proto.h>
@@ -96,6 +97,8 @@ void __init x86_64_start_kernel(char * real_mode_data)

void __init x86_64_start_reservations(char *real_mode_data)
{
+ init_lmb_memory();
+
copy_bootdata(__va(real_mode_data));

reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e81030f..fad5af6 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
@@ -664,7 +665,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
unsigned long size = get_mpc_size(mpf->physptr);

- reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
+ lmb_reserve_area_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
}

static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -693,7 +694,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf, (u64)virt_to_phys(mpf));

mem = virt_to_phys(mpf);
- reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
+ lmb_reserve_area_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
if (mpf->physptr)
smp_reserve_memory(mpf);

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 894a48a..61ff9de 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -31,6 +31,7 @@
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/mca.h>
@@ -614,7 +615,7 @@ static __init void reserve_ibft_region(void)
addr = find_ibft_region(&size);

if (size)
- reserve_early_overlap_ok(addr, addr + size, "ibft");
+ lmb_reserve_area_overlap_ok(addr, addr + size, "ibft");
}

#ifdef CONFIG_X86_RESERVE_LOW_64K
@@ -697,6 +698,15 @@ static void __init trim_bios_range(void)
sanitize_e820_map();
}

+static u64 __init get_max_mapped(void)
+{
+ u64 end = max_pfn_mapped;
+
+ end <<= PAGE_SHIFT;
+
+ return end;
+}
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -879,8 +889,6 @@ void __init setup_arch(char **cmdline_p)
*/
max_pfn = e820_end_of_ram_pfn();

- /* preallocate 4k for mptable mpc */
- early_reserve_e820_mpc_new();
/* update e820 for memory not covered by WB MTRRs */
mtrr_bp_init();
if (mtrr_trim_uncached_memory(max_pfn))
@@ -905,15 +913,6 @@ void __init setup_arch(char **cmdline_p)
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#endif

-#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
- setup_bios_corruption_check();
-#endif
-
- printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
- max_pfn_mapped<<PAGE_SHIFT);
-
- reserve_brk();
-
/*
* Find and reserve possible boot-time SMP configuration:
*/
@@ -921,6 +920,26 @@ void __init setup_arch(char **cmdline_p)

reserve_ibft_region();

+ /*
+ * Need to conclude brk before fill_lmb_memory(), since
+ * fill_lmb_memory() may call lmb_find_area(), which could
+ * overlap with the brk area.
+ */
+ reserve_brk();
+
+ lmb.default_alloc_limit = get_max_mapped();
+ fill_lmb_memory();
+
+ /* preallocate 4k for mptable mpc */
+ early_reserve_e820_mpc_new();
+
+#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+ setup_bios_corruption_check();
+#endif
+
+ printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
+ max_pfn_mapped<<PAGE_SHIFT);
+
reserve_trampoline_memory();

#ifdef CONFIG_ACPI_SLEEP
@@ -944,6 +963,7 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = max_pfn;
}
#endif
+ lmb.default_alloc_limit = get_max_mapped();

/*
* NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -983,7 +1003,7 @@ void __init setup_arch(char **cmdline_p)

initmem_init(0, max_pfn, acpi, k8);
#ifndef CONFIG_NO_BOOTMEM
- early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
+ lmb_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
#endif

dma32_reserve_bootmem();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b..35abcb8 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,13 +137,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)

static void __init pcpu_fc_free(void *ptr, size_t size)
{
-#ifdef CONFIG_NO_BOOTMEM
- u64 start = __pa(ptr);
- u64 end = start + size;
- free_early_partial(start, end);
-#else
free_bootmem(__pa(ptr), size);
-#endif
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47..6e0f896 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/lmb.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
@@ -174,7 +175,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
end > (MAX_DMA32_PFN<<PAGE_SHIFT))
start = MAX_DMA32_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = lmb_find_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);

@@ -184,7 +185,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
start = MAX_DMA32_PFN<<PAGE_SHIFT;
else
start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = lmb_find_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);

diff --git a/kernel/Makefile b/kernel/Makefile
index d5c3006..754bf79 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,7 +11,6 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o range.o
-obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
obj-y += groups.o

ifdef CONFIG_FUNCTION_TRACER
diff --git a/mm/bootmem.c b/mm/bootmem.c
index ee31b95..dac3f56 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
+#include <linux/lmb.h>

#include <asm/bug.h>
#include <asm/io.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12a74ad..79bd44b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3441,38 +3441,19 @@ int __init add_from_early_node_map(struct range *range, int az,
void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
- int i;
void *ptr;

- /* need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid(i, nid) {
- u64 addr;
- u64 ei_start, ei_last;
-
- ei_last = early_node_map[i].end_pfn;
- ei_last <<= PAGE_SHIFT;
- ei_start = early_node_map[i].start_pfn;
- ei_start <<= PAGE_SHIFT;
- addr = find_early_area(ei_start, ei_last,
- goal, limit, size, align);
-
- if (addr == -1ULL)
- continue;
+ u64 addr;

-#if 0
- printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
- nid,
- ei_start, ei_last, goal, limit, size,
- align, addr);
-#endif
+ addr = find_memory_core_early(nid, size, align, goal, limit);

- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
- return ptr;
- }
+ if (addr == -1ULL)
+ return NULL;

- return NULL;
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ lmb_reserve_area(addr, addr + size, "BOOTMEM");
+ return ptr;
}
#endif

diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index aa33fd6..29d6cbf 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,

if (vmemmap_buf_start) {
/* need to free left buf */
-#ifdef CONFIG_NO_BOOTMEM
- free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
- if (vmemmap_buf_start < vmemmap_buf) {
- char name[15];
-
- snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
- reserve_early_without_check(__pa(vmemmap_buf_start),
- __pa(vmemmap_buf), name);
- }
-#else
free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
-#endif
vmemmap_buf = NULL;
vmemmap_buf_end = NULL;
}
--
1.6.4.2

2010-04-09 06:07:12

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 06/39] lmb: Seperate __lmb_find_base() from __lmb_alloc_base()

So we can construct a generic __lmb_find_area() later.
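
Note the failure conventions after the split: the new __lmb_find_base()
returns -1ULL on failure (0 can be a valid base for a pure find), while
__lmb_alloc_base() keeps returning 0 for its existing callers. A caller
sketch (illustrative only):

	u64 base = __lmb_alloc_base(size, align, max_addr);
	if (!base)			/* alloc path: 0 means failure */
		panic("lmb: cannot allocate %llu bytes\n",
		      (unsigned long long)size);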

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/lmb.c | 21 +++++++++++++++++----
1 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/mm/lmb.c b/mm/lmb.c
index 65b62dc..392d805 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -393,7 +393,7 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
return alloc;
}

-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+static u64 __init __lmb_find_base(u64 size, u64 align, u64 max_addr)
{
long i, j;
u64 base = 0;
@@ -426,8 +426,6 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
j = lmb_overlaps_region(&lmb.reserved, base, size);
if (j < 0) {
/* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base, size) < 0)
- return 0;
return base;
}
res_base = lmb.reserved.region[j].base;
@@ -436,7 +434,22 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
base = lmb_align_down(res_base - size, align);
}
}
- return 0;
+ return -1ULL;
+}
+
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+ u64 base;
+
+ base = __lmb_find_base(size, align, max_addr);
+
+ if (base == -1ULL)
+ return 0;
+
+ if (lmb_add_region(&lmb.reserved, base, size) < 0)
+ return 0;
+
+ return base;
}

/* You must call lmb_analyze() before this. */
--
1.6.4.2

2010-04-09 06:14:10

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 01/39] swiotlb: Use page alignment for early buffer allocation

We may call free_bootmem_late() if swiotlb is not used, and it only
frees whole pages, so the ranges shrink to page alignment.

So allocate them with page alignment in the first place, to avoid losing
up to two partial pages per buffer.

Before the patch:
[ 0.000000] lmb_reserve_area: [00d3600000, 00d7600000] swiotlb buffer
[ 0.000000] lmb_reserve_area: [00d7e7ef40, 00d7e9ef40] swiotlb list
[ 0.000000] lmb_reserve_area: [00d7e3ef40, 00d7e7ef40] swiotlb orig_ad
[ 0.000000] lmb_reserve_area: [000008a000, 0000092000] swiotlb overflo

After the patch we get:
[ 0.000000] lmb_reserve_area: [00d3600000, 00d7600000] swiotlb buffer
[ 0.000000] lmb_reserve_area: [00d7e7e000, 00d7e9e000] swiotlb list
[ 0.000000] lmb_reserve_area: [00d7e3e000, 00d7e7e000] swiotlb orig_ad
[ 0.000000] lmb_reserve_area: [000008a000, 0000092000] swiotlb overflo
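
The loss comes from free_bootmem_late() rounding the range inward (see its
definition earlier in this series): an unaligned buffer keeps its partial
head and tail pages reserved forever. Sketch with one of the addresses
above:

	cursor = PFN_UP(addr);		/* start rounded up */
	end = PFN_DOWN(addr + size);	/* end rounded down */

	/*
	 * e.g. addr = 0xd7e7ef40: PFN_UP skips [0xd7e7ef40, 0xd7e7f000),
	 * and the matching tail fragment is skipped at the end, so each
	 * unaligned buffer leaks up to two pages.
	 */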

Signed-off-by: Yinghai Lu <[email protected]>
Cc: FUJITA Tomonori <[email protected]>
Cc: Becky Bruce <[email protected]>
---
lib/swiotlb.c | 16 ++++++++--------
1 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5fddf72..1bd4258 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -159,7 +159,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(bytes);
+ io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + bytes;
@@ -169,16 +169,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+ io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+ io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));

/*
* Get the overflow emergency buffer
*/
- io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+ io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
if (verbose)
@@ -304,13 +304,13 @@ void __init swiotlb_free(void)
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
free_bootmem_late(__pa(io_tlb_overflow_buffer),
- io_tlb_overflow);
+ PAGE_ALIGN(io_tlb_overflow));
free_bootmem_late(__pa(io_tlb_orig_addr),
- io_tlb_nslabs * sizeof(phys_addr_t));
+ PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
free_bootmem_late(__pa(io_tlb_list),
- io_tlb_nslabs * sizeof(int));
+ PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
free_bootmem_late(__pa(io_tlb_start),
- io_tlb_nslabs << IO_TLB_SHIFT);
+ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
}

--
1.6.4.2

2010-04-09 06:14:32

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 19/39] lmb: Use lmb_debug to control debug message print out

Also let lmb_reserve_area()/lmb_free_area() print out the range, and the
region name for reservations, when lmb=debug is specified.
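
Usage sketch: booting with lmb=debug on the kernel command line sets
lmb_debug, and reservations then show up in the log in the format the
diff below adds, e.g. (values from the swiotlb example elsewhere in this
series):

	[    0.000000]  lmb_reserve_area: [00d3600000, 00d7600000]   swiotlb buffer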

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/lmb.c | 29 +++++++++++++++++++++--------
1 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/mm/lmb.c b/mm/lmb.c
index 34fc030..dfbf660 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -591,8 +591,9 @@ static void __init __check_and_double_region_array(struct lmb_region *type,
memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
type->region = new;
type->nr_regions = rgnsz * 2;
- printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
- type->nr_regions, mem, mem + size - 1);
+ if (lmb_debug)
+ pr_info("lmb.reserved.region array is doubled to %ld at [%010llx - %010llx]\n",
+ type->nr_regions, mem, mem + size - 1);

/* Free old one ?*/
if (old != static_region)
@@ -619,6 +620,8 @@ void __init lmb_reserve_area(u64 start, u64 end, char *name)
if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end))
return;

+ if (lmb_debug)
+ pr_info(" lmb_reserve_area: [%010llx, %010llx] %16s\n", start, end, name);
__lmb_reserve_area(start, end, name);
}

@@ -635,6 +638,8 @@ void __init lmb_reserve_area_overlap_ok(u64 start, u64 end, char *name)
if (WARN_ONCE(start > end, "lmb_reserve_area_overlap_ok: wrong range [%#llx, %#llx]\n", start, end))
return;

+ if (lmb_debug)
+ pr_info(" lmb_reserve_area_overlap_ok: [%010llx, %010llx] %16s\n", start, end, name);
/* Free that region at first */
lmb_free(start, end - start);
__lmb_reserve_area(start, end, name);
@@ -648,6 +653,8 @@ void __init lmb_free_area(u64 start, u64 end)
if (WARN_ONCE(start > end, "lmb_free_area: wrong range [%#llx, %#llx]\n", start, end))
return;

+ if (lmb_debug)
+ pr_info(" lmb_free_area: [%010llx, %010llx]\n", start, end);
/* keep punching hole, could run out of slots too */
lmb_free(start, end - start);
__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
@@ -688,11 +695,13 @@ static void __init subtract_lmb_reserved(struct range *range, int az)

count = lmb.reserved.cnt;

- pr_info("Subtract (%d early reservations)\n", count);
+ if (lmb_debug)
+ pr_info("Subtract (%d early reservations)\n", count);

for (i = 0; i < count; i++) {
struct lmb_property *r = &lmb.reserved.region[i];
- pr_info(" #%d [%010llx - %010llx]\n", i, r->base, r->base + r->size);
+ if (lmb_debug)
+ pr_info(" #%03d [%010llx - %010llx]\n", i, r->base, r->base + r->size);
final_start = PFN_DOWN(r->base);
final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
@@ -748,17 +757,21 @@ void __init lmb_to_bootmem(u64 start, u64 end)
lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);

count = lmb.reserved.cnt;
- pr_info("(%d early reservations) ==> bootmem [%010llx - %010llx]\n", count, start, end);
+ if (lmb_debug)
+ pr_info("(%d early reservations) ==> bootmem [%010llx - %010llx]\n", count, start, end);
for (i = 0; i < count; i++) {
struct lmb_property *r = &lmb.reserved.region[i];
- pr_info(" #%d [%010llx - %010llx] ", i, r->base, r->base + r->size);
+ if (lmb_debug)
+ pr_info(" #%03d [%010llx - %010llx] ", i, r->base, r->base + r->size);
final_start = max(start, r->base);
final_end = min(end, r->base + r->size);
if (final_start >= final_end) {
- pr_cont("\n");
+ if (lmb_debug)
+ pr_cont("\n");
continue;
}
- pr_cont(" ==> [%010llx - %010llx]\n", final_start, final_end);
+ if (lmb_debug)
+ pr_cont(" ==> [%010llx - %010llx]\n", final_start, final_end);
reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT);
}
/* Clear them to avoid misusing ? */
--
1.6.4.2

2010-04-09 06:07:10

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 03/39] x86: Align e820 ram range to page

Work around BIOS memory maps whose RAM entries are not page aligned.
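
The effect in sketch form, for a made-up unaligned BIOS entry:

	/* BIOS reports RAM at [0x9d800, 0x9f400) -- neither end aligned */
	start_aligned = round_up(0x9d800, PAGE_SIZE);	/* 0x9e000 */
	end_aligned = round_down(0x9f400, PAGE_SIZE);	/* 0x9f000 */

	/*
	 * [0x9d800, 0x9e000) and [0x9f000, 0x9f400) are flipped to
	 * E820_RESERVED; only [0x9e000, 0x9f000) stays E820_RAM.
	 */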

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/kernel/e820.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 44 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 14d0a1a..73dc6a7 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1044,6 +1044,47 @@ static int __init parse_memmap_opt(char *p)
}
early_param("memmap", parse_memmap_opt);

+static void __init e820_align_ram_page(void)
+{
+ int i;
+ bool changed = false;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *entry = &e820.map[i];
+ u64 start, end;
+ u64 start_aligned, end_aligned;
+
+ if (entry->type != E820_RAM)
+ continue;
+
+ start = entry->addr;
+ end = start + entry->size;
+
+ start_aligned = round_up(start, PAGE_SIZE);
+ end_aligned = round_down(end, PAGE_SIZE);
+
+ if (end_aligned <= start_aligned) {
+ e820_update_range(start, end - start, E820_RAM, E820_RESERVED);
+ changed = true;
+ continue;
+ }
+ if (start < start_aligned) {
+ e820_update_range(start, start_aligned - start, E820_RAM, E820_RESERVED);
+ changed = true;
+ }
+ if (end_aligned < end) {
+ e820_update_range(end_aligned, end - end_aligned, E820_RAM, E820_RESERVED);
+ changed = true;
+ }
+ }
+
+ if (changed) {
+ sanitize_e820_map();
+ printk(KERN_INFO "aligned physical RAM map:\n");
+ e820_print_map("aligned");
+ }
+}
+
void __init finish_e820_parsing(void)
{
if (userdef) {
@@ -1056,6 +1097,9 @@ void __init finish_e820_parsing(void)
printk(KERN_INFO "user-defined physical RAM map:\n");
e820_print_map("user");
}
+
+ /* In case we have RAM entries that are not PAGE aligned */
+ e820_align_ram_page();
}

static inline const char *e820_type_to_string(int e820_type)
--
1.6.4.2

2010-04-09 06:14:48

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 11/39] lmb: Add get_free_all_memory_range()

get_free_all_memory_range() is for CONFIG_NO_BOOTMEM=y, and will be called
by free_all_memory_core_early().

It takes early_node_map[] (aka the active ranges), subtracts lmb.reserved
to get all free ranges, and those ranges are then released to the page
allocator.

-v3: use __lmb_find_base() to get a free buffer for the range array.
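
The computation is simple range set arithmetic, using the existing range
helpers (all visible in the diff below):

	/* per node (or MAX_NUMNODES for all nodes): */
	nr_range = add_from_early_node_map(range, count, 0, nodeid);
	subtract_lmb_reserved(range, count);	/* punch out lmb.reserved */
	nr_range = clean_sort_range(range, count);

Note that each reserved region is rounded outward (PFN_DOWN of the base,
PFN_UP of the end) before subtraction, so partially reserved pages are
never handed to the page allocator.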

Signed-off-by: Yinghai Lu <[email protected]>
Cc: Jan Beulich <[email protected]>
---
include/linux/lmb.h | 2 +
mm/lmb.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 87 insertions(+), 1 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 1e236d1..2ee2cc1 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -92,6 +92,8 @@ u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);

void lmb_to_bootmem(u64 start, u64 end);
+struct range;
+int get_free_all_memory_range(struct range **rangep, int nodeid);

#include <asm/lmb.h>

diff --git a/mm/lmb.c b/mm/lmb.c
index ee3d945..f11df14 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -630,7 +630,91 @@ void __init lmb_free_area(u64 start, u64 end)
__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
}

-#ifndef CONFIG_NO_BOOTMEM
+static __init struct range *find_range_array(int count)
+{
+ u64 end, size, mem;
+ struct range *range;
+
+ size = sizeof(struct range) * count;
+ end = lmb.default_alloc_limit;
+
+ mem = __lmb_find_base(size, sizeof(struct range), end);
+ if (mem == -1ULL)
+ panic("can not find more space for range array");
+
+ /*
+ * This range is temporary, so don't reserve it; it will not be
+ * overlapped because we will not allocate a new buffer before
+ * we discard this one.
+ */
+ range = __va(mem);
+ memset(range, 0, size);
+
+ return range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+static void __init subtract_lmb_reserved(struct range *range, int az)
+{
+ int i, count;
+ u64 final_start, final_end;
+
+ /* Take out the region array itself first */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+
+ count = lmb.reserved.cnt;
+
+ pr_info("Subtract (%d early reservations)\n", count);
+
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ pr_info(" #%d [%010llx - %010llx]\n", i, r->base, r->base + r->size);
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
+ if (final_start >= final_end)
+ continue;
+ subtract_range(range, az, final_start, final_end);
+ }
+ /* Put region array back ? */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+}
+
+int __init get_free_all_memory_range(struct range **rangep, int nodeid)
+{
+ int count;
+ struct range *range;
+ int nr_range;
+
+ count = lmb.reserved.cnt * 2;
+
+ range = find_range_array(count);
+ nr_range = 0;
+
+ /*
+ * Use early_node_map[] and lmb.reserved.region to get range array
+ * at first
+ */
+ nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
+#ifdef CONFIG_X86_32
+ subtract_range(range, count, max_low_pfn, -1ULL);
+#endif
+ subtract_lmb_reserved(range, count);
+ nr_range = clean_sort_range(range, count);
+
+ /* Need to clear it ? */
+ if (nodeid == MAX_NUMNODES) {
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+ lmb.reserved.region = NULL;
+ lmb.reserved.nr_regions = 0;
+ lmb.reserved.cnt = 0;
+ }
+
+ *rangep = range;
+ return nr_range;
+}
+#else
void __init lmb_to_bootmem(u64 start, u64 end)
{
int i, count;
--
1.6.4.2

2010-04-09 06:15:05

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 02/39] x86: Add sanitize_e820_map()

So callers don't need to pass e820.map and its size to it.

Also change e820_saved to __initdata to get some bytes of memory back.
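
The calling-convention change, in short (taken from the hunks below):

	/* before: every caller passed the global map explicitly */
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/* after: the old body becomes __sanitize_e820_map(), and the
	 * common case is wrapped */
	sanitize_e820_map();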

Signed-off-by: Yinghai Lu <[email protected]>
---
arch/x86/include/asm/e820.h | 5 ++---
arch/x86/kernel/e820.c | 26 ++++++++++++++++++--------
arch/x86/kernel/efi.c | 2 +-
arch/x86/kernel/setup.c | 10 +++++-----
arch/x86/xen/setup.c | 4 +---
5 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec8a52d..0457c49 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -75,15 +75,14 @@ struct e820map {
#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
-extern struct e820map e820_saved;

extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
-extern int
-sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
+int sanitize_e820_map(void);
+void save_e820_map(void);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 7bca3c6..14d0a1a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -35,7 +35,7 @@
* next kernel with full memory.
*/
struct e820map e820;
-struct e820map e820_saved;
+static struct e820map __initdata e820_saved;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
@@ -224,7 +224,7 @@ void __init e820_print_map(char *who)
* ______________________4_
*/

-int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
+static int __init __sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
u32 *pnr_map)
{
struct change_member {
@@ -383,6 +383,11 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
return 0;
}

+int __init sanitize_e820_map(void)
+{
+ return __sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
while (nr_map) {
@@ -571,7 +576,7 @@ void __init update_e820(void)
u32 nr_map;

nr_map = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
return;
e820.nr_map = nr_map;
printk(KERN_INFO "modified physical RAM map:\n");
@@ -582,7 +587,7 @@ static void __init update_e820_saved(void)
u32 nr_map;

nr_map = e820_saved.nr_map;
- if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
+ if (__sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
return;
e820_saved.nr_map = nr_map;
}
@@ -677,7 +682,7 @@ void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
sdata = early_ioremap(pa_data, map_len);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
if (map_len > PAGE_SIZE)
early_iounmap(sdata, map_len);
printk(KERN_INFO "extended physical RAM map:\n");
@@ -1044,7 +1049,7 @@ void __init finish_e820_parsing(void)
if (userdef) {
u32 nr = e820.nr_map;

- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
early_panic("Invalid user supplied memory map");
e820.nr_map = nr;

@@ -1174,7 +1179,7 @@ char *__init default_machine_specific_memory_setup(void)
* the next section from 1mb->appropriate_mem_k
*/
new_nr = boot_params.e820_entries;
- sanitize_e820_map(boot_params.e820_map,
+ __sanitize_e820_map(boot_params.e820_map,
ARRAY_SIZE(boot_params.e820_map),
&new_nr);
boot_params.e820_entries = new_nr;
@@ -1201,12 +1206,17 @@ char *__init default_machine_specific_memory_setup(void)
return who;
}

+void __init save_e820_map(void)
+{
+ memcpy(&e820_saved, &e820, sizeof(struct e820map));
+}
+
void __init setup_memory_map(void)
{
char *who;

who = x86_init.resources.memory_setup();
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ save_e820_map();
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index c2fa9b8..299f03f 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -272,7 +272,7 @@ static void __init do_add_efi_memmap(void)
}
e820_add_region(start, size, e820_type);
}
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}

void __init efi_reserve_early(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4851ef..894a48a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -462,8 +462,8 @@ static void __init e820_reserve_setup_data(void)
if (!found)
return;

- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ sanitize_e820_map();
+ save_e820_map();
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -625,7 +625,7 @@ static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
d->ident);

e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();

return 0;
}
@@ -694,7 +694,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}

/*
@@ -865,7 +865,7 @@ void __init setup_arch(char **cmdline_p)
if (ppro_with_ram_bug()) {
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
printk(KERN_INFO "fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ad0047f..3f2c411 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -43,8 +43,6 @@ char * __init xen_memory_setup(void)

max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);

- e820.nr_map = 0;
-
e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);

/*
@@ -65,7 +63,7 @@ char * __init xen_memory_setup(void)
__pa(xen_start_info->pt_base),
"XEN START INFO");

- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();

return "Xen";
}
--
1.6.4.2

2010-04-09 06:15:13

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 08/39] lmb: Add lmb_reserve_area/lmb_free_area

They check whether the region array is still big enough.

__check_and_double_region_array() will try to double the region array if the
array does not have enough spare slots. The old array will be copied to the
new one.

Arch code should set lmb.default_alloc_limit accordingly, so the new array is
at an accessible address.
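
For example, arch setup code would do something like this (a sketch;
get_max_mapped() is the x86 helper mentioned in the changelog notes below):

	lmb.default_alloc_limit = get_max_mapped();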

-v2: change get_max_mapped() to lmb.default_alloc_limit according to Michael
Ellerman and Ben
change to lmb_reserve_area and lmb_free_area according to Michael Ellerman
-v3: call check_and_double after reserve/free, so we can avoid using
find_lmb_area. Suggested by Michael Ellerman

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 4 +++
mm/lmb.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 70 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 4cf2f3b..598662f 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -33,6 +33,7 @@ struct lmb_region {
struct lmb {
unsigned long debug;
u64 rmo_size;
+ u64 default_alloc_limit;
struct lmb_region memory;
struct lmb_region reserved;
};
@@ -83,6 +84,9 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
lmb_size_pages(type, region_nr);
}

+void lmb_reserve_area(u64 start, u64 end, char *name);
+void lmb_free_area(u64 start, u64 end);
+void lmb_add_memory(u64 start, u64 end);
u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
diff --git a/mm/lmb.c b/mm/lmb.c
index 7010212..a514d41 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -564,6 +564,72 @@ int lmb_find(struct lmb_property *res)
return -1;
}

+static void __init __check_and_double_region_array(struct lmb_region *type,
+ struct lmb_property *static_region)
+{
+ u64 size, mem;
+ struct lmb_property *new, *old;
+ unsigned long rgnsz = type->nr_regions;
+
+ /* Do we have enough slots left ? */
+ if ((rgnsz - type->cnt) > 2)
+ return;
+
+ old = type->region;
+ /* Double the array size */
+ size = sizeof(struct lmb_property) * rgnsz * 2;
+
+ mem = __lmb_alloc_base(size, sizeof(struct lmb_property), lmb.default_alloc_limit);
+ if (mem == 0)
+ panic("can not find more space for lmb.reserved.region array");
+
+ new = __va(mem);
+ /* Copy old to new */
+ memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+ memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
+
+ memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+ type->region = new;
+ type->nr_regions = rgnsz * 2;
+ printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+ type->nr_regions, mem, mem + size - 1);
+
+ /* Free the old one? */
+ if (old != static_region)
+ lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
+}
+
+void __init lmb_add_memory(u64 start, u64 end)
+{
+ lmb_add_region(&lmb.memory, start, end - start);
+ __check_and_double_region_array(&lmb.memory, &lmb_memory_region[0]);
+}
+
+void __init lmb_reserve_area(u64 start, u64 end, char *name)
+{
+ if (start == end)
+ return;
+
+ if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end))
+ return;
+
+ lmb_add_region(&lmb.reserved, start, end - start);
+ __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
+}
+
+void __init lmb_free_area(u64 start, u64 end)
+{
+ if (start == end)
+ return;
+
+ if (WARN_ONCE(start > end, "lmb_free_area: wrong range [%#llx, %#llx]\n", start, end))
+ return;
+
+ /* keep punching hole, could run out of slots too */
+ lmb_free(start, end - start);
+ __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
+}
+
u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align)
{
--
1.6.4.2

2010-04-09 06:15:38

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 21/39] lmb: Add __free/__reserve/__clear_lmb_reserved_region_array()

Separate out these three functions so they can be shared by related callers.

Signed-off-by: Yinghai Lu <[email protected]>
---
mm/lmb.c | 63 +++++++++++++++++++++++++++++++++++--------------------------
1 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/mm/lmb.c b/mm/lmb.c
index ab3d85f..5483d69 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -684,16 +684,41 @@ static __init struct range *find_range_array(int count)
}

#ifdef CONFIG_NO_BOOTMEM
-static void __init subtract_lmb_reserved(struct range *range, int az)
+static void __init __free_lmb_reserved_region_array(void)
{
- int i, count;
- u64 final_start, final_end;
-
#ifdef ARCH_DISCARD_LMB
/* Take out region array itself at first */
if (lmb.reserved.region != lmb_reserved_region)
lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
#endif
+}
+static void __init __reserve_lmb_reserved_region_array(void)
+{
+#ifdef ARCH_DISCARD_LMB
+ /* Put region array back ? */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+#endif
+}
+
+static void __init __clear_lmb_reserved_region_array(void)
+{
+#ifdef ARCH_DISCARD_LMB
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+ lmb.reserved.region = NULL;
+ lmb.reserved.nr_regions = 0;
+ lmb.reserved.cnt = 0;
+#endif
+}
+
+static void __init subtract_lmb_reserved(struct range *range, int az)
+{
+ int i, count;
+ u64 final_start, final_end;
+
+ /* Take out region array itself at first */
+ __free_lmb_reserved_region_array();
+
count = lmb.reserved.cnt;

if (lmb_debug)
@@ -709,11 +734,9 @@ static void __init subtract_lmb_reserved(struct range *range, int az)
continue;
subtract_range(range, az, final_start, final_end);
}
-#ifdef ARCH_DISCARD_LMB
+
/* Put region array back ? */
- if (lmb.reserved.region != lmb_reserved_region)
- lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
-#endif
+ __reserve_lmb_reserved_region_array();
}

int __init get_free_all_memory_range(struct range **rangep, int nodeid)
@@ -738,15 +761,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
subtract_lmb_reserved(range, count);
nr_range = clean_sort_range(range, count);

-#ifdef ARCH_DISCARD_LMB
/* Need to clear it ? */
- if (nodeid == MAX_NUMNODES) {
- memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
- lmb.reserved.region = NULL;
- lmb.reserved.nr_regions = 0;
- lmb.reserved.cnt = 0;
- }
-#endif
+ if (nodeid == MAX_NUMNODES)
+ __clear_lmb_reserved_region_array();

*rangep = range;
return nr_range;
@@ -757,11 +774,8 @@ void __init lmb_to_bootmem(u64 start, u64 end)
int i, count;
u64 final_start, final_end;

-#ifdef ARCH_DISCARD_LMB
/* Take out region array itself */
- if (lmb.reserved.region != lmb_reserved_region)
- lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
-#endif
+ __free_lmb_reserved_region_array();

count = lmb.reserved.cnt;
if (lmb_debug)
@@ -782,13 +796,8 @@ void __init lmb_to_bootmem(u64 start, u64 end)
reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT);
}

-#ifdef ARCH_DISCARD_LMB
- /* Clear them to avoid misusing ? */
- memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
- lmb.reserved.region = NULL;
- lmb.reserved.nr_regions = 0;
- lmb.reserved.cnt = 0;
-#endif
+ /* Assume lmb_to_bootmem is only called once */
+ __clear_lmb_reserved_region_array();
}
#endif

--
1.6.4.2

2010-04-09 06:15:35

by Yinghai Lu

[permalink] [raw]
Subject: [PATCH 13/39] lmb: Prepare to include linux/lmb.h in core file

Add a CONFIG_HAVE_LMB guard to linux/lmb.h, in preparation for including it
from mm/page_alloc.c, mm/bootmem.c, etc.

Signed-off-by: Yinghai Lu <[email protected]>
---
include/linux/lmb.h | 3 +++
1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index cf8f7ca..5dbc4ef 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -2,6 +2,7 @@
#define _LINUX_LMB_H
#ifdef __KERNEL__

+#ifdef CONFIG_HAVE_LMB
/*
* Logical memory blocks.
*
@@ -101,6 +102,8 @@ u64 lmb_hole_size(u64 start, u64 end);

#include <asm/lmb.h>

+#endif /* CONFIG_HAVE_LMB */
+
#endif /* __KERNEL__ */

#endif /* _LINUX_LMB_H */
--
1.6.4.2

2010-04-13 03:43:34

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH -v12 00/39] use lmb with x86

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> the new lmb could be used to early_res in x86.
>
> Suggested by: David, Ben, and Thomas

I still find most of your changeset comments to be very very poor, if
comprehensible at all. You really MUST make an effort there.

Cheers,
Ben.

> -v6: change sequence as requested by Thomas
> -v7: seperate them to more patches
> -v8: add boundary checking to make sure not free partial page.
> -v9: use lmb_debug to control print out of reserve_lmb.
> add e820 clean up, and e820 become __initdata
> -v10:use lmb.rmo_size and ARCH_DISCARD_LMB according to Michael
> change name to lmb_find_area/reserve_lmb_area/free_lmb_area,
> according to Michael
> update find_lmb_area to use __lmb_alloc_base according to ben
> -v11:move find_lmb_area_size back to x86.
> x86 has own find_lmb_area, and could be disabled by ARCH_LMB_FIND_AREA
> because _lmb_find_base has different behavoir from x86's old one.
> one from high to high and one from low to high
> need more test
> tested for x86 32bit/64bit, numa/nonuma, nobootmem/bootmem.
> -v12:refresh the series with current tip
> seperate nobootmem.c, so could remove some #ifdef
> still keep CONFIG_NO_BOOTMEM, in x86 .c, and could use the as tags
> so other lmb could refer them to use NO_BOOTMEM.
>
> still keep find_lmb_area, may replace those find_lmb_area will lmb_alloc, if
> everything go fine
>
> Please put them into tip and -next to have more test coverage.
>
> Thanks
>
> Yinghai

2010-04-13 03:53:58

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 04/39] lmb: Move lmb.c to mm/

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> lmb.c is memory related, so move it to mm/. It is suggested by Ingo
>
> Signed-off-by: Yinghai Lu <[email protected]>

Acked-by: Benjamin Herrenschmidt <[email protected]>
---

> ---
> lib/Makefile | 2 --
> mm/Makefile | 2 ++
> {lib => mm}/lmb.c | 0
> 3 files changed, 2 insertions(+), 2 deletions(-)
> rename {lib => mm}/lmb.c (100%)
>
> diff --git a/lib/Makefile b/lib/Makefile
> index 2e152ae..a463a4d 100644
> --- a/lib/Makefile
> +++ b/lib/Makefile
> @@ -85,8 +85,6 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
>
> lib-$(CONFIG_GENERIC_BUG) += bug.o
>
> -obj-$(CONFIG_HAVE_LMB) += lmb.o
> -
> obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
>
> obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
> diff --git a/mm/Makefile b/mm/Makefile
> index 6c2a73a..52492f9 100644
> --- a/mm/Makefile
> +++ b/mm/Makefile
> @@ -15,6 +15,8 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
> $(mmu-y)
> obj-y += init-mm.o
>
> +obj-$(CONFIG_HAVE_LMB) += lmb.o
> +
> obj-$(CONFIG_BOUNCE) += bounce.o
> obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
> obj-$(CONFIG_HAS_DMA) += dmapool.o
> diff --git a/lib/lmb.c b/mm/lmb.c
> similarity index 100%
> rename from lib/lmb.c
> rename to mm/lmb.c

2010-04-13 03:57:34

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 05/39] lmb: Seperate region array from lmb_region struct

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> lmb_init() will connect them back.
> Add nr_regions in struct lmb_region to track region array size.
>
> So later We can install dynamically allocated region array to that pointer
>
> Signed-off-by: Yinghai Lu <[email protected]>

Acked-by: Benjamin Herrenschmidt <[email protected]>

> ---
> include/linux/lmb.h | 3 ++-
> mm/lmb.c | 9 ++++++++-
> 2 files changed, 10 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/lmb.h b/include/linux/lmb.h
> index f3d1433..e14ea8d 100644
> --- a/include/linux/lmb.h
> +++ b/include/linux/lmb.h
> @@ -26,7 +26,8 @@ struct lmb_property {
> struct lmb_region {
> unsigned long cnt;
> u64 size;
> - struct lmb_property region[MAX_LMB_REGIONS+1];
> + struct lmb_property *region;
> + unsigned long nr_regions;
> };
>
> struct lmb {
> diff --git a/mm/lmb.c b/mm/lmb.c
> index b1fc526..65b62dc 100644
> --- a/mm/lmb.c
> +++ b/mm/lmb.c
> @@ -18,6 +18,8 @@
> #define LMB_ALLOC_ANYWHERE 0
>
> struct lmb lmb;
> +static struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
> +static struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
>
> static int lmb_debug;
>
> @@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,
>
> void __init lmb_init(void)
> {
> + lmb.memory.region = lmb_memory_region;
> + lmb.reserved.region = lmb_reserved_region;
> + lmb.memory.nr_regions = ARRAY_SIZE(lmb_memory_region);
> + lmb.reserved.nr_regions = ARRAY_SIZE(lmb_reserved_region);
> +
> /* Create a dummy zero size LMB which will get coalesced away later.
> * This simplifies the lmb_add() code below...
> */
> @@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
>
> if (coalesced)
> return coalesced;
> - if (rgn->cnt >= MAX_LMB_REGIONS)
> + if (rgn->cnt > rgn->nr_regions)
> return -1;
>
> /* Couldn't coalesce the LMB, so add it to the sorted table. */

2010-04-13 03:59:32

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 06/39] lmb: Seperate __lmb_find_base() from __lmb_alloc_base()

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:

> + return -1ULL;
> +}

Defining some kind of LMB_ERROR would be nicer.

Cheers,
Ben.

> +u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
> +{
> + u64 base;
> +
> + base = __lmb_find_base(size, align, max_addr);
> +
> + if (base == -1ULL)
> + return 0;
> +
> + if (lmb_add_region(&lmb.reserved, base, size) < 0)
> + return 0;
> +
> + return base;
> }
>
> /* You must call lmb_analyze() before this. */

2010-04-13 04:07:14

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> It will try find area according with size/align in specified range (start, end).
>
> lmb_find_area() will honor goal/limit.
>
> also make it more easy for x86 to use lmb.
> x86 early_res is using find/reserve pattern instead of alloc.
>
> When we need temporaray buff for range array etc for range work, if We are using
> lmb_alloc(), We will need to add some post fix code for buffer that is used
> by range array, because it is in the lmb.reserved already. and have to call
> extra lmb_free().
>
> -v2: Change name to lmb_find_area() according to Michael Ellerman
> -v3: Add generic weak version __lmb_find_area()

Haven't you noticed there's already way too many functions walking the
LMBs ? :-)

I think the ones doing nid alloc could/should be also rewritten to use
one single low level __lmb_find_* no ?

Cheers,
Ben.

> Signed-off-by: Yinghai Lu <[email protected]>
> ---
> include/linux/lmb.h | 4 ++++
> mm/lmb.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 53 insertions(+), 0 deletions(-)
>
> diff --git a/include/linux/lmb.h b/include/linux/lmb.h
> index e14ea8d..4cf2f3b 100644
> --- a/include/linux/lmb.h
> +++ b/include/linux/lmb.h
> @@ -83,6 +83,10 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
> lmb_size_pages(type, region_nr);
> }
>
> +u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
> + u64 size, u64 align);
> +u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
> +
> #include <asm/lmb.h>
>
> #endif /* __KERNEL__ */
> diff --git a/mm/lmb.c b/mm/lmb.c
> index 392d805..7010212 100644
> --- a/mm/lmb.c
> +++ b/mm/lmb.c
> @@ -11,9 +11,13 @@
> */
>
> #include <linux/kernel.h>
> +#include <linux/types.h>
> #include <linux/init.h>
> #include <linux/bitops.h>
> #include <linux/lmb.h>
> +#include <linux/bootmem.h>
> +#include <linux/mm.h>
> +#include <linux/range.h>
>
> #define LMB_ALLOC_ANYWHERE 0
>
> @@ -559,3 +563,48 @@ int lmb_find(struct lmb_property *res)
> }
> return -1;
> }
> +
> +u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
> + u64 size, u64 align)
> +{
> + u64 final_start, final_end;
> + u64 mem;
> +
> + final_start = max(ei_start, start);
> + final_end = min(ei_last, end);
> +
> + if (final_start >= final_end)
> + return -1ULL;
> +
> + mem = __lmb_find_base(size, align, final_end);
> +
> + if (mem == -1ULL)
> + return -1ULL;
> +
> + lmb_free(mem, size);
> + if (mem >= final_start)
> + return mem;
> +
> + return -1ULL;
> +}
> +
> +/*
> + * Find a free area with specified alignment in a specific range.
> + */
> +u64 __init __weak lmb_find_area(u64 start, u64 end, u64 size, u64 align)
> +{
> + int i;
> +
> + for (i = lmb.memory.cnt - 1; i >= 0; i--) {
> + u64 ei_start = lmb.memory.region[i].base;
> + u64 ei_last = ei_start + lmb.memory.region[i].size;
> + u64 addr;
> +
> + addr = __lmb_find_area(ei_start, ei_last, start, end,
> + size, align);
> +
> + if (addr != -1ULL)
> + return addr;
> + }
> + return -1ULL;
> +}

2010-04-13 04:17:51

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 08/39] lmb: Add lmb_reserve_area/lmb_free_area

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> They will check if the region array is big enough.
>
> __check_and_double_region_array will try to double the region array if that
> array spare slots is not big enough. Old array will be copied to new array.
>
> Arch code should set lmb.default_alloc_limit accordingly, so the new array is in
> accessiable address.
>
> -v2: change get_max_mapped() to lmb.default_alloc_limit according to Michael
> Ellerman and Ben
> change to lmb_reserve_area and lmb_free_area according to Michael Ellerman
> -v3: call check_and_double after reserve/free, so could avoid to use
> find_lmb_area. Suggested by Michael Ellerman
>
> Signed-off-by: Yinghai Lu <[email protected]>

So a few things here:

default_alloc_limit: This should be a patch of its own I believe, we
should provide a way for callers to also honor the limit, I'm sure
without that we're going to hit funny problems -especially- if we start
replacing bootmem. (Heh, low/high mem anyone ?)

I would think that the basic lmb_alloc() should be modified to use the
current limit, and maybe add an lmb_alloc_anywhere() as an inline
wrapper to lmb_alloc_base(..., LMB_ALLOC_ANYWHERE); In fact, lmb_alloc()
should become an inline wrapper too.
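
Roughly this (a sketch of the suggestion, not code from the series):

	static inline u64 lmb_alloc(u64 size, u64 align)
	{
		return lmb_alloc_base(size, align, lmb.default_alloc_limit);
	}

	static inline u64 lmb_alloc_anywhere(u64 size, u64 align)
	{
		return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
	}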

Also, the way you added the calls to __check_and_double_region_array()
is fishy (what a function name btw !). IE. You added it in 2 or 3
places, missing a whole bunch, which will guarantee some kind of
unexpected behaviour especially when using the _nid variants.

Now, maybe the idea of moving things to -after- the call wasn't that
good. I still don't quite get why we can't do things lazily, especially
if we remove some of the code duplication in there.

In any case, its about time to clarify what is API and what is internal
in LMB and clean up the entry path.

Cheers,
Ben.

> ---
> include/linux/lmb.h | 4 +++
> mm/lmb.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 70 insertions(+), 0 deletions(-)
>
> diff --git a/include/linux/lmb.h b/include/linux/lmb.h
> index 4cf2f3b..598662f 100644
> --- a/include/linux/lmb.h
> +++ b/include/linux/lmb.h
> @@ -33,6 +33,7 @@ struct lmb_region {
> struct lmb {
> unsigned long debug;
> u64 rmo_size;
> + u64 default_alloc_limit;
> struct lmb_region memory;
> struct lmb_region reserved;
> };
> @@ -83,6 +84,9 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
> lmb_size_pages(type, region_nr);
> }
>
> +void lmb_reserve_area(u64 start, u64 end, char *name);
> +void lmb_free_area(u64 start, u64 end);
> +void lmb_add_memory(u64 start, u64 end);
> u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
> u64 size, u64 align);
> u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
> diff --git a/mm/lmb.c b/mm/lmb.c
> index 7010212..a514d41 100644
> --- a/mm/lmb.c
> +++ b/mm/lmb.c
> @@ -564,6 +564,72 @@ int lmb_find(struct lmb_property *res)
> return -1;
> }
>
> +static void __init __check_and_double_region_array(struct lmb_region *type,
> + struct lmb_property *static_region)
> +{
> + u64 size, mem;
> + struct lmb_property *new, *old;
> + unsigned long rgnsz = type->nr_regions;
> +
> + /* Do we have enough slots left ? */
> + if ((rgnsz - type->cnt) > 2)
> + return;
> +
> + old = type->region;
> + /* Double the array size */
> + size = sizeof(struct lmb_property) * rgnsz * 2;
> +
> + mem = __lmb_alloc_base(size, sizeof(struct lmb_property), lmb.default_alloc_limit);
> + if (mem == 0)
> + panic("can not find more space for lmb.reserved.region array");
> +
> + new = __va(mem);
> + /* Copy old to new */
> + memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
> + memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
> +
> + memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
> + type->region = new;
> + type->nr_regions = rgnsz * 2;
> + printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
> + type->nr_regions, mem, mem + size - 1);
> +
> + /* Free old one ?*/
> + if (old != static_region)
> + lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
> +}
> +
> +void __init lmb_add_memory(u64 start, u64 end)
> +{
> + lmb_add_region(&lmb.memory, start, end - start);
> + __check_and_double_region_array(&lmb.memory, &lmb_memory_region[0]);
> +}
> +
> +void __init lmb_reserve_area(u64 start, u64 end, char *name)
> +{
> + if (start == end)
> + return;
> +
> + if (WARN_ONCE(start > end, "lmb_reserve_area: wrong range [%#llx, %#llx]\n", start, end))
> + return;
> +
> + lmb_add_region(&lmb.reserved, start, end - start);
> + __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
> +}
> +
> +void __init lmb_free_area(u64 start, u64 end)
> +{
> + if (start == end)
> + return;
> +
> + if (WARN_ONCE(start > end, "lmb_free_area: wrong range [%#llx, %#llx]\n", start, end))
> + return;
> +
> + /* keep punching hole, could run out of slots too */
> + lmb_free(start, end - start);
> + __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0]);
> +}
> +
> u64 __init __weak __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
> u64 size, u64 align)
> {

2010-04-13 04:20:49

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 15/39] lmb: Add lmb_find_area_node()

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> It can be used to find NODE_DATA for numa.
>
> Need to make sure early_node_map[] is filled before it is called, otherwise
> it will fallback to lmb_find_area(), with node range.

Isn't that more duplication from what's already in there for nid
handling ? (Which btw may not be optimal, simply what I'm saying is that
all that stuff should be factored).

Cheers,
Ben.

> Signed-off-by: Yinghai Lu <[email protected]>
> ---
> include/linux/lmb.h | 1 +
> mm/lmb.c | 15 +++++++++++++++
> 2 files changed, 16 insertions(+), 0 deletions(-)
>
> diff --git a/include/linux/lmb.h b/include/linux/lmb.h
> index 5dbc4ef..4078825 100644
> --- a/include/linux/lmb.h
> +++ b/include/linux/lmb.h
> @@ -91,6 +91,7 @@ void lmb_add_memory(u64 start, u64 end);
> u64 __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
> u64 size, u64 align);
> u64 lmb_find_area(u64 start, u64 end, u64 size, u64 align);
> +u64 lmb_find_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
>
> void lmb_to_bootmem(u64 start, u64 end);
> struct range;
> diff --git a/mm/lmb.c b/mm/lmb.c
> index cf0f1c9..d3a58fb 100644
> --- a/mm/lmb.c
> +++ b/mm/lmb.c
> @@ -791,6 +791,21 @@ u64 __init __weak lmb_find_area(u64 start, u64 end, u64 size, u64 align)
> return -1ULL;
> }
> /*
> + * Need to call this function after lmb_register_active_regions,
> + * so early_node_map[] is filled already.
> + */
> +u64 __init lmb_find_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
> +{
> + u64 addr;
> + addr = find_memory_core_early(nid, size, align, start, end);
> + if (addr != -1ULL)
> + return addr;
> +
> + /* Fallback, should already have start end within node range */
> + return lmb_find_area(start, end, size, align);
> +}
> +
> +/*
> * Finds an active region in the address range from start_pfn to last_pfn and
> * returns its range in ei_startpfn and ei_endpfn for the lmb entry.
> */

2010-04-13 04:22:41

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 18/39] lmb: Add lmb_reserve_area_overlap_ok()

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> Some areas from firmware could be reserved several times from different callers.
>
> If these area are overlapped, We may have overlapped entries in lmb.reserved.
>
> Try to free the area at first, before rerserve them again.
>
> Signed-off-by: Yinghai Lu <[email protected]>
> ---

> +
> +/*
> + * Could be used to avoid having overlap entries in lmb.reserved.region.
> + * Don't need to use it with area that is from lmb_find_area()
> + * Only use it for the area that fw hidden area.
> + */
> +void __init lmb_reserve_area_overlap_ok(u64 start, u64 end, char *name)
> +{
> + if (start == end)
> + return;
> +
> + if (WARN_ONCE(start > end, "lmb_reserve_area_overlap_ok: wrong range [%#llx, %#llx]\n", start, end))
> + return;
> +
> + /* Free that region at first */
> + lmb_free(start, end - start);
> + __lmb_reserve_area(start, end, name);
> }

That is going to only work with one overlap. IE. lmb_free() will not do
very well unless it's a one and only match.

You should modify it to work in a loop.

Besides, lmb_reserve_area_overlap_ok() sucks as a name :-)

Cheers,
Ben.



2010-04-13 04:24:35

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 26/39] nobootmem: use lmb.default_alloc_limit in alloc_bootmem path

On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> Generic version __lmb_find_area() is going from high to low, and for 32bit
> active_region for 32bit does include high pages
>
> need to replace the limit with lmb.default_alloc_limit, aka get_max_mapped()
>
> with this patch, x86 32bit could use generic version of __lmb_find_area()

So you unconditionally add access to some lmb specific data structure to
generic code ? That isn't going to work very well on archs that don't
use lmb.

Also, those things should be local to lmb_* anyways.

Cheers,
Ben.

> Signed-off-by: Yinghai Lu <[email protected]>
> ---
> mm/page_alloc.c | 3 +++
> 1 files changed, 3 insertions(+), 0 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 79bd44b..256aed0 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3445,6 +3445,9 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
>
> u64 addr;
>
> + if (limit > lmb.default_alloc_limit)
> + limit = lmb.default_alloc_limit;
> +
> addr = find_memory_core_early(nid, size, align, goal, limit);
>
> if (addr == -1ULL)

2010-04-13 04:31:36

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On 04/12/2010 09:05 PM, Benjamin Herrenschmidt wrote:
> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>> It will try find area according with size/align in specified range (start, end).
>>
>> lmb_find_area() will honor goal/limit.
>>
>> also make it more easy for x86 to use lmb.
>> x86 early_res is using find/reserve pattern instead of alloc.
>>
>> When we need temporaray buff for range array etc for range work, if We are using
>> lmb_alloc(), We will need to add some post fix code for buffer that is used
>> by range array, because it is in the lmb.reserved already. and have to call
>> extra lmb_free().
>>
>> -v2: Change name to lmb_find_area() according to Michael Ellerman
>> -v3: Add generic weak version __lmb_find_area()
>
> Haven't you noticed there's already way too many functions walking the
> LMBs ? :-)

x86 is using the original lmb_reserve()/lmb_free(), but has its own version of
lmb_find_area(); that one will be dropped after more testing of the generic
version of lmb_find_area().

>
> I think the ones doing nid alloc could/should be also rewritten to use
> one single low level __lmb_find_* no ?

that nid_alloc() only has one user (sparc64).

Maybe it could be replaced by lmb_find_area_node(), but we need to make sure
early_node_map[] is filled first.

Thanks

Yinghai

2010-04-13 04:33:32

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 06/39] lmb: Seperate __lmb_find_base() from __lmb_alloc_base()

On 04/12/2010 08:58 PM, Benjamin Herrenschmidt wrote:
> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>
>> + return -1ULL;
>> +}
>
> Defininf some kind of LMB_ERROR would be nicer.

ok. Also, __lmb_alloc_base() could return 0 when it fails to get one.

Replace that 0 with LMB_ERROR, aka -1ULL?
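
Something like this (sketch only):

	#define LMB_ERROR	(~(u64)0)	/* i.e. -1ULL */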

Yinghai

2010-04-13 04:40:01

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 15/39] lmb: Add lmb_find_area_node()

On 04/12/2010 09:18 PM, Benjamin Herrenschmidt wrote:
> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>> It can be used to find NODE_DATA for numa.
>>
>> Need to make sure early_node_map[] is filled before it is called, otherwise
>> it will fallback to lmb_find_area(), with node range.
>
> Isn't that more duplication from what's already in there for nid
> handling ? (Which btw may not be optimal, simply what I'm saying is that
> all that stuff should be factored).

Maybe we could try to merge them later, or let sparc64 use lmb_find_area_node()?

YH

2010-04-13 04:47:56

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 18/39] lmb: Add lmb_reserve_area_overlap_ok()

On 04/12/2010 09:21 PM, Benjamin Herrenschmidt wrote:
> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>> Some areas from firmware could be reserved several times from different callers.
>>
>> If these area are overlapped, We may have overlapped entries in lmb.reserved.
>>
>> Try to free the area at first, before rerserve them again.
>>
>> Signed-off-by: Yinghai Lu <[email protected]>
>> ---
>
>> +
>> +/*
>> + * Could be used to avoid having overlap entries in lmb.reserved.region.
>> + * Don't need to use it with area that is from lmb_find_area()
>> + * Only use it for the area that fw hidden area.
>> + */
>> +void __init lmb_reserve_area_overlap_ok(u64 start, u64 end, char *name)
>> +{
>> + if (start == end)
>> + return;
>> +
>> + if (WARN_ONCE(start > end, "lmb_reserve_area_overlap_ok: wrong range [%#llx, %#llx]\n", start, end))
>> + return;
>> +
>> + /* Free that region at first */
>> + lmb_free(start, end - start);
>> + __lmb_reserve_area(start, end, name);
>> }
>
> That is going to only work with one overlap. IE. lmb_free() will not do
> very well unless it's a one and only match.
>
> You should modify it to work in a loop.
that is only for some special cases, for areas that are reserved as firmware regions.

and even if there are overlapped areas, the code can still get through
lmb_to_bootmem or creating the range list for slab, because they use
range-array subtraction.
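
I.e., overlapped reserved entries are harmless there because each entry is
subtracted independently (illustrative, assuming range[]/az describe a single
free range [0,10)):

	/* subtracting [2,5) and then the overlapping [4,8) leaves
	 * [0,2) and [8,10) either way */
	subtract_range(range, az, 2, 5);
	subtract_range(range, az, 4, 8);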

>
> Besides, lmb_reserve_area_overlap_ok() sucks as a name :-)

any suggestion for better name?

YH

2010-04-13 04:54:32

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 26/39] nobootmem: use lmb.default_alloc_limit in alloc_bootmem path

On 04/12/2010 09:23 PM, Benjamin Herrenschmidt wrote:
> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>> Generic version __lmb_find_area() is going from high to low, and for 32bit
>> active_region for 32bit does include high pages
>>
>> need to replace the limit with lmb.default_alloc_limit, aka get_max_mapped()
>>
>> with this patch, x86 32bit could use generic version of __lmb_find_area()
>
> So you unconditionally add access to some lmb specific data structure to
> generic code ? That isn't going to work very well on archs that don't
> use lmb.

the whole function

#ifdef CONFIG_HAVE_LMB
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
int i;

/* Need to go over early_node_map to find out good range for node */
for_each_active_range_index_in_nid(i, nid) {
u64 addr;
u64 ei_start, ei_last;

ei_last = early_node_map[i].end_pfn;
ei_last <<= PAGE_SHIFT;
ei_start = early_node_map[i].start_pfn;
ei_start <<= PAGE_SHIFT;
addr = __lmb_find_area(ei_start, ei_last,
goal, limit, size, align);

if (addr == -1ULL)
continue;

return addr;
}

return -1ULL;
}
#endif

need to access early_node_map[], so leave the function here.


>
> Also, those things should be local to lmb_* anyways.

if you insist, could move it to lmb.c and use work_with_active_regions() around it.

YH

2010-04-13 05:08:52

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On Mon, 2010-04-12 at 21:29 -0700, Yinghai wrote:
>
> > Haven't you noticed there's already way too many functions walking
> the
> > LMBs ? :-)
>
> x86 is using original lmb_reserve, lmb_free(), but have own version
> lmb_find_area(), and it will be dropped after
> more testing of generic version of lmb_find_area()

Do -not- add APIs that are meant to be dropped. They never are in
practice. What I'm saying here is that the LMB code (including existing
stuff) could use some factoring in this area.

> >
> > I think the ones doing nid alloc could/should be also rewritten to
> use
> > one single low level __lmb_find_* no ?
>
> that nid_alloc() only has one user (sparc64).
>
> maybe could be replaced by lmd_find_area_node(), but need to make sure
> early_node_map[] is filled at first.

How does it work today ? IE. Whichever mechanism is used that works, I
don't care, but we shouldn't use 2 different ones.

Cheers,
Ben.

2010-04-13 05:12:18

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 18/39] lmb: Add lmb_reserve_area_overlap_ok()

On Mon, 2010-04-12 at 21:44 -0700, Yinghai wrote:

> that is only for some special cases about area that is reserved for fw region.

And ? From what I can see in the code, it will still not work properly
unless all your special cases end up fitting right with the bug you
effectively have..

> and even there is overlapped area, it the code still can go through when lmb_to_bootmem or create range list for slab.
> because they are using range array subtract.

Well, either we just have overlapped areas or we don't ... we shouldn't
have some kind of overlap_ok() thing that does the right thing ..
sometimes, but maybe not, but we don't care anyways, which is what you
seem to be saying.

> > Besides, lmb_reserve_area_overlap_ok() sucks as a name :-)
>
> any suggestion for better name?

Well, what you actually implemented is

lmb_reserve_area_overlap_maybe_ok_not_too_sure_though()

What we should decide is once for all, is it ok to have lmb_reserve() be
called for overlapping ranges. I think the answer is yes and in fact, we
don't take special care in powerpc either there so overlap could happen
in theory.

Now, do we want to avoid actually creating overlapping regions in the
array ? I think we should look into it, but then we should fix
lmb_reserve() to do the right thing here and coalesce all the overlaps.
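
I.e., the desired behaviour would be (illustrative only):

	lmb_reserve(0x1000, 0x2000);	/* reserves [0x1000, 0x3000) */
	lmb_reserve(0x2000, 0x2000);	/* overlaps; covers [0x2000, 0x4000) */
	/* after coalescing: one reserved region [0x1000, 0x4000) */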

Cheers,
Ben.

> YH
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/

2010-04-13 05:15:11

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 26/39] nobootmem: use lmb.default_alloc_limit in alloc_bootmem path

On Mon, 2010-04-12 at 21:50 -0700, Yinghai wrote:
> On 04/12/2010 09:23 PM, Benjamin Herrenschmidt wrote:
> > On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
> >> Generic version __lmb_find_area() is going from high to low, and for 32bit
> >> active_region for 32bit does include high pages
> >>
> >> need to replace the limit with lmb.default_alloc_limit, aka get_max_mapped()
> >>
> >> with this patch, x86 32bit could use generic version of __lmb_find_area()
> >
> > So you unconditionally add access to some lmb specific data structure to
> > generic code ? That isn't going to work very well on archs that don't
> > use lmb.
>
> the whole function
>
> #ifdef CONFIG_HAVE_LMB
> u64 __init find_memory_core_early(int nid, u64 size, u64 align,
> u64 goal, u64 limit)
> {
> int i;
>
> /* Need to go over early_node_map to find out good range for node */
> for_each_active_range_index_in_nid(i, nid) {
> u64 addr;
> u64 ei_start, ei_last;
>
> ei_last = early_node_map[i].end_pfn;
> ei_last <<= PAGE_SHIFT;
> ei_start = early_node_map[i].start_pfn;
> ei_start <<= PAGE_SHIFT;
> addr = __lmb_find_area(ei_start, ei_last,
> goal, limit, size, align);
>
> if (addr == -1ULL)
> continue;
>
> return addr;
> }
>
> return -1ULL;
> }
> #endif
>
> need to access early_node_map[], so leave the function here.

Hrm... find_memory_core_early() is a broken API anyways. Did you add
that ? Again, you insist on pushing all over the place that crackpot
find/reserve API instead of doing a proper allocation, and it's now
leaking with ifdef's & all into the generic code.

This is just all a pile of shit.

I'm tempted to NACK the whole thing and wait for somebody who can code
to come up with something half decent.

Ben.


> >
> > Also, those things should be local to lmb_* anyways.
>
> if you insist, could move it to lmb.c and use work_with_active_regions() around it.
>
> YH
> --
> To unsubscribe from this list: send the line "unsubscribe linux-arch" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

2010-04-13 05:29:06

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On 04/12/2010 10:07 PM, Benjamin Herrenschmidt wrote:
> On Mon, 2010-04-12 at 21:29 -0700, Yinghai wrote:
>>
>>> Haven't you noticed there's already way too many functions walking
>> the
>>> LMBs ? :-)
>>
>> x86 is using original lmb_reserve, lmb_free(), but have own version
>> lmb_find_area(), and it will be dropped after
>> more testing of generic version of lmb_find_area()
>
> Do -not- add no APIs that are meant to be dropped. They never are in
> practice. What I'm saying here is that the LMB code (including existing
> stuff) could use some factoring in this area.

The current generic lmb_find_area() allocates from high to low.
x86 32-bit seems to have a problem with that.
In this patchset it is fixed, but I'm not sure whether I missed something,
so we could remove the x86 lmb_find_area() after more test coverage.

>
>>>
>>> I think the ones doing nid alloc could/should be also rewritten to
>> use
>>> one single low level __lmb_find_* no ?
>>
>> that nid_alloc() only has one user (sparc64).
>>
>> maybe could be replaced by lmd_find_area_node(), but need to make sure
>> early_node_map[] is filled at first.
>
> How does it work today ? IE. Which ever mechanism is used that works I
> don't care but we shouldn't use 2 different ones.

x86 only uses find_area_early() with node-range scope; David pointed out that could have problems with a cross-node mem map.

YH

2010-04-13 05:41:24

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 18/39] lmb: Add lmb_reserve_area_overlap_ok()

On 04/12/2010 10:10 PM, Benjamin Herrenschmidt wrote:
> On Mon, 2010-04-12 at 21:44 -0700, Yinghai wrote:
>
>> that is only for some special cases about area that is reserved for fw region.
>
> And ? From what I can see in the code, it will still not work properly
> unless all your special cases end up fitting right with the bug you
> effectively have..

not sure.

I noticed there were some overlapped entries, and with this patch those overlapped areas disappeared.

>
>> and even there is overlapped area, it the code still can go through when lmb_to_bootmem or create range list for slab.
>> because they are using range array subtract.
>
> Well, either we just have overlapped areas or we don't ... we shouldn't
> have some kind of overlap_ok() thing that does the right thing ..
> sometimes, but maybe not, but we don't care anyways, which is what you
> seem to be saying.
yes.
>
>>> Besides, lmb_reserve_area_overlap_ok() sucks as a name :-)
>>
>> any suggestion for better name?
>
> Well, what you actually implemented is
>
> lmb_reserve_area_overlap_maybe_ok_not_too_sure_though()
>
> What we should decide is once for all, is it ok to have lmb_reserve() be
> called for overlapping ranges. I think the answer is yes and in fact, we
> don't take special care in powerpc either there so overlap could happen
> in theory.
>
> Now, do we want to avoid actually creating overlapping regions in the
> array ? I think we should look into it, but then we should fix
> lmb_reserve() to do the right thing here and coalesce all the overlaps.

Not sure if that is needed. lmb_free()/lmb_reserve() seem to work.

YH

2010-04-13 05:45:39

by Yinghai Lu

[permalink] [raw]
Subject: Re: [PATCH 26/39] nobootmem: use lmb.default_alloc_limit in alloc_bootmem path

On 04/12/2010 10:13 PM, Benjamin Herrenschmidt wrote:
> On Mon, 2010-04-12 at 21:50 -0700, Yinghai wrote:
>> On 04/12/2010 09:23 PM, Benjamin Herrenschmidt wrote:
>>> On Thu, 2010-04-08 at 23:03 -0700, Yinghai Lu wrote:
>>>> Generic version __lmb_find_area() is going from high to low, and for 32bit
>>>> active_region for 32bit does include high pages
>>>>
>>>> need to replace the limit with lmb.default_alloc_limit, aka get_max_mapped()
>>>>
>>>> with this patch, x86 32bit could use generic version of __lmb_find_area()
>>>
>>> So you unconditionally add access to some lmb specific data structure to
>>> generic code ? That isn't going to work very well on archs that don't
>>> use lmb.
>>
>> the whole function
>>
>> #ifdef CONFIG_HAVE_LMB
>> u64 __init find_memory_core_early(int nid, u64 size, u64 align,
>> u64 goal, u64 limit)
>> {
>> int i;
>>
>> /* Need to go over early_node_map to find out good range for node */
>> for_each_active_range_index_in_nid(i, nid) {
>> u64 addr;
>> u64 ei_start, ei_last;
>>
>> ei_last = early_node_map[i].end_pfn;
>> ei_last <<= PAGE_SHIFT;
>> ei_start = early_node_map[i].start_pfn;
>> ei_start <<= PAGE_SHIFT;
>> addr = __lmb_find_area(ei_start, ei_last,
>> goal, limit, size, align);
>>
>> if (addr == -1ULL)
>> continue;
>>
>> return addr;
>> }
>>
>> return -1ULL;
>> }
>> #endif
>>
>> need to access early_node_map[], so leave the function here.
>
> Hrm... find_memory_core_early() is a broken API anyways. Did you add
> that ? Again, you insist on pushing all over the place that crakpot
> find/reserve API instead of doing a proper allocation, and it's now
> leaking with ifdef's & all into the generic code.
>
__alloc_memory_core_early() already included those find_memory_core_early()
lines; they were merged in for CONFIG_NO_BOOTMEM support.

I split it out, so lmb_find_area_node() could reuse those lines.

YH

2010-04-13 05:53:27

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On 04/12/2010 10:26 PM, Yinghai wrote:
>
> current generic lmb_find_area() is allocating from high to low.
> x86 32bit seems have problem with that.

Presumably because it fills up ZONE_DMA.

-hpa
--
H. Peter Anvin, Intel Open Source Technology Center
I work for Intel. I don't speak on their behalf.

2010-04-13 10:16:43

by Benjamin Herrenschmidt

[permalink] [raw]
Subject: Re: [PATCH 07/39] lmb: Add lmb_find_area()

On Mon, 2010-04-12 at 22:46 -0700, H. Peter Anvin wrote:
> On 04/12/2010 10:26 PM, Yinghai wrote:
> >
> > current generic lmb_find_area() is allocating from high to low.
> > x86 32bit seems have problem with that.
>
> Presumably because it fills up ZONE_DMA.

I'm working on some LMB cleanups now, among others trying to take into
account what I think are Yinghai's requirements. Give me a few days.

For the specific problem above, my idea is to have the low level alloc
function be able to take both low and high limits, so that x86 can
figure out what's best for a given allocation.
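
Something along these lines (a sketch of the idea; the name is assumed):

	/* search only within [low, high) for size bytes at the given alignment */
	u64 lmb_find_in_range(u64 low, u64 high, u64 size, u64 align);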

Cheers,
Ben.