2007-05-03 01:56:18

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

This fixes two bugs:
- the stack allocation must be marked __cpuinit, since it gets called
on resume as well.
- presumably the interrupt stack should be freed on unplug if it's
going to get reallocated on every plug.

[ Only non-vmalloced stacks tested. ]

Signed-off-by: Jeremy Fitzhardinge <[email protected]>
---
arch/i386/kernel/irq.c | 42 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 37 insertions(+), 5 deletions(-)

===================================================================
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -195,10 +195,13 @@ static int __init irq_guard_cpu0(void)
}
core_initcall(irq_guard_cpu0);

-static void * __init __alloc_irqstack(int cpu)
+static DEFINE_PER_CPU(struct page *, irqstack_pages[THREAD_SIZE/PAGE_SIZE]);
+static DEFINE_PER_CPU(struct vm_struct *, irqstack_area);
+
+static void * __cpuinit __alloc_irqstack(int cpu)
{
int i;
- struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
+ struct page **pages = per_cpu(irqstack_pages, cpu), **tmp = pages;
struct vm_struct *area;

if (!cpu)
@@ -207,13 +210,27 @@ static void * __init __alloc_irqstack(in

/* failures here are unrecoverable anyway */
area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
- for (i = 0; i < ARRAY_SIZE(pages); ++i)
+ for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
pages[i] = alloc_page(GFP_HIGHUSER);
map_vm_area(area, PAGE_KERNEL, &tmp);
+ per_cpu(irqstack_area, cpu) = area;
return area->addr;
}
+
+static void __cpuinit __free_irqstack(int cpu, void *stk)
+{
+ int i;
+
+ if (!cpu)
+ return;
+
+ unmap_vm_area(per_cpu(irqstack_area, cpu));
+
+ for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
+ __free_page(per_cpu(irqstack_pages, cpu)[i]);
+}
#else /* !CONFIG_VMALLOC_STACK */
-static void * __init __alloc_irqstack(int cpu)
+static void * __cpuinit __alloc_irqstack(int cpu)
{
if (!cpu)
return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
@@ -222,12 +239,26 @@ static void * __init __alloc_irqstack(in
return (void *)__get_free_pages(GFP_KERNEL,
ilog2(THREAD_SIZE/PAGE_SIZE));
}
+
+static void __cpuinit __free_irqstack(int cpu, void *stk)
+{
+ if (!cpu)
+ return;
+
+ free_pages((unsigned long)stk, ilog2(THREAD_SIZE/PAGE_SIZE));
+}
#endif /* !CONFIG_VMALLOC_STACK */

-static void __init alloc_irqstacks(int cpu)
+static void __cpuinit alloc_irqstacks(int cpu)
{
per_cpu(softirq_stack, cpu) = __alloc_irqstack(cpu);
per_cpu(hardirq_stack, cpu) = __alloc_irqstack(cpu);
+}
+
+static void __cpuinit free_irqstacks(int cpu)
+{
+ __free_irqstack(cpu, per_cpu(softirq_stack, cpu));
+ __free_irqstack(cpu, per_cpu(hardirq_stack, cpu));
}

/*
@@ -266,6 +297,7 @@ void irq_ctx_init(int cpu)

void irq_ctx_exit(int cpu)
{
+ free_irqstacks(cpu);
per_cpu(hardirq_ctx, cpu) = NULL;
}



2007-05-03 02:25:41

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

On Wed, May 02, 2007 at 06:56:09PM -0700, Jeremy Fitzhardinge wrote:
> +static void __cpuinit __free_irqstack(int cpu, void *stk)
> +{
> + int i;
> +
> + if (!cpu)
> + return;
> +
> + unmap_vm_area(per_cpu(irqstack_area, cpu));
> +
> + for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
> + __free_page(per_cpu(irqstack_pages, cpu)[i]);
> +}

This will leak the vm_struct and also leave it in the vmlist.
work_free_thread_info() needs to be redone too. It should be

remove_vm_area( /* unmap and remove the vm_struct from vmlist */ );
kfree( /* free the vm_struct */ );
for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
__free_page( /* dredge up the appropriate page to free */ );


-- wli

2007-05-03 02:31:15

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

On Wed, May 02, 2007 at 06:56:09PM -0700, Jeremy Fitzhardinge wrote:
>> +static void __cpuinit __free_irqstack(int cpu, void *stk)
>> +{
>> + int i;
>> +
>> + if (!cpu)
>> + return;
>> +
>> + unmap_vm_area(per_cpu(irqstack_area, cpu));
>> +
>> + for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
>> + __free_page(per_cpu(irqstack_pages, cpu)[i]);
>> +}

On Wed, May 02, 2007 at 07:25:34PM -0700, Bill Irwin wrote:
[...]

Not sure if cpu 0 can ever be offlined, but it is remapped and so on.
So its virtual mapping should be undone if it can be offlined, but we
probably shouldn't attempt to free its underlying bootmem allocation.


-- wli

2007-05-03 05:48:17

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

On Wed, May 02, 2007 at 06:56:09PM -0700, Jeremy Fitzhardinge wrote:
> This fixes two bugs:
> - the stack allocation must be marked __cpuinit, since it gets called
> on resume as well.
> - presumably the interrupt stack should be freed on unplug if its
> going to get reallocated on every plug.
> [ Only non-vmalloced stacks tested. ]
> Signed-off-by: Jeremy Fitzhardinge <[email protected]>
> ---
> arch/i386/kernel/irq.c | 42 +++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 37 insertions(+), 5 deletions(-)

Updated patch follows. Please add your Signed-off-by: if it meets your
approval; I am operating on the assumption I should never do so myself.
I'm a bit unsure of how to handle cpu 0 vs. potential freeing of per_cpu
areas and error returns from __cpuinit affairs, but anyhow:

This fixes three bugs:
- the stack allocation must be marked __cpuinit, since it gets called
on resume as well.
- presumably the interrupt stack should be freed on unplug if its
going to get reallocated on every plug.
- the vm_struct got leaked by thread info freeing callbacks.
Signed-off-by: William Irwin <[email protected]>


Index: stack-paranoia/arch/i386/kernel/irq.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/irq.c 2007-05-02 19:33:23.937945981 -0700
+++ stack-paranoia/arch/i386/kernel/irq.c 2007-05-02 21:17:41.134523293 -0700
@@ -142,18 +142,19 @@
* These should really be __section__(".bss.page_aligned") as well, but
* gcc's 3.0 and earlier don't handle that correctly.
*/
-static DEFINE_PER_CPU(char *, softirq_stack);
-static DEFINE_PER_CPU(char *, hardirq_stack);
-
+struct irq_stack_info {
+ char *stack;
#ifdef CONFIG_DEBUG_STACK
-static void * __init irq_remap_stack(void *stack)
-{
- int i;
struct page *pages[THREAD_SIZE/PAGE_SIZE];
+#endif /* CONFIG_DEBUG_STACK */
+};
+static DEFINE_PER_CPU(struct irq_stack_info, softirq_stack_info);
+static DEFINE_PER_CPU(struct irq_stack_info, hardirq_stack_info);

- for (i = 0; i < ARRAY_SIZE(pages); ++i)
- pages[i] = virt_to_page(stack + PAGE_SIZE*i);
- return vmap(pages, THREAD_SIZE/PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
+#ifdef CONFIG_DEBUG_STACK
+static void * __init irq_remap_stack(struct irq_stack_info *info)
+{
+ return vmap(info->pages, ARRAY_SIZE(info->pages), VM_IOREMAP, PAGE_KERNEL);
}

static int __init irq_guard_cpu0(void)
@@ -161,59 +162,110 @@
unsigned long flags;
void *tmp;

- tmp = irq_remap_stack(per_cpu(softirq_stack, 0));
+ tmp = irq_remap_stack(&per_cpu(softirq_stack_info, 0));
if (!tmp)
return -ENOMEM;
else {
local_irq_save(flags);
- per_cpu(softirq_stack, 0) = tmp;
+ per_cpu(softirq_stack_info, 0).stack = tmp;
local_irq_restore(flags);
}
- tmp = irq_remap_stack(per_cpu(hardirq_stack, 0));
+ tmp = irq_remap_stack(&per_cpu(hardirq_stack_info, 0));
if (!tmp)
return -ENOMEM;
else {
local_irq_save(flags);
- per_cpu(hardirq_stack, 0) = tmp;
+ per_cpu(hardirq_stack_info, 0).stack = tmp;
local_irq_restore(flags);
}
return 0;
}
core_initcall(irq_guard_cpu0);

-static void * __init __alloc_irqstack(int cpu)
+static int __cpuinit __alloc_irqstack(int cpu, struct irq_stack_info *info)
{
int i;
- struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
- struct vm_struct *area;

- if (!slab_is_available())
- return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+ if (!slab_is_available()) {
+ WARN_ON(cpu != 0);
+ info->stack = __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
__pa(MAX_DMA_ADDRESS));
+ info->pages[0] = virt_to_page(info->stack);
+ for (i = 1; i < ARRAY_SIZE(info->pages); ++i)
+ info->pages[i] = info->pages[0] + i;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(info->pages); ++i) {
+ if (!cpu)
+ WARN_ON(!info->pages[i]);
+ else {
+ info->pages[i] = alloc_page(GFP_HIGHUSER);
+ if (!info->pages[i])
+ goto out;
+ }
+ }
+ info->stack = irq_remap_stack(info);
+ if (info->stack)
+ return 0;
+out:
+ if (cpu) {
+ for (--i; i >= 0; --i) {
+ __free_page(info->pages[i]);
+ info->pages[i] = NULL;
+ }
+ }
+ return -1;
+}
+
+static void __cpuinit __free_irqstack(int cpu, struct irq_stack_info *info)
+{
+ int i;

- /* failures here are unrecoverable anyway */
- area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
- for (i = 0; i < ARRAY_SIZE(pages); ++i)
- pages[i] = alloc_page(GFP_HIGHUSER);
- map_vm_area(area, PAGE_KERNEL, &tmp);
- return area->addr;
+ vunmap(info->stack);
+ /* cpu 0 bootmem allocates its underlying pages */
+ if (!cpu)
+ return;
+ for (i = 0; i < ARRAY_SIZE(info->pages); ++i) {
+ __free_page(info->pages[i]);
+ info->pages[i] = NULL;
+ }
}
#else /* !CONFIG_DEBUG_STACK */
-static void * __init __alloc_irqstack(int cpu)
+static int __cpuinit __alloc_irqstack(int cpu, struct irq_stack_info *info)
{
- if (!slab_is_available())
- return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+ if (cpu)
+ info->stack = (void *)__get_free_pages(GFP_KERNEL,
+ ilog2(THREAD_SIZE/PAGE_SIZE));
+ else if (!slab_is_available())
+ info->stack = __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
__pa(MAX_DMA_ADDRESS));
+ else
+ BUG_ON(!info->stack);
+ return 0;
+}

- return (void *)__get_free_pages(GFP_KERNEL,
- ilog2(THREAD_SIZE/PAGE_SIZE));
+static void __cpuinit __free_irqstack(int cpu, struct irq_stack_info *info)
+{
+ if (cpu)
+ free_pages((unsigned long)info->stack, ilog2(THREAD_SIZE/PAGE_SIZE));
}
#endif /* !CONFIG_DEBUG_STACK */

-static void __init alloc_irqstacks(int cpu)
+static int __cpuinit alloc_irqstacks(int cpu)
+{
+ if (__alloc_irqstack(cpu, &per_cpu(softirq_stack_info, cpu)))
+ return -1;
+ if (__alloc_irqstack(cpu, &per_cpu(hardirq_stack_info, cpu))) {
+ __free_irqstack(cpu, &per_cpu(softirq_stack_info, cpu));
+ return -1;
+ }
+ return 0;
+}
+
+static void __cpuinit free_irqstacks(int cpu)
{
- per_cpu(softirq_stack, cpu) = __alloc_irqstack(cpu);
- per_cpu(hardirq_stack, cpu) = __alloc_irqstack(cpu);
+ __free_irqstack(cpu, &per_cpu(softirq_stack_info, cpu));
+ __free_irqstack(cpu, &per_cpu(hardirq_stack_info, cpu));
}

/*
@@ -228,7 +280,7 @@

alloc_irqstacks(cpu);

- irqctx = (union irq_ctx*)per_cpu(hardirq_stack, cpu);
+ irqctx = (union irq_ctx*)per_cpu(hardirq_stack_info, cpu).stack;
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -237,7 +289,7 @@

per_cpu(hardirq_ctx, cpu) = irqctx;

- irqctx = (union irq_ctx*)per_cpu(softirq_stack, cpu);
+ irqctx = (union irq_ctx*)per_cpu(softirq_stack_info, cpu).stack;
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -252,6 +304,7 @@

void irq_ctx_exit(int cpu)
{
+ free_irqstacks(cpu);
per_cpu(hardirq_ctx, cpu) = NULL;
}

Index: stack-paranoia/arch/i386/kernel/process.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/process.c 2007-05-02 20:15:05.412496892 -0700
+++ stack-paranoia/arch/i386/kernel/process.c 2007-05-02 21:15:15.958250168 -0700
@@ -327,43 +327,38 @@
struct thread_info *alloc_thread_info(struct task_struct *unused)
{
int i;
- struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
- struct vm_struct *area;
+ struct page *pages[THREAD_SIZE/PAGE_SIZE];
+ struct thread_info *info;

/*
* passing VM_IOREMAP for the sake of alignment is why
* all this is done by hand.
*/
- area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
- if (!area)
- return NULL;
for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i) {
pages[i] = alloc_page(GFP_HIGHUSER);
if (!pages[i])
goto out_free_pages;
}
- /* implicitly transfer page refcounts to the vm_struct */
- if (map_vm_area(area, PAGE_KERNEL, &tmp))
- goto out_remove_area;
- /* it may be worth poisoning, save thread_info proper */
- return (struct thread_info *)area->addr;
-out_remove_area:
- remove_vm_area(area);
+ info = vmap(pages, THREAD_SIZE/PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
+ if (info)
+ return info;
out_free_pages:
- do {
- __free_page(pages[--i]);
- } while (i >= 0);
+ for (--i; i >= 0; --i)
+ __free_page(pages[i]);
return NULL;
}

static void work_free_thread_info(struct work_struct *work)
{
int i;
+ struct page *pages[THREAD_SIZE/PAGE_SIZE];
void *p = work;

for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
- __free_page(vmalloc_to_page(p + PAGE_SIZE*i));
- vfree(p);
+ pages[i] = vmalloc_to_page(p + PAGE_SIZE*i);
+ vunmap(work);
+ for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
+ __free_page(pages[i]);
}

void free_thread_info(struct thread_info *info)

2007-05-03 06:01:12

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

Bill Irwin wrote:
> On Wed, May 02, 2007 at 06:56:09PM -0700, Jeremy Fitzhardinge wrote:
>
>> This fixes two bugs:
>> - the stack allocation must be marked __cpuinit, since it gets called
>> on resume as well.
>> - presumably the interrupt stack should be freed on unplug if its
>> going to get reallocated on every plug.
>> [ Only non-vmalloced stacks tested. ]
>> Signed-off-by: Jeremy Fitzhardinge <[email protected]>
>> ---
>> arch/i386/kernel/irq.c | 42 +++++++++++++++++++++++++++++++++++++-----
>> 1 file changed, 37 insertions(+), 5 deletions(-)
>>
>
> Updated patch follows. Please add your Signed-off-by: if it meets your
> approval;

What does it apply to? I'm getting conflicts if I replace my patch with
this. Or does it replace one of your patches?

J

2007-05-03 06:12:39

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

Bill Irwin wrote:
>> Updated patch follows. Please add your Signed-off-by: if it meets your
>> approval;

On Wed, May 02, 2007 at 11:01:05PM -0700, Jeremy Fitzhardinge wrote:
> What does it apply to? I'm getting conflicts if I replace my patch with
> this. Or does it replace one of your patches?

I had the same question about yours and just brute-force merged. Not a
big deal for me to rediff against whatever everyone's working off of.


-- wli

2007-05-03 06:41:47

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

On Wed, May 02, 2007 at 10:48:09PM -0700, Bill Irwin wrote:
> Updated patch follows. Please add your Signed-off-by: if it meets your
> approval; I am operating on the assumption I should never do so myself.
> I'm a bit unsure of how to handle cpu 0 vs. potential freeing of per_cpu
> areas and error returns from __cpuinit affairs, but anyhow:
>
> This fixes three bugs:
> - the stack allocation must be marked __cpuinit, since it gets called
> on resume as well.
> - presumably the interrupt stack should be freed on unplug if its
> going to get reallocated on every plug.
> - the vm_struct got leaked by thread info freeing callbacks.
> Signed-off-by: William Irwin <[email protected]>

I think just normalizing cpu 0's stack suffices here. Still no idea what
to do about backpropagating errors from __cpuinit bits just yet.

Index: stack-paranoia/arch/i386/kernel/irq.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/irq.c 2007-05-02 19:33:23.937945981 -0700
+++ stack-paranoia/arch/i386/kernel/irq.c 2007-05-02 23:37:07.879316906 -0700
@@ -142,78 +142,138 @@
* These should really be __section__(".bss.page_aligned") as well, but
* gcc's 3.0 and earlier don't handle that correctly.
*/
-static DEFINE_PER_CPU(char *, softirq_stack);
-static DEFINE_PER_CPU(char *, hardirq_stack);
-
+struct irq_stack_info {
+ char *stack;
#ifdef CONFIG_DEBUG_STACK
-static void * __init irq_remap_stack(void *stack)
-{
- int i;
struct page *pages[THREAD_SIZE/PAGE_SIZE];
+#endif /* CONFIG_DEBUG_STACK */
+};
+static DEFINE_PER_CPU(struct irq_stack_info, softirq_stack_info);
+static DEFINE_PER_CPU(struct irq_stack_info, hardirq_stack_info);

- for (i = 0; i < ARRAY_SIZE(pages); ++i)
- pages[i] = virt_to_page(stack + PAGE_SIZE*i);
- return vmap(pages, THREAD_SIZE/PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
-}
-
-static int __init irq_guard_cpu0(void)
+#ifdef CONFIG_DEBUG_STACK
+static int __init irq_remap_stack(struct irq_stack_info *info)
{
+ struct page *page, *pages[THREAD_SIZE/PAGE_SIZE];
unsigned long flags;
+ int i;
void *tmp;

- tmp = irq_remap_stack(per_cpu(softirq_stack, 0));
- if (!tmp)
- return -ENOMEM;
- else {
- local_irq_save(flags);
- per_cpu(softirq_stack, 0) = tmp;
- local_irq_restore(flags);
+ for (i = 0; i < ARRAY_SIZE(pages); ++i) {
+ pages[i] = alloc_page(GFP_HIGHUSER);
+ if (!pages[i])
+ goto out_free_pages;
}
- tmp = irq_remap_stack(per_cpu(hardirq_stack, 0));
+ tmp = vmap(pages, ARRAY_SIZE(info->pages), VM_IOREMAP, PAGE_KERNEL);
if (!tmp)
- return -ENOMEM;
+ goto out_free_pages;
else {
local_irq_save(flags);
- per_cpu(hardirq_stack, 0) = tmp;
+ memcpy(info->pages, pages, sizeof(pages));
+ page = virt_to_page(info->stack);
+ for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i) {
+ ClearPageReserved(&page[i]);
+ init_page_count(&page[i]);
+ __free_page(&page[i]);
+ }
+ info->stack = tmp;
local_irq_restore(flags);
}
return 0;
+out_free_pages:
+ for (--i; i >= 0; --i)
+ __free_page(pages[i]);
+ return -1;
+}
+
+static int __init irq_guard_cpu0(void)
+{
+ if (irq_remap_stack(&per_cpu(softirq_stack_info, 0)))
+ return -ENOMEM;
+ if (irq_remap_stack(&per_cpu(hardirq_stack_info, 0)))
+ return -ENOMEM;
+ return 0;
}
core_initcall(irq_guard_cpu0);

-static void * __init __alloc_irqstack(int cpu)
+static int __cpuinit __alloc_irqstack(int cpu, struct irq_stack_info *info)
{
int i;
- struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
- struct vm_struct *area;

- if (!slab_is_available())
- return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+ if (!slab_is_available()) {
+ info->stack = __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
__pa(MAX_DMA_ADDRESS));
+ info->pages[0] = virt_to_page(info->stack);
+ for (i = 1; i < ARRAY_SIZE(info->pages); ++i)
+ info->pages[i] = info->pages[0] + i;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(info->pages); ++i) {
+ info->pages[i] = alloc_page(GFP_HIGHUSER);
+ if (!info->pages[i])
+ goto out;
+ }
+ info->stack = vmap(info->pages, ARRAY_SIZE(info->pages), VM_IOREMAP,
+ PAGE_KERNEL);
+ if (info->stack)
+ return 0;
+out:
+ for (--i; i >= 0; --i) {
+ __free_page(info->pages[i]);
+ info->pages[i] = NULL;
+ }
+ return -1;
+}

- /* failures here are unrecoverable anyway */
- area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
- for (i = 0; i < ARRAY_SIZE(pages); ++i)
- pages[i] = alloc_page(GFP_HIGHUSER);
- map_vm_area(area, PAGE_KERNEL, &tmp);
- return area->addr;
+static void __cpuinit __free_irqstack(int cpu, struct irq_stack_info *info)
+{
+ int i;
+
+ vunmap(info->stack);
+ for (i = 0; i < ARRAY_SIZE(info->pages); ++i) {
+ if (!PageReserved(info->pages[i]))
+ __free_page(info->pages[i]);
+ info->pages[i] = NULL;
+ }
+ info->stack = NULL;
}
#else /* !CONFIG_DEBUG_STACK */
-static void * __init __alloc_irqstack(int cpu)
+static int __cpuinit __alloc_irqstack(int cpu, struct irq_stack_info *info)
{
if (!slab_is_available())
- return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+ info->stack = __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
__pa(MAX_DMA_ADDRESS));
-
- return (void *)__get_free_pages(GFP_KERNEL,
+ else
+ info->stack = (void *)__get_free_pages(GFP_KERNEL,
ilog2(THREAD_SIZE/PAGE_SIZE));
+ return info->stack ? 0 : -1;
+}
+
+static void __cpuinit __free_irqstack(int cpu, struct irq_stack_info *info)
+{
+ struct page *page = virt_to_page(info->stack);
+
+ if (!PageReserved(page))
+ __free_pages(page, ilog2(THREAD_SIZE/PAGE_SIZE));
+ info->stack = NULL;
}
#endif /* !CONFIG_DEBUG_STACK */

-static void __init alloc_irqstacks(int cpu)
+static int __cpuinit alloc_irqstacks(int cpu)
+{
+ if (__alloc_irqstack(cpu, &per_cpu(softirq_stack_info, cpu)))
+ return -1;
+ if (__alloc_irqstack(cpu, &per_cpu(hardirq_stack_info, cpu))) {
+ __free_irqstack(cpu, &per_cpu(softirq_stack_info, cpu));
+ return -1;
+ }
+ return 0;
+}
+
+static void __cpuinit free_irqstacks(int cpu)
{
- per_cpu(softirq_stack, cpu) = __alloc_irqstack(cpu);
- per_cpu(hardirq_stack, cpu) = __alloc_irqstack(cpu);
+ __free_irqstack(cpu, &per_cpu(softirq_stack_info, cpu));
+ __free_irqstack(cpu, &per_cpu(hardirq_stack_info, cpu));
}

/*
@@ -228,7 +288,7 @@

alloc_irqstacks(cpu);

- irqctx = (union irq_ctx*)per_cpu(hardirq_stack, cpu);
+ irqctx = (union irq_ctx*)per_cpu(hardirq_stack_info, cpu).stack;
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -237,7 +297,7 @@

per_cpu(hardirq_ctx, cpu) = irqctx;

- irqctx = (union irq_ctx*)per_cpu(softirq_stack, cpu);
+ irqctx = (union irq_ctx*)per_cpu(softirq_stack_info, cpu).stack;
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -252,6 +312,7 @@

void irq_ctx_exit(int cpu)
{
+ free_irqstacks(cpu);
per_cpu(hardirq_ctx, cpu) = NULL;
}

Index: stack-paranoia/arch/i386/kernel/process.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/process.c 2007-05-02 20:15:05.412496892 -0700
+++ stack-paranoia/arch/i386/kernel/process.c 2007-05-02 21:15:15.958250168 -0700
@@ -327,43 +327,38 @@
struct thread_info *alloc_thread_info(struct task_struct *unused)
{
int i;
- struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
- struct vm_struct *area;
+ struct page *pages[THREAD_SIZE/PAGE_SIZE];
+ struct thread_info *info;

/*
* passing VM_IOREMAP for the sake of alignment is why
* all this is done by hand.
*/
- area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
- if (!area)
- return NULL;
for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i) {
pages[i] = alloc_page(GFP_HIGHUSER);
if (!pages[i])
goto out_free_pages;
}
- /* implicitly transfer page refcounts to the vm_struct */
- if (map_vm_area(area, PAGE_KERNEL, &tmp))
- goto out_remove_area;
- /* it may be worth poisoning, save thread_info proper */
- return (struct thread_info *)area->addr;
-out_remove_area:
- remove_vm_area(area);
+ info = vmap(pages, THREAD_SIZE/PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
+ if (info)
+ return info;
out_free_pages:
- do {
- __free_page(pages[--i]);
- } while (i >= 0);
+ for (--i; i >= 0; --i)
+ __free_page(pages[i]);
return NULL;
}

static void work_free_thread_info(struct work_struct *work)
{
int i;
+ struct page *pages[THREAD_SIZE/PAGE_SIZE];
void *p = work;

for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
- __free_page(vmalloc_to_page(p + PAGE_SIZE*i));
- vfree(p);
+ pages[i] = vmalloc_to_page(p + PAGE_SIZE*i);
+ vunmap(work);
+ for (i = 0; i < THREAD_SIZE/PAGE_SIZE; ++i)
+ __free_page(pages[i]);
}

void free_thread_info(struct thread_info *info)

2007-05-03 07:07:29

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

Bill Irwin wrote:
> I had the same question about yours and just brute-force merged. Not a
> big deal for me to rediff against whatever everyone's working off of.
>

I picked up one version of your patches you posted a couple of days ago,
but I guess you've posted the series multiple times, presumably with
differences.

How about posting the series again and I can test it out?

J

2007-05-03 07:39:46

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

Bill Irwin wrote:
>> I had the same question about yours and just brute-force merged. Not a
>> big deal for me to rediff against whatever everyone's working off of.

On Thu, May 03, 2007 at 12:07:29AM -0700, Jeremy Fitzhardinge wrote:
> I picked up one version of your patches you posted a couple of days ago,
> but I guess you've posted the series multiple times, presumably with
> differences.
> How about posting the series again and I can test it out?

From the looks of it, I'm totally out-of-sync. Here are the 4 (the
fourth should be partially folded into the third but also partially
folded into the first), included as MIME attachments.

As an aside, it looks like failures here need to eventually propagate
to __cpu_up(). irq_ctx_init() needs to return a status, and its callers
need to check it. irq_ctx_init() probably also needs to be __cpuinit.


-- wli


Attachments:
(No filename) (870.00 B)
dynamic-irq-stacks.patch (4.05 kB)
dynamic-irq-stacks.patch
unconditional-i386-irq-stacks.patch (2.43 kB)
unconditional-i386-irq-stacks.patch
debug-stack.patch (9.11 kB)
debug-stack.patch
fitz.patch (7.70 kB)
fitz.patch
Download all attachments

2007-05-03 08:04:25

by Bill Irwin

[permalink] [raw]
Subject: Re: [PATCH] i386: fix suspend/resume with dynamically allocated irq stacks

On Thu, May 03, 2007 at 12:39:30AM -0700, Bill Irwin wrote:
> As an aside, it looks like failures here need to eventually propagate
> to __cpu_up(). irq_ctx_init() needs to return a status, and its callers
> need to check it. irq_ctx_init() probably also needs to be __cpuinit.

Ignoring the general irq_ctx_init() cleanup this takes care of it.


Index: stack-paranoia/arch/i386/kernel/irq.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/irq.c 2007-05-03 00:41:26.779223079 -0700
+++ stack-paranoia/arch/i386/kernel/irq.c 2007-05-03 01:04:06.984736774 -0700
@@ -279,35 +279,31 @@
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
-void irq_ctx_init(int cpu)
+static void __cpuinit
+__irq_ctx_init(union irq_ctx **irqctx, struct irq_stack_info *info,
+ int cpu, int preempt_count)
{
- union irq_ctx *irqctx;
+ *irqctx = (union irq_ctx*)info->stack;
+ (*irqctx)->tinfo.task = NULL;
+ (*irqctx)->tinfo.exec_domain = NULL;
+ (*irqctx)->tinfo.cpu = cpu;
+ (*irqctx)->tinfo.preempt_count = preempt_count;
+ (*irqctx)->tinfo.addr_limit = MAKE_MM_SEG(0);
+}

+int irq_ctx_init(int cpu)
+{
if (per_cpu(hardirq_ctx, cpu))
- return;
-
- alloc_irqstacks(cpu);
-
- irqctx = (union irq_ctx*)per_cpu(hardirq_stack_info, cpu).stack;
- irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
- irqctx->tinfo.cpu = cpu;
- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
- per_cpu(hardirq_ctx, cpu) = irqctx;
-
- irqctx = (union irq_ctx*)per_cpu(softirq_stack_info, cpu).stack;
- irqctx->tinfo.task = NULL;
- irqctx->tinfo.exec_domain = NULL;
- irqctx->tinfo.cpu = cpu;
- irqctx->tinfo.preempt_count = 0;
- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-
- per_cpu(softirq_ctx, cpu) = irqctx;
-
+ return 0;
+ if (alloc_irqstacks(cpu))
+ return -1;
+ __irq_ctx_init(&per_cpu(hardirq_ctx, cpu),
+ &per_cpu(hardirq_stack_info, cpu), cpu, HARDIRQ_OFFSET);
+ __irq_ctx_init(&per_cpu(softirq_ctx, cpu),
+ &per_cpu(softirq_stack_info, cpu), cpu, 0);
printk("CPU %u irqstacks, hard=%p soft=%p\n",
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+ return 0;
}

void irq_ctx_exit(int cpu)
Index: stack-paranoia/include/asm-i386/irq.h
===================================================================
--- stack-paranoia.orig/include/asm-i386/irq.h 2007-05-03 00:48:40.015911831 -0700
+++ stack-paranoia/include/asm-i386/irq.h 2007-05-03 00:48:45.056199061 -0700
@@ -24,7 +24,7 @@
# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif

-void irq_ctx_init(int);
+int irq_ctx_init(int);
void irq_ctx_exit(int);
#define __ARCH_HAS_DO_SOFTIRQ

Index: stack-paranoia/arch/i386/kernel/i8259.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/i8259.c 2007-05-03 00:49:04.185289165 -0700
+++ stack-paranoia/arch/i386/kernel/i8259.c 2007-05-03 00:54:49.104945016 -0700
@@ -417,5 +417,8 @@
if (boot_cpu_data.hard_math && !cpu_has_fpu)
setup_irq(FPU_IRQ, &fpu_irq);

- irq_ctx_init(smp_processor_id());
+ if (irq_ctx_init(smp_processor_id()))
+ printk(KERN_INFO
+ "Couldn't allocate IRQ context for CPU %d\n",
+ smp_processor_id());
}
Index: stack-paranoia/arch/i386/kernel/smpboot.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/smpboot.c 2007-05-03 00:49:32.942927970 -0700
+++ stack-paranoia/arch/i386/kernel/smpboot.c 2007-05-03 00:52:03.739521378 -0700
@@ -828,9 +828,11 @@
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
/* Stack for startup_32 can be just as for start_secondary onwards */
stack_start.esp = (void *) idle->thread.esp;
-
- irq_ctx_init(cpu);
-
+ if (irq_ctx_init(cpu)) {
+ printk(KERN_INFO
+ "Couldn't allocate IRQ contexts for CPU %d\n", cpu);
+ return -1;
+ }
x86_cpu_to_apicid[cpu] = apicid;
/*
* This grunge runs the startup process for
Index: stack-paranoia/arch/i386/mach-voyager/voyager_smp.c
===================================================================
--- stack-paranoia.orig/arch/i386/mach-voyager/voyager_smp.c 2007-05-03 00:52:39.609565495 -0700
+++ stack-paranoia/arch/i386/mach-voyager/voyager_smp.c 2007-05-03 00:53:32.516580494 -0700
@@ -589,8 +589,12 @@
return;
}

- irq_ctx_init(cpu);
-
+ if (irq_ctx_init(cpu)) {
+ printk(KERN_INFO
+ "Couldn't allocate IRQ context for CPU %d\n", cpu);
+ cpucount--;
+ return;
+ }
/* Note: Don't modify initial ss override */
VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
(unsigned long)hijack_source.val, hijack_source.idt.Segment,