diff -urN linux.orig/arch/i386/kernel/i386_ksyms.c linux/arch/i386/kernel/i386_ksyms.c
--- linux.orig/arch/i386/kernel/i386_ksyms.c Mon Oct 21 13:25:58 2002
+++ linux/arch/i386/kernel/i386_ksyms.c Mon Oct 21 20:04:35 2002
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/tty.h>
+#include <linux/nmi.h>
#include <linux/highmem.h>
#include <asm/semaphore.h>
@@ -89,6 +90,9 @@
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(cpu_khz);
EXPORT_SYMBOL(apm_info);
+
+EXPORT_SYMBOL(request_nmi);
+EXPORT_SYMBOL(release_nmi);
#ifdef CONFIG_DEBUG_IOVIRT
EXPORT_SYMBOL(__io_virt_debug);
diff -urN linux.orig/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
--- linux.orig/arch/i386/kernel/traps.c Mon Oct 21 13:25:45 2002
+++ linux/arch/i386/kernel/traps.c Mon Oct 21 20:06:43 2002
@@ -485,6 +485,95 @@
printk("Do you have a strange power saving mode enabled?\n");
}
+/* A list of handlers for NMIs. This list will be called in order
+ when an NMI from an otherwise unidentifiable source comes in. If
+ one of these handles the NMI, it should return 1. NMI handlers
+ cannot claim spinlocks, so we have to handle mutex in a different
+ manner. A spinlock protects the list from multiple writers. When
+ something is removed from the list, it will check to see that
+ calling_nmi_handlers is 0 before returning. If it is not zero,
+ another processor is handling an NMI, and it should wait until it
+ goes to zero to return and allow the user to free that data. */
+static volatile struct nmi_handler *nmi_handler_list = NULL;
+static spinlock_t nmi_handler_lock = SPIN_LOCK_UNLOCKED;
+static atomic_t calling_nmi_handlers = ATOMIC_INIT(0);
+
+/* Register HANDLER to be called for otherwise-unidentified NMIs.
+   Returns 0 on success, -EBUSY if HANDLER is already registered.
+   The caller must keep HANDLER alive until release_nmi(). */
+int request_nmi(struct nmi_handler *handler)
+{
+	volatile struct nmi_handler *curr;
+
+	spin_lock(&nmi_handler_lock);
+
+	/* Make sure the thing is not already in the list. */
+	curr = nmi_handler_list;
+	while (curr) {
+		if (curr == handler)
+			break;
+		curr = curr->next;
+	}
+	if (curr) {
+		/* Already registered.  The original code returned here
+		   with nmi_handler_lock still held (deadlocking the next
+		   caller) and returned a positive EBUSY; release the lock
+		   and use the conventional negative errno. */
+		spin_unlock(&nmi_handler_lock);
+		return -EBUSY;
+	}
+
+	/* Link in at the head.  handler->next is set before the xchg()
+	   so NMI-context readers never see a half-formed entry. */
+	handler->next = nmi_handler_list;
+	xchg(&nmi_handler_list, handler);
+
+	spin_unlock(&nmi_handler_lock);
+	return 0;
+}
+
+/* Unregister HANDLER.  Calling this with a handler that was never
+   registered is harmless (it is simply not found).  On return it is
+   safe for the caller to free HANDLER: we spin until no CPU is still
+   walking the handler list in NMI context.  NOTE(review): the wait
+   loop below runs with nmi_handler_lock held and has no cpu_relax();
+   confirm this busy-wait is acceptable on the target kernels. */
+void release_nmi(struct nmi_handler *handler)
+{
+ volatile struct nmi_handler *curr, *prev;
+
+ spin_lock(&nmi_handler_lock);
+
+ /* Find it in the list. */
+ curr = nmi_handler_list;
+ prev = NULL;
+ while (curr) {
+ if (curr == handler) {
+ break;
+ }
+ prev = curr;
+ curr = curr->next;
+ }
+
+ if (curr) {
+ /* If it was found, remove it from the list. We
+ assume the write operation here is atomic. */
+ if (prev)
+ xchg(&(prev->next), curr->next);
+ else
+ xchg(&nmi_handler_list, curr->next);
+
+ /* If some other part of the kernel is handling an
+ NMI, we make sure that we don't release the handler
+ (or really, allow the user to release the handler)
+ until it has finished handling the NMI. */
+ while (atomic_read(&calling_nmi_handlers))
+ ;
+ }
+ spin_unlock(&nmi_handler_lock);
+}
+
+/* Walk the registered NMI handlers, called from NMI context via
+   default_do_nmi().  Returns nonzero if any handler claimed the NMI
+   (each handler returns 1 to claim it; results are OR-ed so all
+   handlers always run).  The inc/dec of calling_nmi_handlers brackets
+   the list walk so release_nmi() can wait for us before letting the
+   caller free a removed handler; the smp_mb() calls order the counter
+   update against the list reads for release_nmi()'s benefit. */
+static int call_nmi_handlers(struct pt_regs * regs)
+{
+ volatile struct nmi_handler *curr;
+ int handled = 0;
+
+ atomic_inc(&calling_nmi_handlers);
+ smp_mb();
+ curr = nmi_handler_list;
+ while (curr) {
+ handled |= curr->handler(curr->dev_id, regs);
+ curr = curr->next;
+ }
+ smp_mb();
+ atomic_dec(&calling_nmi_handlers);
+ return handled;
+}
+
static void default_do_nmi(struct pt_regs * regs)
{
unsigned char reason = inb(0x61);
@@ -500,6 +589,12 @@
return;
}
#endif
+
+ /* Check our handler list to see if anyone can handle this
+ nmi. */
+ if (call_nmi_handlers(regs))
+ return;
+
unknown_nmi_error(reason, regs);
return;
}
diff -urN linux.orig/include/asm-i386/irq.h linux/include/asm-i386/irq.h
--- linux.orig/include/asm-i386/irq.h Mon Oct 21 13:24:41 2002
+++ linux/include/asm-i386/irq.h Mon Oct 21 20:03:52 2002
@@ -28,4 +28,16 @@
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
+
+/* Structure for handling NMIs */
+#define HAVE_NMI_HANDLER 1
+struct nmi_handler
+{
+ char *dev_name; /* human-readable name; not read anywhere in this
+ patch -- presumably intended for later /proc reporting, verify */
+ void *dev_id; /* opaque cookie passed back to handler() */
+ int (*handler)(void *dev_id, struct pt_regs *regs); /* return 1 if
+ this handler claims the NMI, 0 otherwise */
+
+ volatile struct nmi_handler *next; /* singly-linked list, managed by
+ request_nmi()/release_nmi(); read lock-free from NMI context */
+};
+
#endif /* _ASM_IRQ_H */
diff -urN linux.orig/include/linux/nmi.h linux/include/linux/nmi.h
--- linux.orig/include/linux/nmi.h Thu Jun 20 17:53:40 2002
+++ linux/include/linux/nmi.h Mon Oct 21 20:03:53 2002
@@ -19,4 +19,11 @@
# define touch_nmi_watchdog() do { } while(0)
#endif
+/**
+ * Register a handler to get called when an NMI occurs. If the handler
+ * actually handles the NMI, it should return 1.
+ */
+int request_nmi(struct nmi_handler *handler);
+void release_nmi(struct nmi_handler *handler);
+
#endif
On Mon, Oct 21, 2002 at 08:32:47PM -0500, Corey Minyard wrote:
> The attached patch implements a way to request to receive an NMI if it
> comes from an otherwise unknown source. I needed this for handling NMIs
> with the IPMI watchdog. This function was discussed a little a while
Then NMI watchdog and oprofile should be changed to use this too. We
also need priority and/or equivalent of NOTIFY_STOP_MASK so we can break
out of calling all the handlers. Actually, why do you continue if one
of the handlers returns 1 anyway ?
> + atomic_inc(&calling_nmi_handlers);
Isn't this going to cause cacheline ping pong ?
> + curr = nmi_handler_list;
> + while (curr) {
> + handled |= curr->handler(curr->dev_id, regs);
dev_name is never used at all. What is it for ? Also, would be nice
to do an smp_processor_id() just once and pass that in to prevent
multiple calls to get_current().
Couldn't you modify the notifier code to do the xchg()s (though that's
not available on all CPU types ...)
> +#define HAVE_NMI_HANDLER 1
What uses this ?
> + volatile struct nmi_handler *next;
Hmm ...
Is it not possible to use linux/rcupdate.h for this stuff ?
regards
john
--
"Lots of companies would love to be in our hole."
- Scott McNealy
John Levon wrote:
>On Mon, Oct 21, 2002 at 08:32:47PM -0500, Corey Minyard wrote:
>
>>The attached patch implements a way to request to receive an NMI if it
>>comes from an otherwise unknown source. I needed this for handling NMIs
>>with the IPMI watchdog. This function was discussed a little a while
>>
>>
>Then NMI watchdog and oprofile should be changed to use this too.
>
Maybe so. If we get the infrastructure into place, we can change that
afterwards.
> We
>also need priority and/or equivalent of NOTIFY_STOP_MASK so we can break
>out of calling all the handlers. Actually, why do you continue if one
>of the handlers returns 1 anyway ?
>
What if there's an NMI from multiple things? Not that it's likely, but
it's possible, right? I could easily add priority and sort the list by
it, and add a NOTIFY_STOP_MASK, if there is some benefit.
>>+ atomic_inc(&calling_nmi_handlers);
>>
>>
>Isn't this going to cause cacheline ping pong ?
>
This is an NMI, does it really matter? And is there another way to do
this without locks?
>>+ curr = nmi_handler_list;
>>+ while (curr) {
>>+ handled |= curr->handler(curr->dev_id, regs);
>>
>>
>dev_name is never used at all. What is it for ? Also, would be nice
>to do an smp_processor_id() just once and pass that in to prevent
>multiple calls to get_current().
>
dev_name could be removed, although it would be nice for reporting
later. Basically, for the same reason it's there for interrupts. And
again, this is an NMI, I don't think performance really matters (unless
we perhaps add the NMI watchdog to this).
>Couldn't you modify the notifier code to do the xchg()s (though that's
>not available on all CPU types ...)
>
I don't understand. The xchg()s are for atomicity between the
request/release code and the NMI handler. How could the notifier code
do it?
>>+#define HAVE_NMI_HANDLER 1
>>
>>
>What uses this ?
>
This is so the user code can know if it's available or not.
>>+ volatile struct nmi_handler *next;
>>
>>
>Hmm ...
>
>Is it not possible to use linux/rcupdate.h for this stuff ?
>
I'm not sure. It looks possible, but remember, this is an NMI, normal
rules may not apply. Particularly, you cannot block or spin waiting for
something else, the NMI code has to run. An NMI can happen at ANY time.
If the rcu code can handle this, I could use it, but I have not looked
to see if it can.
Thanks,
-Corey
On Mon, Oct 21, 2002 at 09:32:07PM -0500, Corey Minyard wrote:
> This is an NMI, does it really matter?
Yes. Both for oprofile and the NMI watchdog (which was firing awfully
often last time I checked). The handler needs to be as streamlined as
possible.
> dev_name could be removed, although it would be nice for reporting
> later.
Reporting what ? from where ?
> >Couldn't you modify the notifier code to do the xchg()s (though that's
> >not available on all CPU types ...)
> >
> I don't understand. The xchg()s are for atomicity between the
> request/release code and the NMI handler. How could the notifier code
> do it?
You are using the xchg()s in an attempt to thread onto/off the list
safely no ?
> >>+#define HAVE_NMI_HANDLER 1
> This is so the user code can know if it's available or not.
If we had that for every API or API change, the kernel would be mostly
HAVE_*. It's either available or it's not. If you're maintaining an
external module, then autoconf or similar is the proper way to check for
its existence.
> >Is it not possible to use linux/rcupdate.h for this stuff ?
>
> I'm not sure. It looks possible, but remember, this is an NMI, normal
> rules may not apply. Particularly, you cannot block or spin waiting for
> something else, the NMI code has to run. An NMI can happen at ANY time.
Believe me, I know :)
> If the rcu code can handle this, I could use it, but I have not looked
> to see if it can.
If it's possible (and I have no idea, not having looked at RCU at all)
it seems the right way.
regards
john
--
"Lots of companies would love to be in our hole."
- Scott McNealy
On Tue, 22 Oct 2002 07:46:49 +0530, John Levon wrote:
> On Mon, Oct 21, 2002 at 08:32:47PM -0500, Corey Minyard wrote:
>
>> The attached patch implements a way to request to receive an NMI if it
>> comes from an otherwise unknown source. I needed this for handling
>> NMIs with the IPMI watchdog. This function was discussed a little a
>> while
>
> Then NMI watchdog and oprofile should be changed to use this too.
Perhaps even LKCD can make use of such a framework if it works
out.
We
> also need priority and/or equivalent of NOTIFY_STOP_MASK so we can break
> out of calling all the handlers. Actually, why do you continue if one of
> the handlers returns 1 anyway ?
>
>> + atomic_inc(&calling_nmi_handlers);
>
> Isn't this going to cause cacheline ping pong ?
>
>> + curr = nmi_handler_list;
>> + while (curr) {
>> + handled |= curr->handler(curr->dev_id, regs);
>
> dev_name is never used at all. What is it for ? Also, would be nice to
> do an smp_processor_id() just once and pass that in to prevent multiple
> calls to get_current().
>
> Couldn't you modify the notifier code to do the xchg()s (though that's
> not available on all CPU types ...)
>
>> +#define HAVE_NMI_HANDLER 1
>
> What uses this ?
>
>> + volatile struct nmi_handler *next;
>
> Hmm ...
>
> Is it not possible to use linux/rcupdate.h for this stuff ?
>
> regards
> john
John Levon wrote:
>On Mon, Oct 21, 2002 at 09:32:07PM -0500, Corey Minyard wrote:
>
>>This is an NMI, does it really matter?
>>
>>
>Yes. Both for oprofile and the NMI watchdog (which was firing awfully
>often last time I checked). The handler needs to be as streamlined as
>possible.
>
Ok. I'd be inclined to leave the high-usage things where they are,
although it would be nice to be able to make the NMI watchdog a module.
oprofile should probably stay where it is. Do you have an alternate
implementation that would be more efficient?
>>dev_name could be removed, although it would be nice for reporting
>>later.
>>
>>
>Reporting what ? from where ?
>
Registered NMI users in procfs.
>>>Couldn't you modify the notifier code to do the xchg()s (though that's
>>>not available on all CPU types ...)
>>>
>>I don't understand. The xchg()s are for atomicity between the
>>request/release code and the NMI handler. How could the notifier code
>>do it?
>>
>>
>You are using the xchg()s in an attempt to thread onto/off the list
>safely no ?
>
Yes. But I don't understand why they would be used in the notifier code.
>>>>+#define HAVE_NMI_HANDLER 1
>>>>
>>>>
>>This is so the user code can know if it's available or not.
>>
>>
>If we had that for every API or API change, the kernel would be mostly
>HAVE_*. It's either available or it's not. If you're maintaining an
>external module, then autoconf or similar is the proper way to check for
>its existence.
>
I'm not worried about kernel versions so much as processor capability.
Some processors may not have NMIs, or may not be capable of doing this.
A few of these exist (like __HAVE_ARCH_CMPXCHG). The name's probably
bad, maybe it should be "__HAVE_ARCH_NMI_HANDLER"?
>>If the rcu code can handle this, I could use it, but I have not looked
>>to see if it can.
>>
>>
>If it's possible (and I have no idea, not having looked at RCU at all)
>it seems the right way.
>
I looked, and the rcu code relies on turning off interrupts to avoid
preemption. So it won't work.
Thanks again,
-Corey
On Tue, Oct 22, 2002 at 08:02:11AM -0500, Corey Minyard wrote:
> Ok. I'd be inclined to leave the high-usage things where they are,
> although it would be nice to be able to make the NMI watchdog a module.
> oprofile should probably stay where it is. Do you have an alternate
> implementation that would be more efficient?
I'm beginning to think you're right. You should ask Keith Owens if kdb
etc. can use your API successfully.
> >>dev_name could be removed, although it would be nice for reporting
> >>
> >Reporting what ? from where ?
> >
> Registered NMI users in procfs.
Then if you add such code, you can add dev_name ... I hate code that
does nothing ...
> Yes. But I don't understand why they would be used in the notifier code.
I'm trying to reduce code duplication - you do basically the same thing
notifier register/unregister does.
btw, the stuff you add to header files should all be in asm-i386/nmi.h
IMHO.
It would make it clear that there's a fast-path "set nmi handler" and
the slow one, and you can document the difference there, if that's what
we're going to do.
regards
john
--
"Lots of companies would love to be in our hole."
- Scott McNealy
John Levon wrote:
>On Tue, Oct 22, 2002 at 08:02:11AM -0500, Corey Minyard wrote:
>
>>Ok. I'd be inclined to leave the high-usage things where they are,
>>although it would be nice to be able to make the NMI watchdog a module.
>>oprofile should probably stay where it is. Do you have an alternate
>>implementation that would be more efficient?
>>
>>
>I'm beginning to think you're right. You should ask Keith Owens if kdb
>etc. can use your API successfully.
>
Ok. Good thought, that would decouple kdb a little.
>>>>dev_name could be removed, although it would be nice for reporting
>>>>
>>>Reporting what ? from where ?
>>>
>>Registered NMI users in procfs.
>>
>>
>Then if you add such code, you can add dev_name ... I hate code that
>does nothing ...
>
Ok, I'll add a procfs interface then :-). IMHO, there's a difference
between stuff in an interface that is looking forward and dead code,
though. If I added it later, I would break all the users. But there is
a balance.
>>Yes. But I don't understand why they would be used in the notifier code.
>>
>>
>I'm trying to reduce code duplication - you do basically the same thing
>notifier register/unregister does.
>
Ah. Yes, there is some stuff that looks the same but is subtly
different. I'll see what I can do.
>btw, the stuff you add to header files should all be in asm-i386/nmi.h
>IMHO.
>
Ok, I agree.
>It would make it clear that there's a fast-path "set nmi handler" and
>the slow one, and you can document the difference there, if that's what
>we're going to do.
>
>regards
>john
>
>
>
Thanks,
-Corey
On Tue, 2002-10-22 at 09:02, Corey Minyard wrote:
> I looked, and the rcu code relys on turning off interrupts to avoid
> preemption. So it won't work.
At least on the variant of RCU that is in 2.5, the RCU code does the
read side by disabling preemption. Nothing else.
The write side is the same with or without preemption - wait until all
readers are quiescent, change the copy, etc.
But anyhow, disabling interrupts should not affect NMIs, no?
Robert Love
On Tue, Oct 22, 2002 at 03:10:06PM +0200, Corey Minyard wrote:
> >If it's possible (and I have no idea, not having looked at RCU at all)
> >it seems the right way.
> >
> I looked, and the rcu code relys on turning off interrupts to avoid
> preemption. So it won't work.
>
Hmm.. Let me see -
You need to walk the list in call_nmi_handlers from nmi interrupt handler where
preemption is not an issue anyway. Using RCU you can possibly do a safe
walking of the nmi handlers. To do this, your update side code
(request/release nmi) will still have to be serialized (spinlock), but
you should not need to wait for completion of any other CPU executing
the nmi handler, instead provide wrappers for nmi_handler
allocation/free and there free the nmi_handler using an RCU callback
(call_rcu()). The nmi_handler will not be freed until all the CPUs
have done a context switch or executed user-level or been idle.
This will guarantee that *this* nmi_handler is not in execution
and can safely be freed.
This of course is a very simplistic view of the things, there could
be complications that I may have overlooked. But I would be happy
to help out on this if you want.
Thanks
Dipankar
Dipankar Sarma wrote:
>On Tue, Oct 22, 2002 at 03:10:06PM +0200, Corey Minyard wrote:
>
>
>>>If it's possible (and I have no idea, not having looked at RCU at all)
>>>it seems the right way.
>>>
>>>
>>>
>>I looked, and the rcu code relys on turning off interrupts to avoid
>>preemption. So it won't work.
>>
>>
>>
>
>Hmm.. Let me see -
>
>You need to walk the list in call_nmi_handlers from nmi interrupt handler where
>preemption is not an issue anyway. Using RCU you can possibly do a safe
>walking of the nmi handlers. To do this, your update side code
>(request/release nmi) will still have to be serialized (spinlock), but
>you should not need to wait for completion of any other CPU executing
>the nmi handler, instead provide wrappers for nmi_handler
>allocation/free and there free the nmi_handler using an RCU callback
>(call_rcu()). The nmi_handler will not be freed until all the CPUs
>have done a contex switch or executed user-level or been idle.
>This will gurantee that *this* nmi_handler is not in execution
>and can safely be freed.
>
>This of course is a very simplistic view of the things, there could
>be complications that I may have overlooked. But I would be happy
>to help out on this if you want.
>
This doesn't sound any simpler than what I am doing right now. In fact,
it sounds more complex. Am I correct? What I am doing is pretty simple
and correct. Maybe more complexity would be required if you couldn't
atomically update a pointer, but I think simplicity should win here.
Thanks,
-Corey
On Tue, Oct 22, 2002 at 01:05:57PM -0500, Corey Minyard wrote:
> >You need to walk the list in call_nmi_handlers from nmi interrupt handler where
> >preemption is not an issue anyway. Using RCU you can possibly do a safe
> >walking of the nmi handlers. To do this, your update side code
> >(request/release nmi) will still have to be serialized (spinlock), but
> >you should not need to wait for completion of any other CPU executing
> >the nmi handler, instead provide wrappers for nmi_handler
> >allocation/free and there free the nmi_handler using an RCU callback
> >(call_rcu()). The nmi_handler will not be freed until all the CPUs
> >have done a contex switch or executed user-level or been idle.
> >This will gurantee that *this* nmi_handler is not in execution
> >and can safely be freed.
> >
> >This of course is a very simplistic view of the things, there could
> >be complications that I may have overlooked. But I would be happy
> >to help out on this if you want.
> >
> This doesn't sound any simpler than what I am doing right now. In fact,
> it sounds more complex. Am I correct? What I am doing is pretty simple
> and correct. Maybe more complexity would be required if you couldn't
> atomically update a pointer, but I think simplicity should win here.
I would vote for simplicity and would normally agree with you here. But
it seems to me that using RCU, you can avoid atomic operations
and cache line bouncing of calling_nmi_handlers in the fast path
(nmi interrupt handler). One could argue whether it is really
a fast path or not, but if you are using it for profiling, I would
say it is. No ?
Thanks
Dipankar
Dipankar Sarma wrote:
>On Tue, Oct 22, 2002 at 01:05:57PM -0500, Corey Minyard wrote:
>
>
>>>You need to walk the list in call_nmi_handlers from nmi interrupt handler where
>>>preemption is not an issue anyway. Using RCU you can possibly do a safe
>>>walking of the nmi handlers. To do this, your update side code
>>>(request/release nmi) will still have to be serialized (spinlock), but
>>>you should not need to wait for completion of any other CPU executing
>>>the nmi handler, instead provide wrappers for nmi_handler
>>>allocation/free and there free the nmi_handler using an RCU callback
>>>(call_rcu()). The nmi_handler will not be freed until all the CPUs
>>>have done a contex switch or executed user-level or been idle.
>>>This will gurantee that *this* nmi_handler is not in execution
>>>and can safely be freed.
>>>
>>>This of course is a very simplistic view of the things, there could
>>>be complications that I may have overlooked. But I would be happy
>>>to help out on this if you want.
>>>
>>>
>>>
>>This doesn't sound any simpler than what I am doing right now. In fact,
>>it sounds more complex. Am I correct? What I am doing is pretty simple
>>and correct. Maybe more complexity would be required if you couldn't
>>atomically update a pointer, but I think simplicity should win here.
>>
>>
>
>I would vote for simplicity and would normally agree with you here. But
>it seems to me that using RCU, you can avoid atmic operations
>and cache line bouncing of calling_nmi_handlers in the fast path
>(nmi interrupt handler). One could argue whether it is really
>a fast path or not, but if you are using it for profiling, I would
>say it is. No ?
>
I would vote against using it for profiling; profiling has its own
special fast-path, anyway. The NMI watchdog only gets hit once every
minute or so, it seems, so that seems suitable for this.
I've looked at the RCU code a little more, and I think I understand it
better. I think your scenario will work, if it's true that it won't be
called until all CPUs have done what you say. I'll look at it a little
more.
-Corey
Robert Love wrote:
>On Tue, 2002-10-22 at 09:02, Corey Minyard wrote:
>
>
>
>>I looked, and the rcu code relys on turning off interrupts to avoid
>>preemption. So it won't work.
>>
>>
>
>At least on the variant of RCU that is in 2.5, the RCU code does the
>read side by disabling preemption. Nothing else.
>
In 2.5.44, stock from kernel.org, rcu_process_callbacks() calls
local_irq_disable(). Is that just preemption disabling, now?
>The write side is the same with or without preemption - wait until all
>readers are quiescent, change the copy, etc.
>
>But anyhow, disabling interrupts should not affect NMIs, no?
>
You are correct. disabling preemption or interrupts has no effect on NMIs.
-Corey
On Tue, 2002-10-22 at 14:08, Corey Minyard wrote:
> In 2.5.44, stock from kernel.org, rcu_process_callbacks() calls
> local_irq_disable(). Is that just preemption disabling, now?
No, but rcu_process_callbacks() is for the copy update part.
Look at rcu_read_lock() and rcu_read_unlock()... those are the read
paths.
Of course, I can be very wrong about some of this, RCU grosses^Wconfuses
me. But the read paths do just seem to call rcu_read_lock/unlock which
just do a preempt_disable/enable. Otherwise the read path is entirely
transparent.
Robert Love
On Tue, Oct 22, 2002 at 01:29:55PM -0500, Corey Minyard wrote:
> I would vote against using it for profiling; profiling has it's own
> special fast-path, anyway.
But it would be good (less code, simpler, and even possibly for keeping
NMI watchdog ticking when oprofile is running) if we could merge the two
cases.
> The NMI watchdog only gets hit once every
> minute or so, it seems, so that seems suitable for this.
It can easily be much more frequent than that (though you could argue
this is a mis-setup).
> I've looked at the RCU code a little more, and I think I understand it
> better. I think your scenario will work, if it's true that it won't be
> called until all CPUs have done what you say. I'll look at it a little
> more.
Thanks for looking into this ...
regards
john
--
"This is mindless pedantism up with which I will not put."
- Donald Knuth on Pascal's lack of default: case statement
On Tue, Oct 22, 2002 at 08:30:16PM +0200, Corey Minyard wrote:
> Robert Love wrote:
> >At least on the variant of RCU that is in 2.5, the RCU code does the
> >read side by disabling preemption. Nothing else.
> >
> In 2.5.44, stock from kernel.org, rcu_process_callbacks() calls
> local_irq_disable(). Is that just preemption disabling, now?
No, that is to allow queueing of callbacks from irq context - see
call_rcu(). Since the queues are per-CPU, we don't need any spinlock.
rcu_process_callbacks() is always invoked from the RCU per-CPU tasklet,
so preemption doesn't come into picture. But irq disabling is needed
so that it doesn't race with call_rcu().
Only preemption related issue with RCU is that in the reader
side (in your case traversing the nmi handler list for invocation),
there should not be any preemption (not possible anyway in your case).
This is achieved by rcu_read_lock()/rcu_read_unlock() which essentially
disables/enables preemption.
The idea is that if you get preempted while holding reference to
some RCU protected data, it is not safe to invoke the RCU callback.
Once you get preempted, you can run on a different CPU and keeping
track of the preempted tasks become difficult. Besides preempted
tasks with low priority can delay RCU update for long periods.
Hence the disabling of preemption which is not worse than locks.
> >But anyhow, disabling interrupts should not affect NMIs, no?
> >
> You are correct. disabling preemption or interrupts has no effect on NMIs.
Yes.
Thanks
Dipankar