The functions prepare_set and post_set in arch/i386/kernel/cpu/mtrr/generic.c
wrap the spinlock set_atomicity_lock: prepare_set returns with the lock held,
and post_set releases the lock without acquiring it. Annotate these two
functions with __acquires and __releases so that sparse can check callers for
correct lock pairing, and so that sparse does not complain about the functions
themselves, which intentionally split the acquire and the release across the
pair.
Signed-off-by: Josh Triplett <[email protected]>
---
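For readers less familiar with sparse's context tracking, here is a minimal
sketch of the pattern these annotations express; it is not taken from the
patched file, and example_lock, example_prepare and example_finish are made-up
names for illustration. The __acquires()/__releases() macros come from
<linux/compiler.h> and expand to nothing unless __CHECKER__ is defined, so
they cost nothing outside a sparse run:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Returns with example_lock held; sparse treats this as acquiring it. */
static void example_prepare(void) __acquires(example_lock)
{
	spin_lock(&example_lock);
	/* ... set up state that must stay stable until example_finish() ... */
}

/* Drops the lock that example_prepare() left held. */
static void example_finish(void) __releases(example_lock)
{
	/* ... tear the state back down ... */
	spin_unlock(&example_lock);
}
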
arch/i386/kernel/cpu/mtrr/generic.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 169ac8e..0b61eed 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -243,7 +243,7 @@ static DEFINE_SPINLOCK(set_atomicity_loc
* has been called.
*/
-static void prepare_set(void)
+static void prepare_set(void) __acquires(set_atomicity_lock)
{
unsigned long cr0;
@@ -274,7 +274,7 @@ static void prepare_set(void)
mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}
-static void post_set(void)
+static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
__flush_tlb();
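
For context, callers pair the two functions directly, and with the annotations
in place sparse can verify that every prepare_set() on a path is matched by a
post_set(). A condensed, hypothetical sketch of such a caller follows; the
name example_set_all is made up and the body is not verbatim from generic.c:

static void example_set_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();			/* acquires set_atomicity_lock */

	/* ... program the MTRR registers while caches are disabled ... */

	post_set();			/* releases set_atomicity_lock */
	local_irq_restore(flags);
}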