2008-03-23 08:13:38

by Joe Perches

Subject: [PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only

Cleanups to standardize the formatting of the .h files.

Checkpatch now reports these files as "mostly" clean.

The remaining errors and warnings are ones checkpatch flags
inappropriately for asm files.

Ingo's code-quality script totals for include/asm-x86:

        Errors    LOC
Before:   1457   31320
After:     252   31729

Changes:

s/__asm__/asm/g
s/__volatile__/volatile/g
s/__inline__/inline/g
80 columns
no spaces before casts
do {} while macro formatting
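
As an illustration (hypothetical code, not taken from any of the
patches), the conversions turn code like:

static __inline__ int foo(int *x)
{
	__asm__ __volatile__("incl %0" : "+m" (*(volatile int *) x));
	return *x;
}
#define BAR(x) do { foo((int *) (x)); } while(0)

into:

static inline int foo(int *x)
{
	asm volatile("incl %0" : "+m" (*(volatile int *)x));
	return *x;
}
#define BAR(x) do { foo((int *)(x)); } while (0)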

No objdump -D code changes in x86 defconfig and allyesconfig builds.

There are __LINE__, __DATE__, and __TIME__ changes
in the objects, but no code changes as far as I can tell.
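
These data-only differences are expected: macros like BUG() embed
source coordinates as "i" immediates into data sections, so reflowing
a header shifts the recorded line numbers without changing any
instructions.  A simplified sketch of the pattern, using a hypothetical
section and macro name:

#define RECORD_LOC()						\
do {								\
	asm volatile(".pushsection .loc_table,\"a\"\n\t"	\
		     ".long %c0\n\t"	/* line number: data, not code */ \
		     ".popsection" : : "i" (__LINE__));		\
} while (0)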


2008-03-23 08:07:47

by Joe Perches

Subject: [PATCH 011/148] include/asm-x86/bug.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/bug.h | 34 +++++++++++++++++-----------------
1 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index 8d477a2..b69aa64 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -12,25 +12,25 @@
# define __BUG_C0 "2:\t.quad 1b, %c0\n"
#endif

-#define BUG() \
- do { \
- asm volatile("1:\tud2\n" \
- ".pushsection __bug_table,\"a\"\n" \
- __BUG_C0 \
- "\t.word %c1, 0\n" \
- "\t.org 2b+%c2\n" \
- ".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (sizeof(struct bug_entry))); \
- for(;;) ; \
- } while(0)
+#define BUG() \
+do { \
+ asm volatile("1:\tud2\n" \
+ ".pushsection __bug_table,\"a\"\n" \
+ __BUG_C0 \
+ "\t.word %c1, 0\n" \
+ "\t.org 2b+%c2\n" \
+ ".popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (sizeof(struct bug_entry))); \
+ for (;;) ; \
+} while (0)

#else
-#define BUG() \
- do { \
- asm volatile("ud2"); \
- for(;;) ; \
- } while(0)
+#define BUG() \
+do { \
+ asm volatile("ud2"); \
+ for (;;) ; \
+} while (0)
#endif

#endif /* !CONFIG_BUG */
--
1.5.4.rc2

2008-03-23 08:08:18

by Joe Perches

Subject: [PATCH 010/148] include/asm-x86/bitops.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/bitops.h | 95 ++++++++++++++++++++--------------------------
1 files changed, 41 insertions(+), 54 deletions(-)

diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index d10c501..a66143a 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,13 +23,13 @@
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
-#define ADDR "=m" (*(volatile long *) addr)
-#define BIT_ADDR "=m" (((volatile int *) addr)[nr >> 5])
+#define ADDR "=m" (*(volatile long *)addr)
+#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
#else
#define ADDR "+m" (*(volatile long *) addr)
-#define BIT_ADDR "+m" (((volatile int *) addr)[nr >> 5])
+#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
#endif
-#define BASE_ADDR "m" (*(volatile int *) addr)
+#define BASE_ADDR "m" (*(volatile int *)addr)

/**
* set_bit - Atomically set a bit in memory
@@ -48,9 +48,7 @@
*/
static inline void set_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
@@ -64,9 +62,7 @@ static inline void set_bit(int nr, volatile void *addr)
*/
static inline void __set_bit(int nr, volatile void *addr)
{
- asm volatile("bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
@@ -81,8 +77,7 @@ static inline void __set_bit(int nr, volatile void *addr)
*/
static inline void clear_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "btr %1,%2"
- : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/*
@@ -150,8 +145,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*/
static inline void change_bit(int nr, volatile void *addr)
{
- asm volatile(LOCK_PREFIX "btc %1,%2"
- : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+ asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
}

/**
@@ -167,9 +161,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
int oldbit;

asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
}
@@ -201,8 +193,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)

asm volatile("bts %2,%3\n\t"
"sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR
- : "Ir" (nr), BASE_ADDR);
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
return oldbit;
}

@@ -220,8 +211,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)

asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
}
@@ -241,8 +231,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)

asm volatile("btr %2,%3\n\t"
"sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR
- : "Ir" (nr), BASE_ADDR);
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
return oldbit;
}

@@ -253,8 +242,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)

asm volatile("btc %2,%3\n\t"
"sbb %0,%0"
- : "=r" (oldbit), BIT_ADDR
- : "Ir" (nr), BASE_ADDR);
+ : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);

return oldbit;
}
@@ -273,8 +261,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)

asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
- : "=r" (oldbit), ADDR
- : "Ir" (nr) : "memory");
+ : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
}
@@ -307,10 +294,10 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

-#define test_bit(nr,addr) \
- (__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? constant_test_bit((nr), (addr)) \
+ : variable_test_bit((nr), (addr)))

/**
* __ffs - find first set bit in word
@@ -320,9 +307,9 @@ static int test_bit(int nr, const volatile unsigned long *addr);
*/
static inline unsigned long __ffs(unsigned long word)
{
- __asm__("bsf %1,%0"
- :"=r" (word)
- :"rm" (word));
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "rm" (word));
return word;
}

@@ -334,9 +321,9 @@ static inline unsigned long __ffs(unsigned long word)
*/
static inline unsigned long ffz(unsigned long word)
{
- __asm__("bsf %1,%0"
- :"=r" (word)
- :"r" (~word));
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "r" (~word));
return word;
}

@@ -348,9 +335,9 @@ static inline unsigned long ffz(unsigned long word)
*/
static inline unsigned long __fls(unsigned long word)
{
- __asm__("bsr %1,%0"
- :"=r" (word)
- :"rm" (word));
+ asm("bsr %1,%0"
+ : "=r" (word)
+ : "rm" (word));
return word;
}

@@ -370,14 +357,14 @@ static inline int ffs(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
- __asm__("bsfl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
+ asm("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=r" (r) : "rm" (x), "r" (-1));
#else
- __asm__("bsfl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
+ asm("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
@@ -397,14 +384,14 @@ static inline int fls(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
- __asm__("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
#else
- __asm__("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
--
1.5.4.rc2
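
The test_bit() change in this patch is formatting only; the macro still
dispatches on __builtin_constant_p().  A self-contained sketch of that
dispatch pattern, with hypothetical names (the kernel's variable path
uses a "bt" instruction rather than plain C):

static inline int sketch_test_constant(int nr, const unsigned long *addr)
{
	return (addr[nr / (8 * sizeof(long))] >>
		(nr % (8 * sizeof(long)))) & 1;
}

static inline int sketch_test_variable(int nr, const unsigned long *addr)
{
	return sketch_test_constant(nr, addr);	/* stand-in for the asm */
}

#define sketch_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? sketch_test_constant((nr), (addr))		\
	 : sketch_test_variable((nr), (addr)))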

2008-03-23 08:08:38

by Joe Perches

Subject: [PATCH 012/148] include/asm-x86/byteorder.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/byteorder.h | 39 ++++++++++++++++++++++++---------------
1 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index fe2f2e5..e02ae2d 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -8,50 +8,59 @@

#ifdef __i386__

-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
- __asm__("bswap %0" : "=r" (x) : "0" (x));
+ asm("bswap %0" : "=r" (x) : "0" (x));
#else
- __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
- "rorl $16,%0\n\t" /* swap words */
- "xchgb %b0,%h0" /* swap higher bytes */
- :"=q" (x)
- : "0" (x));
+ asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ : "=q" (x)
+ : "0" (x));
#endif
return x;
}

-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
union {
- struct { __u32 a,b; } s;
+ struct {
+ __u32 a;
+ __u32 b;
+ } s;
__u64 u;
} v;
v.u = val;
#ifdef CONFIG_X86_BSWAP
- __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
#else
v.s.a = ___arch__swab32(v.s.a);
v.s.b = ___arch__swab32(v.s.b);
- __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+ asm("xchgl %0,%1"
+ : "=r" (v.s.a), "=r" (v.s.b)
+ : "0" (v.s.a), "1" (v.s.b));
#endif
return v.u;
}

#else /* __i386__ */

-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
- __asm__("bswapq %0" : "=r" (x) : "0" (x));
+ asm("bswapq %0"
+ : "=r" (x)
+ : "0" (x));
return x;
}

-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
- __asm__("bswapl %0" : "=r" (x) : "0" (x));
+ asm("bswapl %0"
+ : "=r" (x)
+ : "0" (x));
return x;
}

--
1.5.4.rc2
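
For reference, the xchgb/rorl/xchgb sequence in ___arch__swab32() above
computes an ordinary 32-bit byte swap; a portable C sketch of the same
result (not the kernel implementation):

static inline unsigned int swab32_sketch(unsigned int x)
{
	return ((x & 0x000000ffU) << 24) |	/* byte 0 -> byte 3 */
	       ((x & 0x0000ff00U) <<  8) |	/* byte 1 -> byte 2 */
	       ((x & 0x00ff0000U) >>  8) |	/* byte 2 -> byte 1 */
	       ((x & 0xff000000U) >> 24);	/* byte 3 -> byte 0 */
}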

2008-03-23 08:09:06

by Joe Perches

Subject: [PATCH 020/148] include/asm-x86/current_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/current_32.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
index d352485..5af9bdb 100644
--- a/include/asm-x86/current_32.h
+++ b/include/asm-x86/current_32.h
@@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}
-
+
#define current get_current()

#endif /* !(_I386_CURRENT_H) */
--
1.5.4.rc2

2008-03-23 08:09:29

by Joe Perches

Subject: [PATCH 013/148] include/asm-x86/cacheflush.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/cacheflush.h | 14 +++++++-------
1 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 90437d3..7ab5b52 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -14,18 +14,18 @@
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)

-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))

int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
- pgprot_t prot);
+ pgprot_t prot);

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
--
1.5.4.rc2

2008-03-23 08:09:53

by Joe Perches

Subject: [PATCH 021/148] include/asm-x86/current_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/current_64.h | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
index bc8adec..2d368ed 100644
--- a/include/asm-x86/current_64.h
+++ b/include/asm-x86/current_64.h
@@ -1,23 +1,23 @@
#ifndef _X86_64_CURRENT_H
#define _X86_64_CURRENT_H

-#if !defined(__ASSEMBLY__)
+#if !defined(__ASSEMBLY__)
struct task_struct;

#include <asm/pda.h>

-static inline struct task_struct *get_current(void)
-{
- struct task_struct *t = read_pda(pcurrent);
+static inline struct task_struct *get_current(void)
+{
+ struct task_struct *t = read_pda(pcurrent);
return t;
-}
+}

#define current get_current()

#else

#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
+#include <asm/asm-offsets.h>
#endif

#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
--
1.5.4.rc2

2008-03-23 08:10:23

by Joe Perches

Subject: [PATCH 014/148] include/asm-x86/checksum_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/checksum_32.h | 152 ++++++++++++++++++++---------------------
1 files changed, 75 insertions(+), 77 deletions(-)

diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 75194ab..52bbb0d 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
*/

asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);

/*
* Note: when you get a NULL pointer exception here this means someone
@@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the
* access_ok().
*/
-static __inline__
-__wsum csum_partial_copy_nocheck (const void *src, void *dst,
- int len, __wsum sum)
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
- return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

-static __inline__
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
might_sleep();
return csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ len, sum, err_ptr, NULL);
}

/*
@@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;

- __asm__ __volatile__(
- "movl (%1), %0 ;\n"
- "subl $4, %2 ;\n"
- "jbe 2f ;\n"
- "addl 4(%1), %0 ;\n"
- "adcl 8(%1), %0 ;\n"
- "adcl 12(%1), %0 ;\n"
-"1: adcl 16(%1), %0 ;\n"
- "lea 4(%1), %1 ;\n"
- "decl %2 ;\n"
- "jne 1b ;\n"
- "adcl $0, %0 ;\n"
- "movl %0, %2 ;\n"
- "shrl $16, %0 ;\n"
- "addw %w2, %w0 ;\n"
- "adcl $0, %0 ;\n"
- "notl %0 ;\n"
-"2: ;\n"
+ asm volatile("movl (%1), %0 ;\n"
+ "subl $4, %2 ;\n"
+ "jbe 2f ;\n"
+ "addl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0;\n"
+ "1: adcl 16(%1), %0 ;\n"
+ "lea 4(%1), %1 ;\n"
+ "decl %2 ;\n"
+ "jne 1b ;\n"
+ "adcl $0, %0 ;\n"
+ "movl %0, %2 ;\n"
+ "shrl $16, %0 ;\n"
+ "addw %w2, %w0 ;\n"
+ "adcl $0, %0 ;\n"
+ "notl %0 ;\n"
+ "2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl)
- : "1" (iph), "2" (ihl)
- : "memory");
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
return (__force __sum16)sum;
}

@@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)

static inline __sum16 csum_fold(__wsum sum)
{
- __asm__(
- "addl %1, %0 ;\n"
- "adcl $0xffff, %0 ;\n"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000)
- );
+ asm("addl %1, %0 ;\n"
+ "adcl $0xffff, %0 ;\n"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- __asm__(
- "addl %1, %0 ;\n"
- "adcl %2, %0 ;\n"
- "adcl %3, %0 ;\n"
- "adcl $0, %0 ;\n"
- : "=r" (sum)
- : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
- return sum;
+ asm("addl %1, %0 ;\n"
+ "adcl %2, %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr),
+ "g" ((len + proto) << 8), "0" (sum));
+ return sum;
}

/*
@@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

/*
@@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
{
- __asm__(
- "addl 0(%1), %0 ;\n"
- "adcl 4(%1), %0 ;\n"
- "adcl 8(%1), %0 ;\n"
- "adcl 12(%1), %0 ;\n"
- "adcl 0(%2), %0 ;\n"
- "adcl 4(%2), %0 ;\n"
- "adcl 8(%2), %0 ;\n"
- "adcl 12(%2), %0 ;\n"
- "adcl %3, %0 ;\n"
- "adcl %4, %0 ;\n"
- "adcl $0, %0 ;\n"
- : "=&r" (sum)
- : "r" (saddr), "r" (daddr),
- "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
+ asm("addl 0(%1), %0 ;\n"
+ "adcl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0 ;\n"
+ "adcl 0(%2), %0 ;\n"
+ "adcl 4(%2), %0 ;\n"
+ "adcl 8(%2), %0 ;\n"
+ "adcl 12(%2), %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl %4, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=&r" (sum)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));

return csum_fold(sum);
}
@@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
+ return csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);

if (len)
*err_ptr = -EFAULT;
--
1.5.4.rc2
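
The csum_fold() asm in this patch adds the two 16-bit halves of the
running sum with end-around carry and complements the result; a roughly
equivalent portable C sketch (not the kernel implementation):

static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (unsigned short)~sum;
}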

2008-03-23 08:10:49

by Joe Perches

Subject: [PATCH 022/148] include/asm-x86/desc_defs.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/desc_defs.h | 15 ++++++++-------
1 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index e33f078..eccb4ea 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -18,17 +18,19 @@
* incrementally. We keep the signature as a struct, rather than an union,
* so we can get rid of it transparently in the future -- glommer
*/
-// 8 byte segment descriptor
+/* 8 byte segment descriptor */
struct desc_struct {
union {
- struct { unsigned int a, b; };
+ struct {
+ unsigned int a;
+ unsigned int b;
+ };
struct {
u16 limit0;
u16 base0;
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
};
-
};
} __attribute__((packed));

@@ -39,7 +41,7 @@ enum {
GATE_TASK = 0x5,
};

-// 16byte gate
+/* 16byte gate */
struct gate_struct64 {
u16 offset_low;
u16 segment;
@@ -56,10 +58,10 @@ struct gate_struct64 {
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
- DESCTYPE_S = 0x10, /* !system */
+ DESCTYPE_S = 0x10, /* !system */
};

-// LDT or TSS descriptor in the GDT. 16 bytes.
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct ldttss_desc64 {
u16 limit0;
u16 base0;
@@ -84,7 +86,6 @@ struct desc_ptr {
unsigned long address;
} __attribute__((packed)) ;

-
#endif /* !__ASSEMBLY__ */

#endif
--
1.5.4.rc2

2008-03-23 08:11:16

by Joe Perches

Subject: [PATCH 015/148] include/asm-x86/checksum_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/checksum_64.h | 118 ++++++++++++++++++++---------------------
1 files changed, 57 insertions(+), 61 deletions(-)

diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index e5f7999..8bd861c 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,33 +1,31 @@
#ifndef _X86_64_CHECKSUM_H
#define _X86_64_CHECKSUM_H

-/*
- * Checksums for x86-64
- * Copyright 2002 by Andi Kleen, SuSE Labs
+/*
+ * Checksums for x86-64
+ * Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-x86/checksum.h
- */
+ */

#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

-/**
+/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum
- *
+ *
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline __sum16 csum_fold(__wsum sum)
{
- __asm__(
- " addl %1,%0\n"
- " adcl $0xffff,%0"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000)
- );
+ asm(" addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}

@@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum)
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
* iph: ipv4 header
* ihl: length of header / 4
- */
+ */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;

- asm( " movl (%1), %0\n"
- " subl $4, %2\n"
- " jbe 2f\n"
- " addl 4(%1), %0\n"
- " adcl 8(%1), %0\n"
- " adcl 12(%1), %0\n"
- "1: adcl 16(%1), %0\n"
- " lea 4(%1), %1\n"
- " decl %2\n"
- " jne 1b\n"
- " adcl $0, %0\n"
- " movl %0, %2\n"
- " shrl $16, %0\n"
- " addw %w2, %w0\n"
- " adcl $0, %0\n"
- " notl %0\n"
- "2:"
+ asm(" movl (%1), %0\n"
+ " subl $4, %2\n"
+ " jbe 2f\n"
+ " addl 4(%1), %0\n"
+ " adcl 8(%1), %0\n"
+ " adcl 12(%1), %0\n"
+ "1: adcl 16(%1), %0\n"
+ " lea 4(%1), %1\n"
+ " decl %2\n"
+ " jne 1b\n"
+ " adcl $0, %0\n"
+ " movl %0, %2\n"
+ " shrl $16, %0\n"
+ " addw %w2, %w0\n"
+ " adcl $0, %0\n"
+ " notl %0\n"
+ "2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl)
- : "1" (iph), "2" (ihl)
- : "memory");
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
return (__force __sum16)sum;
}

-/**
+/**
* csum_tcpup_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
- * Returns the pseudo header checksum the input data. Result is
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum the input data. Result is
* 32bit unfolded.
*/
static inline __wsum
@@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
- : "=r" (sum)
+ : "=r" (sum)
: "g" (daddr), "g" (saddr),
"g" ((len + proto)<<8), "0" (sum));
- return sum;
+ return sum;
}


-/**
+/**
* csum_tcpup_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
- * @sum: initial sum to be added in (32bit unfolded)
- *
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
* Returns the 16bit pseudo header checksum the input data already
* complemented and ready to be filled in.
*/
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len, unsigned short proto, __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto, __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

-/**
+/**
* csum_partial - Compute an internet checksum.
* @buff: buffer to be checksummed
* @len: length of buffer.
@@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr,
* Returns the 32bit unfolded internet checksum of the buffer.
* Before filling it in it needs to be csum_fold()'ed.
* buff should be aligned to a 64bit boundary if possible.
- */
+ */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
@@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);

/* Do not call this directly. Use the wrappers below */
extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
- int len,
- __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);


extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum isum, int *errp);
+ int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
- int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
+ int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);

/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user

-/**
+/**
* ip_compute_csum - Compute an 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
@@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len);
* @proto: protocol of packet
* @sum: initial sum (32bit unfolded) to be added in
*
- * Computes an IPv6 pseudo header checksum. This sum is added the checksum
+ * Computes an IPv6 pseudo header checksum. This sum is added the checksum
* into UDP/TCP packets and contains some link layer information.
* Returns the unfolded 32bit checksum.
*/
@@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
- "adcl $0,%0"
- : "=r" (a)
+ "adcl $0,%0"
+ : "=r" (a)
: "0" (a), "r" (b));
return a;
}

#endif
-
--
1.5.4.rc2

2008-03-23 08:11:42

by Joe Perches

Subject: [PATCH 002/148] include/asm-x86/alternative.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/alternative.h | 9 ++++-----
1 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index d26416b..1f6a9ca 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end) {}
+ void *locks, void *locks_end,
+ void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */
@@ -148,9 +148,8 @@ struct paravirt_patch_site;
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
-static inline void
-apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end)
+static inline void apply_paravirt(struct paravirt_patch_site *start,
+ struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
--
1.5.4.rc2

2008-03-23 08:12:10

by Joe Perches

Subject: [PATCH 001/148] include/asm-x86/acpi.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/acpi.h | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 7a72d6a..14411c9 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock);
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
- :"=a"(q32), "=d"(r32) \
- :"r"(d32), \
+ : "=a"(q32), "=d"(r32) \
+ : "r"(d32), \
"0"(n_lo), "1"(n_hi))


#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2 ;" \
"rcrl $1,%3;" \
- :"=r"(n_hi), "=r"(n_lo) \
- :"0"(n_hi), "1"(n_lo))
+ : "=r"(n_hi), "=r"(n_lo) \
+ : "0"(n_hi), "1"(n_lo))

#ifdef CONFIG_ACPI
extern int acpi_lapic;
--
1.5.4.rc2

2008-03-23 08:12:33

by Joe Perches

Subject: [PATCH 016/148] include/asm-x86/cmpxchg_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/cmpxchg_32.h | 253 ++++++++++++++++++++++--------------------
1 files changed, 132 insertions(+), 121 deletions(-)

diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index 959fad0..bf5a69d 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -8,9 +8,12 @@
* you need to test for the feature in boot_cpu_data.
*/

-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

-struct __xchg_dummy { unsigned long a[100]; };
+struct __xchg_dummy {
+ unsigned long a[100];
+};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
@@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; };
* of the instruction set reference 24319102.pdf. We need
* the reader side to see the coherent 64bit value.
*/
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
+static inline void __set_64bit(unsigned long long *ptr,
+ unsigned int low, unsigned int high)
{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
+ asm volatile("\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax", "dx", "memory");
}

-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
+static inline void __set_64bit_constant(unsigned long long *ptr,
+ unsigned long long value)
{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+ __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)

-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
+#define ll_low(x) *(((unsigned int *)&(x)) + 0)
+#define ll_high(x) *(((unsigned int *)&(x)) + 1)
+
+static inline void __set_64bit_var(unsigned long long *ptr,
+ unsigned long long value)
{
- __set_64bit(ptr,ll_low(value), ll_high(value));
+ __set_64bit(ptr, ll_low(value), ll_high(value));
}

-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
+#define set_64bit(ptr, value) \
+ (__builtin_constant_p((value)) \
+ ? __set_64bit_constant((ptr), (value)) \
+ : __set_64bit_var((ptr), (value)))

-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+#define _set_64bit(ptr, value) \
+ (__builtin_constant_p(value) \
+ ? __set_64bit(ptr, (unsigned int)(value), \
+ (unsigned int)((value) >> 32)) \
+ : __set_64bit(ptr, ll_low((value)), ll_high((value))))

/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
{
switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
}
return x;
}
@@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
#endif

#ifdef CONFIG_X86_CMPXCHG64
-#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
- (unsigned long long)(n)))
-#define cmpxchg64_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
- (unsigned long long)(n)))
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
@@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* isn't.
*/
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
- unsigned long old,
- unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}

static inline unsigned long long __cmpxchg64(volatile void *ptr,
- unsigned long long old, unsigned long long new)
+ unsigned long long old,
+ unsigned long long new)
{
unsigned long long prev;
- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
- unsigned long long old, unsigned long long new)
+ unsigned long long old,
+ unsigned long long new)
{
unsigned long long prev;
- __asm__ __volatile__("cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
+ asm volatile("cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
return prev;
}

@@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
+ unsigned long new, int size)
{
switch (size) {
case 1:
--
1.5.4.rc2
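
The cmpxchg() rework above is formatting only; the macro still returns
the value previously in memory, which is what makes the usual
compare-and-swap retry loop work.  A usage sketch (hypothetical
function, assuming the cmpxchg() macro from this header):

static inline void add_ulong_sketch(unsigned long *counter, unsigned long n)
{
	unsigned long old, prev;

	do {
		old = *counter;
		prev = cmpxchg(counter, old, old + n);
	} while (prev != old);	/* another CPU won the race; retry */
}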

2008-03-23 08:12:55

by Joe Perches

Subject: [PATCH 023/148] include/asm-x86/desc.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/desc.h | 61 ++++++++++++++++++++++++-----------------------
1 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 5b6a05d..268a012 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
}

static inline void pack_gate(gate_desc *gate, unsigned char type,
- unsigned long base, unsigned dpl, unsigned flags, unsigned short seg)
-
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
gate->b = (base & 0xffff0000) |
@@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr)
#define load_TR_desc() native_load_tr_desc()
#define load_gdt(dtr) native_load_gdt(dtr)
#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
-#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
+#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))

#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt

-#define write_ldt_entry(dt, entry, desc) \
- native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
- native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
+#define write_ldt_entry(dt, entry, desc) \
+ native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) \
+ native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) \
+ native_write_idt_entry(dt, entry, g)
#endif

static inline void native_write_idt_entry(gate_desc *idt, int entry,
@@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
{
desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
- (limit & 0x000f0000) | ((type & 0xff) << 8) |
- ((flags & 0xf) << 20);
+ (limit & 0x000f0000) | ((type & 0xff) << 8) |
+ ((flags & 0xf) << 20);
desc->p = 1;
}

@@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
desc->base3 = PTR_HIGH(addr);
#else
-
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
@@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
* last valid byte
*/
set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
- IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
+ sizeof(unsigned long) - 1);
write_gdt_entry(d, entry, &tss, DESC_TSS);
}

@@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
if (likely(entries == 0))
- __asm__ __volatile__("lldt %w0"::"q" (0));
+ asm volatile("lldt %w0"::"q" (0));
else {
unsigned cpu = smp_processor_id();
ldt_desc ldt;
@@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
DESC_LDT, entries * sizeof(ldt) - 1);
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
&ldt, DESC_LDT);
- __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
}
}

@@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}

-#define _LDT_empty(info) (\
- (info)->base_addr == 0 && \
- (info)->limit == 0 && \
- (info)->contents == 0 && \
- (info)->read_exec_only == 1 && \
- (info)->seg_32bit == 0 && \
- (info)->limit_in_pages == 0 && \
- (info)->seg_not_present == 1 && \
- (info)->useable == 0)
+#define _LDT_empty(info) \
+ ((info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0)

#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
@@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc)
}

static inline void _set_gate(int gate, unsigned type, void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
+ unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
@@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
* Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
*/
#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
- movb idx*8+4(gdt), lo_b; \
- movb idx*8+7(gdt), hi_b; \
- shll $16, base; \
- movw idx*8+2(gdt), lo_w;
+ movb idx * 8 + 4(gdt), lo_b; \
+ movb idx * 8 + 7(gdt), hi_b; \
+ shll $16, base; \
+ movw idx * 8 + 2(gdt), lo_w;


#endif /* __ASSEMBLY__ */
--
1.5.4.rc2

2008-03-23 08:13:18

by Joe Perches

Subject: [PATCH 017/148] include/asm-x86/cmpxchg_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/cmpxchg_64.h | 134 +++++++++++++++++++++--------------------
1 files changed, 69 insertions(+), 65 deletions(-)

diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 56f5b41..d9b26b9 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -3,7 +3,8 @@

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
+ (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))

@@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
{
switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
+ case 1:
+ asm volatile("xchgb %b0,%1"
+ : "=q" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 2:
+ asm volatile("xchgw %w0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 4:
+ asm volatile("xchgl %k0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
+ case 8:
+ asm volatile("xchgq %0,%1"
+ : "=r" (x)
+ : "m" (*__xg(ptr)), "0" (x)
+ : "memory");
+ break;
}
return x;
}
@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old, unsigned long new, int size)
+ unsigned long old,
+ unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 2:
- __asm__ __volatile__("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 4:
- __asm__ __volatile__("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
case 8:
- __asm__ __volatile__("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
+ asm volatile("cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
return prev;
}
return old;
@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,

#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
- ({ \
+({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
- })
+})
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
- ({ \
+({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
- })
+})

#endif
--
1.5.4.rc2

2008-03-23 08:14:05

by Joe Perches

Subject: [PATCH 030/148] include/asm-x86/e820_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/e820_64.h | 16 +++++++++-------
1 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index d57a8c8..4c6ad98 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -14,22 +14,24 @@
#include <linux/ioport.h>

#ifndef __ASSEMBLY__
-extern unsigned long find_e820_area(unsigned long start, unsigned long end,
+extern unsigned long find_e820_area(unsigned long start, unsigned long end,
unsigned long size, unsigned long align);
extern unsigned long find_e820_area_size(unsigned long start,
unsigned long *sizep,
unsigned long align);
-extern void add_memory_region(unsigned long start, unsigned long size,
+extern void add_memory_region(unsigned long start, unsigned long size,
int type);
extern void update_memory_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern void setup_memory_region(void);
-extern void contig_e820_setup(void);
+extern void contig_e820_setup(void);
extern unsigned long e820_end_of_ram(void);
extern void e820_reserve_resources(void);
extern void e820_mark_nosave_regions(void);
-extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
-extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end,
+ unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end,
+ unsigned type);
extern int e820_any_non_reserved(unsigned long start, unsigned long end);
extern int is_memory_any_valid(unsigned long start, unsigned long end);
extern int e820_all_non_reserved(unsigned long start, unsigned long end);
@@ -37,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end);
extern unsigned long e820_hole_size(unsigned long start, unsigned long end);

extern void e820_setup_gap(void);
-extern void e820_register_active_regions(int nid,
- unsigned long start_pfn, unsigned long end_pfn);
+extern void e820_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);

extern void finish_e820_parsing(void);

--
1.5.4.rc2

2008-03-23 08:14:29

by Joe Perches

Subject: [PATCH 029/148] include/asm-x86/e820_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/e820_32.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index e7207a6..43b1a8b 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -34,8 +34,8 @@ extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);
extern void init_iomem_resources(struct resource *code_resource,
- struct resource *data_resource,
- struct resource *bss_resource);
+ struct resource *data_resource,
+ struct resource *bss_resource);

#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
extern void e820_mark_nosave_regions(void);
--
1.5.4.rc2

2008-03-23 08:14:49

by Joe Perches

Subject: [PATCH 117/148] include/asm-x86/srat.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/srat.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index 165ab4b..f4bba13 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -1,5 +1,5 @@
/*
- * Some of the code in this file has been gleaned from the 64 bit
+ * Some of the code in this file has been gleaned from the 64 bit
* discontigmem support code base.
*
* Copyright (C) 2002, IBM Corp.
--
1.5.4.rc2

2008-03-23 08:15:18

by Joe Perches

Subject: [PATCH 134/148] include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/uaccess_64.h | 376 +++++++++++++++++++++++++-----------------
1 files changed, 227 insertions(+), 149 deletions(-)

diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b87eb4b..5bab0be 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -29,23 +29,27 @@
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)

-#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
+#define __addr_ok(addr) (!((unsigned long)(addr) & \
+ (current_thread_info()->addr_limit.seg)))

/*
* Uhhuh, this needs 65-bit arithmetic. We have a carry..
*/
-#define __range_not_ok(addr,size) ({ \
- unsigned long flag,roksum; \
- __chk_user_ptr(addr); \
- asm("# range_ok\n\r" \
- "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
- :"=&r" (flag), "=r" (roksum) \
- :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
- flag; })
+#define __range_not_ok(addr, size) \
+({ \
+ unsigned long flag, roksum; \
+ __chk_user_ptr(addr); \
+ asm("# range_ok\n\r" \
+ "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
+ : "=&r" (flag), "=r" (roksum) \
+ : "1" (addr), "g" ((long)(size)), \
+ "g" (current_thread_info()->addr_limit.seg)); \
+ flag; \
+})

-#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
+#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)

/*
* The exception table consists of pairs of addresses: the first is the
@@ -60,8 +64,7 @@
* on our cache or tlb entries.
*/

-struct exception_table_entry
-{
+struct exception_table_entry {
unsigned long insn, fixup;
};

@@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs);
* accesses to the same area of user memory).
*/

-#define __get_user_x(size,ret,x,ptr) \
- asm volatile("call __get_user_" #size \
- :"=a" (ret),"=d" (x) \
- :"c" (ptr) \
- :"r8")
+#define __get_user_x(size, ret, x, ptr) \
+ asm volatile("call __get_user_" #size \
+ : "=a" (ret),"=d" (x) \
+ : "c" (ptr) \
+ : "r8")
+
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */

-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
-#define get_user(x,ptr) \
-({ unsigned long __val_gu; \
- int __ret_gu; \
+#define get_user(x, ptr) \
+({ \
+ unsigned long __val_gu; \
+ int __ret_gu; \
__chk_user_ptr(ptr); \
- switch(sizeof (*(ptr))) { \
- case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
- case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
- case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
- case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
- default: __get_user_bad(); break; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_x(1, __ret_gu, __val_gu, ptr); \
+ break; \
+ case 2: \
+ __get_user_x(2, __ret_gu, __val_gu, ptr); \
+ break; \
+ case 4: \
+ __get_user_x(4, __ret_gu, __val_gu, ptr); \
+ break; \
+ case 8: \
+ __get_user_x(8, __ret_gu, __val_gu, ptr); \
+ break; \
+ default: \
+ __get_user_bad(); \
+ break; \
} \
(x) = (__force typeof(*(ptr)))__val_gu; \
__ret_gu; \
@@ -112,55 +128,73 @@ extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

-#define __put_user_x(size,ret,x,ptr) \
- asm volatile("call __put_user_" #size \
- :"=a" (ret) \
- :"c" (ptr),"d" (x) \
- :"r8")
+#define __put_user_x(size, ret, x, ptr) \
+ asm volatile("call __put_user_" #size \
+ :"=a" (ret) \
+ :"c" (ptr),"d" (x) \
+ :"r8")

-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

-#define __put_user_nocheck(x,ptr,size) \
+#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
- __put_user_size((x),(ptr),(size),__pu_err); \
+ __put_user_size((x), (ptr), (size), __pu_err); \
__pu_err; \
})


-#define __put_user_check(x,ptr,size) \
-({ \
- int __pu_err; \
- typeof(*(ptr)) __user *__pu_addr = (ptr); \
- switch (size) { \
- case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \
- case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \
- case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \
- case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \
- default: __put_user_bad(); \
- } \
- __pu_err; \
+#define __put_user_check(x, ptr, size) \
+({ \
+ int __pu_err; \
+ typeof(*(ptr)) __user *__pu_addr = (ptr); \
+ switch (size) { \
+ case 1: \
+ __put_user_x(1, __pu_err, x, __pu_addr); \
+ break; \
+ case 2: \
+ __put_user_x(2, __pu_err, x, __pu_addr); \
+ break; \
+ case 4: \
+ __put_user_x(4, __pu_err, x, __pu_addr); \
+ break; \
+ case 8: \
+ __put_user_x(8, __pu_err, x, __pu_addr); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+ __pu_err; \
})

-#define __put_user_size(x,ptr,size,retval) \
+#define __put_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
- case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
- case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
- case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
- default: __put_user_bad(); \
+ case 1: \
+ __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
+ break; \
+ case 2: \
+ __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
+ break; \
+ case 4: \
+ __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
+ break; \
+ case 8: \
+ __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
+ break; \
+ default: \
+ __put_user_bad(); \
} \
} while (0)

@@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
- asm volatile( \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size) \
+ asm volatile("1: mov"itype" %"rtype"1,%2\n" \
+ "2:\n" \
+ ".section .fixup, \"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+ : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
+
+
+#define __get_user_nocheck(x, ptr, size) \
({ \
int __gu_err; \
unsigned long __gu_val; \
- __get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force typeof(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -201,31 +234,39 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

-#define __get_user_size(x,ptr,size,retval) \
+#define __get_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
- case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
- case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
- case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
- default: (x) = __get_user_bad(); \
+ case 1: \
+ __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
+ break; \
+ case 2: \
+ __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
+ break; \
+ case 4: \
+ __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
+ break; \
+ case 8: \
+ __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
- asm volatile( \
- "1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(errno), "0"(err))
+ asm volatile("1: mov"itype" %2,%"rtype"1\n" \
+ "2:\n" \
+ ".section .fixup, \"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i"(errno), "0"(err))

/*
* Copy To/From Userspace
@@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
+{
int ret = 0;
if (!__builtin_constant_p(size))
- return copy_user_generic(dst,(__force void *)src,size);
- switch (size) {
- case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
+ return copy_user_generic(dst, (__force void *)src, size);
+ switch (size) {
+ case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
return ret;
- case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
+ case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ ret, "w", "w", "=r", 2);
return ret;
- case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
+ case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+ case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 8);
return ret;
- case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
- return ret;
case 10:
- __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
- if (unlikely(ret)) return ret;
- __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
- return ret;
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+ (u16 __user *)(8 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
case 16:
- __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
- if (unlikely(ret)) return ret;
- __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
- return ret;
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+ (u64 __user *)(8 + (char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
default:
- return copy_user_generic(dst,(__force void *)src,size);
+ return copy_user_generic(dst, (__force void *)src, size);
}
-}
+}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
+{
int ret = 0;
if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst,src,size);
- switch (size) {
- case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
+ return copy_user_generic((__force void *)dst, src, size);
+ switch (size) {
+ case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
return ret;
- case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
+ case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
return ret;
- case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
+ case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+ case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "ir", 8);
return ret;
- case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
- return ret;
case 10:
- __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
- if (unlikely(ret)) return ret;
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "ir", 10);
+ if (unlikely(ret))
+ return ret;
asm("":::"memory");
- __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
- return ret;
+ __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
case 16:
- __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
- if (unlikely(ret)) return ret;
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "ir", 16);
+ if (unlikely(ret))
+ return ret;
asm("":::"memory");
- __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
- return ret;
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "ir", 8);
+ return ret;
default:
- return copy_user_generic((__force void *)dst,src,size);
+ return copy_user_generic((__force void *)dst, src, size);
}
-}
+}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-{
+{
int ret = 0;
if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst,(__force void *)src,size);
- switch (size) {
- case 1: {
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+ switch (size) {
+ case 1: {
u8 tmp;
- __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
+ __get_user_asm(tmp, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
if (likely(!ret))
- __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
+ __put_user_asm(tmp, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
return ret;
}
- case 2: {
+ case 2: {
u16 tmp;
- __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
+ __get_user_asm(tmp, (u16 __user *)src,
+ ret, "w", "w", "=r", 2);
if (likely(!ret))
- __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
+ __put_user_asm(tmp, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
return ret;
}

- case 4: {
+ case 4: {
u32 tmp;
- __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
+ __get_user_asm(tmp, (u32 __user *)src,
+ ret, "l", "k", "=r", 4);
if (likely(!ret))
- __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
+ __put_user_asm(tmp, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
return ret;
}
- case 8: {
+ case 8: {
u64 tmp;
- __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
+ __get_user_asm(tmp, (u64 __user *)src,
+ ret, "q", "", "=r", 8);
if (likely(!ret))
- __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
+ __put_user_asm(tmp, (u64 __user *)dst,
+ ret, "q", "", "ir", 8);
return ret;
}
default:
- return copy_user_generic((__force void *)dst,(__force void *)src,size);
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
}
-}
+}

-__must_check long
+__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
+__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
@@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+ unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
@@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
}

#define ARCH_HAS_NOCACHE_UACCESS 1
-extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest);
+extern long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);

-static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_nocache(void *dst, const void __user *src,
+ unsigned size)
{
might_sleep();
return __copy_user_nocache(dst, src, size, 1);
}

-static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size)
+static inline int __copy_from_user_inatomic_nocache(void *dst,
+ const void __user *src,
+ unsigned size)
{
return __copy_user_nocache(dst, src, size, 0);
}
--
1.5.4.rc2
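
The get_user()/put_user() macros reformatted above lean on GCC's
statement-expression extension: a ({ ... }) block is an expression whose
value is that of its last statement, which is how a multi-statement macro
can hand back __ret_gu. A minimal standalone sketch of the shape
(CHECKED_DOUBLE is an invented name, and this is a GNU C extension, not
ISO C):

#include <stdio.h>

/* A statement expression evaluates to its final expression statement. */
#define CHECKED_DOUBLE(x) \
({ \
	int __v = (x); \
	__v * 2; /* value of the whole ({ ... }) expression */ \
})

int main(void)
{
	printf("%d\n", CHECKED_DOUBLE(21)); /* prints 42 */
	return 0;
}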

2008-03-23 08:15:46

by Joe Perches

[permalink] [raw]
Subject: [PATCH 135/148] include/asm-x86/unaligned.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/unaligned.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index 913598d..d270ffe 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -32,6 +32,6 @@
*
* Note that unaligned accesses can be very expensive on some architectures.
*/
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define put_unaligned(val, ptr) ((void)(*(ptr) = (val)))

#endif /* _ASM_X86_UNALIGNED_H */
--
1.5.4.rc2
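
The (void) cast in put_unaligned() above discards the assignment's value,
so the macro can only be used as a statement and the compiler stays quiet
about an unused result. A tiny sketch of the same idiom (STORE is a
placeholder name, not a kernel macro):

/* (void) discards the assignment's value; using STORE(...) inside a
 * larger expression is then a compile error rather than a quiet bug. */
#define STORE(val, ptr) ((void)(*(ptr) = (val)))

int main(void)
{
	int x;

	STORE(5, &x);		/* fine: used as a statement */
	/* int y = STORE(5, &x);  would not compile: void value */
	return x - 5;		/* exits 0 */
}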

2008-03-23 08:16:08

by Joe Perches

[permalink] [raw]
Subject: [PATCH 138/148] include/asm-x86/user_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/user_32.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index 6157da6..d6e51ed 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -100,10 +100,10 @@ struct user_regs_struct {
struct user{
/* We start with the registers, to mimic the way that "memory" is returned
from the ptrace(3,...) function. */
- struct user_regs_struct regs; /* Where the registers are actually stored */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
/* ptrace does not yet supply these. Someday.... */
int u_fpvalid; /* True if math co-processor being used. */
- /* for this mess. Not yet used. */
+ /* for this mess. Not yet used. */
struct user_i387_struct i387; /* Math Co-processor registers. */
/* The rest of this junk is to help gdb figure out what goes where */
unsigned long int u_tsize; /* Text segment size (pages). */
@@ -118,7 +118,7 @@ struct user{
int reserved; /* No longer used */
unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
- struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
+ struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
int u_debugreg[8];
--
1.5.4.rc2

2008-03-23 08:16:31

by Joe Perches

[permalink] [raw]
Subject: [PATCH 124/148] include/asm-x86/system.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/system.h | 223 ++++++++++++++++++++++++----------------------
1 files changed, 116 insertions(+), 107 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 9161b50..df7133c 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -38,35 +38,33 @@ do { \
*/ \
unsigned long ebx, ecx, edx, esi, edi; \
\
- asm volatile( \
- "pushfl \n\t" /* save flags */ \
- "pushl %%ebp \n\t" /* save EBP */ \
- "movl %%esp,%[prev_sp] \n\t" /* save ESP */ \
- "movl %[next_sp],%%esp \n\t" /* restore ESP */ \
- "movl $1f,%[prev_ip] \n\t" /* save EIP */ \
- "pushl %[next_ip] \n\t" /* restore EIP */ \
- "jmp __switch_to \n" /* regparm call */ \
- "1: \t" \
- "popl %%ebp \n\t" /* restore EBP */ \
- "popfl \n" /* restore flags */ \
+ asm volatile("pushfl\n\t" /* save flags */ \
+ "pushl %%ebp\n\t" /* save EBP */ \
+ "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
+ "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
+ "pushl %[next_ip]\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" /* regparm call */ \
+ "1:\t" \
+ "popl %%ebp\n\t" /* restore EBP */ \
+ "popfl\n" /* restore flags */ \
\
- /* output parameters */ \
- : [prev_sp] "=m" (prev->thread.sp), \
- [prev_ip] "=m" (prev->thread.ip), \
- "=a" (last), \
+ /* output parameters */ \
+ : [prev_sp] "=m" (prev->thread.sp), \
+ [prev_ip] "=m" (prev->thread.ip), \
+ "=a" (last), \
\
- /* clobbered output registers: */ \
- "=b" (ebx), "=c" (ecx), "=d" (edx), \
- "=S" (esi), "=D" (edi) \
- \
- /* input parameters: */ \
- : [next_sp] "m" (next->thread.sp), \
- [next_ip] "m" (next->thread.ip), \
- \
- /* regparm parameters for __switch_to(): */ \
- [prev] "a" (prev), \
- [next] "d" (next) \
- ); \
+ /* clobbered output registers: */ \
+ "=b" (ebx), "=c" (ecx), "=d" (edx), \
+ "=S" (esi), "=D" (edi) \
+ \
+ /* input parameters: */ \
+ : [next_sp] "m" (next->thread.sp), \
+ [next_ip] "m" (next->thread.ip), \
+ \
+ /* regparm parameters for __switch_to(): */ \
+ [prev] "a" (prev), \
+ [next] "d" (next)); \
} while (0)

/*
@@ -81,66 +79,77 @@ do { \
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

-#define __EXTRA_CLOBBER \
- , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15"
+#define __EXTRA_CLOBBER \
+ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11" \
+ , "r12", "r13", "r14", "r15"

/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
- "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
- "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
- "call __switch_to\n\t" \
- ".globl thread_return\n" \
- "thread_return:\n\t" \
- "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
- "movq %P[task_canary](%%rsi),%%r8\n\t" \
- "movq %%r8,%%gs:%P[pda_canary]\n\t" \
- "movq %P[thread_info](%%rsi),%%r8\n\t" \
- LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
- "movq %%rax,%%rdi\n\t" \
- "jc ret_from_fork\n\t" \
- RESTORE_CONTEXT \
- : "=a" (last) \
- : [next] "S" (next), [prev] "D" (prev), \
- [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
- [ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [tif_fork] "i" (TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, stack)), \
- [task_canary] "i" (offsetof(struct task_struct, stack_canary)),\
- [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)), \
- [pda_canary] "i" (offsetof(struct x8664_pda, stack_canary))\
- : "memory", "cc" __EXTRA_CLOBBER)
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ ".globl thread_return\n" \
+ "thread_return:\n\t" \
+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq %P[task_canary](%%rsi),%%r8\n\t" \
+ "movq %%r8,%%gs:%P[pda_canary]\n\t" \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "jc ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] \
+ "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] \
+ "i" (offsetof(struct thread_info, flags)), \
+ [tif_fork] \
+ "i" (TIF_FORK), \
+ [thread_info] \
+ "i" (offsetof(struct task_struct, stack)), \
+ [task_canary] \
+ "i" (offsetof(struct task_struct, stack_canary)), \
+ [pda_pcurrent] \
+ "i" (offsetof(struct x8664_pda, pcurrent)), \
+ [pda_canary] \
+ "i" (offsetof(struct x8664_pda, stack_canary)) \
+ : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__
-#define _set_base(addr, base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while (0)
-
-#define _set_limit(addr, limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while (0)
-
-#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
-#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+#define _set_base(addr, base) \
+do { \
+ unsigned long __pr; \
+ asm volatile ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ : "=&d" (__pr) \
+ : "m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base)); \
+} while (0)
+
+#define _set_limit(addr, limit) \
+do { \
+ unsigned long __lr; \
+ asm volatile ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ : "=&d" (__lr) \
+ : "m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit)); \
+} while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)), (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)), ((limit) - 1))

extern void load_gs_index(unsigned);

@@ -150,35 +159,34 @@ extern void load_gs_index(unsigned);
*/
#define loadsegment(seg, value) \
asm volatile("\n" \
- "1:\t" \
- "movl %k0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "movl %k1, %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : :"r" (value), "r" (0))
+ "1:\t" \
+ "movl %k0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "movl %k1, %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b,3b) \
+ : :"r" (value), "r" (0))


/*
* Save a segment register away
*/
-#define savesegment(seg, value) \
+#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))

static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+ return __limit + 1;
}

static inline void native_clts(void)
{
- asm volatile ("clts");
+ asm volatile("clts");
}

/*
@@ -193,43 +201,43 @@ static unsigned long __force_order;
static inline unsigned long native_read_cr0(void)
{
unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}

static inline void native_write_cr0(unsigned long val)
{
- asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}

static inline void native_write_cr2(unsigned long val)
{
- asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}

static inline void native_write_cr3(unsigned long val)
{
- asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
unsigned long val;
- asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}

@@ -241,7 +249,7 @@ static inline unsigned long native_read_cr4_safe(void)
#ifdef CONFIG_X86_32
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
- _ASM_EXTABLE(1b,2b)
+ _ASM_EXTABLE(1b, 2b)
: "=r" (val), "=m" (__force_order) : "0" (0));
#else
val = native_read_cr4();
@@ -251,7 +259,7 @@ static inline unsigned long native_read_cr4_safe(void)

static inline void native_write_cr4(unsigned long val)
{
- asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+ asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
@@ -272,6 +280,7 @@ static inline void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
@@ -304,7 +313,7 @@ static inline void clflush(volatile void *__p)
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

-#define nop() __asm__ __volatile__ ("nop")
+#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);
@@ -403,7 +412,7 @@ void default_idle(void);
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
--
1.5.4.rc2
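
Several hunks above (_set_base, _set_limit) move macro bodies into the
canonical do { ... } while (0) wrapper. The wrapper turns a
multi-statement body into a single statement, so the macro composes
safely with an unbraced if/else. A self-contained sketch (SWAP_INT is
illustrative only):

#include <stdio.h>

/* Without do/while(0), only the first statement would be conditional,
 * and the trailing semicolon would end the if before the else. */
#define SWAP_INT(a, b) \
do { \
	int __tmp = (a); \
	(a) = (b); \
	(b) = __tmp; \
} while (0)

int main(void)
{
	int x = 1, y = 2;

	if (x < y)
		SWAP_INT(x, y);	/* expands to one statement; else-safe */
	else
		puts("already ordered");
	printf("%d %d\n", x, y);	/* prints "2 1" */
	return 0;
}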

2008-03-23 08:16:51

by Joe Perches

[permalink] [raw]
Subject: [PATCH 139/148] include/asm-x86/user32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/user32.h | 7 ++++---
1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
index f769872..a3d9100 100644
--- a/include/asm-x86/user32.h
+++ b/include/asm-x86/user32.h
@@ -1,7 +1,8 @@
#ifndef USER32_H
#define USER32_H 1

-/* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */
+/* IA32 compatible user structures for ptrace.
+ * These should be used for 32bit coredumps too. */

struct user_i387_ia32_struct {
u32 cwd;
@@ -42,9 +43,9 @@ struct user_regs_struct32 {
};

struct user32 {
- struct user_regs_struct32 regs; /* Where the registers are actually stored */
+ struct user_regs_struct32 regs; /* Where the registers are actually stored */
int u_fpvalid; /* True if math co-processor being used. */
- /* for this mess. Not yet used. */
+ /* for this mess. Not yet used. */
struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
/* The rest of this junk is to help gdb figure out what goes where */
__u32 u_tsize; /* Text segment size (pages). */
--
1.5.4.rc2

2008-03-23 08:17:10

by Joe Perches

[permalink] [raw]
Subject: [PATCH 043/148] include/asm-x86/i387.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/i387.h | 94 ++++++++++++++++++++++-------------------------
1 files changed, 44 insertions(+), 50 deletions(-)

diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index a365de5..d552843 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -42,7 +42,7 @@ static inline void tolerant_fwait(void)
{
asm volatile("1: fwait\n"
"2:\n"
- _ASM_EXTABLE(1b,2b));
+ _ASM_EXTABLE(1b, 2b));
}

static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
@@ -55,7 +55,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- _ASM_EXTABLE(1b,3b)
+ _ASM_EXTABLE(1b, 3b)
: [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
: [fx] "r" (fx), "m" (*fx), "0" (0));
@@ -77,11 +77,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
if (unlikely(fx->swd & X87_FSW_ES))
- asm volatile("fnclex");
+ asm volatile("fnclex");
alternative_input(ASM_NOP8 ASM_NOP2,
- " emms\n" /* clear stack tags */
- " fildl %%gs:0", /* load to clear state */
- X86_FEATURE_FXSAVE_LEAK);
+ " emms\n" /* clear stack tags */
+ " fildl %%gs:0", /* load to clear state */
+ X86_FEATURE_FXSAVE_LEAK);
}

static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
@@ -94,14 +94,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- _ASM_EXTABLE(1b,3b)
+ _ASM_EXTABLE(1b, 3b)
: [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "0" (0));
#endif
- if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct)))
+ if (unlikely(err) &&
+ __clear_user(fx, sizeof(struct i387_fxsave_struct)))
err = -EFAULT;
/* No need to clear here because the caller clears USED_MATH */
return err;
@@ -116,21 +117,21 @@ static inline void __save_init_fpu(struct task_struct *tsk)
#if 0
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
- __asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ asm volatile("fxsaveq %0"
+ : "=m" (tsk->thread.xstate->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
- __asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ asm volatile("rex64/fxsave %0"
+ : "=m" (tsk->thread.xstate->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
- __asm__ __volatile__("rex64/fxsave (%1)"
- : "=m" (tsk->thread.xstate->fxsave)
- : "cdaSDb" (&tsk->thread.xstate->fxsave));
+ asm volatile("rex64/fxsave (%1)"
+ : "=m" (tsk->thread.xstate->fxsave)
+ : "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
clear_fpu_state(&tsk->thread.xstate->fxsave);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
@@ -146,7 +147,7 @@ static inline int save_i387(struct _fpstate __user *buf)
int err = 0;

BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
- sizeof(tsk->thread.xstate->fxsave));
+ sizeof(tsk->thread.xstate->fxsave));

if ((unsigned long)buf % 16)
printk("save_i387: bad fpstate %p\n", buf);
@@ -155,8 +156,10 @@ static inline int save_i387(struct _fpstate __user *buf)
return 0;
clear_used_math(); /* trigger finit */
if (task_thread_info(tsk)->status & TS_USEDFPU) {
- err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
- if (err) return err;
+ err = save_i387_checking((struct i387_fxsave_struct __user *)
+ buf);
+ if (err)
+ return err;
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
@@ -190,14 +193,12 @@ static inline void tolerant_fwait(void)
static inline void restore_fpu(struct task_struct *tsk)
{
/*
- * The "nop" is needed to make the instructions the same
- * length.
+ * The "nop" is needed to make the instructions the same length.
*/
- alternative_input(
- "nop ; frstor %1",
- "fxrstor %1",
- X86_FEATURE_FXSR,
- "m" (tsk->thread.xstate->fxsave));
+ alternative_input("nop ; frstor %1",
+ "fxrstor %1",
+ X86_FEATURE_FXSR,
+ "m" (tsk->thread.xstate->fxsave));
}

/* We need a safe address that is cheap to find and that is already
@@ -216,22 +217,21 @@ static inline void __save_init_fpu(struct task_struct *tsk)
{
/* Use more nops than strictly needed in case the compiler
varies code */
- alternative_input(
- "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
- "fxsave %[fx]\n"
- "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
- X86_FEATURE_FXSR,
- [fx] "m" (tsk->thread.xstate->fxsave),
- [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+ alternative_input("fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
+ "fxsave %[fx]\n"
+ "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
+ X86_FEATURE_FXSR,
+ [fx] "m" (tsk->thread.xstate->fxsave),
+ [fsw] "m" (tsk->thread.xstate->fxsave.swd)
+ : "memory");
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */
- alternative_input(
- GENERIC_NOP8 GENERIC_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (safe_address));
+ alternative_input(GENERIC_NOP8 GENERIC_NOP2,
+ "emms\n\t" /* clear stack tags */
+ "fildl %[addr]", /* set F?P to defined value */
+ X86_FEATURE_FXSAVE_LEAK,
+ [addr] "m" (safe_address));
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

@@ -322,29 +322,23 @@ static inline void clear_fpu(struct task_struct *tsk)
*/
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
- if (cpu_has_fxsr) {
+ if (cpu_has_fxsr)
return tsk->thread.xstate->fxsave.cwd;
- } else {
- return (unsigned short) tsk->thread.xstate->fsave.cwd;
- }
+ return (unsigned short)tsk->thread.xstate->fsave.cwd;
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
- if (cpu_has_fxsr) {
+ if (cpu_has_fxsr)
return tsk->thread.xstate->fxsave.swd;
- } else {
- return (unsigned short) tsk->thread.xstate->fsave.swd;
- }
+ return (unsigned short)tsk->thread.xstate->fsave.swd;
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
- if (cpu_has_xmm) {
+ if (cpu_has_xmm)
return tsk->thread.xstate->fxsave.mxcsr;
- } else {
- return MXCSR_DEFAULT;
- }
+ return MXCSR_DEFAULT;
}

#endif /* _ASM_X86_I387_H */
--
1.5.4.rc2
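
The last three hunks replace if/else pairs with an early return, the
preferred shape when one branch simply returns. The same transformation
on a standalone function, with invented names (not the kernel's fxsave
fields):

#include <stdio.h>

/* Before: if (cond) { return a; } else { return b; }
 * After: the else and its braces disappear. */
static unsigned short pick_cwd(int has_fxsr, unsigned short fxsave_cwd,
			       unsigned long fsave_cwd)
{
	if (has_fxsr)
		return fxsave_cwd;
	return (unsigned short)fsave_cwd;
}

int main(void)
{
	printf("%u %u\n", pick_cwd(1, 0x37f, 0xffff037fUL),
	       pick_cwd(0, 0x37f, 0xffff037fUL));
	return 0;
}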

2008-03-23 08:17:33

by Joe Perches

[permalink] [raw]
Subject: [PATCH 140/148] include/asm-x86/user_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/user_64.h | 16 +++++++++-------
1 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index 9636164..6037b63 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -45,12 +45,13 @@
*/

/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
- as the 32bit format defined by Intel, except that the selector:offset pairs for
- data and eip are replaced with flat 64bit pointers. */
+ as the 32bit format defined by Intel, except that the selector:offset pairs
+ for data and eip are replaced with flat 64bit pointers. */
struct user_i387_struct {
unsigned short cwd;
unsigned short swd;
- unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
+ unsigned short twd; /* Note this is not the same as
+ the 32bit/x87/FSAVE twd */
unsigned short fop;
__u64 rip;
__u64 rdp;
@@ -97,13 +98,14 @@ struct user_regs_struct {
/* When the kernel dumps core, it starts by dumping the user struct -
this will be used by gdb to figure out where the data and stack segments
are within the file, and what virtual addresses to use. */
-struct user{
+
+struct user {
/* We start with the registers, to mimic the way that "memory" is returned
from the ptrace(3,...) function. */
- struct user_regs_struct regs; /* Where the registers are actually stored */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
/* ptrace does not yet supply these. Someday.... */
int u_fpvalid; /* True if math co-processor being used. */
- /* for this mess. Not yet used. */
+ /* for this mess. Not yet used. */
int pad0;
struct user_i387_struct i387; /* Math Co-processor registers. */
/* The rest of this junk is to help gdb figure out what goes where */
@@ -120,7 +122,7 @@ struct user{
int pad1;
unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
- struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
+ struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
unsigned long u_debugreg[8];
--
1.5.4.rc2

2008-03-23 08:17:51

by Joe Perches

[permalink] [raw]
Subject: [PATCH 055/148] include/asm-x86/irqflags.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/irqflags.h | 30 +++++++++++++-----------------
1 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 92021c1..c242527 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void)
{
unsigned long flags;

- __asm__ __volatile__(
- "# __raw_save_flags\n\t"
- "pushf ; pop %0"
- : "=g" (flags)
- : /* no input */
- : "memory"
- );
+ asm volatile("# __raw_save_flags\n\t"
+ "pushf ; pop %0"
+ : "=g" (flags)
+ : /* no input */
+ : "memory");

return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
- __asm__ __volatile__(
- "push %0 ; popf"
- : /* no output */
- :"g" (flags)
- :"memory", "cc"
- );
+ asm volatile("push %0 ; popf"
+ : /* no output */
+ :"g" (flags)
+ :"memory", "cc");
}

static inline void native_irq_disable(void)
@@ -131,11 +127,11 @@ static inline unsigned long __raw_local_irq_save(void)
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)

-#define raw_local_irq_save(flags) \
- do { (flags) = __raw_local_irq_save(); } while (0)
+#define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
--
1.5.4.rc2
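
native_save_fl() above reads EFLAGS by pushing it and popping the value
into a C variable through an extended-asm output operand. A userspace
sketch of the same idiom (assumes an x86 target and GCC/Clang extended
asm):

#include <stdio.h>

static unsigned long save_flags(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0"	/* push EFLAGS, pop into flags */
		     : "=g" (flags)	/* output: register or memory */
		     : /* no input */
		     : "memory");
	return flags;
}

int main(void)
{
	printf("eflags = %#lx\n", save_flags());
	return 0;
}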

2008-03-23 08:18:12

by Joe Perches

[permalink] [raw]
Subject: [PATCH 141/148] include/asm-x86/vdso.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/vdso.h | 16 ++++++++++------
1 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 9bb8689..6ac2a06 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -8,9 +8,11 @@ extern const char VDSO64_PRELINK[];
* Given a pointer to the vDSO image, find the pointer to VDSO64_name
* as that symbol is defined in the vDSO sources or linker script.
*/
-#define VDSO64_SYMBOL(base, name) ({ \
- extern const char VDSO64_##name[]; \
- (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); })
+#define VDSO64_SYMBOL(base, name) \
+({ \
+ extern const char VDSO64_##name[]; \
+ (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
+})
#endif

#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
@@ -20,9 +22,11 @@ extern const char VDSO32_PRELINK[];
* Given a pointer to the vDSO image, find the pointer to VDSO32_name
* as that symbol is defined in the vDSO sources or linker script.
*/
-#define VDSO32_SYMBOL(base, name) ({ \
- extern const char VDSO32_##name[]; \
- (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); })
+#define VDSO32_SYMBOL(base, name) \
+({ \
+ extern const char VDSO32_##name[]; \
+ (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+})
#endif

/*
--
1.5.4.rc2
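
VDSO64_SYMBOL()/VDSO32_SYMBOL() compute a runtime address from link-time
symbols: the symbol's offset from the image's prelink address, added to
wherever the image is actually mapped. A self-contained sketch of that
arithmetic with made-up addresses (the IMG_* names are invented; the
real macros use linker-provided symbols, and ({ ... }) is a GNU C
extension):

#include <stdio.h>

/* Pretend the image linked at 0x1000 with a symbol at 0x1040; mapped
 * at `base`, the symbol then lives at base + (sym - prelink). */
#define IMG_PRELINK 0x1000UL
#define IMG_SYM     0x1040UL

#define IMG_SYMBOL(base) \
({ \
	(void *)(IMG_SYM - IMG_PRELINK + (unsigned long)(base)); \
})

int main(void)
{
	char buf[0x100];
	char *p = IMG_SYMBOL(buf);

	printf("base=%p sym=%p offset=0x%lx\n",
	       (void *)buf, (void *)p, (unsigned long)(p - buf));
	return 0;
}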

2008-03-23 08:18:33

by Joe Perches

[permalink] [raw]
Subject: [PATCH 137/148] include/asm-x86/unistd_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/unistd_64.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 3883ceb..fe26e36 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -2,7 +2,7 @@
#define _ASM_X86_64_UNISTD_H_

#ifndef __SYSCALL
-#define __SYSCALL(a,b)
+#define __SYSCALL(a, b)
#endif

/*
--
1.5.4.rc2

2008-03-23 08:18:57

by Joe Perches

[permalink] [raw]
Subject: [PATCH 007/148] include/asm-x86/atomic_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/atomic_64.h | 251 ++++++++++++++++++++-----------------------
1 files changed, 119 insertions(+), 132 deletions(-)

diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 2d20a7a..3e0cd7d 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -22,140 +22,135 @@
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
-typedef struct { int counter; } atomic_t;
+typedef struct {
+ int counter;
+} atomic_t;

#define ATOMIC_INIT(i) { (i) }

/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically reads the value of @v.
- */
+ */
#define atomic_read(v) ((v)->counter)

/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
- *
+ *
* Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))

/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v.
*/
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}

/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}

/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}

/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "incl %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}

/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "decl %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}

/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "decl %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}

/**
- * atomic_inc_and_test - increment and test
+ * atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "incl %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}

@@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}

@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}

-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- return atomic_add_return(-i,v);
+ return atomic_add_return(-i, v);
}

-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* An 64bit atomic type */

-typedef struct { long counter; } atomic64_t;
+typedef struct {
+ long counter;
+} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t;
*
* Atomically sets the value of @v to @i.
*/
-#define atomic64_set(v,i) (((v)->counter) = (i))
+#define atomic64_set(v, i) (((v)->counter) = (i))

/**
* atomic64_add - add integer to atomic64 variable
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t;
*
* Atomically adds @i to @v.
*/
-static __inline__ void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}

/**
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
*
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long i, atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ asm volatile(LOCK_PREFIX "subq %1,%0"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
}

/**
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "subq %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}

@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
-static __inline__ void atomic64_inc(atomic64_t *v)
+static inline void atomic64_inc(atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}

/**
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
-static __inline__ void atomic64_dec(atomic64_t *v)
+static inline void atomic64_dec(atomic64_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ asm volatile(LOCK_PREFIX "decq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
}

/**
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+static inline int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "decq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "decq %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}

@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+static inline int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "incq %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "incq %0; sete %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
return c != 0;
}

@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+static inline int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "addq %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
return c;
}

@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddq %0, %1;"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}

-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+static inline long atomic64_sub_return(long i, atomic64_t *v)
{
- return atomic64_add_return(-i,v);
+ return atomic64_add_return(-i, v);
}

-#define atomic64_inc_return(v) (atomic64_add_return(1,v))
-#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
+#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))

-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
* atomic_add_unless - add unless the number is a given value
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
@@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+#define atomic_clear_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "andl %0,%1" \
+ : : "r" (~(mask)), "m" (*(addr)) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "orl %0,%1" \
+ : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+ : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
--
1.5.4.rc2
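
Every converted asm statement in this file follows one pattern: a
lock-prefixed read-modify-write instruction with the counter as a memory
operand. A userspace sketch (x86 with GCC/Clang extended asm; the file's
"=m" output plus matching "m" input works around old gcc constraint
handling, and the single "+m" below is the modern spelling of the same
thing):

#include <stdio.h>

typedef struct {
	int counter;
} my_atomic_t;

static inline void my_atomic_inc(my_atomic_t *v)
{
	asm volatile("lock; incl %0"	/* atomic on SMP x86 */
		     : "+m" (v->counter));
}

int main(void)
{
	my_atomic_t a = { 41 };

	my_atomic_inc(&a);
	printf("%d\n", a.counter);	/* prints 42 */
	return 0;
}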

2008-03-23 08:19:18

by Joe Perches

[permalink] [raw]
Subject: [PATCH 056/148] include/asm-x86/kdebug.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/kdebug.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 3523124..96651bb 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -24,12 +24,12 @@ enum die_val {
};

extern void printk_address(unsigned long address, int reliable);
-extern void die(const char *,struct pt_regs *,long);
+extern void die(const char *, struct pt_regs *, long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);
extern void __show_registers(struct pt_regs *, int all);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
- unsigned long *sp, unsigned long bp);
+ unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs);
extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
--
1.5.4.rc2

2008-03-23 08:19:45

by Joe Perches

[permalink] [raw]
Subject: [PATCH 143/148] include/asm-x86/vm86.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/vm86.h | 9 ++++++---
1 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index c92fe4a..a2be241 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -42,9 +42,11 @@
#define VM86_ARG(retval) ((retval) >> 8)

#define VM86_SIGNAL 0 /* return due to signal */
-#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
+#define VM86_UNKNOWN 1 /* unhandled GP fault
+ - IO-instruction or similar */
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
-#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
+#define VM86_STI 3 /* sti/popf/iret instruction enabled
+ virtual interrupts */

/*
* Additional return values when invoking new vm86()
@@ -205,7 +207,8 @@ void release_vm86_irqs(struct task_struct *);
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)

-static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
+{
return 0;
}

--
1.5.4.rc2
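
The VM86_* values above encode both a reason and an argument in one int; VM86_TYPE() (defined just above the quoted hunk as the low byte) pairs with VM86_ARG(). A small decoding sketch, with the macros restated so it compiles on its own:

#include <stdio.h>

#define VM86_TYPE(retval)	((retval) & 0xff)	/* from the header, outside the hunk */
#define VM86_ARG(retval)	((retval) >> 8)
#define VM86_INTx	2	/* int3/int x instruction (ARG = x) */

int main(void)
{
	int retval = (0x21 << 8) | VM86_INTx;	/* guest exited on "int 0x21" */

	if (VM86_TYPE(retval) == VM86_INTx)
		printf("guest executed int 0x%02x\n", VM86_ARG(retval));
	return 0;
}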

2008-03-23 08:20:08

by Joe Perches

[permalink] [raw]
Subject: [PATCH 048/148] include/asm-x86/io_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/io_64.h | 110 +++++++++++++++++++++++++++++------------------
1 files changed, 68 insertions(+), 42 deletions(-)

diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index 084d60c..c3346bf 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -58,60 +58,75 @@ static inline void slow_down_io(void)
/*
* Talk about misusing macros..
*/
-#define __OUT1(s,x) \
+#define __OUT1(s, x) \
static inline void out##s(unsigned x value, unsigned short port) {

-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+#define __OUT2(s, s1, s2) \
+asm volatile ("out" #s " %" s1 "0,%" s2 "1"

#ifndef REALLY_SLOW_IO
#define REALLY_SLOW_IO
#define UNSET_REALLY_SLOW_IO
#endif

-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
- slow_down_io(); }
+#define __OUT(s, s1, x) \
+ __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
+ } \
+ __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
+ slow_down_io(); \
+}

-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) \
+{ \
+ RETURN_TYPE _v;

-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+#define __IN2(s, s1, s2) \
+ asm volatile ("in" #s " %" s2 "1,%" s1 "0"

-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \
-__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
- slow_down_io(); return _v; }
+#define __IN(s, s1, i...) \
+ __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
+ return _v; \
+ } \
+ __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
+ slow_down_io(); \
+ return _v; }

#ifdef UNSET_REALLY_SLOW_IO
#undef REALLY_SLOW_IO
#endif

-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void *addr, \
+ unsigned long count) \
+{ \
+ asm volatile ("rep ; ins" #s \
+ : "=D" (addr), "=c" (count) \
+ : "d" (port), "0" (addr), "1" (count)); \
+}

-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void *addr, \
+ unsigned long count) \
+{ \
+ asm volatile ("rep ; outs" #s \
+ : "=S" (addr), "=c" (count) \
+ : "d" (port), "0" (addr), "1" (count)); \
+}

#define RETURN_TYPE unsigned char
-__IN(b,"")
+__IN(b, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
-__IN(w,"")
+__IN(w, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
-__IN(l,"")
+__IN(l, "")
#undef RETURN_TYPE

-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
+__OUT(b, "b", char)
+__OUT(w, "w", short)
+__OUT(l, , int)

__INS(b)
__INS(w)
@@ -132,12 +147,12 @@ __OUTS(l)
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}

-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
@@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr)
{
return *(__force volatile __u8 *)addr;
}
+
static inline __u16 __readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)addr;
}
+
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)addr;
}
+
static inline __u64 __readq(const volatile void __iomem *addr)
{
return *(__force volatile __u64 *)addr;
}
+
#define readb(x) __readb(x)
#define readw(x) __readw(x)
#define readl(x) __readl(x)
@@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
}
+
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
}
+
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)addr = b;
}
+
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
*(__force volatile __u16 *)addr = b;
}
-#define writeq(val,addr) __writeq((val),(addr))
-#define writel(val,addr) __writel((val),(addr))
-#define writew(val,addr) __writew((val),(addr))
-#define writeb(val,addr) __writeb((val),(addr))
+
+#define writeq(val, addr) __writeq((val), (addr))
+#define writel(val, addr) __writel((val), (addr))
+#define writew(val, addr) __writew((val), (addr))
+#define writeb(val, addr) __writeb((val), (addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq

-void __memcpy_fromio(void*,unsigned long,unsigned);
-void __memcpy_toio(unsigned long,const void*,unsigned);
+void __memcpy_fromio(void *, unsigned long, unsigned);
+void __memcpy_toio(unsigned long, const void *, unsigned);

-static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+ unsigned len)
{
- __memcpy_fromio(to,(unsigned long)from,len);
+ __memcpy_fromio(to, (unsigned long)from, len);
}
-static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+ unsigned len)
{
- __memcpy_toio((unsigned long)to,from,len);
+ __memcpy_toio((unsigned long)to, from, len);
}

void memset_io(volatile void __iomem *a, int b, size_t c);
@@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

-#define flush_write_buffers()
+#define flush_write_buffers()

extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
--
1.5.4.rc2
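
Since the __IN/__OUT macro stack is hard to read even after reformatting, here is what __IN(b, "") and __OUT(b, "b", char) expand to, hand-expanded for illustration (a compile-only sketch: actually executing port I/O from userspace needs ioperm() and root):

/* Expansion of __IN(b, ""), minus the _p slow-I/O variant. */
static inline unsigned char inb(unsigned short port)
{
	unsigned char _v;
	asm volatile ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
	return _v;
}

/* Expansion of __OUT(b, "b", char), minus the _p variant. */
static inline void outb(unsigned char value, unsigned short port)
{
	asm volatile ("outb %b0,%w1" : : "a" (value), "Nd" (port));
}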

2008-03-23 08:20:37

by Joe Perches

[permalink] [raw]
Subject: [PATCH 062/148] include/asm-x86/lguest.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/lguest.h | 11 ++++-------
1 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index 9b17571..be4a724 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[];
extern void lguest_iret(void);
extern void lguest_init(void);

-struct lguest_regs
-{
+struct lguest_regs {
/* Manually saved part. */
unsigned long eax, ebx, ecx, edx;
unsigned long esi, edi, ebp;
@@ -51,8 +50,7 @@ struct lguest_regs
};

/* This is a guest-specific page (mapped ro) into the guest. */
-struct lguest_ro_state
-{
+struct lguest_ro_state {
/* Host information we need to restore when we switch back. */
u32 host_cr3;
struct desc_ptr host_idt_desc;
@@ -67,8 +65,7 @@ struct lguest_ro_state
struct desc_struct guest_gdt[GDT_ENTRIES];
};

-struct lg_cpu_arch
-{
+struct lg_cpu_arch {
/* The GDT entries copied into lguest_ro_state when running. */
struct desc_struct gdt[GDT_ENTRIES];

@@ -85,7 +82,7 @@ static inline void lguest_set_ts(void)

cr0 = read_cr0();
if (!(cr0 & 8))
- write_cr0(cr0|8);
+ write_cr0(cr0 | 8);
}

/* Full 4G segment descriptors, suitable for CS and DS. */
--
1.5.4.rc2

2008-03-23 08:21:04

by Joe Perches

[permalink] [raw]
Subject: [PATCH 061/148] include/asm-x86/lguest_hcall.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/lguest_hcall.h | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index 758b9a5..743d888 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -46,7 +46,7 @@ hcall(unsigned long call,
{
/* "int" is the Intel instruction to trigger a trap. */
asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
- /* The call in %eax (aka "a") might be overwritten */
+ /* The call in %eax (aka "a") might be overwritten */
: "=a"(call)
/* The arguments are in %eax, %edx, %ebx & %ecx */
: "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
@@ -62,8 +62,7 @@ hcall(unsigned long call,
#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)

#define LHCALL_RING_SIZE 64
-struct hcall_args
-{
+struct hcall_args {
/* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
unsigned long arg0, arg2, arg3, arg1;
};
--
1.5.4.rc2

2008-03-23 08:22:03

by Joe Perches

[permalink] [raw]
Subject: [PATCH 059/148] include/asm-x86/kvm_host.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/kvm_host.h | 24 +++++++++++++-----------
1 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 4702b04..68ee390 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -22,15 +22,16 @@

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
-#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
+#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
+ 0xFFFFFF0000000000ULL)

-#define KVM_GUEST_CR0_MASK \
+#define KVM_GUEST_CR0_MASK \
(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
| X86_CR0_NW | X86_CR0_CD)
-#define KVM_VM_CR0_ALWAYS_ON \
+#define KVM_VM_CR0_ALWAYS_ON \
(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
| X86_CR0_MP)
-#define KVM_GUEST_CR4_MASK \
+#define KVM_GUEST_CR4_MASK \
(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
@@ -133,12 +134,12 @@ struct kvm_pte_chain {
union kvm_mmu_page_role {
unsigned word;
struct {
- unsigned glevels : 4;
- unsigned level : 4;
- unsigned quadrant : 2;
- unsigned pad_for_nice_hex_output : 6;
- unsigned metaphysical : 1;
- unsigned access : 3;
+ unsigned glevels:4;
+ unsigned level:4;
+ unsigned quadrant:2;
+ unsigned pad_for_nice_hex_output:6;
+ unsigned metaphysical:1;
+ unsigned access:3;
};
};

@@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
-#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+#define RMODE_TSS_SIZE \
+ (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif
--
1.5.4.rc2
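
The bitfield respacing above touches union kvm_mmu_page_role, whose point is that the named fields alias 'word', so two roles compare with one integer compare. A userspace model (bitfield layout is compiler-dependent in general; the kernel relies on the known x86 ABI here):

#include <stdio.h>

union mmu_page_role {
	unsigned word;			/* aliases all the bitfields below */
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned metaphysical:1;
		unsigned access:3;
	};
};

int main(void)
{
	union mmu_page_role a = { .word = 0 }, b = { .word = 0 };

	a.level = 3;  a.access = 5;
	b.level = 3;  b.access = 5;
	printf("equal: %d, word: %#x\n", a.word == b.word, a.word);
	return 0;
}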

2008-03-23 08:21:38

by Joe Perches

[permalink] [raw]
Subject: [PATCH 057/148] include/asm-x86/kexec.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/kexec.h | 71 +++++++++++++++++++++++------------------------
1 files changed, 35 insertions(+), 36 deletions(-)

diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index c90d3c7..8f855a1 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
{
#ifdef CONFIG_X86_32
newregs->sp = (unsigned long)&(oldregs->sp);
- __asm__ __volatile__(
- "xorl %%eax, %%eax\n\t"
- "movw %%ss, %%ax\n\t"
- :"=a"(newregs->ss));
+ asm volatile("xorl %%eax, %%eax\n\t"
+ "movw %%ss, %%ax\n\t"
+ :"=a"(newregs->ss));
#endif
}

@@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
crash_fixup_ss_esp(newregs, oldregs);
} else {
#ifdef CONFIG_X86_32
- __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx));
- __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx));
- __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx));
- __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si));
- __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di));
- __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp));
- __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax));
- __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp));
- __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
- __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
- __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds));
- __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es));
- __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags));
+ asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
+ asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
+ asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
+ asm volatile("movl %%esi,%0" : "=m"(newregs->si));
+ asm volatile("movl %%edi,%0" : "=m"(newregs->di));
+ asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
+ asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
+ asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
+ asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
+ asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
- __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx));
- __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx));
- __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx));
- __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si));
- __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di));
- __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp));
- __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax));
- __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp));
- __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
- __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
- __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
- __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
- __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
- __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
- __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
- __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
- __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
- __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
- __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags));
+ asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
+ asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
+ asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
+ asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
+ asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
+ asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
+ asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
+ asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
+ asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
+ asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
+ asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
+ asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
+ asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
+ asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
+ asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
+ asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
newregs->ip = (unsigned long)current_text_addr();
}
--
1.5.4.rc2

2008-03-23 08:22:24

by Joe Perches

[permalink] [raw]
Subject: [PATCH 063/148] include/asm-x86/linkage.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/linkage.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 4e1c2ca..3c7b440 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -11,7 +11,7 @@

#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
+#define prevent_tail_call(ret) asm("" : "=r" (ret) : "0" (ret))
/*
* For 32-bit UML - mark functions implemented in assembly that use
* regparm input parameters:
--
1.5.4.rc2

2008-03-23 08:22:46

by Joe Perches

[permalink] [raw]
Subject: [PATCH 064/148] include/asm-x86/local.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/local.h | 105 ++++++++++++++++++++++------------------------
1 files changed, 50 insertions(+), 55 deletions(-)

diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index f852c62..330a724 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -18,32 +18,28 @@ typedef struct {

static inline void local_inc(local_t *l)
{
- __asm__ __volatile__(
- _ASM_INC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_INC "%0"
+ : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
- __asm__ __volatile__(
- _ASM_DEC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_DEC "%0"
+ : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_ADD "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_ADD "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_SUB "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_SUB "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}

/**
@@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l)
{
unsigned char c;

- __asm__ __volatile__(
- _ASM_SUB "%2,%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_SUB "%2,%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}

@@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l)
{
unsigned char c;

- __asm__ __volatile__(
- _ASM_DEC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_DEC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}

@@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l)
{
unsigned char c;

- __asm__ __volatile__(
- _ASM_INC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_INC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}

@@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l)
{
unsigned char c;

- __asm__ __volatile__(
- _ASM_ADD "%2,%0; sets %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_ADD "%2,%0; sets %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}

@@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l)
#endif
/* Modern 486+ processor */
__i = i;
- __asm__ __volatile__(
- _ASM_XADD "%0, %1;"
- :"+r" (i), "+m" (l->a.counter)
- : : "memory");
+ asm volatile(_ASM_XADD "%0, %1;"
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
return i + __i;

#ifdef CONFIG_M386
@@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l)
#define local_add_unless(l, a, u) \
({ \
long c, old; \
- c = local_read(l); \
+ c = local_read((l)); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
- old = local_cmpxchg((l), c, c + (a)); \
+ old = local_cmpxchg((l), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
@@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l)

/* Need to disable preemption for the cpu local counters otherwise we could
still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
+#define cpu_local_wrap_v(l) \
+({ \
+ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; \
+})
#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
+({ \
+ preempt_disable(); \
+ (l); \
+ preempt_enable(); \
+}) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
+
+#define __cpu_local_inc(l) cpu_local_inc((l))
+#define __cpu_local_dec(l) cpu_local_dec((l))
#define __cpu_local_add(i, l) cpu_local_add((i), (l))
#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))

--
1.5.4.rc2
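
The cpu_local_wrap_v() reformatted above leans on GCC statement expressions: ({ ... }) is a block whose last expression is the value of the whole construct, which is how a macro can declare a local and still "return" something. A runnable model with preempt_disable/enable stubbed out:

#include <stdio.h>

static int preempt_count;
#define preempt_disable()	(preempt_count++)
#define preempt_enable()	(preempt_count--)

/* Same shape as cpu_local_wrap_v(): evaluate expr with preemption "off". */
#define wrap_v(expr)			\
({					\
	int res__;			\
	preempt_disable();		\
	res__ = (expr);			\
	preempt_enable();		\
	res__;				\
})

int main(void)
{
	int x = 41;
	int r = wrap_v(x + 1);

	printf("%d (preempt_count back to %d)\n", r, preempt_count);
	return 0;
}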

2008-03-23 08:23:40

by Joe Perches

[permalink] [raw]
Subject: [PATCH 066/148] include/asm-x86/mca_dma.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mca_dma.h | 34 +++++++++++++++++-----------------
1 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index fbb1f3b..c3dca6e 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -12,18 +12,18 @@
* count by 2 when using 16-bit dma; that is not handled by these functions.
*
* Ramen Noodles are yummy.
- *
- * 1998 Tymm Twillman <[email protected]>
+ *
+ * 1998 Tymm Twillman <[email protected]>
*/

/*
- * Registers that are used by the DMA controller; FN is the function register
+ * Registers that are used by the DMA controller; FN is the function register
* (tell the controller what to do) and EXE is the execution register (how
* to do it)
*/

#define MCA_DMA_REG_FN 0x18
-#define MCA_DMA_REG_EXE 0x1A
+#define MCA_DMA_REG_EXE 0x1A

/*
* Functions that the DMA controller can do
@@ -43,9 +43,9 @@

/*
* Modes (used by setting MCA_DMA_FN_MODE in the function register)
- *
+ *
* Note that the MODE_READ is read from memory (write to device), and
- * MODE_WRITE is vice-versa.
+ * MODE_WRITE is vice-versa.
*/

#define MCA_DMA_MODE_XFER 0x04 /* read by default */
@@ -63,7 +63,7 @@
* IRQ context.
*/

-static __inline__ void mca_enable_dma(unsigned int dmanr)
+static inline void mca_enable_dma(unsigned int dmanr)
{
outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
}
@@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr)
* IRQ context.
*/

-static __inline__ void mca_disable_dma(unsigned int dmanr)
+static inline void mca_disable_dma(unsigned int dmanr)
{
outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
}
@@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr)
* @a: 24bit bus address
*
* Load the address register in the DMA controller. This has a 24bit
- * limitation (16Mb).
+ * limitation (16Mb).
*/

-static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
{
outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
outb(a & 0xff, MCA_DMA_REG_EXE);
@@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
* limitation (16Mb). The return is a bus address.
*/

-static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
+static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
{
unsigned int addr;

outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
addr = inb(MCA_DMA_REG_EXE);
addr |= inb(MCA_DMA_REG_EXE) << 8;
- addr |= inb(MCA_DMA_REG_EXE) << 16;
+ addr |= inb(MCA_DMA_REG_EXE) << 16;

return addr;
}
@@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
* Setting a count of zero will not do what you expect.
*/

-static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
{
count--; /* transfers one more than count -- correct for this */

@@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
* on this DMA channel.
*/

-static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
+static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
{
unsigned short count;

@@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
* with an I/O port target.
*/

-static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
+static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
{
/*
* DMA from a port address -- set the io address
*/
-
+
outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
outb(io_addr & 0xff, MCA_DMA_REG_EXE);
outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
@@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
* %MCA_DMA_MODE_16 to do 16bit transfers.
*/

-static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
+static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
{
outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
outb(mode, MCA_DMA_REG_EXE);
--
1.5.4.rc2
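
For orientation, mca_set_dma_addr() above splits a 24-bit bus address into three byte writes to the execute register. A userspace model with outb() stubbed to print the writes; MCA_DMA_FN_SET_ADDR's value is a placeholder here, since the real definition sits earlier in the header than the quoted hunks:

#include <stdio.h>

#define MCA_DMA_REG_FN		0x18
#define MCA_DMA_REG_EXE		0x1A
#define MCA_DMA_FN_SET_ADDR	0x04	/* placeholder value for this sketch */

/* Stub with the kernel's (value, port) argument order. */
static void outb(unsigned char val, unsigned short port)
{
	printf("outb(%#04x, %#04x)\n", val, port);
}

static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
{
	outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
	outb(a & 0xff, MCA_DMA_REG_EXE);
	outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
	outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);
}

int main(void)
{
	mca_set_dma_addr(2, 0x123456);	/* channel 2, 24-bit bus address */
	return 0;
}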

2008-03-23 08:23:11

by Joe Perches

[permalink] [raw]
Subject: [PATCH 065/148] include/asm-x86/mc146818rtc.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mc146818rtc.h | 16 ++++++++++------
1 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index cdd9f96..daf1ccd 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock;
static inline void lock_cmos(unsigned char reg)
{
unsigned long new;
- new = ((smp_processor_id()+1) << 8) | reg;
+ new = ((smp_processor_id() + 1) << 8) | reg;
for (;;) {
if (cmos_lock) {
cpu_relax();
@@ -57,22 +57,26 @@ static inline void unlock_cmos(void)
{
cmos_lock = 0;
}
+
static inline int do_i_have_lock_cmos(void)
{
- return (cmos_lock >> 8) == (smp_processor_id()+1);
+ return (cmos_lock >> 8) == (smp_processor_id() + 1);
}
+
static inline unsigned char current_lock_cmos_reg(void)
{
return cmos_lock & 0xff;
}
-#define lock_cmos_prefix(reg) \
+
+#define lock_cmos_prefix(reg) \
do { \
unsigned long cmos_flags; \
local_irq_save(cmos_flags); \
lock_cmos(reg)
-#define lock_cmos_suffix(reg) \
- unlock_cmos(); \
- local_irq_restore(cmos_flags); \
+
+#define lock_cmos_suffix(reg) \
+ unlock_cmos(); \
+ local_irq_restore(cmos_flags); \
} while (0)
#else
#define lock_cmos_prefix(reg) do {} while (0)
--
1.5.4.rc2
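
The odd thing about lock_cmos_prefix()/lock_cmos_suffix() is that the do { opened by the prefix is only closed by the suffix, which is what keeps cmos_flags in scope across both; the reformat above makes that bracket easier to see. A runnable model with the locking and irq calls stubbed out:

#include <stdio.h>

#define local_irq_save(f)	((f) = 1, printf("irqs off\n"))
#define local_irq_restore(f)	((void)(f), printf("irqs on\n"))
#define lock_cmos(reg)		printf("lock reg %#x\n", (reg))
#define unlock_cmos()		printf("unlock\n")

#define lock_cmos_prefix(reg)			\
do {						\
	unsigned long cmos_flags;		\
	local_irq_save(cmos_flags);		\
	lock_cmos(reg)

#define lock_cmos_suffix(reg)			\
	unlock_cmos();				\
	local_irq_restore(cmos_flags);		\
} while (0)

int main(void)
{
	lock_cmos_prefix(0x0a);
	printf("  ...access the CMOS register here...\n");
	lock_cmos_suffix(0x0a);
	return 0;
}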

2008-03-23 08:24:01

by Joe Perches

[permalink] [raw]
Subject: [PATCH 079/148] include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mutex_64.h | 73 ++++++++++++++++++++-----------------------
1 files changed, 34 insertions(+), 39 deletions(-)

diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index 6c2949a..f3fae9b 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -16,23 +16,21 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%rdi) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1:" \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)

/**
@@ -45,9 +43,8 @@ do { \
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns
*/
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%rdi) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: " \
- \
- :"=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
} while (0)

#define __mutex_slowpath_needs_to_unlock() 1
@@ -93,8 +88,8 @@ do { \
* if it wasn't 1 originally. [the fallback function is never used on
* x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
*/
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
--
1.5.4.rc2
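
The decl/jns fastpath rewritten above decrements the count and only calls fail_fn when the result went negative, i.e. when someone else holds the mutex. The same logic in portable userspace C, with __atomic_sub_fetch standing in for the locked decl:

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

static void slow_lock(my_atomic_t *v)
{
	printf("slowpath: contended (count=%d)\n", v->counter);
}

static void fastpath_lock(my_atomic_t *v)
{
	/* LOCK_PREFIX "decl" + "jns 1f" becomes: subtract, test the sign. */
	if (__atomic_sub_fetch(&v->counter, 1, __ATOMIC_ACQUIRE) < 0)
		slow_lock(v);
}

int main(void)
{
	my_atomic_t m = { .counter = 1 };	/* 1 == unlocked */

	fastpath_lock(&m);	/* 1 -> 0: fastpath, no call */
	fastpath_lock(&m);	/* 0 -> -1: negative, slowpath */
	return 0;
}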

2008-03-23 08:24:25

by Joe Perches

[permalink] [raw]
Subject: [PATCH 067/148] include/asm-x86/mmu_context_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmu_context_32.h | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 8198d1c..9756ae0 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev,
BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
+ /* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
@@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev,
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%gs": :"r" (0));

-#define activate_mm(prev, next) \
- do { \
- paravirt_activate_mm(prev, next); \
- switch_mm((prev),(next),NULL); \
- } while(0);
+#define activate_mm(prev, next) \
+do { \
+ paravirt_activate_mm((prev), (next)); \
+ switch_mm((prev), (next), NULL); \
+} while (0);

#endif
--
1.5.4.rc2

2008-03-23 08:25:17

by Joe Perches

[permalink] [raw]
Subject: [PATCH 081/148] include/asm-x86/numaq.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/numaq.h | 9 +++++----
1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 38f710d..94b86c3 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002, IBM Corp.
*
- * All rights reserved.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void);
/*
* SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
*/
-#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */
+#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private
+ quad space */

/*
* Communication area for each processor on lynxer-processor tests.
@@ -139,7 +140,7 @@ struct sys_cfg_data {
unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
/* may not be totally populated */
- unsigned int split_mem_enbl; /* 0 for no low shared memory */
+ unsigned int split_mem_enbl; /* 0 for no low shared memory */
unsigned int mmio_sz; /* Size of total system memory mapped I/O */
/* (in MB). */
unsigned int quad_spin_lock; /* Spare location used for quad */
@@ -152,7 +153,7 @@ struct sys_cfg_data {
/*
* memory configuration area for each quad
*/
- struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
+ struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
};

static inline unsigned long *get_zholes_size(int nid)
--
1.5.4.rc2

2008-03-23 08:24:47

by Joe Perches

[permalink] [raw]
Subject: [PATCH 080/148] include/asm-x86/numa_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/numa_64.h | 5 +++--
1 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 15fe07c..32c22ae 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,11 +1,12 @@
-#ifndef _ASM_X8664_NUMA_H
+#ifndef _ASM_X8664_NUMA_H
#define _ASM_X8664_NUMA_H 1

#include <linux/nodemask.h>
#include <asm/apicdef.h>

struct bootnode {
- u64 start,end;
+ u64 start;
+ u64 end;
};

extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
--
1.5.4.rc2

2008-03-23 08:25:59

by Joe Perches

[permalink] [raw]
Subject: [PATCH 082/148] include/asm-x86/page_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/page_32.h | 9 ++++++---
1 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 5f7257f..424e82f 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -47,7 +47,10 @@ typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef unsigned long phys_addr_t;

-typedef union { pteval_t pte, pte_low; } pte_t;
+typedef union {
+ pteval_t pte;
+ pteval_t pte_low;
+} pte_t;

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
@@ -61,7 +64,7 @@ typedef struct page *pgtable_t;
#endif

#ifndef __ASSEMBLY__
-#define __phys_addr(x) ((x)-PAGE_OFFSET)
+#define __phys_addr(x) ((x) - PAGE_OFFSET)
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)

#ifdef CONFIG_FLATMEM
@@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)

#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
--
1.5.4.rc2

2008-03-23 08:26:35

by Joe Perches

[permalink] [raw]
Subject: [PATCH 083/148] include/asm-x86/page_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/page_64.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index aee05c6..f156778 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -5,7 +5,7 @@

#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE-1))
+#define CURRENT_MASK (~(THREAD_SIZE - 1))

#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
@@ -51,7 +51,7 @@
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
-#define KERNEL_IMAGE_SIZE (512*1024*1024)
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
--
1.5.4.rc2

2008-03-23 08:27:07

by Joe Perches

[permalink] [raw]
Subject: [PATCH 084/148] include/asm-x86/param.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/param.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index c996ec4..6f0d042 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -3,8 +3,8 @@

#ifdef __KERNEL__
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
+# define USER_HZ 100 /* some user interfaces are */
+# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif

#ifndef HZ
--
1.5.4.rc2
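
The reworded comments are about the HZ/USER_HZ split: the kernel ticks at CONFIG_HZ internally, but interfaces like times() report in USER_HZ ticks, so jiffies get scaled on the way out. A simplified conversion sketch (assuming HZ = 1000 and that USER_HZ divides HZ evenly, which the real code does not assume):

#include <stdio.h>

#define HZ	1000	/* assumed internal tick rate for this sketch */
#define USER_HZ	100	/* what user interfaces see */

static unsigned long jiffies_to_clock_t(unsigned long j)
{
	return j / (HZ / USER_HZ);
}

int main(void)
{
	printf("%lu\n", jiffies_to_clock_t(2500));	/* 2.5s -> 250 ticks */
	return 0;
}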

2008-03-23 08:27:42

by Joe Perches

[permalink] [raw]
Subject: [PATCH 070/148] include/asm-x86/mmx.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmx.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 46b71da..9408812 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -6,7 +6,7 @@
*/

#include <linux/types.h>
-
+
extern void *_mmx_memcpy(void *to, const void *from, size_t size);
extern void mmx_clear_page(void *page);
extern void mmx_copy_page(void *to, void *from);
--
1.5.4.rc2

2008-03-23 08:28:12

by Joe Perches

[permalink] [raw]
Subject: [PATCH 085/148] include/asm-x86/paravirt.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/paravirt.h | 47 ++++++++++++++++++++++++++-----------------
1 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 3433c12..a7f046f 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -233,7 +233,8 @@ struct pv_mmu_ops {
void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval);
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);

@@ -248,7 +249,8 @@ struct pv_mmu_ops {
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);

#endif /* CONFIG_X86_PAE */
@@ -276,8 +278,7 @@ struct pv_mmu_ops {
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
* what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
struct pv_cpu_ops pv_cpu_ops;
@@ -662,32 +663,37 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do { \
+#define rdmsr(msr, val1, val2) \
+do { \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
val1 = (u32)_l; \
val2 = _l >> 32; \
-} while(0)
+} while (0)

-#define wrmsr(msr,val1,val2) do { \
+#define wrmsr(msr, val1, val2) \
+do { \
paravirt_write_msr(msr, val1, val2); \
-} while(0)
+} while (0)

-#define rdmsrl(msr,val) do { \
+#define rdmsrl(msr, val) \
+do { \
int _err; \
val = paravirt_read_msr(msr, &_err); \
-} while(0)
+} while (0)

-#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({ \
+#define rdmsr_safe(msr, a, b) \
+({ \
int _err; \
u64 _l = paravirt_read_msr(msr, &_err); \
(*a) = (u32)_l; \
(*b) = _l >> 32; \
- _err; })
+ _err; \
+})


static inline u64 paravirt_read_tsc(void)
@@ -695,10 +701,11 @@ static inline u64 paravirt_read_tsc(void)
return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

-#define rdtscl(low) do { \
+#define rdtscl(low) \
+do { \
u64 _l = paravirt_read_tsc(); \
low = (int)_l; \
-} while(0)
+} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

@@ -713,11 +720,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

-#define rdpmc(counter,low,high) do { \
+#define rdpmc(counter, low, high) \
+do { \
u64 _l = paravirt_read_pmc(counter); \
low = (u32)_l; \
high = _l >> 32; \
-} while(0)
+} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
@@ -796,7 +804,8 @@ static inline void set_iopl_mask(unsigned mask)
}

/* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
pv_cpu_ops.io_delay();
--
1.5.4.rc2
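
The rdmsr() rework above is the usual reason for do { } while (0) in a multi-statement macro: the macro can declare locals (_err, _l) and still behave as one statement after an if. A runnable model with paravirt_read_msr() stubbed to a constant:

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned long long u64;

static u64 paravirt_read_msr(unsigned msr, int *err)
{
	(void)msr;
	*err = 0;
	return 0x1234567890abcdefULL;	/* stand-in MSR contents */
}

#define rdmsr(msr, val1, val2)				\
do {							\
	int _err;					\
	u64 _l = paravirt_read_msr(msr, &_err);		\
	val1 = (u32)_l;					\
	val2 = _l >> 32;				\
} while (0)

int main(void)
{
	u32 lo, hi;

	rdmsr(0x10, lo, hi);	/* 0x10: illustrative MSR number */
	printf("lo=%#x hi=%#x\n", lo, hi);
	return 0;
}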

2008-03-23 08:28:39

by Joe Perches

[permalink] [raw]
Subject: [PATCH 098/148] include/asm-x86/posix_types_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/posix_types_32.h | 47 +++++++++++++++++++++----------------
1 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index 015e539..b031efd 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -45,32 +45,39 @@ typedef struct {
#if defined(__KERNEL__)

#undef __FD_SET
-#define __FD_SET(fd,fdsetp) \
- __asm__ __volatile__("btsl %1,%0": \
- "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_SET(fd,fdsetp) \
+ asm volatile("btsl %1,%0": \
+ "+m" (*(__kernel_fd_set *)(fdsetp)) \
+ : "r" ((int)(fd)))

#undef __FD_CLR
-#define __FD_CLR(fd,fdsetp) \
- __asm__ __volatile__("btrl %1,%0": \
- "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+#define __FD_CLR(fd,fdsetp) \
+ asm volatile("btrl %1,%0": \
+ "+m" (*(__kernel_fd_set *)(fdsetp)) \
+ : "r" ((int) (fd)))

#undef __FD_ISSET
-#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
- unsigned char __result; \
- __asm__ __volatile__("btl %1,%2 ; setb %0" \
- :"=q" (__result) :"r" ((int) (fd)), \
- "m" (*(__kernel_fd_set *) (fdsetp))); \
- __result; }))
+#define __FD_ISSET(fd,fdsetp) \
+ (__extension__ \
+ ({ \
+ unsigned char __result; \
+ asm volatile("btl %1,%2 ; setb %0" \
+ : "=q" (__result) \
+ : "r" ((int)(fd)), \
+ "m" (*(__kernel_fd_set *)(fdsetp))); \
+ __result; \
+}))

#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
- int __d0, __d1; \
- __asm__ __volatile__("cld ; rep ; stosl" \
- :"=m" (*(__kernel_fd_set *) (fdsetp)), \
- "=&c" (__d0), "=&D" (__d1) \
- :"a" (0), "1" (__FDSET_LONGS), \
- "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
+#define __FD_ZERO(fdsetp) \
+do { \
+ int __d0, __d1; \
+ asm volatile("cld ; rep ; stosl" \
+ : "=m" (*(__kernel_fd_set *)(fdsetp)), \
+ "=&c" (__d0), "=&D" (__d1) \
+ : "a" (0), "1" (__FDSET_LONGS), \
+ "2" ((__kernel_fd_set *)(fdsetp)) \
+ : "memory"); \
} while (0)

#endif /* defined(__KERNEL__) */
--
1.5.4.rc2
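
The btsl/btrl/btl instructions in the __FD_* macros above do in one instruction what plain C needs index arithmetic for: each fd selects one bit in the fd_set's array of longs. A portable model of the same arithmetic:

#include <stdio.h>

#define MY_NFDBITS	(8 * (int)sizeof(unsigned long))

typedef struct { unsigned long fds_bits[32]; } my_fd_set;

static void my_fd_setbit(int fd, my_fd_set *p)
{
	p->fds_bits[fd / MY_NFDBITS] |= 1UL << (fd % MY_NFDBITS);
}

static int my_fd_isset(int fd, const my_fd_set *p)
{
	return (p->fds_bits[fd / MY_NFDBITS] >> (fd % MY_NFDBITS)) & 1;
}

int main(void)
{
	my_fd_set s = { { 0 } };

	my_fd_setbit(37, &s);
	printf("%d %d\n", my_fd_isset(37, &s), my_fd_isset(36, &s));
	return 0;
}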

2008-03-23 08:29:06

by Joe Perches

[permalink] [raw]
Subject: [PATCH 097/148] include/asm-x86/pgtable.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgtable.h | 197 ++++++++++++++++++++++++++++++++-------------
1 files changed, 142 insertions(+), 55 deletions(-)

diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 0e31e72..2533f79 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,27 +18,28 @@
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_HIDDEN 11
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+#define _PAGE_BIT_NX 63 /* No execute:
+ * only valid after cpuid check */

/*
* Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
* sign-extended value on 32-bit with all 1's in the upper word,
* which preserves the upper pte values on 64-bit ptes:
*/
-#define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT)
-#define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW)
-#define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER)
-#define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT)
-#define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY)
-#define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */
-#define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */
-#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2)
-#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
-#define _PAGE_HIDDEN (_AC(1, L)<<_PAGE_BIT_HIDDEN)
+#define _PAGE_PRESENT (_AC(1, L) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AC(1, L) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AC(1, L) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AC(1, L) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AC(1, L) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AC(1, L) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AC(1, L) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AC(1, L) << _PAGE_BIT_PSE) /* 2MB page */
+#define _PAGE_GLOBAL (_AC(1, L) << _PAGE_BIT_GLOBAL) /* Global TLB entry */
+#define _PAGE_UNUSED1 (_AC(1, L) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2 (_AC(1, L) << _PAGE_BIT_UNUSED2)
+#define _PAGE_PAT (_AC(1, L) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AC(1, L) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_HIDDEN (_AC(1, L) << _PAGE_BIT_HIDDEN)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
@@ -47,12 +48,15 @@
#endif

/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
+ * saved PTE; unset:swap */
#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
pte_present gives true */

-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+ _PAGE_DIRTY)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

@@ -63,14 +67,20 @@
#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
+ _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
@@ -88,11 +98,13 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;

#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | \
+ _PAGE_PWT)
#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | \
+ _PAGE_PWT)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

@@ -139,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
@@ -149,30 +161,101 @@ extern struct list_head pgd_list;
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
-static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; }
-static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); }
-
-static inline int pmd_large(pmd_t pte) {
- return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
- (_PAGE_PSE|_PAGE_PRESENT);
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PSE;
}

-static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
-static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
-static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
-static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
-static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); }
-static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); }
-static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); }
-static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); }
-static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
-static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); }
-static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
+static inline int pte_global(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_NX);
+}
+
+static inline int pmd_large(pmd_t pte)
+{
+ return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+}

extern pteval_t __supported_pte_mask;

@@ -210,9 +293,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
+ unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
+ unsigned long size, pgprot_t *vma_prot);
#endif

#ifdef CONFIG_PARAVIRT
@@ -331,7 +414,8 @@ extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
pte_update(mm, addr, ptep);
@@ -339,7 +423,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ int full)
{
pte_t pte;
if (full) {
@@ -355,7 +441,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
pte_update(mm, addr, ptep);
--
1.5.4.rc2
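
[The pte helpers expanded above all share one shape: take a pte value, return a
new one with a flag set or cleared, with a (pteval_t) cast before the ~ so the
complemented mask keeps its high bits when pteval_t is wider than the flag
constant (as with PAE ptes). A minimal standalone sketch of that pitfall, with
hypothetical names, not kernel code:]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t val_t;			/* stand-in for pteval_t */
#define FLAG_DIRTY ((uint32_t)0x40)	/* stand-in for a 32-bit flag macro */

static inline val_t mkdirty(val_t v) { return v | FLAG_DIRTY; }
static inline val_t mkclean(val_t v) { return v & ~(val_t)FLAG_DIRTY; }

int main(void)
{
	val_t v = 0x8000000000000040ULL;	/* NX-style high bit + dirty */

	/* with the cast, only the dirty bit is cleared */
	printf("with cast:    %016llx\n", (unsigned long long)mkclean(v));
	/* without it, ~FLAG_DIRTY is 32-bit and zero-extends,
	 * wiping the high bits too */
	printf("without cast: %016llx\n", (unsigned long long)(v & ~FLAG_DIRTY));
	return 0;
}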

2008-03-23 08:29:36

by Joe Perches

[permalink] [raw]
Subject: [PATCH 099/148] include/asm-x86/posix_types_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/posix_types_64.h | 54 +++++++++++++++++++-------------------
1 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index 9926aa4..93e8467 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -46,7 +46,7 @@ typedef unsigned long __kernel_old_dev_t;
#ifdef __KERNEL__

#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
}

#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
}

#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
+static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
@@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
* for 256 and 1024-bit fd_sets respectively)
*/
#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+static inline void __FD_ZERO(__kernel_fd_set *p)
{
unsigned long *tmp = p->fds_bits;
int i;

if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
+ case 32:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
+ tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
+ tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
+ tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
+ return;
+ case 16:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ return;
+ case 8:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ return;
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
}
}
i = __FDSET_LONGS;
--
1.5.4.rc2
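
[For reference, the fd / __NFDBITS and fd % __NFDBITS arithmetic that __FD_SET()
and friends perform just selects a word and a bit inside an array of longs; a
userspace sketch with local stand-in names, not the kernel macros:]

#include <stdio.h>

#define NFDBITS		(8 * (int)sizeof(unsigned long))
#define FDSET_LONGS	16

struct fd_bits { unsigned long bits[FDSET_LONGS]; };

static void fd_set_bit(int fd, struct fd_bits *p)
{
	p->bits[fd / NFDBITS] |= 1UL << (fd % NFDBITS);	/* word, then bit */
}

static int fd_test_bit(int fd, const struct fd_bits *p)
{
	return (p->bits[fd / NFDBITS] >> (fd % NFDBITS)) & 1;
}

int main(void)
{
	struct fd_bits s = { { 0 } };

	fd_set_bit(71, &s);
	printf("fd 71 set: %d, fd 72 set: %d\n",
	       fd_test_bit(71, &s), fd_test_bit(72, &s));
	return 0;
}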

2008-03-23 08:29:57

by Joe Perches

[permalink] [raw]
Subject: [PATCH 100/148] include/asm-x86/processor.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/processor.h | 73 ++++++++++++++++++++++--------------------
1 files changed, 38 insertions(+), 35 deletions(-)

diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 958106c..7b1e3a8 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -175,12 +175,12 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
- __asm__("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
+ asm("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
@@ -430,17 +430,23 @@ static inline unsigned long native_get_debugreg(int regno)

switch (regno) {
case 0:
- asm("mov %%db0, %0" :"=r" (val)); break;
+ asm("mov %%db0, %0" :"=r" (val));
+ break;
case 1:
- asm("mov %%db1, %0" :"=r" (val)); break;
+ asm("mov %%db1, %0" :"=r" (val));
+ break;
case 2:
- asm("mov %%db2, %0" :"=r" (val)); break;
+ asm("mov %%db2, %0" :"=r" (val));
+ break;
case 3:
- asm("mov %%db3, %0" :"=r" (val)); break;
+ asm("mov %%db3, %0" :"=r" (val));
+ break;
case 6:
- asm("mov %%db6, %0" :"=r" (val)); break;
+ asm("mov %%db6, %0" :"=r" (val));
+ break;
case 7:
- asm("mov %%db7, %0" :"=r" (val)); break;
+ asm("mov %%db7, %0" :"=r" (val));
+ break;
default:
BUG();
}
@@ -481,14 +487,14 @@ static inline void native_set_iopl_mask(unsigned mask)
#ifdef CONFIG_X86_32
unsigned int reg;

- __asm__ __volatile__ ("pushfl;"
- "popl %0;"
- "andl %1, %0;"
- "orl %2, %0;"
- "pushl %0;"
- "popfl"
- : "=&r" (reg)
- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+ asm volatile ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

@@ -526,8 +532,8 @@ static inline void native_swapgs(void)
#define set_debugreg(value, register) \
native_set_debugreg(register, value)

-static inline void
-load_sp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
{
native_load_sp0(tss, thread);
}
@@ -683,7 +689,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
- __asm__ __volatile__("rep; nop" ::: "memory");
+ asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
@@ -697,32 +703,29 @@ static inline void sync_core(void)
int tmp;

asm volatile("cpuid" : "=a" (tmp) : "0" (1)
- : "ebx", "ecx", "edx", "memory");
+ : "ebx", "ecx", "edx", "memory");
}

-static inline void
-__monitor(const void *eax, unsigned long ecx, unsigned long edx)
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
{
/* "monitor %eax, %ecx, %edx;" */
- asm volatile(
- ".byte 0x0f, 0x01, 0xc8;"
- :: "a" (eax), "c" (ecx), "d"(edx));
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
/* "mwait %eax, %ecx;" */
- asm volatile(
- ".byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
/* "mwait %eax, %ecx;" */
- asm volatile(
- "sti; .byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
--
1.5.4.rc2
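
[The native_cpuid() change above is purely about laying out GCC extended-asm
operands one per line. For readers unfamiliar with the "=a"/"=b"/"=c"/"=d"
constraint pattern, a standalone version of the same idiom (x86, gcc, built as
an ordinary userspace program; a sketch, not the kernel function):]

#include <stdio.h>
#include <string.h>

static inline void cpuid(unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	/* ecx is an input (the subleaf) as well as an output */
	asm("cpuid"
	    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

int main(void)
{
	unsigned int a = 0, b, c = 0, d;
	char vendor[13] = { 0 };

	cpuid(&a, &b, &c, &d);		/* leaf 0: max leaf + vendor string */
	memcpy(vendor + 0, &b, 4);
	memcpy(vendor + 4, &d, 4);
	memcpy(vendor + 8, &c, 4);
	printf("max leaf %u, vendor \"%s\"\n", a, vendor);
	return 0;
}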

2008-03-23 08:30:26

by Joe Perches

[permalink] [raw]
Subject: [PATCH 103/148] include/asm-x86/reboot.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/reboot.h | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e9e3ffc..6b5233b 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -3,8 +3,7 @@

struct pt_regs;

-struct machine_ops
-{
+struct machine_ops {
void (*restart)(char *cmd);
void (*halt)(void);
void (*power_off)(void);
--
1.5.4.rc2
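
[machine_ops, whose brace placement is fixed above, is a plain ops table: a
struct of function pointers that a platform fills in and generic code calls
through. A minimal sketch of the pattern, with hypothetical handlers:]

#include <stdio.h>

struct machine_ops {
	void (*restart)(char *cmd);
	void (*halt)(void);
};

static void demo_restart(char *cmd) { printf("restart: %s\n", cmd); }
static void demo_halt(void)         { printf("halt\n"); }

static struct machine_ops machine_ops = {
	.restart = demo_restart,
	.halt    = demo_halt,
};

int main(void)
{
	machine_ops.restart("now");	/* dispatch through the table */
	machine_ops.halt();
	return 0;
}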

2008-03-23 08:30:54

by Joe Perches

[permalink] [raw]
Subject: [PATCH 144/148] include/asm-x86/vmi.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/vmi.h | 88 ++++++++++++++++++++++++------------------------
1 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h
index eb8bd89..b7c0dea 100644
--- a/include/asm-x86/vmi.h
+++ b/include/asm-x86/vmi.h
@@ -155,9 +155,9 @@

#ifndef __ASSEMBLY__
struct vmi_relocation_info {
- unsigned char *eip;
- unsigned char type;
- unsigned char reserved[3];
+ unsigned char *eip;
+ unsigned char type;
+ unsigned char reserved[3];
};
#endif

@@ -173,53 +173,53 @@ struct vmi_relocation_info {
#ifndef __ASSEMBLY__

struct vrom_header {
- u16 rom_signature; // option ROM signature
- u8 rom_length; // ROM length in 512 byte chunks
- u8 rom_entry[4]; // 16-bit code entry point
- u8 rom_pad0; // 4-byte align pad
- u32 vrom_signature; // VROM identification signature
- u8 api_version_min;// Minor version of API
- u8 api_version_maj;// Major version of API
- u8 jump_slots; // Number of jump slots
- u8 reserved1; // Reserved for expansion
- u32 virtual_top; // Hypervisor virtual address start
- u16 reserved2; // Reserved for expansion
- u16 license_offs; // Offset to License string
- u16 pci_header_offs;// Offset to PCI OPROM header
- u16 pnp_header_offs;// Offset to PnP OPROM header
- u32 rom_pad3; // PnP reserverd / VMI reserved
- u8 reserved[96]; // Reserved for headers
- char vmi_init[8]; // VMI_Init jump point
- char get_reloc[8]; // VMI_GetRelocationInfo jump point
+ u16 rom_signature; /* option ROM signature */
+ u8 rom_length; /* ROM length in 512 byte chunks */
+ u8 rom_entry[4]; /* 16-bit code entry point */
+ u8 rom_pad0; /* 4-byte align pad */
+ u32 vrom_signature; /* VROM identification signature */
+ u8 api_version_min;/* Minor version of API */
+ u8 api_version_maj;/* Major version of API */
+ u8 jump_slots; /* Number of jump slots */
+ u8 reserved1; /* Reserved for expansion */
+ u32 virtual_top; /* Hypervisor virtual address start */
+ u16 reserved2; /* Reserved for expansion */
+ u16 license_offs; /* Offset to License string */
+ u16 pci_header_offs;/* Offset to PCI OPROM header */
+ u16 pnp_header_offs;/* Offset to PnP OPROM header */
+ u32 rom_pad3; /* PnP reserverd / VMI reserved */
+ u8 reserved[96]; /* Reserved for headers */
+ char vmi_init[8]; /* VMI_Init jump point */
+ char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
} __attribute__((packed));

struct pnp_header {
- char sig[4];
- char rev;
- char size;
- short next;
- short res;
- long devID;
- unsigned short manufacturer_offset;
- unsigned short product_offset;
+ char sig[4];
+ char rev;
+ char size;
+ short next;
+ short res;
+ long devID;
+ unsigned short manufacturer_offset;
+ unsigned short product_offset;
} __attribute__((packed));

struct pci_header {
- char sig[4];
- short vendorID;
- short deviceID;
- short vpdData;
- short size;
- char rev;
- char class;
- char subclass;
- char interface;
- short chunks;
- char rom_version_min;
- char rom_version_maj;
- char codetype;
- char lastRom;
- short reserved;
+ char sig[4];
+ short vendorID;
+ short deviceID;
+ short vpdData;
+ short size;
+ char rev;
+ char class;
+ char subclass;
+ char interface;
+ short chunks;
+ char rom_version_min;
+ char rom_version_maj;
+ char codetype;
+ char lastRom;
+ short reserved;
} __attribute__((packed));

/* Function prototypes for bootstrapping */
--
1.5.4.rc2
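
[The vrom_header/pnp_header/pci_header structs reformatted above rely on
__attribute__((packed)) to match a ROM byte layout exactly. A small
demonstration of what packed changes (gcc/clang; illustrative types only):]

#include <stdio.h>
#include <stdint.h>

struct with_padding {
	uint16_t sig;
	uint32_t off;	/* compiler inserts 2 pad bytes before this */
};

struct packed_hdr {
	uint16_t sig;
	uint32_t off;	/* no padding: byte-exact layout */
} __attribute__((packed));

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct with_padding), sizeof(struct packed_hdr));
	/* typically prints "padded: 8 bytes, packed: 6 bytes" */
	return 0;
}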

2008-03-23 08:31:24

by Joe Perches

[permalink] [raw]
Subject: [PATCH 106/148] include/asm-x86/rwsem.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/rwsem.h | 169 ++++++++++++++++++++++++-----------------------
1 files changed, 86 insertions(+), 83 deletions(-)

diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 520a379..750f2a3 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore *
/*
* the semaphore definition
*/
-struct rw_semaphore {
- signed long count;
+
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+ signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -78,11 +80,13 @@ struct rw_semaphore {
#endif


-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name) \
+{ \
+ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
+}

-#define DECLARE_RWSEM(name) \
+#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
@@ -100,16 +104,16 @@ do { \
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- __asm__ __volatile__(
- "# beginning down_read\n\t"
-LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
- " jns 1f\n"
- " call call_rwsem_down_read_failed\n"
- "1:\n\t"
- "# ending down_read\n\t"
- : "+m" (sem->count)
- : "a" (sem)
- : "memory", "cc");
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX " incl (%%eax)\n\t"
+ /* adds 0x00000001, returns the old value */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+ "1:\n\t"
+ "# ending down_read\n\t"
+ : "+m" (sem->count)
+ : "a" (sem)
+ : "memory", "cc");
}

/*
@@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
__s32 result, tmp;
- __asm__ __volatile__(
- "# beginning __down_read_trylock\n\t"
- " movl %0,%1\n\t"
- "1:\n\t"
- " movl %1,%2\n\t"
- " addl %3,%2\n\t"
- " jle 2f\n\t"
-LOCK_PREFIX " cmpxchgl %2,%0\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- "# ending __down_read_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
- : "i" (RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
- return result>=0 ? 1 : 0;
+ asm volatile("# beginning __down_read_trylock\n\t"
+ " movl %0,%1\n\t"
+ "1:\n\t"
+ " movl %1,%2\n\t"
+ " addl %3,%2\n\t"
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "memory", "cc");
+ return result >= 0 ? 1 : 0;
}

/*
@@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
int tmp;

tmp = RWSEM_ACTIVE_WRITE_BIAS;
- __asm__ __volatile__(
- "# beginning down_write\n\t"
-LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
- " testl %%edx,%%edx\n\t" /* was the count 0 before? */
- " jz 1f\n"
- " call call_rwsem_down_write_failed\n"
- "1:\n"
- "# ending down_write"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (tmp)
- : "memory", "cc");
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ /* subtract 0x0000ffff, returns the old value */
+ " testl %%edx,%%edx\n\t"
+ /* was the count 0 before? */
+ " jz 1f\n"
+ " call call_rwsem_down_write_failed\n"
+ "1:\n"
+ "# ending down_write"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+ : "memory", "cc");
}

static inline void __down_write(struct rw_semaphore *sem)
@@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
signed long ret = cmpxchg(&sem->count,
- RWSEM_UNLOCKED_VALUE,
+ RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
@@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
static inline void __up_read(struct rw_semaphore *sem)
{
__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
- __asm__ __volatile__(
- "# beginning __up_read\n\t"
-LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n"
- "1:\n"
- "# ending __up_read\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (tmp)
- : "memory", "cc");
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n"
+ "1:\n"
+ "# ending __up_read\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+ : "memory", "cc");
}

/*
@@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- __asm__ __volatile__(
- "# beginning __up_write\n\t"
- " movl %2,%%edx\n\t"
-LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
- " jz 1f\n"
- " call call_rwsem_wake\n"
- "1:\n\t"
- "# ending __up_write\n"
- : "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc", "edx");
+ asm volatile("# beginning __up_write\n\t"
+ " movl %2,%%edx\n\t"
+ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+ " jz 1f\n"
+ " call call_rwsem_wake\n"
+ "1:\n\t"
+ "# ending __up_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc", "edx");
}

/*
@@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
- __asm__ __volatile__(
- "# beginning __downgrade_write\n\t"
-LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
- " jns 1f\n\t"
- " call call_rwsem_downgrade_wake\n"
- "1:\n\t"
- "# ending __downgrade_write\n"
- : "+m" (sem->count)
- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
- : "memory", "cc");
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX " addl %2,(%%eax)\n\t"
+ /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+ " jns 1f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+ "1:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+ : "memory", "cc");
}

/*
@@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
*/
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
- __asm__ __volatile__(
-LOCK_PREFIX "addl %1,%0"
- : "+m" (sem->count)
- : "ir" (delta));
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "+m" (sem->count)
+ : "ir" (delta));
}

/*
@@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
int tmp = delta;

- __asm__ __volatile__(
-LOCK_PREFIX "xadd %0,%1"
- : "+r" (tmp), "+m" (sem->count)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xadd %0,%1"
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");

- return tmp+delta;
+ return tmp + delta;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
--
1.5.4.rc2
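
[The bias constants moved above encode the whole rwsem state in one signed
word: each reader adds 1, a writer adds 0xffff0001, and the sign of the result
tells the fast path whether to fall into the slow path. A worked model of just
the count transitions (no atomicity, no waiting; not the kernel code):]

#include <stdio.h>

#define ACTIVE_BIAS		0x00000001
#define WAITING_BIAS		(-0x00010000)
#define ACTIVE_WRITE_BIAS	(WAITING_BIAS + ACTIVE_BIAS)

int main(void)
{
	int count = 0;			/* RWSEM_UNLOCKED_VALUE */

	count += ACTIVE_BIAS;		/* down_read: "incl; jns" */
	printf("reader:  %#010x (%s)\n", (unsigned)count,
	       count >= 0 ? "fast path" : "slow path");
	count -= ACTIVE_BIAS;		/* up_read */

	count += ACTIVE_WRITE_BIAS;	/* down_write: old value was 0, ok */
	printf("writer:  %#010x\n", (unsigned)count);	/* 0xffff0001 */

	count += ACTIVE_BIAS;		/* a reader arriving under a writer */
	printf("blocked: %#010x (%s)\n", (unsigned)count,
	       count >= 0 ? "fast path" : "slow path");
	return 0;
}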

2008-03-23 08:31:46

by Joe Perches

[permalink] [raw]
Subject: [PATCH 107/148] include/asm-x86/semaphore_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/semaphore_32.h | 104 +++++++++++++++++++---------------------
1 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
index ac96d38..42a7e39 100644
--- a/include/asm-x86/semaphore_32.h
+++ b/include/asm-x86/semaphore_32.h
@@ -55,12 +55,12 @@ struct semaphore {
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)

-static inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init(struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -73,19 +73,19 @@ static inline void sema_init (struct semaphore *sem, int val)
init_waitqueue_head(&sem->wait);
}

-static inline void init_MUTEX (struct semaphore *sem)
+static inline void init_MUTEX(struct semaphore *sem)
{
sema_init(sem, 1);
}

-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
sema_init(sem, 0);
}

extern asmregparm void __down_failed(atomic_t *count_ptr);
-extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
-extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
+extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
+extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
extern asmregparm void __up_wakeup(atomic_t *count_ptr);

/*
@@ -93,41 +93,39 @@ extern asmregparm void __up_wakeup(atomic_t *count_ptr);
* "__down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/i386/kernel/semaphore.c
*/
-static inline void down(struct semaphore * sem)
+static inline void down(struct semaphore *sem)
{
might_sleep();
- __asm__ __volatile__(
- "# atomic down operation\n\t"
- LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "jns 2f\n"
- "\tlea %0,%%eax\n\t"
- "call __down_failed\n"
- "2:"
- :"+m" (sem->count)
- :
- :"memory","ax");
+ asm volatile("# atomic down operation\n\t"
+ LOCK_PREFIX "decl %0\n\t" /* --sem->count */
+ "jns 2f\n"
+ "\tlea %0,%%eax\n\t"
+ "call __down_failed\n"
+ "2:"
+ : "+m" (sem->count)
+ :
+ : "memory", "ax");
}

/*
* Interruptible try to acquire a semaphore. If we obtained
* it, return zero. If we were interrupted, returns -EINTR
*/
-static inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore *sem)
{
int result;

might_sleep();
- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "lea %1,%%eax\n\t"
- "call __down_failed_interruptible\n"
- "2:"
- :"=&a" (result), "+m" (sem->count)
- :
- :"memory");
+ asm volatile("# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
+ "jns 2f\n\t"
+ "lea %1,%%eax\n\t"
+ "call __down_failed_interruptible\n"
+ "2:"
+ : "=&a" (result), "+m" (sem->count)
+ :
+ : "memory");
return result;
}

@@ -135,21 +133,20 @@ static inline int down_interruptible(struct semaphore * sem)
* Non-blockingly attempt to down() a semaphore.
* Returns zero if we acquired it
*/
-static inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore *sem)
{
int result;

- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "lea %1,%%eax\n\t"
- "call __down_failed_trylock\n\t"
- "2:\n"
- :"=&a" (result), "+m" (sem->count)
- :
- :"memory");
+ asm volatile("# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
+ "jns 2f\n\t"
+ "lea %1,%%eax\n\t"
+ "call __down_failed_trylock\n\t"
+ "2:\n"
+ : "=&a" (result), "+m" (sem->count)
+ :
+ : "memory");
return result;
}

@@ -157,18 +154,17 @@ static inline int down_trylock(struct semaphore * sem)
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
-static inline void up(struct semaphore * sem)
+static inline void up(struct semaphore *sem)
{
- __asm__ __volatile__(
- "# atomic up operation\n\t"
- LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jg 1f\n\t"
- "lea %0,%%eax\n\t"
- "call __up_wakeup\n"
- "1:"
- :"+m" (sem->count)
- :
- :"memory","ax");
+ asm volatile("# atomic up operation\n\t"
+ LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
+ "jg 1f\n\t"
+ "lea %0,%%eax\n\t"
+ "call __up_wakeup\n"
+ "1:"
+ : "+m" (sem->count)
+ :
+ : "memory", "ax");
}

#endif
--
1.5.4.rc2
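
[Stripped of the asm, the fast paths above are the textbook counting-semaphore
rule: down() decrements and sleeps if the count went negative, up() increments
and wakes a waiter if the result is still not positive. A single-threaded model
of the count logic only (hypothetical, no real blocking):]

#include <stdio.h>

static int count = 1;			/* DECLARE_MUTEX: one holder */

static void down(void)
{
	if (--count < 0)		/* "decl; jns 2f" fell through */
		printf("down: would call __down_failed and sleep\n");
}

static void up(void)
{
	if (++count <= 0)		/* "incl; jg 1f" fell through */
		printf("up: would call __up_wakeup\n");
}

int main(void)
{
	down();				/* acquires, count 0 */
	down();				/* contended, count -1 */
	up();				/* count 0: wake the waiter */
	up();				/* count 1: free again */
	return 0;
}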

2008-03-23 08:32:10

by Joe Perches

[permalink] [raw]
Subject: [PATCH 111/148] include/asm-x86/sigcontext32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/sigcontext32.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 6ffab4f..57a9686 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -26,7 +26,7 @@ struct _fpstate_ia32 {
__u32 cw;
__u32 sw;
__u32 tag; /* not compatible to 64bit twd */
- __u32 ipoff;
+ __u32 ipoff;
__u32 cssel;
__u32 dataoff;
__u32 datasel;
@@ -39,7 +39,7 @@ struct _fpstate_ia32 {
__u32 mxcsr;
__u32 reserved;
struct _fpxreg _fxsr_st[8];
- struct _xmmreg _xmm[8]; /* It's actually 16 */
+ struct _xmmreg _xmm[8]; /* It's actually 16 */
__u32 padding[56];
};

--
1.5.4.rc2

2008-03-23 08:32:34

by Joe Perches

[permalink] [raw]
Subject: [PATCH 108/148] include/asm-x86/semaphore_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/semaphore_64.h | 98 +++++++++++++++++++---------------------
1 files changed, 47 insertions(+), 51 deletions(-)

diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
index 7969430..5835d8e 100644
--- a/include/asm-x86/semaphore_64.h
+++ b/include/asm-x86/semaphore_64.h
@@ -57,11 +57,11 @@ struct semaphore {
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)

-static inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init(struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -74,12 +74,12 @@ static inline void sema_init (struct semaphore *sem, int val)
init_waitqueue_head(&sem->wait);
}

-static inline void init_MUTEX (struct semaphore *sem)
+static inline void init_MUTEX(struct semaphore *sem)
{
sema_init(sem, 1);
}

-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
sema_init(sem, 0);
}
@@ -89,51 +89,49 @@ asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
+asmlinkage void __down(struct semaphore *sem);
+asmlinkage int __down_interruptible(struct semaphore *sem);
+asmlinkage int __down_trylock(struct semaphore *sem);
+asmlinkage void __up(struct semaphore *sem);

/*
* This is ugly, but we want the default case to fall through.
* "__down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/x86_64/kernel/semaphore.c
*/
-static inline void down(struct semaphore * sem)
+static inline void down(struct semaphore *sem)
{
might_sleep();

- __asm__ __volatile__(
- "# atomic down operation\n\t"
- LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "jns 1f\n\t"
- "call __down_failed\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
+ asm volatile("# atomic down operation\n\t"
+ LOCK_PREFIX "decl %0\n\t" /* --sem->count */
+ "jns 1f\n\t"
+ "call __down_failed\n"
+ "1:"
+ : "=m" (sem->count)
+ : "D" (sem)
+ : "memory");
}

/*
* Interruptible try to acquire a semaphore. If we obtained
* it, return zero. If we were interrupted, returns -EINTR
*/
-static inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore *sem)
{
int result;

might_sleep();

- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_interruptible\n"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory");
+ asm volatile("# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
+ "jns 2f\n\t"
+ "call __down_failed_interruptible\n"
+ "2:\n"
+ : "=&a" (result), "=m" (sem->count)
+ : "D" (sem)
+ : "memory");
return result;
}

@@ -141,20 +139,19 @@ static inline int down_interruptible(struct semaphore * sem)
* Non-blockingly attempt to down() a semaphore.
* Returns zero if we acquired it
*/
-static inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore *sem)
{
int result;

- __asm__ __volatile__(
- "# atomic interruptible down operation\n\t"
- "xorl %0,%0\n\t"
- LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "jns 2f\n\t"
- "call __down_failed_trylock\n\t"
- "2:\n"
- :"=&a" (result), "=m" (sem->count)
- :"D" (sem)
- :"memory","cc");
+ asm volatile("# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
+ "jns 2f\n\t"
+ "call __down_failed_trylock\n\t"
+ "2:\n"
+ : "=&a" (result), "=m" (sem->count)
+ : "D" (sem)
+ : "memory", "cc");
return result;
}

@@ -164,17 +161,16 @@ static inline int down_trylock(struct semaphore * sem)
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
-static inline void up(struct semaphore * sem)
+static inline void up(struct semaphore *sem)
{
- __asm__ __volatile__(
- "# atomic up operation\n\t"
- LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jg 1f\n\t"
- "call __up_wakeup\n"
- "1:"
- :"=m" (sem->count)
- :"D" (sem)
- :"memory");
+ asm volatile("# atomic up operation\n\t"
+ LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
+ "jg 1f\n\t"
+ "call __up_wakeup\n"
+ "1:"
+ : "=m" (sem->count)
+ : "D" (sem)
+ : "memory");
}
#endif /* __KERNEL__ */
#endif
--
1.5.4.rc2

2008-03-23 08:32:57

by Joe Perches

[permalink] [raw]
Subject: [PATCH 114/148] include/asm-x86/smp_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/smp_32.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index f861d04..cb3ada2 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -18,8 +18,8 @@

extern cpumask_t cpu_callin_map;

-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
+extern void (*mtrr_hook)(void);
+extern void zap_low_mappings(void);

#ifdef CONFIG_SMP
/*
@@ -44,7 +44,7 @@ static inline int num_booting_cpus(void)

#ifdef CONFIG_X86_LOCAL_APIC

-static __inline int logical_smp_processor_id(void)
+static inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
--
1.5.4.rc2

2008-03-23 08:33:25

by Joe Perches

[permalink] [raw]
Subject: [PATCH 120/148] include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/suspend_32.h | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 1bbda3a..24e1c08 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; }

/* image of the saved processor state */
struct saved_context {
- u16 es, fs, gs, ss;
+ u16 es, fs, gs, ss;
unsigned long cr0, cr2, cr3, cr4;
struct desc_ptr gdt;
struct desc_ptr idt;
@@ -32,11 +32,11 @@ extern unsigned long saved_edi;
static inline void acpi_save_register_state(unsigned long return_point)
{
saved_eip = return_point;
- asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
- asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
- asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
- asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
- asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
+ asm volatile("movl %%esp,%0" : "=m" (saved_esp));
+ asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
+ asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
+ asm volatile("movl %%edi,%0" : "=m" (saved_edi));
+ asm volatile("movl %%esi,%0" : "=m" (saved_esi));
}

#define acpi_restore_register_state() do {} while (0)
--
1.5.4.rc2

2008-03-23 08:33:47

by Joe Perches

[permalink] [raw]
Subject: [PATCH 118/148] include/asm-x86/string_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/string_32.h | 141 +++++++++++++++++++++----------------------
1 files changed, 70 insertions(+), 71 deletions(-)

diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index 32afb63..c18e533 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -3,7 +3,7 @@

#ifdef __KERNEL__

-/* Let gcc decide wether to inline or use the out of line functions */
+/* Let gcc decide whether to inline or use the out of line functions */

#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);
@@ -32,16 +32,15 @@ extern size_t strlen(const char *s);
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
- __asm__ __volatile__(
- "rep ; movsl\n\t"
- "movl %4,%%ecx\n\t"
- "andl $3,%%ecx\n\t"
- "jz 1f\n\t"
- "rep ; movsb\n\t"
- "1:"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n/4), "g" (n), "1" ((long)to), "2" ((long)from)
- : "memory");
+ asm volatile("rep ; movsl\n\t"
+ "movl %4,%%ecx\n\t"
+ "andl $3,%%ecx\n\t"
+ "jz 1f\n\t"
+ "rep ; movsb\n\t"
+ "1:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
+ : "memory");
return to;
}

@@ -74,10 +73,12 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, size_
*(int *)to = *(int *)from;
*((char *)to + 4) = *((char *)from + 4);
return to;
- case 6: *(int *)to = *(int *)from;
+ case 6:
+ *(int *)to = *(int *)from;
*((short *)to + 2) = *((short *)from + 2);
return to;
- case 8: *(int *)to = *(int *)from;
+ case 8:
+ *(int *)to = *(int *)from;
*((int *)to + 1) = *((int *)from + 1);
return to;
#endif
@@ -88,54 +89,55 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, size_
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
- __asm__ __volatile__(
- "rep ; movsl"
- : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
- : "0" (n/4), "1" (edi), "2" (esi)
- : "memory"
+ asm volatile("rep ; movsl"
+ : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
+ : "0" (n / 4), "1" (edi), "2" (esi)
+ : "memory"
);
} else {
/* small block: don't clobber ecx + smaller code */
if (n >= 4 * 4)
- __asm__ __volatile__("movsl"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
if (n >= 3 * 4)
- __asm__ __volatile__("movsl"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
if (n >= 2 * 4)
- __asm__ __volatile__("movsl"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
if (n >= 1 * 4)
- __asm__ __volatile__("movsl"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
}
switch (n % 4) {
/* tail */
case 0:
return to;
case 1:
- __asm__ __volatile__("movsb"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ asm volatile("movsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
return to;
- case 2: __asm__ __volatile__("movsw"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ case 2:
+ asm volatile("movsw"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
return to;
- default: __asm__ __volatile__("movsw\n\tmovsb"
- : "=&D"(edi), "=&S"(esi)
- : "0"(edi), "1"(esi)
- : "memory");
+ default:
+ asm volatile("movsw\n\tmovsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
return to;
}
}
@@ -193,12 +195,11 @@ extern void *memchr(const void *cs, int c, size_t count);
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
- __asm__ __volatile__(
- "rep\n\t"
- "stosb"
- : "=&c" (d0), "=&D" (d1)
- : "a" (c), "1" (s), "0" (count)
- : "memory");
+ asm volatile("rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "1" (s), "0" (count)
+ : "memory");
return s;
}

@@ -213,18 +214,17 @@ static inline void *__memset_generic(void *s, char c, size_t count)
static __always_inline void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
- __asm__ __volatile__(
- "rep ; stosl\n\t"
- "testb $2,%b3\n\t"
- "je 1f\n\t"
- "stosw\n"
- "1:\ttestb $1,%b3\n\t"
- "je 2f\n\t"
- "stosb\n"
- "2:"
- : "=&c" (d0), "=&D" (d1)
- : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
- : "memory");
+ asm volatile("rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
+ : "memory");
return s;
}

@@ -260,13 +260,12 @@ static __always_inline void *__constant_c_and_count_memset(void *s, unsigned lon
return s;
}

-#define COMMON(x) \
- __asm__ __volatile__( \
- "rep ; stosl" \
- x \
- : "=&c" (d0), "=&D" (d1) \
- : "a" (pattern), "0" (count/4), "1" ((long)s) \
- : "memory")
+#define COMMON(x) \
+ asm volatile("rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (pattern), "0" (count/4), "1" ((long)s) \
+ : "memory")

{
int d0, d1;
--
1.5.4.rc2
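
[A standalone version of the "rep ; movsl" pattern that __memcpy() above is
built on: copy n/4 dwords, then the remaining 0-3 tail bytes with rep movsb.
This mirrors the 32-bit kernel code, so it is a sketch for gcc -m32 only:]

#include <stdio.h>
#include <string.h>
#include <stddef.h>

static void *rep_memcpy(void *to, const void *from, size_t n)
{
	int d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "movl %4,%%ecx\n\t"
		     "andl $3,%%ecx\n\t"	/* tail byte count */
		     "jz 1f\n\t"
		     "rep ; movsb\n\t"
		     "1:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}

int main(void)
{
	char src[13] = "hello, world";
	char dst[13] = { 0 };

	rep_memcpy(dst, src, sizeof(src));
	printf("%s (%s)\n", dst, memcmp(dst, src, 13) ? "FAIL" : "ok");
	return 0;
}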

2008-03-23 08:34:23

by Joe Perches

[permalink] [raw]
Subject: [PATCH 104/148] include/asm-x86/resume-trace.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/resume-trace.h | 15 ++++++++-------
1 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 46f725b..2557514 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -3,16 +3,17 @@

#include <asm/asm.h>

-#define TRACE_RESUME(user) do { \
+#define TRACE_RESUME(user) \
+do { \
if (pm_trace_enabled) { \
void *tracedata; \
asm volatile(_ASM_MOV_UL " $1f,%0\n" \
- ".section .tracedata,\"a\"\n" \
- "1:\t.word %c1\n\t" \
- _ASM_PTR " %c2\n" \
- ".previous" \
- :"=r" (tracedata) \
- : "i" (__LINE__), "i" (__FILE__)); \
+ ".section .tracedata,\"a\"\n" \
+ "1:\t.word %c1\n\t" \
+ _ASM_PTR " %c2\n" \
+ ".previous" \
+ :"=r" (tracedata) \
+ : "i" (__LINE__), "i" (__FILE__)); \
generate_resume_trace(tracedata, user); \
} \
} while (0)
--
1.5.4.rc2
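
[TRACE_RESUME() is wrapped in do { ... } while (0) so that a multi-statement
macro behaves as one statement, e.g. under an unbraced if. A minimal
illustration with a hypothetical macro:]

#include <stdio.h>

#define REPORT(x)					\
do {							\
	printf("value: %d\n", (x));			\
	printf("doubled: %d\n", 2 * (x));		\
} while (0)

int main(void)
{
	int n = 3;

	if (n > 0)
		REPORT(n);	/* expands to a single statement; without the
				 * do/while wrapper, only the first printf
				 * would be governed by the if, and a bare
				 * { } block would break before the else */
	else
		printf("non-positive\n");
	return 0;
}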

2008-03-23 08:34:48

by Joe Perches

[permalink] [raw]
Subject: [PATCH 115/148] include/asm-x86/smp_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/smp_64.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index fd709cb..c53a011 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -24,9 +24,9 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
#define raw_smp_processor_id() read_pda(cpunumber)

#define stack_smp_processor_id() \
- ({ \
+({ \
struct thread_info *ti; \
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
ti->cpu; \
})

@@ -46,7 +46,7 @@ static inline int num_booting_cpus(void)

#define safe_smp_processor_id() smp_processor_id()

-static __inline int logical_smp_processor_id(void)
+static inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
--
1.5.4.rc2
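
[stack_smp_processor_id() above works because the kernel stack is a
power-of-two sized, naturally aligned block with struct thread_info at its
base, so masking any in-stack address (here %rsp, via CURRENT_MASK) recovers
it. A userspace model of the masking, using an aligned allocation:]

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define STACK_SIZE	8192	/* must be a power of two */

struct thread_info { int cpu; };

int main(void)
{
	void *stack = aligned_alloc(STACK_SIZE, STACK_SIZE);
	struct thread_info *ti = stack;
	char *some_sp = (char *)stack + 5000;	/* any address inside */

	ti->cpu = 3;
	/* mask away the low bits to land back on the block base */
	ti = (struct thread_info *)
		((uintptr_t)some_sp & ~(uintptr_t)(STACK_SIZE - 1));
	printf("cpu = %d\n", ti->cpu);		/* prints 3 */
	free(stack);
	return 0;
}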

2008-03-23 08:35:26

by Joe Perches

[permalink] [raw]
Subject: [PATCH 003/148] include/asm-x86/a.out-core.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/a.out-core.h | 12 +++++++-----
1 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index d2b6e11..714207a 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->magic = CMAGIC;
dump->start_code = 0;
dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
+ >> PAGE_SHIFT;
dump->u_dsize -= dump->u_tsize;
dump->u_ssize = 0;
dump->u_debugreg[0] = current->thread.debugreg0;
@@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->u_debugreg[7] = current->thread.debugreg7;

if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+ dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
+ >> PAGE_SHIFT;

dump->regs.bx = regs->bx;
dump->regs.cx = regs->cx;
@@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
- savesegment(gs,gs);
+ savesegment(gs, gs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
@@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.sp = regs->sp;
dump->regs.ss = (u16)regs->ss;

- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+ dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}

#endif /* CONFIG_X86_32 */
--
1.5.4.rc2
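
[The dump-size arithmetic reflowed above converts byte counts to page counts:
shifting right by PAGE_SHIFT divides by PAGE_SIZE, and adding PAGE_SIZE - 1
first makes the division round up, as u_dsize does with brk. A small check:]

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long brk = 5 * PAGE_SIZE + 1;	/* one byte into page 6 */

	printf("truncating:  %lu pages\n", brk >> PAGE_SHIFT);
	printf("rounding up: %lu pages\n",
	       (brk + (PAGE_SIZE - 1)) >> PAGE_SHIFT);
	return 0;
}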

2008-03-23 08:35:55

by Joe Perches

[permalink] [raw]
Subject: [PATCH 122/148] include/asm-x86/swiotlb.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/swiotlb.h | 28 ++++++++++++++--------------
1 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index f9c5895..f5d9e74 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -8,15 +8,15 @@
extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
size_t size, int dir);
extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
+ dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir);
+ size_t size, int dir);
extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t dev_addr,
- size_t size, int dir);
+ dma_addr_t dev_addr,
+ size_t size, int dir);
extern void swiotlb_sync_single_for_device(struct device *hwdev,
- dma_addr_t dev_addr,
- size_t size, int dir);
+ dma_addr_t dev_addr,
+ size_t size, int dir);
extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
dma_addr_t dev_addr,
unsigned long offset,
@@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
unsigned long offset,
size_t size, int dir);
extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int dir);
+ struct scatterlist *sg, int nelems,
+ int dir);
extern void swiotlb_sync_sg_for_device(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int dir);
+ struct scatterlist *sg, int nelems,
+ int dir);
extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
+ int nents, int direction);
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
+ int nents, int direction);
extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern void swiotlb_init(void);

--
1.5.4.rc2

2008-03-23 08:36:29

by Joe Perches

[permalink] [raw]
Subject: [PATCH 119/148] include/asm-x86/string_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/string_64.h | 66 +++++++++++++++++++++---------------------
1 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
index e583da7..52b5ab3 100644
--- a/include/asm-x86/string_64.h
+++ b/include/asm-x86/string_64.h
@@ -3,26 +3,24 @@

#ifdef __KERNEL__

-/* Written 2002 by Andi Kleen */
+/* Written 2002 by Andi Kleen */

-/* Only used for special circumstances. Stolen from i386/string.h */
-static __always_inline void *
-__inline_memcpy(void * to, const void * from, size_t n)
+/* Only used for special circumstances. Stolen from i386/string.h */
+static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
-unsigned long d0, d1, d2;
-__asm__ __volatile__(
- "rep ; movsl\n\t"
- "testb $2,%b4\n\t"
- "je 1f\n\t"
- "movsw\n"
- "1:\ttestb $1,%b4\n\t"
- "je 2f\n\t"
- "movsb\n"
- "2:"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
- : "memory");
-return (to);
+ unsigned long d0, d1, d2;
+ asm volatile("rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
+ : "memory");
+ return to;
}

/* Even with __builtin_ the compiler may decide to use the out of line
@@ -32,28 +30,30 @@ return (to);
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
-extern void *__memcpy(void *to, const void *from, size_t len);
-#define memcpy(dst,src,len) \
- ({ size_t __len = (len); \
- void *__ret; \
- if (__builtin_constant_p(len) && __len >= 64) \
- __ret = __memcpy((dst),(src),__len); \
- else \
- __ret = __builtin_memcpy((dst),(src),__len); \
- __ret; })
+extern void *__memcpy(void *to, const void *from, size_t len);
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = __memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
#endif

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
-void * memmove(void * dest,const void *src,size_t count);
+void *memmove(void *dest, const void *src, size_t count);

-int memcmp(const void * cs,const void * ct,size_t count);
-size_t strlen(const char * s);
-char *strcpy(char * dest,const char *src);
-char *strcat(char * dest, const char * src);
-int strcmp(const char * cs,const char * ct);
+int memcmp(const void *cs, const void *ct, size_t count);
+size_t strlen(const char *s);
+char *strcpy(char *dest, const char *src);
+char *strcat(char *dest, const char *src);
+int strcmp(const char *cs, const char *ct);

#endif /* __KERNEL__ */

--
1.5.4.rc2
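
[The memcpy() macro reindented above picks an implementation at compile time:
__builtin_constant_p(len) is true only when gcc can see the length as a
constant, and large constant copies go out of line. A sketch of the same
dispatch with stand-in functions (GNU C statement expression, gcc only):]

#include <stdio.h>
#include <string.h>

static void *big_copy(void *dst, const void *src, size_t len)
{
	printf("out-of-line copy, %zu bytes\n", len);
	return memcpy(dst, src, len);
}

#define my_memcpy(dst, src, len)				\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = big_copy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})

int main(void)
{
	char a[128] = "x", b[128];

	my_memcpy(b, a, 128);	/* constant >= 64: calls big_copy() */
	my_memcpy(b, a, 16);	/* small constant: inline builtin */
	return 0;
}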

2008-03-23 08:36:53

by Joe Perches

[permalink] [raw]
Subject: [PATCH 006/148] include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/atomic_32.h | 143 ++++++++++++++++++++----------------------
1 files changed, 68 insertions(+), 75 deletions(-)

diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 437aac8..21a4825 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -15,138 +15,133 @@
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
-typedef struct { int counter; } atomic_t;
+typedef struct {
+ int counter;
+} atomic_t;

#define ATOMIC_INIT(i) { (i) }

/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically reads the value of @v.
- */
+ */
#define atomic_read(v) ((v)->counter)

/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
- *
+ *
* Atomically sets the value of @v to @i.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))

/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
- *
+ *
* Atomically adds @i to @v.
*/
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
}

/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v.
*/
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
}

/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
- *
+ *
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}

/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
+ */
+static inline void atomic_inc(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"+m" (v->counter));
+ asm volatile(LOCK_PREFIX "incl %0"
+ : "+m" (v->counter));
}

/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
+ */
+static inline void atomic_dec(atomic_t *v)
{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"+m" (v->counter));
+ asm volatile(LOCK_PREFIX "decl %0"
+ : "+m" (v->counter));
}

/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
+ asm volatile(LOCK_PREFIX "decl %0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}

/**
- * atomic_inc_and_test - increment and test
+ * atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
- *
+ *
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0; sete %1"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}

@@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
- *
+ *
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;

- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}

@@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
- if(unlikely(boot_cpu_data.x86 <= 3))
+ if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1"
- :"+r" (i), "+m" (v->counter)
- : : "memory");
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;

#ifdef CONFIG_M386
@@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */
*
* Atomically subtracts @i from @v and returns @v - @i
*/
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- return atomic_add_return(-i,v);
+ return atomic_add_return(-i, v);
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
@@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+#define atomic_clear_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "andl %0,%1" \
+ : : "r" (~(mask)), "m" (*(addr)) : "memory")

-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" (mask),"m" (*(addr)) : "memory")
+#define atomic_set_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "orl %0,%1" \
+ : : "r" (mask), "m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
--
1.5.4.rc2
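
[atomic_add_return() above is built on "lock xaddl", which atomically adds a
register into memory and hands back the old value in that register; adding the
increment again yields the post-add result. A standalone x86 version of the
same pattern (gcc extended asm; a sketch of the technique, not the kernel
header):]

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_add_return(int i, atomic_t *v)
{
	int old = i;

	asm volatile("lock; xaddl %0, %1"
		     : "+r" (old), "+m" (v->counter)	/* old <- old value */
		     : : "memory");
	return old + i;		/* old value + increment = new value */
}

int main(void)
{
	atomic_t v = { 40 };
	int ret = atomic_add_return(2, &v);

	printf("returned %d, counter %d\n", ret, v.counter);	/* 42, 42 */
	return 0;
}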

2008-03-23 08:37:32

by Joe Perches

[permalink] [raw]
Subject: [PATCH 123/148] include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/sync_bitops.h | 56 ++++++++++++++++++++--------------------
1 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index bc249f4..f1078a5 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/

-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)

/**
* sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
{
- __asm__ __volatile__("lock; btsl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr)
- : "memory");
+ asm volatile("lock; btsl %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
}

/**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
{
- __asm__ __volatile__("lock; btrl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr)
- : "memory");
+ asm volatile("lock; btrl %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
}

/**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
{
- __asm__ __volatile__("lock; btcl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr)
- : "memory");
+ asm volatile("lock; btcl %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
}

/**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;

- __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
+ asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
return oldbit;
}

@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;

- __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
+ asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
return oldbit;
}

@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;

- __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
+ asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
return oldbit;
}

--
1.5.4.rc2
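
Worth noting while reading this diff: unlike set_bit() and friends, the
sync_ variants hard-code a literal "lock;" prefix instead of LOCK_PREFIX,
which the kernel patches out on uniprocessor builds. They therefore stay
atomic against agents outside the kernel's own SMP view, e.g. a hypervisor
sharing the page. A minimal sketch, with a hypothetical shared bitmap:

	/* Hypothetical bitmap shared with another domain/agent. */
	static unsigned long shared_pending[1];

	static void post_event(int nr)
	{
		sync_set_bit(nr, shared_pending);  /* locked even on CONFIG_SMP=n */
	}

	static int take_event(int nr)
	{
		/* returns the old bit value; implies a memory barrier */
		return sync_test_and_clear_bit(nr, shared_pending);
	}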

2008-03-23 08:38:01

by Joe Perches

Subject: [PATCH 116/148] include/asm-x86/spinlock.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/spinlock.h | 105 +++++++++++++++++++++-----------------------
1 files changed, 50 insertions(+), 55 deletions(-)

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 23804c1..47dfe26 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
short inc = 0x0100;

- __asm__ __volatile__ (
+ asm volatile (
LOCK_PREFIX "xaddw %w0, %1\n"
"1:\t"
"cmpb %h0, %b0\n\t"
@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
/* don't need lfence here, because loads are in-order */
"jmp 1b\n"
"2:"
- :"+Q" (inc), "+m" (lock->slock)
+ : "+Q" (inc), "+m" (lock->slock)
:
- :"memory", "cc");
+ : "memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
int tmp;
short new;

- asm volatile(
- "movw %2,%w0\n\t"
- "cmpb %h0,%b0\n\t"
- "jne 1f\n\t"
- "movw %w0,%w1\n\t"
- "incb %h1\n\t"
- "lock ; cmpxchgw %w1,%2\n\t"
- "1:"
- "sete %b1\n\t"
- "movzbl %b1,%0\n\t"
- :"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
- :
- : "memory", "cc");
+ asm volatile("movw %2,%w0\n\t"
+ "cmpb %h0,%b0\n\t"
+ "jne 1f\n\t"
+ "movw %w0,%w1\n\t"
+ "incb %h1\n\t"
+ "lock ; cmpxchgw %w1,%2\n\t"
+ "1:"
+ "sete %b1\n\t"
+ "movzbl %b1,%0\n\t"
+ : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+ :
+ : "memory", "cc");

return tmp;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- UNLOCK_LOCK_PREFIX "incb %0"
- :"+m" (lock->slock)
- :
- :"memory", "cc");
+ asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+ : "+m" (lock->slock)
+ :
+ : "memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
int inc = 0x00010000;
int tmp;

- __asm__ __volatile__ (
- "lock ; xaddl %0, %1\n"
- "movzwl %w0, %2\n\t"
- "shrl $16, %0\n\t"
- "1:\t"
- "cmpl %0, %2\n\t"
- "je 2f\n\t"
- "rep ; nop\n\t"
- "movzwl %1, %2\n\t"
- /* don't need lfence here, because loads are in-order */
- "jmp 1b\n"
- "2:"
- :"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
- :
- :"memory", "cc");
+ asm volatile("lock ; xaddl %0, %1\n"
+ "movzwl %w0, %2\n\t"
+ "shrl $16, %0\n\t"
+ "1:\t"
+ "cmpl %0, %2\n\t"
+ "je 2f\n\t"
+ "rep ; nop\n\t"
+ "movzwl %1, %2\n\t"
+ /* don't need lfence here, because loads are in-order */
+ "jmp 1b\n"
+ "2:"
+ : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+ :
+ : "memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
int tmp;
int new;

- asm volatile(
- "movl %2,%0\n\t"
- "movl %0,%1\n\t"
- "roll $16, %0\n\t"
- "cmpl %0,%1\n\t"
- "jne 1f\n\t"
- "addl $0x00010000, %1\n\t"
- "lock ; cmpxchgl %1,%2\n\t"
- "1:"
- "sete %b1\n\t"
- "movzbl %b1,%0\n\t"
- :"=&a" (tmp), "=r" (new), "+m" (lock->slock)
- :
- : "memory", "cc");
+ asm volatile("movl %2,%0\n\t"
+ "movl %0,%1\n\t"
+ "roll $16, %0\n\t"
+ "cmpl %0,%1\n\t"
+ "jne 1f\n\t"
+ "addl $0x00010000, %1\n\t"
+ "lock ; cmpxchgl %1,%2\n\t"
+ "1:"
+ "sete %b1\n\t"
+ "movzbl %b1,%0\n\t"
+ : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+ :
+ : "memory", "cc");

return tmp;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- UNLOCK_LOCK_PREFIX "incw %0"
- :"+m" (lock->slock)
- :
- :"memory", "cc");
+ asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+ : "+m" (lock->slock)
+ :
+ : "memory", "cc");
}
#endif

--
1.5.4.rc2
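
The asm reformatted above is the byte-granular ticket lock, which is easier
to follow in C. A sketch of what it implements, assuming the <= 255 CPU
variant (16-bit slock: low byte = current owner, high byte = next ticket)
and little-endian x86; GCC __atomic builtins stand in for xaddw/incb:

	static void ticket_lock(volatile unsigned short *slock)
	{
		/* xaddw %w0,%1: atomically take a ticket and bump next-ticket */
		unsigned short old = __atomic_fetch_add(slock, 0x0100,
							__ATOMIC_ACQUIRE);
		unsigned char my_ticket = old >> 8;

		/* cmpb %h0,%b0 / jmp 1b: spin until the owner byte reaches us */
		while ((*slock & 0xff) != my_ticket)
			;		/* the asm inserts "rep ; nop" (pause) */
	}

	static void ticket_unlock(volatile unsigned short *slock)
	{
		/* incb %0: bump only the owner byte (low byte on little-endian) */
		__atomic_fetch_add((volatile unsigned char *)slock, 1,
				   __ATOMIC_RELEASE);
	}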

2008-03-23 08:38:30

by Joe Perches

Subject: [PATCH 127/148] include/asm-x86/thread_info_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/thread_info_32.h | 88 ++++++++++++++++++++------------------
1 files changed, 46 insertions(+), 42 deletions(-)

diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 54140ce..5318599 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -20,7 +20,8 @@
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
- * - if the contents of this structure are changed, the assembly constants must also be changed
+ * - if the contents of this structure are changed,
+ * the assembly constants must also be changed
*/
#ifndef __ASSEMBLY__

@@ -30,18 +31,16 @@ struct thread_info {
unsigned long flags; /* low level flags */
unsigned long status; /* thread-synchronous flags */
__u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
-
-
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
mm_segment_t addr_limit; /* thread address space:
- 0-0xBFFFFFFF for user-thead
- 0-0xFFFFFFFF for kernel-thread
+ 0-0xBFFFFFFF user-thread
+ 0-0xFFFFFFFF kernel-thread
*/
void *sysenter_return;
struct restart_block restart_block;
-
- unsigned long previous_esp; /* ESP of the previous stack in case
- of nested (IRQ) stacks
+ unsigned long previous_esp; /* ESP of the previous stack in
+ case of nested (IRQ) stacks
*/
__u8 supervisor_stack[0];
};
@@ -90,22 +89,23 @@ register unsigned long current_stack_pointer asm("esp") __used;
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
- return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) ((struct thread_info *) \
- __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE)))
+#define alloc_thread_info(tsk) ((struct thread_info *) \
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
#else
-#define alloc_thread_info(tsk) ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
#endif

#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
+#define GET_THREAD_INFO(reg) \
movl $-THREAD_SIZE, reg; \
andl %esp, reg

@@ -117,14 +117,16 @@ static inline struct thread_info *current_thread_info(void)

/*
* thread information flags
- * - these are process state flags that various assembly files may need to access
+ * - these are process state flags that various
+ * assembly files may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to
+ user mode */
#define TIF_IRET 4 /* return with iret */
#define TIF_SYSCALL_EMU 5 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
@@ -141,36 +143,36 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */

-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_IRET (1<<TIF_IRET)
-#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
-#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED)
-#define _TIF_DEBUG (1<<TIF_DEBUG)
-#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
-#define _TIF_NOTSC (1<<TIF_NOTSC)
-#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_IRET (1 << TIF_IRET)
+#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
+#define _TIF_DEBUG (1 << TIF_DEBUG)
+#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_NOTSC (1 << TIF_NOTSC)
+#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
+#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
+#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
+#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)

/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK \
- (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_EMU))
+#define _TIF_WORK_MASK \
+ (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_EMU))
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)

/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
- _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
+#define _TIF_WORK_CTXSW \
+ (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
+ _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)

@@ -182,8 +184,10 @@ static inline struct thread_info *current_thread_info(void)
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task
+ this quantum (SMP) */
+#define TS_POLLING 0x0002 /* True if in idle loop
+ and not sleeping */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

--
1.5.4.rc2
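
The current_thread_info() line split above is the classic stack-masking
trick: thread_info lives at the bottom of the task's stack, so clearing the
low bits of %esp finds it. A worked example, assuming the common THREAD_SIZE
of 8 KiB:

	unsigned long esp  = 0xc03f5e80UL;	   /* some in-kernel stack address */
	unsigned long base = esp & ~(8192UL - 1);  /* == 0xc03f4000 */
	struct thread_info *ti = (struct thread_info *)base;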

2008-03-23 08:38:51

by Joe Perches

Subject: [PATCH 026/148] include/asm-x86/dma-mapping_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/dma-mapping_32.h | 130 ++++++++++++++++++-------------------
1 files changed, 63 insertions(+), 67 deletions(-)

diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index 55f01bd..4f6c2a4 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -12,14 +12,14 @@
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+ dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void *vaddr, dma_addr_t dma_handle);

-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
+static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
WARN_ON(size == 0);
@@ -27,16 +27,15 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
return virt_to_phys(ptr);
}

-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}

-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -54,88 +53,89 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
return nents;
}

-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
return page_to_phys(page) + offset;
}

-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}


-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}

-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
}

-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
flush_write_buffers();
}

-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction)
{
}

-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction)
{
flush_write_buffers();
}

-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
}

-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
flush_write_buffers();
}

-static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}

extern int forbid_dac;

-static inline int
-dma_supported(struct device *dev, u64 mask)
+static inline int dma_supported(struct device *dev, u64 mask)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < 0x00ffffff)
+ return 0;

/* Work around chipset bugs */
if (forbid_dac > 0 && mask > 0xffffffffULL)
@@ -144,10 +144,9 @@ dma_supported(struct device *dev, u64 mask)
return 1;
}

-static inline int
-dma_set_mask(struct device *dev, u64 mask)
+static inline int dma_set_mask(struct device *dev, u64 mask)
{
- if(!dev->dma_mask || !dma_supported(dev, mask))
+ if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;

*dev->dma_mask = mask;
@@ -155,8 +154,7 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}

-static inline int
-dma_get_cache_alignment(void)
+static inline int dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all x86, so return the
* maximum possible, to be safe */
@@ -165,23 +163,21 @@ dma_get_cache_alignment(void)

#define dma_is_consistent(d, h) (1)

-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
{
flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
- dma_addr_t device_addr, size_t size, int flags);
+extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size,
+ int flags);

-extern void
-dma_release_declared_memory(struct device *dev);
+extern void dma_release_declared_memory(struct device *dev);

-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
+extern void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr,
+ size_t size);

#endif
--
1.5.4.rc2
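
For context, the canonical pairing of these streaming-DMA calls, sketched
with a hypothetical start_tx()/dev/buf/len; on this flat 32-bit
implementation dma_map_single() is just virt_to_phys() and
dma_mapping_error() always returns 0, but the pattern keeps drivers
portable:

	static int start_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(handle))
			return -EIO;	/* cannot happen here, see above */

		/* ... point the device at 'handle' and start the transfer ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}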

2008-03-23 08:39:26

by Joe Perches

Subject: [PATCH 025/148] include/asm-x86/dma.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/dma.h | 45 ++++++++++++++++++++++-----------------------
1 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index e9733ce..ca1098a 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -12,7 +12,6 @@
#include <asm/io.h> /* need byte IO */
#include <linux/delay.h>

-
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
@@ -74,15 +73,15 @@
#ifdef CONFIG_X86_32

/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)

#else

/* 16MB ISA DMA zone */
-#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)

/* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
@@ -154,20 +153,20 @@

extern spinlock_t dma_spin_lock;

-static __inline__ unsigned long claim_dma_lock(void)
+static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}

-static __inline__ void release_dma_lock(unsigned long flags)
+static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
+static inline void enable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr, DMA1_MASK_REG);
@@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr)
dma_outb(dmanr & 3, DMA2_MASK_REG);
}

-static __inline__ void disable_dma(unsigned int dmanr)
+static inline void disable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
@@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
-static __inline__ void clear_dma_ff(unsigned int dmanr)
+static inline void clear_dma_ff(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(0, DMA1_CLEAR_FF_REG);
@@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
}

/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+static inline void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr <= 3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
@@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
switch (dmanr) {
case 0:
@@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
} else {
- dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
- dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
}
}

@@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
- dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
- dma_outb((count >> 8) & 0xff,
- ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
} else {
- dma_outb((count >> 1) & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
- dma_outb((count >> 9) & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
}
}

@@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
*
* Assumes DMA flip-flop is clear.
*/
-static __inline__ int get_dma_residue(unsigned int dmanr)
+static inline int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port;
/* using short to get 16-bit wrap around */
--
1.5.4.rc2
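
As the comment block in this header says, the helpers must run under the
DMA spinlock. The usual 8237 programming sequence, sketched with
hypothetical chan/phys_addr/len (DMA_MODE_READ is assumed from the mode
constants defined elsewhere in this header):

	static void setup_isa_dma(unsigned int chan, unsigned int phys_addr,
				  unsigned int len)
	{
		unsigned long flags = claim_dma_lock();

		disable_dma(chan);
		clear_dma_ff(chan);	/* reset the address flip-flop first */
		set_dma_mode(chan, DMA_MODE_READ);
		set_dma_addr(chan, phys_addr);
		set_dma_count(chan, len); /* bytes; even on channels 5-7 */
		enable_dma(chan);

		release_dma_lock(flags);
	}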

2008-03-23 08:39:50

by Joe Perches

Subject: [PATCH 039/148] include/asm-x86/geode.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/geode.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index c208398..b1bdf63 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -177,7 +177,7 @@ static inline int is_geode(void)
/* MFGPTs */

#define MFGPT_MAX_TIMERS 8
-#define MFGPT_TIMER_ANY -1
+#define MFGPT_TIMER_ANY (-1)

#define MFGPT_DOMAIN_WORKING 1
#define MFGPT_DOMAIN_STANDBY 2
--
1.5.4.rc2

2008-03-23 08:40:24

by Joe Perches

Subject: [PATCH 038/148] include/asm-x86/genapic_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/genapic_32.h | 89 +++++++++++++++++++++---------------------
1 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index b501ae7..5fa893d 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -18,18 +18,18 @@ struct mpc_config_bus;
struct mp_config_table;
struct mpc_config_processor;

-struct genapic {
- char *name;
- int (*probe)(void);
+struct genapic {
+ char *name;
+ int (*probe)(void);

int (*apic_id_registered)(void);
cpumask_t (*target_cpus)(void);
int int_delivery_mode;
- int int_dest_mode;
+ int int_dest_mode;
int ESR_DISABLE;
int apic_destination_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
- unsigned long (*check_apicid_present)(int apicid);
+ unsigned long (*check_apicid_present)(int apicid);
int no_balance_irq;
int no_ioapic_check;
void (*init_apic_ldr)(void);
@@ -37,21 +37,21 @@ struct genapic {

void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
- int (*apicid_to_node)(int logical_apicid);
+ int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
- void (*setup_portio_remap)(void);
+ void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);

/* mpparse */
/* When one of the next two hooks returns 1 the genapic
- is switched to this. Essentially they are additional probe
+ is switched to this. Essentially they are additional probe
functions. */
- int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
- char *productid);
+ int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
+ char *productid);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);

unsigned (*get_apic_id)(unsigned long x);
@@ -64,7 +64,7 @@ struct genapic {
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
#endif
-};
+};

#define APICFUNC(x) .x = x,

@@ -77,39 +77,40 @@ struct genapic {
#define IPIFUNC(x)
#endif

-#define APIC_INIT(aname, aprobe) { \
- .name = aname, \
- .probe = aprobe, \
- .int_delivery_mode = INT_DELIVERY_MODE, \
- .int_dest_mode = INT_DEST_MODE, \
- .no_balance_irq = NO_BALANCE_IRQ, \
- .ESR_DISABLE = esr_disable, \
- .apic_destination_logical = APIC_DEST_LOGICAL, \
- APICFUNC(apic_id_registered) \
- APICFUNC(target_cpus) \
- APICFUNC(check_apicid_used) \
- APICFUNC(check_apicid_present) \
- APICFUNC(init_apic_ldr) \
- APICFUNC(ioapic_phys_id_map) \
- APICFUNC(setup_apic_routing) \
- APICFUNC(multi_timer_check) \
- APICFUNC(apicid_to_node) \
- APICFUNC(cpu_to_logical_apicid) \
- APICFUNC(cpu_present_to_apicid) \
- APICFUNC(apicid_to_cpu_present) \
- APICFUNC(setup_portio_remap) \
- APICFUNC(check_phys_apicid_present) \
- APICFUNC(mps_oem_check) \
- APICFUNC(get_apic_id) \
- .apic_id_mask = APIC_ID_MASK, \
- APICFUNC(cpu_mask_to_apicid) \
- APICFUNC(acpi_madt_oem_check) \
- IPIFUNC(send_IPI_mask) \
- IPIFUNC(send_IPI_allbutself) \
- IPIFUNC(send_IPI_all) \
- APICFUNC(enable_apic_mode) \
- APICFUNC(phys_pkg_id) \
- }
+#define APIC_INIT(aname, aprobe) \
+{ \
+ .name = aname, \
+ .probe = aprobe, \
+ .int_delivery_mode = INT_DELIVERY_MODE, \
+ .int_dest_mode = INT_DEST_MODE, \
+ .no_balance_irq = NO_BALANCE_IRQ, \
+ .ESR_DISABLE = esr_disable, \
+ .apic_destination_logical = APIC_DEST_LOGICAL, \
+ APICFUNC(apic_id_registered) \
+ APICFUNC(target_cpus) \
+ APICFUNC(check_apicid_used) \
+ APICFUNC(check_apicid_present) \
+ APICFUNC(init_apic_ldr) \
+ APICFUNC(ioapic_phys_id_map) \
+ APICFUNC(setup_apic_routing) \
+ APICFUNC(multi_timer_check) \
+ APICFUNC(apicid_to_node) \
+ APICFUNC(cpu_to_logical_apicid) \
+ APICFUNC(cpu_present_to_apicid) \
+ APICFUNC(apicid_to_cpu_present) \
+ APICFUNC(setup_portio_remap) \
+ APICFUNC(check_phys_apicid_present) \
+ APICFUNC(mps_oem_check) \
+ APICFUNC(get_apic_id) \
+ .apic_id_mask = APIC_ID_MASK, \
+ APICFUNC(cpu_mask_to_apicid) \
+ APICFUNC(acpi_madt_oem_check) \
+ IPIFUNC(send_IPI_mask) \
+ IPIFUNC(send_IPI_allbutself) \
+ IPIFUNC(send_IPI_all) \
+ APICFUNC(enable_apic_mode) \
+ APICFUNC(phys_pkg_id) \
+}

extern struct genapic *genapic;

--
1.5.4.rc2
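
Since APICFUNC(x) expands to ".x = x,", the reindented APIC_INIT builds a
full designated-initializer body out of identically named functions already
in scope. A hypothetical subarchitecture instance:

	/* assumes probe_example(), apic_id_registered(), target_cpus(), ...
	 * are all defined before this point, as the subarch headers arrange */
	struct genapic apic_example = APIC_INIT("example", probe_example);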

2008-03-23 08:40:54

by Joe Perches

Subject: [PATCH 042/148] include/asm-x86/hypertransport.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/hypertransport.h | 15 +++++++++------
1 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index c16c6ff..d2bbd23 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -8,12 +8,14 @@
#define HT_IRQ_LOW_BASE 0xf8000000

#define HT_IRQ_LOW_VECTOR_SHIFT 16
-#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
+#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
+#define HT_IRQ_LOW_VECTOR(v) \
+ (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)

#define HT_IRQ_LOW_DEST_ID_SHIFT 8
-#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
+#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
+#define HT_IRQ_LOW_DEST_ID(v) \
+ (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)

#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
@@ -36,7 +38,8 @@


#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
-#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
+#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
+#define HT_IRQ_HIGH_DEST_ID(v) \
+ ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)

#endif /* ASM_HYPERTRANSPORT_H */
--
1.5.4.rc2
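
A quick worked value for the reformatted macros, using an arbitrary vector
0x31 and destination APIC ID 0x05:

	unsigned int low = HT_IRQ_LOW_BASE	  /* 0xf8000000 */
			 | HT_IRQ_LOW_VECTOR(0x31)  /* (0x31 << 16) & 0x00ff0000 = 0x00310000 */
			 | HT_IRQ_LOW_DEST_ID(0x05);/* (0x05 <<  8) & 0x0000ff00 = 0x00000500 */
	/* low == 0xf8310500 */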

2008-03-23 08:41:28

by Joe Perches

Subject: [PATCH 128/148] include/asm-x86/thread_info_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/thread_info_64.h | 78 ++++++++++++++++++++------------------
1 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index 92ff23d..f23fefc 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -29,9 +29,9 @@ struct thread_info {
__u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
-
- mm_segment_t addr_limit;
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+ mm_segment_t addr_limit;
struct restart_block restart_block;
#ifdef CONFIG_IA32_EMULATION
void __user *sysenter_return;
@@ -61,17 +61,17 @@ struct thread_info {
#define init_stack (init_thread_union.stack)

static inline struct thread_info *current_thread_info(void)
-{
+{
struct thread_info *ti;
ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
- return ti;
+ return ti;
}

/* do not use in interrupt context */
static inline struct thread_info *stack_thread_info(void)
{
struct thread_info *ti;
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+ asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
return ti;
}

@@ -82,8 +82,8 @@ static inline struct thread_info *stack_thread_info(void)
#define THREAD_FLAGS GFP_KERNEL
#endif

-#define alloc_thread_info(tsk) \
- ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+#define alloc_thread_info(tsk) \
+ ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))

#else /* !__ASSEMBLY__ */

@@ -96,7 +96,8 @@ static inline struct thread_info *stack_thread_info(void)

/*
* thread information flags
- * - these are process state flags that various assembly files may need to access
+ * - these are process state flags that various assembly files
+ * may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
* Warning: layout of LSW is hardcoded in entry.S
@@ -112,7 +113,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
/* 16 free */
-#define TIF_IA32 17 /* 32bit process */
+#define TIF_IA32 17 /* 32bit process */
#define TIF_FORK 18 /* ret_from_fork */
#define TIF_ABI_PENDING 19
#define TIF_MEMDIE 20
@@ -124,39 +125,40 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */

-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_IRET (1<<TIF_IRET)
-#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY)
-#define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED)
-#define _TIF_IA32 (1<<TIF_IA32)
-#define _TIF_FORK (1<<TIF_FORK)
-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
-#define _TIF_DEBUG (1<<TIF_DEBUG)
-#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
-#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR)
-#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_IRET (1 << TIF_IRET)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
+#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
+#define _TIF_IA32 (1 << TIF_IA32)
+#define _TIF_FORK (1 << TIF_FORK)
+#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
+#define _TIF_DEBUG (1 << TIF_DEBUG)
+#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
+#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
+#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
+#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)

/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK \
- (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
+#define _TIF_WORK_MASK \
+ (0x0000FFFF & \
+ ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)

-#define _TIF_DO_NOTIFY_MASK \
+#define _TIF_DO_NOTIFY_MASK \
(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)

/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
+#define _TIF_WORK_CTXSW \
+ (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)

@@ -169,9 +171,11 @@ static inline struct thread_info *stack_thread_info(void)
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task
+ this quantum (SMP) */
#define TS_COMPAT 0x0002 /* 32bit syscall active */
-#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
+#define TS_POLLING 0x0004 /* true if in idle loop
+ and not sleeping */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

--
1.5.4.rc2
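
One concrete reading of the reformatted bit definitions: TIF_IA32 is bit
17, so _TIF_IA32 == 1 << 17 == 0x00020000, which sits in the MSW exactly as
the flags comment requires (the work-to-be-done flags stay in the LSW,
whose layout entry.S hardcodes). A test in the style of tsk_is_polling()
might look like this; tsk_is_ia32() is hypothetical:

	#define tsk_is_ia32(t) (task_thread_info(t)->flags & _TIF_IA32)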

2008-03-23 08:41:53

by Joe Perches

Subject: [PATCH 136/148] include/asm-x86/unistd_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/unistd_32.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 984123a..8317d94 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -81,7 +81,7 @@
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
--
1.5.4.rc2

2008-03-23 08:42:24

by Joe Perches

Subject: [PATCH 146/148] include/asm-x86/vsyscall.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/vsyscall.h | 21 +++++++++++++--------
1 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
index 17b3700..8a53b1c 100644
--- a/include/asm-x86/vsyscall.h
+++ b/include/asm-x86/vsyscall.h
@@ -11,20 +11,25 @@ enum vsyscall_num {
#define VSYSCALL_SIZE 1024
#define VSYSCALL_END (-2UL << 20)
#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
+#define VSYSCALL_ADDR(vsyscall_nr) \
+ (VSYSCALL_START + VSYSCALL_SIZE * (vsyscall_nr))

#ifdef __KERNEL__
#include <linux/seqlock.h>

-#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+#define __section_vgetcpu_mode \
+ __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+#define __section_jiffies \
+ __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))

/* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __section_vsyscall_gtod_data __attribute__ \
- ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
-#define __section_vsyscall_clock __attribute__ \
- ((unused, __section__ (".vsyscall_clock"),aligned(16)))
-#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
+#define __section_vsyscall_gtod_data \
+ __attribute__ ((unused, __section__ (".vsyscall_gtod_data"), \
+ aligned(16)))
+#define __section_vsyscall_clock \
+ __attribute__ ((unused, __section__ (".vsyscall_clock"), aligned(16)))
+#define __vsyscall_fn \
+ __attribute__ ((unused, __section__(".vsyscall_fn")))

#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
--
1.5.4.rc2
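
For orientation, VSYSCALL_START (defined just above this hunk, and assumed
here to be the fixed x86-64 address 0xffffffffff600000 of this era) with
VSYSCALL_SIZE == 1024 makes the reformatted macro yield, for the three
entries of enum vsyscall_num:

	/*
	 * VSYSCALL_ADDR(0) == 0xffffffffff600000   (vgettimeofday)
	 * VSYSCALL_ADDR(1) == 0xffffffffff600400   (vtime)
	 * VSYSCALL_ADDR(2) == 0xffffffffff600800   (vgetcpu)
	 */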

2008-03-23 08:42:53

by Joe Perches

Subject: [PATCH 147/148] include/asm-x86/xor_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/xor_32.h | 494 +++++++++++++++++++++++-----------------------
1 files changed, 248 insertions(+), 246 deletions(-)

diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index a41ef1b..067b5c1 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -16,12 +16,12 @@
* Copyright (C) 1998 Ingo Molnar.
*/

-#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
-#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
-#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
-#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
-#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
-#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
+#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
+#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
+#define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
+#define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
+#define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
+#define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"

#include <asm/i387.h>

@@ -32,24 +32,24 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- ST(i,0) \
- XO1(i+1,1) \
- ST(i+1,1) \
- XO1(i+2,2) \
- ST(i+2,2) \
- XO1(i+3,3) \
- ST(i+3,3)
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ ST(i, 0) \
+ XO1(i+1, 1) \
+ ST(i+1, 1) \
+ XO1(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i + 3, 3)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
@@ -76,25 +76,25 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- ST(i,0) \
- XO2(i+1,1) \
- ST(i+1,1) \
- XO2(i+2,2) \
- ST(i+2,2) \
- XO2(i+3,3) \
- ST(i+3,3)
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ ST(i, 0) \
+ XO2(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO2(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i + 3, 3)

" .align 32 ;\n"
" 1: ;\n"
@@ -125,29 +125,29 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- ST(i,0) \
- XO3(i+1,1) \
- ST(i+1,1) \
- XO3(i+2,2) \
- ST(i+2,2) \
- XO3(i+3,3) \
- ST(i+3,3)
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ ST(i, 0) \
+ XO3(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO3(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i + 3, 3)

" .align 32 ;\n"
" 1: ;\n"
@@ -186,35 +186,35 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
because we modify p4 and p5 there, but we can't mark them
as read/write, otherwise we'd overflow the 10-asm-operands
limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
+ asm("" : "+r" (p4), "+r" (p5));

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- ST(i,0) \
- XO4(i+1,1) \
- ST(i+1,1) \
- XO4(i+2,2) \
- ST(i+2,2) \
- XO4(i+3,3) \
- ST(i+3,3)
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ ST(i, 0) \
+ XO4(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO4(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i + 3, 3)

" .align 32 ;\n"
" 1: ;\n"
@@ -233,13 +233,13 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
+ : "r" (p4), "r" (p5)
: "memory");

/* p4 and p5 were modified, and now the variables are dead.
Clobber them just to be sure nobody does something stupid
like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
+ asm("" : "=r" (p4), "=r" (p5));

kernel_fpu_end();
}
@@ -259,7 +259,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
" .align 32 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
@@ -286,7 +286,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
" pxor 56(%2), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
-
+
" addl $64, %1 ;\n"
" addl $64, %2 ;\n"
" decl %0 ;\n"
@@ -307,7 +307,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
@@ -342,7 +342,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" pxor 56(%3), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
-
+
" addl $64, %1 ;\n"
" addl $64, %2 ;\n"
" addl $64, %3 ;\n"
@@ -364,7 +364,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,

kernel_fpu_begin();

- __asm__ __volatile__ (
+ asm volatile(
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
@@ -407,7 +407,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" pxor 56(%4), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
-
+
" addl $64, %1 ;\n"
" addl $64, %2 ;\n"
" addl $64, %3 ;\n"
@@ -436,9 +436,9 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
because we modify p4 and p5 there, but we can't mark them
as read/write, otherwise we'd overflow the 10-asm-operands
limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
+ asm("" : "+r" (p4), "+r" (p5));

- __asm__ __volatile__ (
+ asm volatile(
" .align 32,0x90 ;\n"
" 1: ;\n"
" movq (%1), %%mm0 ;\n"
@@ -489,7 +489,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" pxor 56(%5), %%mm7 ;\n"
" movq %%mm6, 48(%1) ;\n"
" movq %%mm7, 56(%1) ;\n"
-
+
" addl $64, %1 ;\n"
" addl $64, %2 ;\n"
" addl $64, %3 ;\n"
@@ -505,7 +505,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
/* p4 and p5 were modified, and now the variables are dead.
Clobber them just to be sure nobody does something stupid
like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
+ asm("" : "=r" (p4), "=r" (p5));

kernel_fpu_end();
}
@@ -531,11 +531,12 @@ static struct xor_block_template xor_block_p5_mmx = {
* Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
*/

-#define XMMS_SAVE do { \
+#define XMMS_SAVE \
+do { \
preempt_disable(); \
cr0 = read_cr0(); \
clts(); \
- __asm__ __volatile__ ( \
+ asm volatile( \
"movups %%xmm0,(%0) ;\n\t" \
"movups %%xmm1,0x10(%0) ;\n\t" \
"movups %%xmm2,0x20(%0) ;\n\t" \
@@ -543,10 +544,11 @@ static struct xor_block_template xor_block_p5_mmx = {
: \
: "r" (xmm_save) \
: "memory"); \
-} while(0)
+} while (0)

-#define XMMS_RESTORE do { \
- __asm__ __volatile__ ( \
+#define XMMS_RESTORE \
+do { \
+ asm volatile( \
"sfence ;\n\t" \
"movups (%0),%%xmm0 ;\n\t" \
"movups 0x10(%0),%%xmm1 ;\n\t" \
@@ -557,76 +559,76 @@ static struct xor_block_template xor_block_p5_mmx = {
: "memory"); \
write_cr0(cr0); \
preempt_enable(); \
-} while(0)
+} while (0)

#define ALIGN16 __attribute__((aligned(16)))

#define OFFS(x) "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")"
#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
+#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
+#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
+#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
+#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
+#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
+#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
+#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"


static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
- unsigned long lines = bytes >> 8;
+ unsigned long lines = bytes >> 8;
char xmm_save[16*4] ALIGN16;
int cr0;

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
-#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ PF1(i + 2) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2)
:
- : "memory");
+ : "memory");

XMMS_RESTORE;
}
@@ -635,59 +637,59 @@ static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
- unsigned long lines = bytes >> 8;
+ unsigned long lines = bytes >> 8;
char xmm_save[16*4] ALIGN16;
int cr0;

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
+ PF1(i + 2) \
LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
+ PF2(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r"(p2), "+r"(p3)
:
- : "memory" );
+ : "memory" );

XMMS_RESTORE;
}
@@ -696,66 +698,66 @@ static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
- unsigned long lines = bytes >> 8;
+ unsigned long lines = bytes >> 8;
char xmm_save[16*4] ALIGN16;
int cr0;

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
+ PF1(i + 2) \
LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
+ PF2(i + 2) \
XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
+ PF3(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
:
- : "memory" );
+ : "memory" );

XMMS_RESTORE;
}
@@ -764,7 +766,7 @@ static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
- unsigned long lines = bytes >> 8;
+ unsigned long lines = bytes >> 8;
char xmm_save[16*4] ALIGN16;
int cr0;

@@ -776,65 +778,65 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
because we modify p4 and p5 there, but we can't mark them
as read/write, otherwise we'd overflow the 10-asm-operands
limit of GCC < 3.1. */
- __asm__ ("" : "+r" (p4), "+r" (p5));
+ asm("" : "+r" (p4), "+r" (p5));

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
+ PF1(i + 2) \
LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
+ PF2(i + 2) \
XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
PF3(i) \
- PF3(i+2) \
+ PF3(i + 2) \
XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
+ PF4(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
+ XO4(i + 1, 1) \
+ XO4(i + 2, 2) \
+ XO4(i + 3, 3) \
ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
+ " addl $256, %1 ;\n"
+ " addl $256, %2 ;\n"
+ " addl $256, %3 ;\n"
+ " addl $256, %4 ;\n"
+ " addl $256, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3)
: "r" (p4), "r" (p5)
@@ -843,17 +845,17 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
/* p4 and p5 were modified, and now the variables are dead.
Clobber them just to be sure nobody does something stupid
like assuming they have some legal value. */
- __asm__ ("" : "=r" (p4), "=r" (p5));
+ asm("" : "=r" (p4), "=r" (p5));

XMMS_RESTORE;
}

static struct xor_block_template xor_block_pIII_sse = {
- .name = "pIII_sse",
- .do_2 = xor_sse_2,
- .do_3 = xor_sse_3,
- .do_4 = xor_sse_4,
- .do_5 = xor_sse_5,
+ .name = "pIII_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
};

/* Also try the generic routines. */
@@ -861,21 +863,21 @@ static struct xor_block_template xor_block_pIII_sse = {

#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- if (cpu_has_xmm) \
- xor_speed(&xor_block_pIII_sse); \
- if (cpu_has_mmx) { \
- xor_speed(&xor_block_pII_mmx); \
- xor_speed(&xor_block_p5_mmx); \
- } \
- } while (0)
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ if (cpu_has_xmm) \
+ xor_speed(&xor_block_pIII_sse); \
+ if (cpu_has_mmx) { \
+ xor_speed(&xor_block_pII_mmx); \
+ xor_speed(&xor_block_p5_mmx); \
+ } \
+} while (0)

/* We force the use of the SSE xor block because it can write around L2.
We may also be able to load into the L1 only depending on how the cpu
deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
+#define XOR_SELECT_TEMPLATE(FASTEST) \
(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
--
1.5.4.rc2
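
A side note on the do { } while (0) shape these cleanups converge on:
wrapping a multi-statement macro this way makes the expansion a single
statement, so it composes safely with if/else. A minimal sketch with
hypothetical macro names:

	#define RESET_BAD(p)	((p)->a = 0); ((p)->b = 0)

	#define RESET_OK(p)				\
	do {						\
		(p)->a = 0;				\
		(p)->b = 0;				\
	} while (0)

	if (cond)
		RESET_BAD(p);	/* (p)->b = 0 escapes the if: bug */
	if (cond)
		RESET_OK(p);	/* whole body stays conditional */
	else
		other();	/* and the else still parses */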

2008-03-23 08:43:20

by Joe Perches

Subject: [PATCH 142/148] include/asm-x86/vga.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/vga.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
index 0ecf68a..0ccf804 100644
--- a/include/asm-x86/vga.h
+++ b/include/asm-x86/vga.h
@@ -12,9 +12,9 @@
* access the videoram directly without any black magic.
*/

-#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)

#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
+#define vga_writeb(x, y) (*(y) = (x))

#endif
--
1.5.4.rc2

2008-03-23 08:43:38

by Joe Perches

Subject: [PATCH 133/148] include/asm-x86/uaccess_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/uaccess_32.h | 316 +++++++++++++++++++++++++-----------------
1 files changed, 187 insertions(+), 129 deletions(-)

diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index fcc570e..8e7595c 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -32,7 +32,7 @@
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)

/*
* movsl can be slow when source and dest are not both 8-byte aligned
@@ -43,7 +43,9 @@ extern struct movsl_mask {
} ____cacheline_aligned_in_smp movsl_mask;
#endif

-#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr) \
+ ((unsigned long __force)(addr) < \
+ (current_thread_info()->addr_limit.seg))

/*
* Test whether a block of memory is a valid user space address.
@@ -54,13 +56,16 @@ extern struct movsl_mask {
*
* This needs 33-bit arithmetic. We have a carry...
*/
-#define __range_ok(addr,size) ({ \
- unsigned long flag,roksum; \
- __chk_user_ptr(addr); \
- asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
- :"=&r" (flag), "=r" (roksum) \
- :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
- flag; })
+#define __range_ok(addr, size) \
+({ \
+ unsigned long flag, roksum; \
+ __chk_user_ptr(addr); \
+ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
+ :"=&r" (flag), "=r" (roksum) \
+ :"1" (addr), "g" ((int)(size)), \
+ "rm" (current_thread_info()->addr_limit.seg)); \
+ flag; \
+})

/**
* access_ok: - Checks if a user space pointer is valid
@@ -81,7 +86,7 @@ extern struct movsl_mask {
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
+#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))

/*
* The exception table consists of pairs of addresses: the first is the
@@ -96,8 +101,7 @@ extern struct movsl_mask {
* on our cache or tlb entries.
*/

-struct exception_table_entry
-{
+struct exception_table_entry {
unsigned long insn, fixup;
};

@@ -122,13 +126,15 @@ extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

-#define __get_user_x(size,ret,x,ptr) \
- __asm__ __volatile__("call __get_user_" #size \
- :"=a" (ret),"=d" (x) \
- :"0" (ptr))
+#define __get_user_x(size, ret, x, ptr) \
+ asm volatile("call __get_user_" #size \
+ :"=a" (ret),"=d" (x) \
+ :"0" (ptr))
+

+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */

-/* Careful: we have to cast the result to the type of the pointer for sign reasons */
/**
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
@@ -146,15 +152,24 @@ extern void __get_user_4(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#define get_user(x,ptr) \
-({ int __ret_gu; \
+#define get_user(x, ptr) \
+({ \
+ int __ret_gu; \
unsigned long __val_gu; \
__chk_user_ptr(ptr); \
- switch(sizeof (*(ptr))) { \
- case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
- case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
- case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
- default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_x(1, __ret_gu, __val_gu, ptr); \
+ break; \
+ case 2: \
+ __get_user_x(2, __ret_gu, __val_gu, ptr); \
+ break; \
+ case 4: \
+ __get_user_x(4, __ret_gu, __val_gu, ptr); \
+ break; \
+ default: \
+ __get_user_x(X, __ret_gu, __val_gu, ptr); \
+ break; \
} \
(x) = (__typeof__(*(ptr)))__val_gu; \
__ret_gu; \
@@ -171,11 +186,25 @@ extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

-#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr))
-#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr))
+#define __put_user_1(x, ptr) \
+ asm volatile("call __put_user_1" : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_2(x, ptr) \
+ asm volatile("call __put_user_2" : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_4(x, ptr) \
+ asm volatile("call __put_user_4" : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_8(x, ptr) \
+ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
+ : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
+
+#define __put_user_X(x, ptr) \
+ asm volatile("call __put_user_X" : "=a" (__ret_pu) \
+ : "c" (ptr))

/**
* put_user: - Write a simple value into user space.
@@ -195,32 +224,43 @@ extern void __put_user_8(void);
*/
#ifdef CONFIG_X86_WP_WORKS_OK

-#define put_user(x,ptr) \
-({ int __ret_pu; \
+#define put_user(x, ptr) \
+({ \
+ int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
__pu_val = x; \
- switch(sizeof(*(ptr))) { \
- case 1: __put_user_1(__pu_val, ptr); break; \
- case 2: __put_user_2(__pu_val, ptr); break; \
- case 4: __put_user_4(__pu_val, ptr); break; \
- case 8: __put_user_8(__pu_val, ptr); break; \
- default:__put_user_X(__pu_val, ptr); break; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_1(__pu_val, ptr); \
+ break; \
+ case 2: \
+ __put_user_2(__pu_val, ptr); \
+ break; \
+ case 4: \
+ __put_user_4(__pu_val, ptr); \
+ break; \
+ case 8: \
+ __put_user_8(__pu_val, ptr); \
+ break; \
+ default: \
+ __put_user_X(__pu_val, ptr); \
+ break; \
} \
__ret_pu; \
})

#else
-#define put_user(x,ptr) \
+#define put_user(x, ptr) \
({ \
- int __ret_pu; \
- __typeof__(*(ptr)) __pus_tmp = x; \
- __ret_pu=0; \
- if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
- sizeof(*(ptr))) != 0)) \
- __ret_pu=-EFAULT; \
- __ret_pu; \
- })
+ int __ret_pu; \
+ __typeof__(*(ptr))__pus_tmp = x; \
+ __ret_pu = 0; \
+ if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
+ sizeof(*(ptr))) != 0)) \
+ __ret_pu = -EFAULT; \
+ __ret_pu; \
+})


#endif
@@ -245,8 +285,8 @@ extern void __put_user_8(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))


/**
@@ -268,54 +308,62 @@ extern void __put_user_8(void);
*
* Returns zero on success, or -EFAULT on error.
*/
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

-#define __put_user_nocheck(x,ptr,size) \
+#define __put_user_nocheck(x, ptr, size) \
({ \
long __pu_err; \
- __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
+ __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
__pu_err; \
})


-#define __put_user_u64(x, addr, err) \
- __asm__ __volatile__( \
- "1: movl %%eax,0(%2)\n" \
- "2: movl %%edx,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
- " jmp 3b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,4b) \
- _ASM_EXTABLE(2b,4b) \
- : "=r"(err) \
- : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+#define __put_user_u64(x, addr, err) \
+ asm volatile("1: movl %%eax,0(%2)\n" \
+ "2: movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (err) \
+ : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_X86_WP_WORKS_OK

-#define __put_user_size(x,ptr,size,retval,errret) \
+#define __put_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \
- case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
- case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \
- case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
- default: __put_user_bad(); \
+ case 1: \
+ __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
+ break; \
+ case 2: \
+ __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
+ break; \
+ case 4: \
+ __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \
+ break; \
+ case 8: \
+ __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
+ break; \
+ default: \
+ __put_user_bad(); \
} \
} while (0)

#else

-#define __put_user_size(x,ptr,size,retval,errret) \
+#define __put_user_size(x, ptr, size, retval, errret) \
do { \
- __typeof__(*(ptr)) __pus_tmp = x; \
+ __typeof__(*(ptr))__pus_tmp = x; \
retval = 0; \
\
- if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
+ if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
retval = errret; \
} while (0)

@@ -329,65 +377,70 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- __asm__ __volatile__( \
- "1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
-
-
-#define __get_user_nocheck(x,ptr,size) \
-({ \
- long __gu_err; \
- unsigned long __gu_val; \
- __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
+ asm volatile("1: mov"itype" %"rtype"1,%2\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
})

extern long __get_user_bad(void);

-#define __get_user_size(x,ptr,size,retval,errret) \
+#define __get_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \
- case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \
- case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \
- default: (x) = __get_user_bad(); \
+ case 1: \
+ __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
+ break; \
+ case 2: \
+ __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
+ break; \
+ case 4: \
+ __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- __asm__ __volatile__( \
- "1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(errret), "0"(err))
-
-
-unsigned long __must_check __copy_to_user_ll(void __user *to,
- const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll(void *to,
- const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero(void *to,
- const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache(void *to,
- const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
- const void __user *from, unsigned long n);
+ asm volatile("1: mov"itype" %2,%"rtype"1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+
+unsigned long __must_check __copy_to_user_ll
+ (void __user *to, const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ (void *to, const void __user *from, unsigned long n);

/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
@@ -416,13 +469,16 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)

switch (n) {
case 1:
- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
+ __put_user_size(*(u8 *)from, (u8 __user *)to,
+ 1, ret, 1);
return ret;
case 2:
- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
+ __put_user_size(*(u16 *)from, (u16 __user *)to,
+ 2, ret, 2);
return ret;
case 4:
- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
+ __put_user_size(*(u32 *)from, (u32 __user *)to,
+ 4, ret, 4);
return ret;
}
}
@@ -545,19 +601,21 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
}

static __always_inline unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_ll_nocache_nozero(to, from, n);
}

unsigned long __must_check copy_to_user(void __user *to,
- const void *from, unsigned long n);
+ const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
- const void __user *from, unsigned long n);
+ const void __user *from,
+ unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
- long count);
+ long count);
long __must_check __strncpy_from_user(char *dst,
- const char __user *src, long count);
+ const char __user *src, long count);

/**
* strlen_user: - Get the size of a string in user space.
--
1.5.4.rc2
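
For readers of the __range_ok() asm above: it folds a 33-bit range check
into two subtract-with-borrow steps. Roughly, in C (illustrative only;
the real code needs asm because addr + size can carry out of 32 bits):

	unsigned long sum = addr + size;
	int bad = (sum < addr) ||	/* carry out of bit 31 */
		  (sum > current_thread_info()->addr_limit.seg);
	/* flag == 0 in the macro corresponds to bad == 0 here */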

2008-03-23 08:43:54

by Joe Perches

Subject: [PATCH 073/148] include/asm-x86/mpspec_def.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mpspec_def.h | 35 +++++++++++++----------------------
1 files changed, 13 insertions(+), 22 deletions(-)

diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index 1f35691..dc6ef85 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -11,7 +11,7 @@
* information is.
*/

-#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
+#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')

#ifdef CONFIG_X86_32
# define MAX_MPC_ENTRY 1024
@@ -23,8 +23,7 @@
# define MAX_APICS 255
#endif

-struct intel_mp_floating
-{
+struct intel_mp_floating {
char mpf_signature[4]; /* "_MP_" */
unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
@@ -39,14 +38,13 @@ struct intel_mp_floating

#define MPC_SIGNATURE "PCMP"

-struct mp_config_table
-{
+struct mp_config_table {
char mpc_signature[4];
unsigned short mpc_length; /* Size of table */
- char mpc_spec; /* 0x01 */
- char mpc_checksum;
- char mpc_oem[8];
- char mpc_productid[12];
+ char mpc_spec; /* 0x01 */
+ char mpc_checksum;
+ char mpc_oem[8];
+ char mpc_productid[12];
unsigned int mpc_oemptr; /* 0 if not present */
unsigned short mpc_oemsize; /* 0 if not present */
unsigned short mpc_oemcount;
@@ -71,8 +69,7 @@ struct mp_config_table
#define CPU_MODEL_MASK 0x00F0
#define CPU_FAMILY_MASK 0x0F00

-struct mpc_config_processor
-{
+struct mpc_config_processor {
unsigned char mpc_type;
unsigned char mpc_apicid; /* Local APIC number */
unsigned char mpc_apicver; /* Its versions */
@@ -82,8 +79,7 @@ struct mpc_config_processor
unsigned int mpc_reserved[2];
};

-struct mpc_config_bus
-{
+struct mpc_config_bus {
unsigned char mpc_type;
unsigned char mpc_busid;
unsigned char mpc_bustype[6];
@@ -111,8 +107,7 @@ struct mpc_config_bus

#define MPC_APIC_USABLE 0x01

-struct mpc_config_ioapic
-{
+struct mpc_config_ioapic {
unsigned char mpc_type;
unsigned char mpc_apicid;
unsigned char mpc_apicver;
@@ -120,8 +115,7 @@ struct mpc_config_ioapic
unsigned int mpc_apicaddr;
};

-struct mpc_config_intsrc
-{
+struct mpc_config_intsrc {
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
@@ -144,8 +138,7 @@ enum mp_irq_source_types {

#define MP_APIC_ALL 0xFF

-struct mpc_config_lintsrc
-{
+struct mpc_config_lintsrc {
unsigned char mpc_type;
unsigned char mpc_irqtype;
unsigned short mpc_irqflag;
@@ -157,8 +150,7 @@ struct mpc_config_lintsrc

#define MPC_OEM_SIGNATURE "_OEM"

-struct mp_config_oemtable
-{
+struct mp_config_oemtable {
char oem_signature[4];
unsigned short oem_length; /* Size of table */
char oem_rev; /* 0x01 */
@@ -185,4 +177,3 @@ enum mp_bustype {
MP_BUS_MCA,
};
#endif
-
--
1.5.4.rc2
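
For reference, SMP_MAGIC_IDENT evaluates to 0x5f504d5f; stored in memory
on little-endian x86 the bytes read back as '_' 'M' 'P' '_', the "_MP_"
floating-pointer signature. A userspace check (illustrative):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int magic = ('_' << 24) | ('P' << 16) | ('M' << 8) | '_';
		char buf[5] = { 0 };

		memcpy(buf, &magic, 4);	/* little-endian: "_MP_" */
		printf("0x%08x = \"%s\"\n", magic, buf);
		return 0;
	}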

2008-03-23 08:44:25

by Joe Perches

Subject: [PATCH 131/148] include/asm-x86/topology.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/topology.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 5bc825b..46a8994 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -72,7 +72,7 @@ static inline int cpu_to_node(int cpu)
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (x86_cpu_to_node_map_early_ptr) {
printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n",
- (int)cpu);
+ (int)cpu);
dump_stack();
return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
}
--
1.5.4.rc2

2008-03-23 08:44:46

by Joe Perches

Subject: [PATCH 072/148] include/asm-x86/mmzone_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmzone_64.h | 24 ++++++++++++------------
1 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index ebaf966..594bd0d 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -7,7 +7,7 @@

#ifdef CONFIG_NUMA

-#define VIRTUAL_BUG_ON(x)
+#define VIRTUAL_BUG_ON(x)

#include <asm/smp.h>

@@ -16,7 +16,7 @@ struct memnode {
int shift;
unsigned int mapsize;
s16 *map;
- s16 embedded_map[64-8];
+ s16 embedded_map[64 - 8];
} ____cacheline_aligned; /* total size = 128 bytes */
extern struct memnode memnode;
#define memnode_shift memnode.shift
@@ -25,27 +25,27 @@ extern struct memnode memnode;

extern struct pglist_data *node_data[];

-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
-{
- unsigned nid;
+static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
+{
+ unsigned nid;
VIRTUAL_BUG_ON(!memnodemap);
VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize);
- nid = memnodemap[addr >> memnode_shift];
- VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
- return nid;
-}
+ nid = memnodemap[addr >> memnode_shift];
+ VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
+ return nid;
+}

#define NODE_DATA(nid) (node_data[nid])

#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
+#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
NODE_DATA(nid)->node_spanned_pages)

extern int early_pfn_to_nid(unsigned long pfn);

#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE (64*1024*1024)
-#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL))
+#define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
#endif

#endif
--
1.5.4.rc2
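
phys_to_nid() above is a constant-time shift-and-index into the memnode
map built at boot from the firmware memory layout. A sketch with made-up
values (the real shift and map come from the SRAT parsing code):

	#define EXAMPLE_SHIFT	31	/* pretend nodes are 2GB-granular */
	static short example_map[] = { 0, 0, 1, 1 };

	static int example_phys_to_nid(unsigned long addr)
	{
		return example_map[addr >> EXAMPLE_SHIFT];	/* O(1) */
	}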

2008-03-23 08:45:06

by Joe Perches

Subject: [PATCH 047/148] include/asm-x86/io_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/io_32.h | 137 ++++++++++++++++++++++++++++------------------
1 files changed, 83 insertions(+), 54 deletions(-)

diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 10d10df..c908196 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -59,14 +59,14 @@
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
+ * addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
-
-static inline unsigned long virt_to_phys(volatile void * address)
+
+static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
@@ -84,7 +84,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
* this function
*/

-static inline void * phys_to_virt(unsigned long address)
+static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
@@ -163,16 +163,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

static inline unsigned char readb(const volatile void __iomem *addr)
{
- return *(volatile unsigned char __force *) addr;
+ return *(volatile unsigned char __force *)addr;
}
+
static inline unsigned short readw(const volatile void __iomem *addr)
{
- return *(volatile unsigned short __force *) addr;
+ return *(volatile unsigned short __force *)addr;
}
+
static inline unsigned int readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
+
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
@@ -182,15 +185,17 @@ static inline unsigned int readl(const volatile void __iomem *addr)

static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
- *(volatile unsigned char __force *) addr = b;
+ *(volatile unsigned char __force *)addr = b;
}
+
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
- *(volatile unsigned short __force *) addr = b;
+ *(volatile unsigned short __force *)addr = b;
}
+
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
- *(volatile unsigned int __force *) addr = b;
+ *(volatile unsigned int __force *)addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
@@ -233,12 +238,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count)
* 1. Out of order aware processors
* 2. Accidentally out of order processors (PPro errata #51)
*/
-
+
#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)

static inline void flush_write_buffers(void)
{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+ asm volatile("lock; addl $0,0(%%esp)": : :"memory");
}

#else
@@ -258,7 +263,8 @@ extern void io_delay_init(void);
#include <asm/paravirt.h>
#else

-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
native_io_delay();
#ifdef REALLY_SLOW_IO
native_io_delay();
@@ -269,51 +275,74 @@ static inline void slow_down_io(void) {

#endif

-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
-} \
-static inline unsigned type in##bwl(int port) { \
- return in##bwl##_local(port); \
+#define __BUILDIO(bwl, bw, type) \
+static inline void out##bwl(unsigned type value, int port) \
+{ \
+ out##bwl##_local(value, port); \
+} \
+ \
+static inline unsigned type in##bwl(int port) \
+{ \
+ return in##bwl##_local(port); \
}

-#define BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_local(unsigned type value, int port) { \
- __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-} \
-static inline unsigned type in##bwl##_local(int port) { \
- unsigned type value; \
- __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
- return value; \
-} \
-static inline void out##bwl##_local_p(unsigned type value, int port) { \
- out##bwl##_local(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_local_p(int port) { \
- unsigned type value = in##bwl##_local(port); \
- slow_down_io(); \
- return value; \
-} \
-__BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_p(unsigned type value, int port) { \
- out##bwl(value, port); \
- slow_down_io(); \
-} \
-static inline unsigned type in##bwl##_p(int port) { \
- unsigned type value = in##bwl(port); \
- slow_down_io(); \
- return value; \
-} \
-static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-} \
-static inline void ins##bwl(int port, void *addr, unsigned long count) { \
- __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+#define BUILDIO(bwl, bw, type) \
+static inline void out##bwl##_local(unsigned type value, int port) \
+{ \
+ asm volatile("out" #bwl " %" #bw "0, %w1" \
+ : : "a"(value), "Nd"(port)); \
+} \
+ \
+static inline unsigned type in##bwl##_local(int port) \
+{ \
+ unsigned type value; \
+ asm volatile("in" #bwl " %w1, %" #bw "0" \
+ : "=a"(value) : "Nd"(port)); \
+ return value; \
+} \
+ \
+static inline void out##bwl##_local_p(unsigned type value, int port) \
+{ \
+ out##bwl##_local(value, port); \
+ slow_down_io(); \
+} \
+ \
+static inline unsigned type in##bwl##_local_p(int port) \
+{ \
+ unsigned type value = in##bwl##_local(port); \
+ slow_down_io(); \
+ return value; \
+} \
+ \
+__BUILDIO(bwl, bw, type) \
+ \
+static inline void out##bwl##_p(unsigned type value, int port) \
+{ \
+ out##bwl(value, port); \
+ slow_down_io(); \
+} \
+ \
+static inline unsigned type in##bwl##_p(int port) \
+{ \
+ unsigned type value = in##bwl(port); \
+ slow_down_io(); \
+ return value; \
+} \
+ \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+ \
+static inline void ins##bwl(int port, void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) : "d"(port)); \
}

-BUILDIO(b,b,char)
-BUILDIO(w,w,short)
-BUILDIO(l,,int)
+BUILDIO(b, b, char)
+BUILDIO(w, w, short)
+BUILDIO(l, , int)

#endif
--
1.5.4.rc2
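
To make the BUILDIO() machinery concrete: BUILDIO(b, b, char) above
generates the whole inb/outb family. The innermost pair expands to
roughly this (hand-expanded, so treat as a sketch):

	static inline void outb_local(unsigned char value, int port)
	{
		asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
	}

	static inline unsigned char inb_local(int port)
	{
		unsigned char value;
		asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
		return value;
	}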

2008-03-23 08:45:35

by Joe Perches

Subject: [PATCH 060/148] include/asm-x86/kvm_x86_emulate.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/kvm_x86_emulate.h | 28 ++++++++++++++--------------
1 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index 7db91b9..d6337f9 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -68,10 +68,10 @@ struct x86_emulate_ops {
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
*/
- int (*read_emulated) (unsigned long addr,
- void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*read_emulated)(unsigned long addr,
+ void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);

/*
* write_emulated: Read bytes from emulated/special memory area.
@@ -80,10 +80,10 @@ struct x86_emulate_ops {
* required).
* @bytes: [IN ] Number of bytes to write to memory.
*/
- int (*write_emulated) (unsigned long addr,
- const void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*write_emulated)(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);

/*
* cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -93,11 +93,11 @@ struct x86_emulate_ops {
* @new: [IN ] Value to write to @addr.
* @bytes: [IN ] Number of bytes to access using CMPXCHG.
*/
- int (*cmpxchg_emulated) (unsigned long addr,
- const void *old,
- const void *new,
- unsigned int bytes,
- struct kvm_vcpu *vcpu);
+ int (*cmpxchg_emulated)(unsigned long addr,
+ const void *old,
+ const void *new,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);

};

@@ -143,7 +143,7 @@ struct x86_emulate_ctxt {
/* Register state before/after emulation. */
struct kvm_vcpu *vcpu;

- /* Linear faulting address (if emulating a page-faulting instruction). */
+ /* Linear faulting address (if emulating a page-faulting instruction) */
unsigned long eflags;

/* Emulated execution mode, represented by an X86EMUL_MODE value. */
--
1.5.4.rc2

2008-03-23 08:45:48

by Joe Perches

Subject: [PATCH 074/148] include/asm-x86/mpspec.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mpspec.h | 23 +++++++++++------------
1 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 99da0a5..eccbc58 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -55,8 +55,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);

#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)

-struct physid_mask
-{
+struct physid_mask {
unsigned long mask[PHYSID_ARRAY_SIZE];
};

@@ -65,34 +64,34 @@ typedef struct physid_mask physid_mask_t;
#define physid_set(physid, map) set_bit(physid, (map).mask)
#define physid_clear(physid, map) clear_bit(physid, (map).mask)
#define physid_isset(physid, map) test_bit(physid, (map).mask)
-#define physid_test_and_set(physid, map) \
+#define physid_test_and_set(physid, map) \
test_and_set_bit(physid, (map).mask)

-#define physids_and(dst, src1, src2) \
+#define physids_and(dst, src1, src2) \
bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)

-#define physids_or(dst, src1, src2) \
+#define physids_or(dst, src1, src2) \
bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)

-#define physids_clear(map) \
+#define physids_clear(map) \
bitmap_zero((map).mask, MAX_APICS)

-#define physids_complement(dst, src) \
+#define physids_complement(dst, src) \
bitmap_complement((dst).mask, (src).mask, MAX_APICS)

-#define physids_empty(map) \
+#define physids_empty(map) \
bitmap_empty((map).mask, MAX_APICS)

-#define physids_equal(map1, map2) \
+#define physids_equal(map1, map2) \
bitmap_equal((map1).mask, (map2).mask, MAX_APICS)

-#define physids_weight(map) \
+#define physids_weight(map) \
bitmap_weight((map).mask, MAX_APICS)

-#define physids_shift_right(d, s, n) \
+#define physids_shift_right(d, s, n) \
bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)

-#define physids_shift_left(d, s, n) \
+#define physids_shift_left(d, s, n) \
bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)

#define physids_coerce(map) ((map).mask[0])
--
1.5.4.rc2
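
The physid_* wrappers are thin veneers over the generic bitmap API,
sized to MAX_APICS bits. Illustrative use, assuming the usual all-zero
PHYSID_MASK_NONE initializer from this header:

	physid_mask_t present = PHYSID_MASK_NONE;

	physid_set(3, present);		/* mark APIC id 3 present */
	if (physid_isset(3, present))
		setup_local_APIC_for(3);	/* hypothetical caller */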

2008-03-23 08:46:16

by Joe Perches

Subject: [PATCH 075/148] include/asm-x86/msidef.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/msidef.h | 12 ++++++++----
1 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 5b8acdd..296f29c 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -11,7 +11,8 @@

#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR_MASK 0x000000ff
-#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
+#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
+ MSI_DATA_VECTOR_MASK)

#define MSI_DATA_DELIVERY_MODE_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
@@ -37,11 +38,14 @@
#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)

#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* dedicated cpu */
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* lowest priority */

#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+ MSI_ADDR_DEST_ID_MASK)

#endif /* ASM_MSIDEF_H */
--
1.5.4.rc2
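
These shift/mask pairs compose the MSI data and address words one field
at a time. A small sketch using only names from this header (values are
illustrative):

	/* vector 0x31 in bits 0-7, fixed delivery mode in bit 8 */
	unsigned int msi_data = MSI_DATA_VECTOR(0x31) | MSI_DATA_DELIVERY_FIXED;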

2008-03-23 08:46:42

by Joe Perches

Subject: [PATCH 078/148] include/asm-x86/mutex_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mutex_32.h | 64 ++++++++++++++++++++-----------------------
1 files changed, 30 insertions(+), 34 deletions(-)

diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index 9a6b3da..73e928e 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -21,22 +21,20 @@
* wasn't 1 originally. This function MUST leave the value lower than 1
* even when the "1" assertion wasn't true.
*/
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " decl (%%eax) \n" \
- " jns 1f \n" \
- " call "#fail_fn" \n" \
- "1: \n" \
- \
- :"=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
} while (0)


@@ -50,8 +48,8 @@ do { \
* wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns
*/
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- __asm__ __volatile__( \
- LOCK_PREFIX " incl (%%eax) \n" \
- " jg 1f \n" \
- " call "#fail_fn" \n" \
- "1: \n" \
- \
- :"=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
} while (0)

#define __mutex_slowpath_needs_to_unlock() 1
@@ -104,8 +100,8 @@ do { \
* Additionally, if the value was < 0 originally, this function must not leave
* it to 0 on failure.
*/
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
{
/*
* We have two variants here. The cmpxchg based one is the best one
--
1.5.4.rc2
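
The asm fastpath above mirrors what __mutex_fastpath_lock_retval()
already states in C: "lock decl" atomically drops the count and "jns"
skips the slowpath call while the result is non-negative. In outline:

	/* C outline of the locked-decrement fastpath; the asm variant
	 * exists to control the registers clobbered around fail_fn */
	if (atomic_dec_return(count) < 0)
		fail_fn(count);		/* contended: take the slowpath */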

2008-03-23 08:47:00

by Joe Perches

Subject: [PATCH 069/148] include/asm-x86/mmu.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmu.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index efa962c..00e8867 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -10,10 +10,10 @@
*
* cpu_vm_mask is used to optimize ldt flushing.
*/
-typedef struct {
+typedef struct {
void *ldt;
#ifdef CONFIG_X86_64
- rwlock_t ldtlock;
+ rwlock_t ldtlock;
#endif
int size;
struct mutex lock;
--
1.5.4.rc2

2008-03-23 08:47:25

by Joe Perches

Subject: [PATCH 132/148] include/asm-x86/tsc.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/tsc.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 7d3e27f..d2d8eb5 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -42,7 +42,7 @@ static inline cycles_t vget_cycles(void)
if (!cpu_has_tsc)
return 0;
#endif
- return (cycles_t) __native_read_tsc();
+ return (cycles_t)__native_read_tsc();
}

extern void tsc_init(void);
--
1.5.4.rc2

2008-03-23 08:47:44

by Joe Perches

Subject: [PATCH 046/148] include/asm-x86/ide.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ide.h | 58 +++++++++++++++++++++++++++++-------------------
1 files changed, 35 insertions(+), 23 deletions(-)

diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h
index c2552d8..828df8a 100644
--- a/include/asm-x86/ide.h
+++ b/include/asm-x86/ide.h
@@ -22,41 +22,53 @@

#define IDE_ARCH_OBSOLETE_DEFAULTS

-static __inline__ int ide_default_irq(unsigned long base)
+static inline int ide_default_irq(unsigned long base)
{
switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- case 0x1e0: return 8;
- case 0x160: return 12;
- default:
- return 0;
+ case 0x1f0:
+ return 14;
+ case 0x170:
+ return 15;
+ case 0x1e8:
+ return 11;
+ case 0x168:
+ return 10;
+ case 0x1e0:
+ return 8;
+ case 0x160:
+ return 12;
+ default:
+ return 0;
}
}

-static __inline__ unsigned long ide_default_io_base(int index)
+static inline unsigned long ide_default_io_base(int index)
{
/*
- * If PCI is present then it is not safe to poke around
- * the other legacy IDE ports. Only 0x1f0 and 0x170 are
- * defined compatibility mode ports for PCI. A user can
- * override this using ide= but we must default safe.
+ * If PCI is present then it is not safe to poke around
+ * the other legacy IDE ports. Only 0x1f0 and 0x170 are
+ * defined compatibility mode ports for PCI. A user can
+ * override this using ide= but we must default safe.
*/
if (no_pci_devices()) {
- switch(index) {
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
+ switch (index) {
+ case 2:
+ return 0x1e8;
+ case 3:
+ return 0x168;
+ case 4:
+ return 0x1e0;
+ case 5:
+ return 0x160;
}
}
switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- default:
- return 0;
+ case 0:
+ return 0x1f0;
+ case 1:
+ return 0x170;
+ default:
+ return 0;
}
}

--
1.5.4.rc2

2008-03-23 08:47:59

by Joe Perches

Subject: [PATCH 087/148] include/asm-x86/pci_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pci_64.h | 14 +++++---------
1 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index da8266a..f330234 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,12 +1,10 @@
#ifndef __x8664_PCI_H
#define __x8664_PCI_H

-
#ifdef __KERNEL__

-
#ifdef CONFIG_CALGARY_IOMMU
-static inline void* pci_iommu(struct pci_bus *bus)
+static inline void *pci_iommu(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
return sd->iommu;
@@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val)
}
#endif /* CONFIG_CALGARY_IOMMU */

-
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 value);

extern void dma32_reserve_bootmem(void);
extern void pci_iommu_alloc(void);
@@ -66,5 +63,4 @@ extern void pci_iommu_alloc(void);

#endif /* __KERNEL__ */

-
#endif /* __x8664_PCI_H */
--
1.5.4.rc2

2008-03-23 08:48:26

by Joe Perches

Subject: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/serial.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
index 628c801..ae8370f 100644
--- a/include/asm-x86/serial.h
+++ b/include/asm-x86/serial.h
@@ -8,7 +8,7 @@
* clock, since the 16550A is capable of handling a top speed of 1.5
* megabits/second; but this requires the faster clock.
*/
-#define BASE_BAUD ( 1843200 / 16 )
+#define BASE_BAUD (1843200 / 16)

/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
--
1.5.4.rc2
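
For reference, (1843200 / 16) evaluates to 115200, the familiar top rate
of a 16550A on the standard 1.8432 MHz UART clock; the divide by 16 is
the UART's internal baud-rate divisor.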

2008-03-23 08:48:40

by Joe Perches

Subject: [PATCH 110/148] include/asm-x86/setup.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/setup.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index f745de2..fa6763a 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -55,8 +55,8 @@ struct e820entry;
char * __init machine_specific_memory_setup(void);
char *memory_setup(void);

-int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
+int __init copy_e820_map(struct e820entry *biosmap, int nr_map);
+int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map);
void __init add_memory_region(unsigned long long start,
unsigned long long size, int type);

--
1.5.4.rc2

2008-03-23 08:49:00

by Joe Perches

Subject: [PATCH 053/148] include/asm-x86/irq_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/irq_32.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
index aca9c96..0b79f31 100644
--- a/include/asm-x86/irq_32.h
+++ b/include/asm-x86/irq_32.h
@@ -15,7 +15,7 @@
#include "irq_vectors.h"
#include <asm/thread_info.h>

-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
--
1.5.4.rc2

2008-03-23 08:49:23

by Joe Perches

Subject: [PATCH 101/148] include/asm-x86/proto.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/proto.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 68563c0..9da46af 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -26,7 +26,7 @@ extern int reboot_force;

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);

-#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
-#define round_down(x,y) ((x) & ~((y)-1))
+#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
+#define round_down(x, y) ((x) & ~((y) - 1))

#endif
--
1.5.4.rc2
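
A note on the round_up()/round_down() pair: both assume y is a power of
two, since they round by masking low bits. Worked through for y = 4096:

	round_up(5000, 4096)   == (5000 + 4095) & ~4095 == 8192
	round_down(5000, 4096) ==  5000         & ~4095 == 4096
	round_up(8192, 4096)   == (8192 + 4095) & ~4095 == 8192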

2008-03-23 08:49:42

by Joe Perches

Subject: [PATCH 027/148] include/asm-x86/dma-mapping_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/dma-mapping_64.h | 117 +++++++++++++++++++------------------
1 files changed, 60 insertions(+), 57 deletions(-)

diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
index ecd0f61..c3723ec 100644
--- a/include/asm-x86/dma-mapping_64.h
+++ b/include/asm-x86/dma-mapping_64.h
@@ -13,45 +13,49 @@
struct dma_mapping_ops {
int (*mapping_error)(dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
+ dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+ void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
- size_t size, int direction);
+ size_t size, int direction);
/* like map_single, but doesn't check the device mask */
dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
- size_t size, int direction);
+ size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
+ size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
+ dma_addr_t dma_handle,
+ size_t size, int direction);
void (*sync_single_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
+ dma_addr_t dma_handle,
+ size_t size, int direction);
void (*sync_single_range_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction);
void (*sync_single_range_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction);
void (*sync_sg_for_cpu)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
+ struct scatterlist *sg,
+ int nelems, int direction);
void (*sync_sg_for_device)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
+ struct scatterlist *sg,
+ int nelems, int direction);
int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
+ int nents, int direction);
void (*unmap_sg)(struct device *hwdev,
- struct scatterlist *sg, int nents,
- int direction);
+ struct scatterlist *sg, int nents,
+ int direction);
int (*dma_supported)(struct device *hwdev, u64 mask);
int is_phys;
};

extern dma_addr_t bad_dma_address;
-extern const struct dma_mapping_ops* dma_ops;
+extern const struct dma_mapping_ops *dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
@@ -73,30 +77,29 @@ extern void *dma_alloc_coherent(struct device *dev, size_t size,
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);

-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
+static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
+ size_t size,
int direction)
{
BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_single(hwdev, ptr, size, direction);
}

-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+ size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
dma_ops->unmap_single(dev, addr, size, direction);
}

-#define dma_map_page(dev,page,offset,size,dir) \
- dma_map_single((dev), page_address(page)+(offset), (size), (dir))
+#define dma_map_page(dev, page, offset, size, dir) \
+ dma_map_single((dev), page_address(page) + (offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
+static inline void dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_cpu)
@@ -105,9 +108,9 @@ dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
flush_write_buffers();
}

-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
+static inline void dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_for_device)
@@ -116,21 +119,24 @@ dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
flush_write_buffers();
}

-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
+static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_cpu) {
- dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
+ offset, size, direction);
}

flush_write_buffers();
}

-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, int direction)
+static inline void dma_sync_single_range_for_device(struct device *hwdev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_single_range_for_device)
@@ -140,9 +146,9 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
flush_write_buffers();
}

-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
+static inline void dma_sync_sg_for_cpu(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction)
{
BUG_ON(!valid_dma_direction(direction));
if (dma_ops->sync_sg_for_cpu)
@@ -150,28 +156,26 @@ dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
flush_write_buffers();
}

-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, int direction)
+static inline void dma_sync_sg_for_device(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction)
{
BUG_ON(!valid_dma_direction(direction));
- if (dma_ops->sync_sg_for_device) {
+ if (dma_ops->sync_sg_for_device)
dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
- }

flush_write_buffers();
}

-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
+static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
{
BUG_ON(!valid_dma_direction(direction));
return dma_ops->map_sg(hwdev, sg, nents, direction);
}

-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
- int direction)
+static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
{
BUG_ON(!valid_dma_direction(direction));
dma_ops->unmap_sg(hwdev, sg, nents, direction);
@@ -189,9 +193,8 @@ static inline int dma_get_cache_alignment(void)

extern int dma_set_mask(struct device *dev, u64 mask);

-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
{
flush_write_buffers();
}
--
1.5.4.rc2
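
For context, struct dma_mapping_ops is the indirection that lets the
64-bit DMA backends (nommu, swiotlb, GART, Calgary) share one
driver-facing API; dma_ops points at whichever table is active. A hedged
usage sketch of the inline wrappers above:

	dma_addr_t bus = dma_map_single(hwdev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(bus))
		return -EIO;		/* backend could not map it */
	/* ... device performs the DMA ... */
	dma_unmap_single(hwdev, bus, len, DMA_TO_DEVICE);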

2008-03-23 08:50:09

by Joe Perches

Subject: [PATCH 035/148] include/asm-x86/fixmap_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/fixmap_64.h | 20 +++++++++++---------
1 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 70ddb21..f3d7685 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -34,32 +34,34 @@

enum fixed_addresses {
VSYSCALL_LAST_PAGE,
- VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
FIX_HPET_BASE,
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
FIX_EFI_IO_MAP_LAST_PAGE,
- FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1,
+ FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
+ + MAX_EFI_IO_PAGES - 1,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
__end_of_fixed_addresses
};

-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);

-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)

#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
--
1.5.4.rc2
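
Each enum slot above becomes a compile-time virtual address counted down
from FIXADDR_TOP; the conversion in the x86 fixmap headers of this era
is, in essence:

	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

Higher indices therefore sit at lower addresses, which is why multi-page
ranges are declared as FIRST = LAST + count - 1.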

2008-03-23 08:50:51

by Joe Perches

Subject: [PATCH 019/148] include/asm-x86/cpufeature.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/cpufeature.h | 232 +++++++++++++++++++++++-------------------
1 files changed, 125 insertions(+), 107 deletions(-)

diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c8..0c4034d 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,147 +6,165 @@

#include <asm/required-features.h>

+#if defined _CF
+#undef _CF
+#endif
+#define _CF(word, bit) ((word) * 32 + (bit))
+
#define NCAPINTS 8 /* N 32-bit words worth of info */

/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
-#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
-#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
-#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DS (0*32+21) /* Debug Store */
-#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
- /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
-#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
-#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
+#define X86_FEATURE_FPU _CF(0, 0) /* Onboard FPU */
+#define X86_FEATURE_VME _CF(0, 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE _CF(0, 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE _CF(0, 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC _CF(0, 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR _CF(0, 5) /* Model-Specific Registers,
+ * RDMSR, WRMSR */
+#define X86_FEATURE_PAE _CF(0, 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE _CF(0, 7) /* Machine Check Architecture */
+#define X86_FEATURE_CX8 _CF(0, 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC _CF(0, 9) /* Onboard APIC */
+#define X86_FEATURE_SEP _CF(0, 11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR _CF(0, 12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE _CF(0, 13) /* Page Global Enable */
+#define X86_FEATURE_MCA _CF(0, 14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV _CF(0, 15) /* CMOV instruction
+ * (FCMOVCC and FCOMI too
+ * if FPU present) */
+#define X86_FEATURE_PAT _CF(0, 16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 _CF(0, 17) /* 36-bit PSEs */
+#define X86_FEATURE_PN _CF(0, 18) /* Processor serial number */
+#define X86_FEATURE_CLFLSH _CF(0, 19) /* Supports CLFLUSH instruction */
+#define X86_FEATURE_DS _CF(0, 21) /* Debug Store */
+#define X86_FEATURE_ACPI _CF(0, 22) /* ACPI via MSR */
+#define X86_FEATURE_MMX _CF(0, 23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR _CF(0, 24) /* FXSAVE and FXRSTOR instructions
+ * (fast save and restore of FPU
+ * context), and CR4.OSFXSR
+ * available */
+#define X86_FEATURE_XMM _CF(0, 25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 _CF(0, 26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP _CF(0, 27) /* CPU self snoop */
+#define X86_FEATURE_HT _CF(0, 28) /* Hyper-Threading */
+#define X86_FEATURE_ACC _CF(0, 29) /* Automatic clock control */
+#define X86_FEATURE_IA64 _CF(0, 30) /* IA-64 processor */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */
-#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
+#define X86_FEATURE_SYSCALL _CF(1, 11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP _CF(1, 19) /* MP Capable. */
+#define X86_FEATURE_NX _CF(1, 20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT _CF(1, 22) /* AMD MMX extensions */
+#define X86_FEATURE_GBPAGES _CF(1, 26) /* GB pages */
+#define X86_FEATURE_RDTSCP _CF(1, 27) /* RDTSCP */
+#define X86_FEATURE_LM _CF(1, 29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT _CF(1, 30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW _CF(1, 31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY _CF(2, 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN _CF(2, 1) /* Longrun power control */
+#define X86_FEATURE_LRTI _CF(2, 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
+#define X86_FEATURE_CXMMX _CF(3, 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR _CF(3, 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR _CF(3, 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR _CF(3, 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
-#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
-#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
-#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
-#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
-/* 14 free */
-/* 15 free */
-#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
-#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
+#define X86_FEATURE_K8 _CF(3, 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_K7 _CF(3, 5) /* Athlon */
+#define X86_FEATURE_P3 _CF(3, 6) /* P3 */
+#define X86_FEATURE_P4 _CF(3, 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC _CF(3, 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP _CF(3, 9) /* smp kernel running on up */
+#define X86_FEATURE_FXSAVE_LEAK _CF(3, 10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON _CF(3, 11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS _CF(3, 12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS _CF(3, 13) /* Branch Trace Store */
+ /* 14 free */
+ /* 15 free */
+#define X86_FEATURE_REP_GOOD _CF(3, 16) /* rep microcode works well
+ * on this CPU */
+#define X86_FEATURE_MFENCE_RDTSC _CF(3, 17) /* Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC _CF(3, 18) /* Lfence synchronizes RDTSC */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
-#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
-#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
-#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_CID (4*32+10) /* Context ID */
-#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM3 _CF(4, 0) /* Streaming SIMD Extensions-3 */
+#define X86_FEATURE_MWAIT _CF(4, 3) /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL _CF(4, 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_EST _CF(4, 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 _CF(4, 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_CID _CF(4, 10) /* Context ID */
+#define X86_FEATURE_CX16 _CF(4, 13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR _CF(4, 14) /* Send Task Priority Messages */
+#define X86_FEATURE_DCA _CF(4, 18) /* Direct Cache Access */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
-#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
-#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
-#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
-#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
+#define X86_FEATURE_XSTORE _CF(5, 2) /* on-CPU RNG present
+ * (xstore insn) */
+#define X86_FEATURE_XSTORE_EN _CF(5, 3) /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT _CF(5, 6) /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN _CF(5, 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 _CF(5, 8) /* AdvancedCryptographyEngine v2 */
+#define X86_FEATURE_ACE2_EN _CF(5, 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE _CF(5, 10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN _CF(5, 11) /* PHE enabled */
+#define X86_FEATURE_PMM _CF(5, 12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN _CF(5, 13) /* PMM enabled */

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_LAHF_LM _CF(6, 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY _CF(6, 1) /* If yes HyperThreading
+ * not valid */

/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc
*/
-#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_IDA _CF(7, 0) /* Intel Dynamic Acceleration */

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)

#include <linux/bitops.h>

-extern const char * const x86_cap_flags[NCAPINTS*32];
+extern const char * const x86_cap_flags[NCAPINTS * 32];
extern const char * const x86_power_flags[32];

#define test_cpu_cap(c, bit) \
- test_bit(bit, (unsigned long *)((c)->x86_capability))
+ test_bit(bit, (unsigned long *)((c)->x86_capability))

#define cpu_has(c, bit) \
- (__builtin_constant_p(bit) && \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
- ? 1 : \
- test_cpu_cap(c, bit))
-
-#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
-
-#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
-#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
-#define setup_clear_cpu_cap(bit) do { \
- clear_cpu_cap(&boot_cpu_data, bit); \
- set_bit(bit, cleared_cpu_caps); \
+ (__builtin_constant_p((bit)) && \
+ ((((bit) >> 5) == 0 && (1UL << ((bit) & 31) & REQUIRED_MASK0)) || \
+ (((bit) >> 5) == 1 && (1UL << ((bit) & 31) & REQUIRED_MASK1)) || \
+ (((bit) >> 5) == 2 && (1UL << ((bit) & 31) & REQUIRED_MASK2)) || \
+ (((bit) >> 5) == 3 && (1UL << ((bit) & 31) & REQUIRED_MASK3)) || \
+ (((bit) >> 5) == 4 && (1UL << ((bit) & 31) & REQUIRED_MASK4)) || \
+ (((bit) >> 5) == 5 && (1UL << ((bit) & 31) & REQUIRED_MASK5)) || \
+ (((bit) >> 5) == 6 && (1UL << ((bit) & 31) & REQUIRED_MASK6)) || \
+ (((bit) >> 5) == 7 && (1UL << ((bit) & 31) & REQUIRED_MASK7))) \
+ ? 1 \
+ : test_bit((bit), (unsigned long *)((c)->x86_capability)))
+
+#define boot_cpu_has(bit) \
+ cpu_has(&boot_cpu_data, (bit))
+
+#define set_cpu_cap(c, bit) \
+ set_bit((bit), (unsigned long *)((c)->x86_capability))
+#define clear_cpu_cap(c, bit) \
+ clear_bit((bit), (unsigned long *)((c)->x86_capability))
+#define setup_clear_cpu_cap(bit) \
+do { \
+ clear_cpu_cap(&boot_cpu_data, (bit)); \
+ set_bit((bit), cleared_cpu_caps); \
} while (0)
-#define setup_force_cpu_cap(bit) do { \
- set_cpu_cap(&boot_cpu_data, bit); \
- clear_bit(bit, cleared_cpu_caps); \
+#define setup_force_cpu_cap(bit) \
+do { \
+ set_cpu_cap(&boot_cpu_data, (bit)); \
+ clear_bit((bit), cleared_cpu_caps); \
} while (0)

#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
--
1.5.4.rc2
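
The `_CF(word, bit)` helper this patch introduces packs a capability's word index and bit position into one flat bit number, which `cpu_has()` later splits back apart with `>> 5` and `& 31`. A minimal user-space sketch of the encoding (the test harness below is illustrative, not kernel code):

#include <assert.h>
#include <stdio.h>

#define _CF(word, bit) ((word) * 32 + (bit))

int main(void)
{
        int nx = _CF(1, 20);            /* X86_FEATURE_NX: word 1, bit 20 */

        assert(nx == 52);
        /* cpu_has() recovers both halves with a shift and a mask */
        printf("word=%d bit=%d\n", nx >> 5, nx & 31);   /* word=1 bit=20 */
        return 0;
}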

2008-03-23 08:51:21

by Joe Perches

Subject: [PATCH 024/148] include/asm-x86/div64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/div64.h | 35 ++++++++++++++++++-----------------
1 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index e98d16e..0dbf8bf 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -17,18 +17,20 @@
* This ends up being the most efficient "calling
* convention" on x86.
*/
-#define do_div(n,base) ({ \
- unsigned long __upper, __low, __high, __mod, __base; \
- __base = (base); \
- asm("":"=a" (__low), "=d" (__high):"A" (n)); \
- __upper = __high; \
- if (__high) { \
- __upper = __high % (__base); \
- __high = __high / (__base); \
- } \
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
- asm("":"=A" (n):"a" (__low),"d" (__high)); \
- __mod; \
+#define do_div(n, base) \
+({ \
+ unsigned long __upper, __low, __high, __mod, __base; \
+ __base = (base); \
+ asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
+ __upper = __high; \
+ if (__high) { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm("divl %2":"=a" (__low), "=d" (__mod) \
+ : "rm" (__base), "0" (__low), "1" (__upper)); \
+ asm("":"=A" (n) : "a" (__low), "d" (__high)); \
+ __mod; \
})

/*
@@ -37,14 +39,13 @@
*
* Warning, this will do an exception if X overflows.
*/
-#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
+#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)

-static inline long
-div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
{
long dum2;
- __asm__("divl %2":"=a"(dum2), "=d"(*rem)
- : "rm"(div), "A"(divs));
+ asm("divl %2":"=a"(dum2), "=d"(*rem)
+ : "rm"(div), "A"(divs));

return dum2;

--
1.5.4.rc2
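
For reference, `do_div(n, base)` is a GNU C statement expression: the `({ ... })` block divides the 64-bit lvalue `n` in place and yields the remainder as the macro's value, which is why callers write `rem = do_div(n, base);` rather than assigning the macro to `n`. A portable sketch of the same calling convention, using plain C division instead of the `divl` asm (GNU C assumed for the statement expression; the real macro also evaluates its arguments more carefully):

#include <stdio.h>
#include <stdint.h>

/* same interface as the kernel macro: n is updated, remainder returned */
#define do_div_sketch(n, base) ({                       \
        uint32_t __rem = (uint32_t)((n) % (base));      \
        (n) /= (base);                                  \
        __rem;                                          \
})

int main(void)
{
        uint64_t n = 1000000007ULL;
        uint32_t rem = do_div_sketch(n, 10);

        printf("quotient=%llu remainder=%u\n",
               (unsigned long long)n, rem);     /* 100000000, 7 */
        return 0;
}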

2008-03-23 08:51:39

by Joe Perches

Subject: [PATCH 018/148] include/asm-x86/compat.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/compat.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index d3e8f3e..1793ac3 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}

-static __inline__ void __user *compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
--
1.5.4.rc2

2008-03-23 08:51:56

by Joe Perches

Subject: [PATCH 031/148] include/asm-x86/edac.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/edac.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index cf3200a..a8088f6 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -3,7 +3,7 @@

/* ECC atomic, DMA, SMP and interrupt safe scrub function */

-static __inline__ void atomic_scrub(void *va, u32 size)
+static inline void atomic_scrub(void *va, u32 size)
{
u32 i, *virt_addr = va;

@@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size)
* are interrupt, DMA and SMP safe.
*/
for (i = 0; i < size / 4; i++, virt_addr++)
- __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+ asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
}

#endif
--
1.5.4.rc2
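
The `lock; addl $0, %0` in `atomic_scrub()` adds zero to each word, so the data is unchanged, but the locked read-modify-write forces the location to be read and rewritten (with its ECC recomputed) atomically. A user-space sketch of the same idiom -- x86-specific, GNU C only, and not the kernel's actual interface:

#include <stdint.h>
#include <stdio.h>

/* locked add of zero: rewrites the word atomically, value unchanged */
static inline void scrub_word(volatile uint32_t *p)
{
        asm volatile("lock; addl $0, %0" : "+m" (*p));
}

int main(void)
{
        uint32_t buf[4] = { 1, 2, 3, 4 };

        for (unsigned int i = 0; i < 4; i++)
                scrub_word(&buf[i]);
        printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}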

2008-03-23 08:52:20

by Joe Perches

Subject: [PATCH 041/148] include/asm-x86/hw_irq_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/hw_irq_64.h | 15 +++++++--------
1 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 312a58d..0062ef3 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -36,7 +36,7 @@
* cleanup after irq migration.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
-
+
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
@@ -159,13 +159,12 @@ extern atomic_t irq_mis_count;
* SMP has a few special interrupts for IPI messages
*/

-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n.p2align\n" \
-"IRQ" #nr "_interrupt:\n\t" \
- "push $~(" #nr ") ; " \
- "jmp common_interrupt");
+#define BUILD_IRQ(nr) \
+ asmlinkage void IRQ_NAME(nr); \
+ asm("\n.p2align\n" \
+ "IRQ" #nr "_interrupt:\n\t" \
+ "push $~(" #nr ") ; " \
+ "jmp common_interrupt");

#define platform_legacy_irq(irq) ((irq) < 16)

--
1.5.4.rc2
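
`BUILD_IRQ(nr)` leans on two preprocessor features: stringification (`#nr`) and adjacent string-literal concatenation, which splice the IRQ number into both the asm label and the pushed immediate. A minimal sketch of the trick (the macro name is illustrative):

#include <stdio.h>

#define BUILD_NAME(nr)  "IRQ" #nr "_interrupt: push $~(" #nr ")"

int main(void)
{
        /* the preprocessor fuses the pieces into one string literal */
        puts(BUILD_NAME(42));   /* IRQ42_interrupt: push $~(42) */
        return 0;
}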

2008-03-23 08:52:41

by Joe Perches

Subject: [PATCH 004/148] include/asm-x86/apicdef.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/apicdef.h | 50 ++++++++++++++++++++++----------------------
1 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 674a228..8b24468 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -14,10 +14,10 @@

#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
-#define GET_APIC_VERSION(x) ((x)&0xFFu)
-#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu)
+#define GET_APIC_VERSION(x) ((x) & 0xFFu)
+#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
-# define APIC_INTEGRATED(x) ((x)&0xF0u)
+# define APIC_INTEGRATED(x) ((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x) (1)
#endif
@@ -31,16 +31,16 @@
#define APIC_EIO_ACK 0x0
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
-#define APIC_LDR_MASK (0xFFu<<24)
-#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu)
-#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+#define APIC_LDR_MASK (0xFFu << 24)
+#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
+#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
-#define APIC_SPIV_FOCUS_DISABLED (1<<9)
-#define APIC_SPIV_APIC_ENABLED (1<<8)
+#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
+#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
@@ -76,27 +76,27 @@
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
-#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
-#define SET_APIC_DEST_FIELD(x) ((x)<<24)
+#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
+#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
-#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
-#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
-#define SET_APIC_TIMER_BASE(x) (((x)<<18))
+#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
+#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
+#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
-#define APIC_LVT_TIMER_PERIODIC (1<<17)
-#define APIC_LVT_MASKED (1<<16)
-#define APIC_LVT_LEVEL_TRIGGER (1<<15)
-#define APIC_LVT_REMOTE_IRR (1<<14)
-#define APIC_INPUT_POLARITY (1<<13)
-#define APIC_SEND_PENDING (1<<12)
+#define APIC_LVT_TIMER_PERIODIC (1 << 17)
+#define APIC_LVT_MASKED (1 << 16)
+#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
+#define APIC_LVT_REMOTE_IRR (1 << 14)
+#define APIC_INPUT_POLARITY (1 << 13)
+#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
-#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
-#define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8))
+#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
+#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
@@ -105,7 +105,7 @@
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
-#define APIC_TDR_DIV_TMBASE (1<<2)
+#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
@@ -115,14 +115,14 @@
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EILVT0 0x500
-#define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */
+#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
-#define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF)
+#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
-#define APIC_EILVT_MASKED (1<<16)
+#define APIC_EILVT_MASKED (1 << 16)
#define APIC_EILVT1 0x510
#define APIC_EILVT2 0x520
#define APIC_EILVT3 0x530
--
1.5.4.rc2
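
The GET/SET pairs above are ordinary shift-and-mask field accessors; the added spaces only make the precedence easier to audit. A round-trip sketch using two of the definitions verbatim:

#include <assert.h>

#define SET_APIC_DEST_FIELD(x)  ((x) << 24)
#define GET_APIC_DEST_FIELD(x)  (((x) >> 24) & 0xFF)

int main(void)
{
        unsigned int icr2 = SET_APIC_DEST_FIELD(0x5u);

        assert(GET_APIC_DEST_FIELD(icr2) == 0x5u);      /* round trip */
        return 0;
}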

2008-03-23 08:53:03

by Joe Perches

Subject: [PATCH 028/148] include/asm-x86/dwarf2_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/dwarf2_64.h | 9 ++++-----
1 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h
index eedc085..c950519 100644
--- a/include/asm-x86/dwarf2_64.h
+++ b/include/asm-x86/dwarf2_64.h
@@ -1,16 +1,15 @@
#ifndef _DWARF2_H
#define _DWARF2_H 1

-
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif

-/*
+/*
Macros for dwarf2 CFI unwind table entries.
- See "as.info" for details on these pseudo ops. Unfortunately
- they are only supported in very new binutils, so define them
- away for older version.
+ See "as.info" for details on these pseudo ops. Unfortunately
+ they are only supported in very new binutils, so define them
+ away for older version.
*/

#ifdef CONFIG_AS_CFI
--
1.5.4.rc2

2008-03-23 08:53:53

by Joe Perches

Subject: [PATCH 125/148] include/asm-x86/tce.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/tce.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
index cd955d3..b1a4ea0 100644
--- a/include/asm-x86/tce.h
+++ b/include/asm-x86/tce.h
@@ -39,7 +39,7 @@ struct iommu_table;
#define TCE_RPN_MASK 0x0000fffffffff000ULL

extern void tce_build(struct iommu_table *tbl, unsigned long index,
- unsigned int npages, unsigned long uaddr, int direction);
+ unsigned int npages, unsigned long uaddr, int direction);
extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
extern void * __init alloc_tce_table(void);
extern void __init free_tce_table(void *tbl);
--
1.5.4.rc2

2008-03-23 08:53:30

by Joe Perches

Subject: [PATCH 032/148] include/asm-x86/efi.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/efi.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index ea9734b..d53004b 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
*/

#define efi_call_virt(f, args...) \
- ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
+ ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)

#define efi_call_virt0(f) efi_call_virt(f)
#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
--
1.5.4.rc2

2008-03-23 08:54:25

by Joe Perches

Subject: [PATCH 005/148] include/asm-x86/apic.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/apic.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 73d1635..31b0faa 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -67,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v)

static inline void native_apic_write_atomic(unsigned long reg, u32 v)
{
- (void) xchg((u32*)(APIC_BASE + reg), v);
+ (void)xchg((u32 *)(APIC_BASE + reg), v);
}

static inline u32 native_apic_read(unsigned long reg)
--
1.5.4.rc2

2008-03-23 08:54:43

by Joe Perches

Subject: [PATCH 034/148] include/asm-x86/fixmap_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/fixmap_32.h | 21 ++++++++++-----------
1 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index a7404d5..eb16651 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -99,8 +99,7 @@ enum fixed_addresses {
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_NESTING 4
- FIX_BTMAP_END =
- __end_of_permanent_fixed_addresses + 512 -
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
(__end_of_permanent_fixed_addresses & 511),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
FIX_WP_TEST,
@@ -110,20 +109,20 @@ enum fixed_addresses {
__end_of_fixed_addresses
};

-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
+extern void __set_fixmap(enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
extern void reserve_top_address(unsigned long reserve);

-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)

-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))

#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)

@@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();

- return __fix_to_virt(idx);
+ return __fix_to_virt(idx);
}

static inline unsigned long virt_to_fix(const unsigned long vaddr)
--
1.5.4.rc2

2008-03-23 08:55:09

by Joe Perches

Subject: [PATCH 033/148] include/asm-x86/elf.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/elf.h | 102 +++++++++++++++++++++++++++----------------------
1 files changed, 56 insertions(+), 46 deletions(-)

diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 7732564..37b1492 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -11,7 +11,7 @@

typedef unsigned long elf_greg_t;

-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_i387_struct elf_fpregset_t;
@@ -100,10 +100,11 @@ extern unsigned int vdso_enabled;
We might as well make sure everything else is cleared too (except for %esp),
just to make things more deterministic.
*/
-#define ELF_PLAT_INIT(_r, load_addr) do { \
- _r->bx = 0; _r->cx = 0; _r->dx = 0; \
- _r->si = 0; _r->di = 0; _r->bp = 0; \
- _r->ax = 0; \
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { \
+ _r->bx = 0; _r->cx = 0; _r->dx = 0; \
+ _r->si = 0; _r->di = 0; _r->bp = 0; \
+ _r->ax = 0; \
} while (0)

/*
@@ -111,24 +112,25 @@ extern unsigned int vdso_enabled;
* now struct_user_regs, they are different)
*/

-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
- pr_reg[0] = regs->bx; \
- pr_reg[1] = regs->cx; \
- pr_reg[2] = regs->dx; \
- pr_reg[3] = regs->si; \
- pr_reg[4] = regs->di; \
- pr_reg[5] = regs->bp; \
- pr_reg[6] = regs->ax; \
- pr_reg[7] = regs->ds & 0xffff; \
- pr_reg[8] = regs->es & 0xffff; \
- pr_reg[9] = regs->fs & 0xffff; \
- savesegment(gs, pr_reg[10]); \
- pr_reg[11] = regs->orig_ax; \
- pr_reg[12] = regs->ip; \
- pr_reg[13] = regs->cs & 0xffff; \
- pr_reg[14] = regs->flags; \
- pr_reg[15] = regs->sp; \
- pr_reg[16] = regs->ss & 0xffff; \
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
+ pr_reg[0] = regs->bx; \
+ pr_reg[1] = regs->cx; \
+ pr_reg[2] = regs->dx; \
+ pr_reg[3] = regs->si; \
+ pr_reg[4] = regs->di; \
+ pr_reg[5] = regs->bp; \
+ pr_reg[6] = regs->ax; \
+ pr_reg[7] = regs->ds & 0xffff; \
+ pr_reg[8] = regs->es & 0xffff; \
+ pr_reg[9] = regs->fs & 0xffff; \
+ savesegment(gs, pr_reg[10]); \
+ pr_reg[11] = regs->orig_ax; \
+ pr_reg[12] = regs->ip; \
+ pr_reg[13] = regs->cs & 0xffff; \
+ pr_reg[14] = regs->flags; \
+ pr_reg[15] = regs->sp; \
+ pr_reg[16] = regs->ss & 0xffff; \
} while (0);

#define ELF_PLATFORM (utsname()->machine)
@@ -139,7 +141,7 @@ extern unsigned int vdso_enabled;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) \
+#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)

#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
@@ -168,24 +170,27 @@ static inline void elf_common_init(struct thread_struct *t,
t->ds = t->es = ds;
}

-#define ELF_PLAT_INIT(_r, load_addr) do { \
- elf_common_init(&current->thread, _r, 0); \
- clear_thread_flag(TIF_IA32); \
+#define ELF_PLAT_INIT(_r, load_addr) \
+do { \
+ elf_common_init(&current->thread, _r, 0); \
+ clear_thread_flag(TIF_IA32); \
} while (0)

#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
-#define compat_start_thread(regs, ip, sp) do { \
- start_ia32_thread(regs, ip, sp); \
- set_fs(USER_DS); \
- } while (0)
-#define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \
- if (test_thread_flag(TIF_IA32)) \
- clear_thread_flag(TIF_ABI_PENDING); \
- else \
- set_thread_flag(TIF_ABI_PENDING); \
- current->personality |= force_personality32; \
- } while (0)
+#define compat_start_thread(regs, ip, sp)				\
+do { \
+ start_ia32_thread(regs, ip, sp); \
+ set_fs(USER_DS); \
+} while (0)
+#define COMPAT_SET_PERSONALITY(ex, ibcs2) \
+do { \
+ if (test_thread_flag(TIF_IA32)) \
+ clear_thread_flag(TIF_ABI_PENDING); \
+ else \
+ set_thread_flag(TIF_ABI_PENDING); \
+ current->personality |= force_personality32; \
+} while (0)
#define COMPAT_ELF_PLATFORM ("i686")

/*
@@ -194,7 +199,8 @@ static inline void elf_common_init(struct thread_struct *t,
* getting dumped.
*/

-#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -268,10 +274,12 @@ extern int force_personality32;

struct task_struct;

-#define ARCH_DLINFO_IA32(vdso_enabled) \
-do if (vdso_enabled) { \
+#define ARCH_DLINFO_IA32(vdso_enabled) \
+do { \
+ if (vdso_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
+ } \
} while (0)

#ifdef CONFIG_X86_32
@@ -289,9 +297,11 @@ do if (vdso_enabled) { \
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)

-#define ARCH_DLINFO \
-do if (vdso_enabled) { \
- NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
+#define ARCH_DLINFO \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso); \
} while (0)

#define AT_SYSINFO 32
@@ -304,8 +314,8 @@ do if (vdso_enabled) { \

#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)

-#define VDSO_ENTRY \
- ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+#define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))

struct linux_binprm;

--
1.5.4.rc2
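
The ARCH_DLINFO rewrite from `do if (...) { ... } while (0)` to a fully braced `do { if (...) ... } while (0)` is more than cosmetic: the braced form is a single statement in every context and cannot capture a following `else`. A small sketch of the hazard the idiom guards against (both macros hypothetical):

#include <stdio.h>

/* unsafe: two statements, breaks under an unbraced if/else */
#define LOG_BAD(msg)    printf("%s\n", msg); fflush(stdout)

/* safe: one statement, swallows the trailing semicolon cleanly */
#define LOG_OK(msg)                             \
do {                                            \
        printf("%s\n", msg);                    \
        fflush(stdout);                         \
} while (0)

int main(void)
{
        int verbose = 0;

        if (verbose)
                LOG_OK("verbose");      /* fine */
        else
                printf("quiet\n");
        /* with LOG_BAD here, the fflush() would escape the if and
         * the else would fail to parse */
        return 0;
}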

2008-03-23 08:55:39

by Joe Perches

Subject: [PATCH 105/148] include/asm-x86/rio.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/rio.h | 76 ++++++++++++++++++++++++------------------------
1 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 97cdcc9..3451c57 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -11,53 +11,53 @@
#define RIO_TABLE_VERSION 3

struct rio_table_hdr {
- u8 version; /* Version number of this data structure */
- u8 num_scal_dev; /* # of Scalability devices */
- u8 num_rio_dev; /* # of RIO I/O devices */
+ u8 version; /* Version number of this data structure */
+ u8 num_scal_dev; /* # of Scalability devices */
+ u8 num_rio_dev; /* # of RIO I/O devices */
} __attribute__((packed));

struct scal_detail {
- u8 node_id; /* Scalability Node ID */
- u32 CBAR; /* Address of 1MB register space */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF = None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port2node; /* Node ID port connected to: 0xFF = None */
- u8 port2port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
+ u8 node_id; /* Scalability Node ID */
+ u32 CBAR; /* Address of 1MB register space */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF = None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port2node; /* Node ID port connected to: 0xFF = None */
+ u8 port2port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
} __attribute__((packed));

struct rio_detail {
- u8 node_id; /* RIO Node ID */
- u32 BBAR; /* Address of 1MB register space */
- u8 type; /* Type of device */
- u8 owner_id; /* Node ID of Hurricane that owns this */
- /* node */
- u8 port0node; /* Node ID port connected to: 0xFF=None */
- u8 port0port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 port1node; /* Node ID port connected to: 0xFF=None */
- u8 port1port; /* Port num port connected to: 0,1,2, or */
- /* 0xFF=None */
- u8 first_slot; /* Lowest slot number below this Calgary */
- u8 status; /* Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie: */
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- u8 WP_index; /* instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- u8 chassis_num; /* 1 based Chassis number */
+ u8 node_id; /* RIO Node ID */
+ u32 BBAR; /* Address of 1MB register space */
+ u8 type; /* Type of device */
+ u8 owner_id; /* Node ID of Hurricane that owns this */
+ /* node */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF=None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 first_slot; /* Lowest slot number below this Calgary */
+ u8 status; /* Bit 0 = 1 : the XAPIC is used */
+ /* = 0 : the XAPIC is not used, ie: */
+ /* ints fwded to another XAPIC */
+ /* Bits1:7 Reserved */
+ u8 WP_index; /* instance index - lower ones have */
+ /* lower slot numbers/PCI bus numbers */
+ u8 chassis_num; /* 1 based Chassis number */
} __attribute__((packed));

enum {
- HURR_SCALABILTY = 0, /* Hurricane Scalability info */
- HURR_RIOIB = 2, /* Hurricane RIOIB info */
- COMPAT_CALGARY = 4, /* Compatibility Calgary */
- ALT_CALGARY = 5, /* Second Planar Calgary */
+ HURR_SCALABILTY = 0, /* Hurricane Scalability info */
+ HURR_RIOIB = 2, /* Hurricane RIOIB info */
+ COMPAT_CALGARY = 4, /* Compatibility Calgary */
+ ALT_CALGARY = 5, /* Second Planar Calgary */
};

/*
--
1.5.4.rc2

2008-03-23 08:56:00

by Joe Perches

Subject: [PATCH 037/148] include/asm-x86/futex.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/futex.h | 101 +++++++++++++++++++++++++---------------------
1 files changed, 55 insertions(+), 46 deletions(-)

diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index c9952ea..ac0fbf2 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -12,35 +12,32 @@
#include <asm/uaccess.h>

#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile( \
-"1: " insn "\n" \
-"2: .section .fixup,\"ax\"\n \
-3: mov %3, %1\n \
- jmp 2b\n \
- .previous\n" \
- _ASM_EXTABLE(1b,3b) \
- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
- : "i" (-EFAULT), "0" (oparg), "1" (0))
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
+ : "i" (-EFAULT), "0" (oparg), "1" (0))

#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
- __asm__ __volatile( \
-"1: movl %2, %0\n \
- movl %0, %3\n" \
- insn "\n" \
-"2: lock; cmpxchgl %3, %2\n \
- jnz 1b\n \
-3: .section .fixup,\"ax\"\n \
-4: mov %5, %1\n \
- jmp 3b\n \
- .previous\n" \
- _ASM_EXTABLE(1b,4b) \
- _ASM_EXTABLE(2b,4b) \
- : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
- "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0))
-
-static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+ "2:\tlock; cmpxchgl %3, %2\n" \
+ "\tjnz\t1b\n" \
+ "3:\t.section .fixup,\"ax\"\n" \
+ "4:\tmov\t%5, %1\n" \
+ "\tjmp\t3b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)

if (!ret) {
switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
}
}
return ret;
}

-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+ int newval)
{

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

- __asm__ __volatile__(
- "1: lock; cmpxchgl %3, %1 \n"
- "2: .section .fixup, \"ax\" \n"
- "3: mov %2, %0 \n"
- " jmp 2b \n"
- " .previous \n"
- _ASM_EXTABLE(1b,3b)
- : "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "0" (oldval)
- : "memory"
+ asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
+ : "memory"
);

return oldval;
--
1.5.4.rc2
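
The reflowed switch in `futex_atomic_op_inuser()` maps the comparison field of the packed op word onto plain C comparisons. A user-space sketch of that decode step, assuming the standard `FUTEX_OP_CMP_*` numbering from <linux/futex.h> (EQ=0 through GT=5):

#include <stdio.h>

enum { CMP_EQ, CMP_NE, CMP_LT, CMP_GE, CMP_LE, CMP_GT };

static int cmp_result(int cmp, int oldval, int cmparg)
{
        switch (cmp) {
        case CMP_EQ:
                return oldval == cmparg;
        case CMP_NE:
                return oldval != cmparg;
        case CMP_LT:
                return oldval < cmparg;
        case CMP_GE:
                return oldval >= cmparg;
        case CMP_LE:
                return oldval <= cmparg;
        case CMP_GT:
                return oldval > cmparg;
        default:
                return -1;      /* -ENOSYS in the kernel */
        }
}

int main(void)
{
        int encoded_op = 2 << 24;                       /* cmp = CMP_LT */
        int cmp = (encoded_op >> 24) & 15;              /* as in the patch */

        printf("%d\n", cmp_result(cmp, 1, 5));          /* 1: 1 < 5 */
        return 0;
}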

2008-03-23 08:56:35

by Joe Perches

Subject: [PATCH 145/148] include/asm-x86/voyager.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/voyager.h | 51 +++++++++++++++++++++++++++-----------------
1 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h
index 91a9932..9c811d2 100644
--- a/include/asm-x86/voyager.h
+++ b/include/asm-x86/voyager.h
@@ -91,8 +91,7 @@
#define VOYAGER_WRITE_CONFIG 0x2
#define VOYAGER_BYPASS 0xff

-typedef struct voyager_asic
-{
+typedef struct voyager_asic {
__u8 asic_addr; /* ASIC address; Level 4 */
__u8 asic_type; /* ASIC type */
__u8 asic_id; /* ASIC id */
@@ -113,7 +112,7 @@ typedef struct voyager_module {
__u16 largest_reg; /* Largest register in the scan path */
__u16 smallest_reg; /* Smallest register in the scan path */
voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
- struct voyager_module *submodule; /* Submodule pointer */
+ struct voyager_module *submodule; /* Submodule pointer */
struct voyager_module *next; /* Next module in linked list */
} voyager_module_t;

@@ -135,7 +134,7 @@ typedef struct voyager_eeprom_hdr {
__u16 cct_offset;
__u16 log_length; /* length of err log */
__u16 xsum_end; /* offset to end of
- checksum */
+ checksum */
__u8 reserved[4];
__u8 sflag; /* starting sentinal */
__u8 part_number[13]; /* prom part number */
@@ -148,7 +147,8 @@ typedef struct voyager_eeprom_hdr {



-#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
+#define VOYAGER_EPROM_SIZE_OFFSET \
+ ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
#define VOYAGER_XSUM_END_OFFSET 0x2a

/* the following three definitions are for internal table layouts
@@ -199,7 +199,7 @@ typedef struct voyager_asic_data_table {
#define VOYAGER_WCBIC_TOM_L 0x4
#define VOYAGER_WCBIC_TOM_H 0x5

-/* register defines for Voyager Memory Contol (VMC)
+/* register defines for Voyager Memory Contol (VMC)
* these are present on L4 machines only */
#define VOYAGER_VMC1 0x81
#define VOYAGER_VMC2 0x91
@@ -334,7 +334,7 @@ typedef struct {

struct QuadDescription {
__u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
- * will be zero except for slot */
+ * will be zero except for slot */
__u8 StructureVersion;
__u32 CPI_BaseAddress;
__u32 LARC_BankSize;
@@ -342,7 +342,7 @@ struct QuadDescription {
__u8 Slot; /* Processor slots 1 - 4 */
} __attribute__((packed));

-struct ProcBoardInfo {
+struct ProcBoardInfo {
__u8 Type;
__u8 StructureVersion;
__u8 NumberOfBoards;
@@ -382,19 +382,30 @@ struct CPU_Info {
* packed in it by our friend the compiler.
*/
typedef struct {
- __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */
- __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */
- __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */
- __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */
- __u32 OS_Flags; /* Flags set by the OS as info for SUS */
- __u32 SUS_Flags; /* Flags set by SUS as info for the OS */
- __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */
+ __u8 Mailbox_SUS; /* Written to by SUS to give
+ commands/response to the OS */
+ __u8 Mailbox_OS; /* Written to by the OS to give
+ commands/response to SUS */
+ __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the
+ interface SUS supports */
+ __u8 OS_MailboxVersion; /* Tells SUS which iteration of the
+ interface the OS supports */
+ __u32 OS_Flags; /* Flags set by the OS as info for
+ SUS */
+ __u32 SUS_Flags; /* Flags set by SUS as info
+ for the OS */
+ __u32 WatchDogPeriod; /* Watchdog period (in seconds) which
+ the DP uses to see if the OS
+ is dead */
__u32 WatchDogCount; /* Updated by the OS on every tic. */
- __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */
- MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */
+ __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS
+ where to stuff the SUS error log
+ on a dump */
+ MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
+ /* Storage for MCA POS data */
/* All new SECOND_PASS_INTERFACE fields added from this point */
- struct ProcBoardInfo *BoardData;
- struct CPU_Info *CPU_Data;
+ struct ProcBoardInfo *BoardData;
+ struct CPU_Info *CPU_Data;
/* All new fields must be added from this point */
} Voyager_KernelSUS_Mbox_t;

@@ -478,7 +489,7 @@ struct voyager_SUS {
__u32 SUS_errorlog;
/* lots of system configuration stuff under here */
};
-
+
/* Variables exported by voyager_smp */
extern __u32 voyager_extended_vic_processors;
extern __u32 voyager_allowed_boot_processors;
--
1.5.4.rc2

2008-03-23 08:56:54

by Joe Perches

Subject: [PATCH 036/148] include/asm-x86/floppy.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/floppy.h | 87 ++++++++++++++++++++++++----------------------
1 files changed, 45 insertions(+), 42 deletions(-)

diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index a48d715..438b303 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -20,20 +20,21 @@
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
-#define _CROSS_64KB(a,s,vdma) \
-(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+#define _CROSS_64KB(a, s, vdma) \
+ (!(vdma) && \
+ ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))

-#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)


-#define SW fd_routine[use_virtual_dma&1]
+#define SW fd_routine[use_virtual_dma & 1]
#define CSW fd_routine[can_use_virtual_dma & 1]


#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_outb(value, port) outb_p(value, port)

-#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
@@ -57,15 +58,15 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
#undef TRACE_FLPY_INT

#ifdef TRACE_FLPY_INT
- static int calls=0;
- static int bytes=0;
- static int dma_wait=0;
+ static int calls;
+ static int bytes;
+ static int dma_wait;
#endif
if (!doing_pdma)
return floppy_interrupt(irq, dev_id);

#ifdef TRACE_FLPY_INT
- if(!calls)
+ if (!calls)
bytes = virtual_dma_count;
#endif

@@ -74,42 +75,42 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
register char *lptr;

st = 1;
- for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
- lcount; lcount--, lptr++) {
- st=inb(virtual_dma_port+4) & 0xa0 ;
- if(st != 0xa0)
+ for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st = inb(virtual_dma_port + 4) & 0xa0;
+ if (st != 0xa0)
break;
- if(virtual_dma_mode)
- outb_p(*lptr, virtual_dma_port+5);
+ if (virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port + 5);
else
- *lptr = inb_p(virtual_dma_port+5);
+ *lptr = inb_p(virtual_dma_port + 5);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
- st = inb(virtual_dma_port+4);
+ st = inb(virtual_dma_port + 4);
}

#ifdef TRACE_FLPY_INT
calls++;
#endif
- if(st == 0x20)
+ if (st == 0x20)
return IRQ_HANDLED;
- if(!(st & 0x20)) {
+ if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
+ virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
- dma_wait=0;
+ dma_wait = 0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
#ifdef TRACE_FLPY_INT
- if(!virtual_dma_count)
+ if (!virtual_dma_count)
dma_wait++;
#endif
return IRQ_HANDLED;
@@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)

static void fd_disable_dma(void)
{
- if(! (can_use_virtual_dma & 1))
+ if (!(can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
- virtual_dma_count=0;
+ virtual_dma_count = 0;
}

-static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+static int vdma_request_dma(unsigned int dmanr, const char *device_id)
{
return 0;
}
@@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy)

static int fd_request_irq(void)
{
- if(can_use_virtual_dma)
+ if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
IRQF_DISABLED, "floppy", NULL);
else
@@ -152,13 +153,13 @@ static int fd_request_irq(void)

static unsigned long dma_mem_alloc(unsigned long size)
{
- return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
+ return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
}


static unsigned long vdma_mem_alloc(unsigned long size)
{
- return (unsigned long) vmalloc(size);
+ return (unsigned long)vmalloc(size);

}

@@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size)

static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
- if((unsigned long) addr >= (unsigned long) high_memory)
+ if ((unsigned long)addr >= (unsigned long)high_memory)
vfree((void *)addr);
else
free_pages(addr, get_order(size));
@@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size)

static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
- if(can_use_virtual_dma == 2) {
- if((unsigned long) addr >= (unsigned long) high_memory ||
- isa_virt_to_bus(addr) >= 0x1000000 ||
- _CROSS_64KB(addr, size, 0))
+ if (can_use_virtual_dma == 2) {
+ if ((unsigned long)addr >= (unsigned long)high_memory ||
+ isa_virt_to_bus(addr) >= 0x1000000 ||
+ _CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
@@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
- virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
@@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
- set_dma_mode(FLOPPY_DMA,mode);
- set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
- set_dma_count(FLOPPY_DMA,size);
+ set_dma_mode(FLOPPY_DMA, mode);
+ set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
+ set_dma_count(FLOPPY_DMA, size);
enable_dma(FLOPPY_DMA);
return 0;
}

static struct fd_routine_l {
- int (*_request_dma)(unsigned int dmanr, const char * device_id);
+ int (*_request_dma)(unsigned int dmanr, const char *device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
- unsigned long (*_dma_mem_alloc) (unsigned long size);
+ unsigned long (*_dma_mem_alloc)(unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
@@ -252,7 +253,8 @@ static int FDC2 = -1;
* is needed to prevent corrupted CMOS RAM in case "insmod floppy"
* coincides with another rtc CMOS user. Paul G.
*/
-#define FLOPPY0_TYPE ({ \
+#define FLOPPY0_TYPE \
+({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
@@ -261,7 +263,8 @@ static int FDC2 = -1;
val; \
})

-#define FLOPPY1_TYPE ({ \
+#define FLOPPY1_TYPE \
+({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
--
1.5.4.rc2
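
`FLOPPY0_TYPE` and `FLOPPY1_TYPE` are `({ ... })` statement expressions: each takes `rtc_lock`, reads CMOS, and yields `val` as the value of the whole expression. A user-space sketch of the same read-under-a-lock pattern, with a pthread mutex standing in for the spinlock (names illustrative; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int cmos_shadow = 0x40;          /* stand-in for the CMOS register */

/* GNU C statement expression: the last expression, val, is the result */
#define READ_UNDER_LOCK()                       \
({                                              \
        int val;                                \
        pthread_mutex_lock(&lock);              \
        val = cmos_shadow;                      \
        pthread_mutex_unlock(&lock);            \
        val;                                    \
})

int main(void)
{
        printf("0x%x\n", READ_UNDER_LOCK());    /* 0x40 */
        return 0;
}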

2008-03-23 08:57:24

by Joe Perches

Subject: [PATCH 040/148] include/asm-x86/highmem.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/highmem.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 479767c..e153f3b 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -8,7 +8,7 @@
* [email protected]
*
*
- * Redesigned the x86 32-bit VM architecture to deal with
+ * Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
--
1.5.4.rc2

2008-03-23 08:57:42

by Joe Perches

Subject: [PATCH 051/148] include/asm-x86/ipcbuf.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ipcbuf.h | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index 2adf8b3..ee678fd 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -11,8 +11,7 @@
* - 2 miscellaneous 32-bit values
*/

-struct ipc64_perm
-{
+struct ipc64_perm {
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
--
1.5.4.rc2

2008-03-23 08:58:08

by Joe Perches

Subject: [PATCH 008/148] include/asm-x86/bitops_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/bitops_32.h | 40 +++++++++++++++++++++-------------------
1 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index c19fbe9..3ed64b2 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)

if (!size)
return 0;
- /* This looks at memory. Mark it volatile to tell gcc not to move it around */
- __asm__ __volatile__(
- "movl $-1,%%eax\n\t"
- "xorl %%edx,%%edx\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "xorl -4(%%edi),%%eax\n\t"
- "subl $4,%%edi\n\t"
- "bsfl %%eax,%%edx\n"
- "1:\tsubl %%ebx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%edx"
- :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+ /* This looks at memory.
+ * Mark it volatile to tell gcc not to move it around
+ */
+ asm volatile("movl $-1,%%eax\n\t"
+ "xorl %%edx,%%edx\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "xorl -4(%%edi),%%eax\n\t"
+ "subl $4,%%edi\n\t"
+ "bsfl %%eax,%%edx\n"
+ "1:\tsubl %%ebx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%edx"
+ : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ : "1" ((size + 31) >> 5), "2" (addr),
+ "b" (addr) : "memory");
return res;
}

@@ -53,7 +55,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
unsigned long val = *addr++;
if (val)
return __ffs(val) + x;
- x += (sizeof(*addr)<<3);
+ x += sizeof(*addr) << 3;
}
return x;
}
@@ -72,10 +74,10 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)

#include <asm-generic/bitops/ext2-non-atomic.h>

-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>

--
1.5.4.rc2
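
The reflowed asm above keeps gcc's extended-asm sections — template,
outputs, inputs, clobbers — each on its own ":"-separated line. The same
constraint syntax in miniature (gcc on x86 assumed; the instruction chosen
here is purely illustrative):

#include <stdio.h>

static inline int add_asm(int a, int b)
{
        int res;

        asm("addl %2,%0"
            : "=r" (res)        /* outputs */
            : "0" (a),          /* inputs: "0" reuses %0's register */
              "r" (b));
        return res;
}

int main(void)
{
        printf("%d\n", add_asm(2, 3));  /* 5 */
        return 0;
}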

2008-03-23 08:58:35

by Joe Perches

Subject: [PATCH 044/148] include/asm-x86/i8259.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/i8259.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index e2650f2..45d4df3 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -5,7 +5,7 @@

extern unsigned int cached_irq_mask;

-#define __byte(x,y) (((unsigned char *) &(y))[x])
+#define __byte(x, y) (((unsigned char *)&(y))[x])
#define cached_master_mask (__byte(0, cached_irq_mask))
#define cached_slave_mask (__byte(1, cached_irq_mask))

--
1.5.4.rc2
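
__byte() views an integer as an array of bytes, so on little-endian x86 the
master and slave 8259 masks are bytes 0 and 1 of cached_irq_mask. A quick
user-space check of the same macro:

#include <stdio.h>

#define __byte(x, y) (((unsigned char *)&(y))[x])

int main(void)
{
        unsigned int cached_irq_mask = 0xfffb; /* only IRQ2 (cascade) unmasked */

        /* Little-endian: byte 0 = master 8259 mask, byte 1 = slave. */
        printf("master=%02x slave=%02x\n",
               __byte(0, cached_irq_mask), __byte(1, cached_irq_mask));
        return 0;
}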

2008-03-23 08:58:52

by Joe Perches

Subject: [PATCH 052/148] include/asm-x86/ipi.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ipi.h | 11 +++++++----
1 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 6d011bd..ecc80f3 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -27,7 +27,8 @@
* We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
*/

-static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
+static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
+ unsigned int dest)
{
unsigned int icr = shortcut | dest;

@@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns
return icr;
}

-static inline int __prepare_ICR2 (unsigned int mask)
+static inline int __prepare_ICR2(unsigned int mask)
{
return SET_APIC_DEST_FIELD(mask);
}

-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
+ unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
@@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
-static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
+static inline void __send_IPI_dest_field(unsigned int mask, int vector,
+ unsigned int dest)
{
unsigned long cfg;

--
1.5.4.rc2

2008-03-23 08:59:24

by Joe Perches

Subject: [PATCH 058/148] include/asm-x86/kprobes.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/kprobes.h | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 61ad7b5..54980b0 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t;
#define RELATIVEJUMP_INSTRUCTION 0xe9
#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
- (((unsigned long)current_thread_info()) + THREAD_SIZE \
- - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + THREAD_SIZE \
- - (unsigned long)(ADDR)))
+#define MIN_STACK_SIZE(ADDR) \
+ (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR))) \
+ ? (MAX_STACK_SIZE) \
+ : (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR)))

#define flush_insn_slot(p) do { } while (0)

--
1.5.4.rc2
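
MIN_STACK_SIZE(ADDR) computes min(MAX_STACK_SIZE, bytes remaining between
ADDR and the top of the current thread stack), so a kprobe never saves
memory past the stack's end. The same computation written as a function,
with made-up base and THREAD_SIZE values for illustration:

#include <stdio.h>

#define MAX_STACK_SIZE  64
#define THREAD_SIZE     8192UL          /* illustrative; arch-dependent */

static unsigned long min_stack_size(unsigned long thread_info_base,
                                    unsigned long addr)
{
        unsigned long room = thread_info_base + THREAD_SIZE - addr;

        return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
        unsigned long base = 0x10000;

        printf("%lu\n", min_stack_size(base, base + THREAD_SIZE - 16)); /* 16 */
        printf("%lu\n", min_stack_size(base, base + 128));              /* 64 */
        return 0;
}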

2008-03-23 08:59:51

by Joe Perches

Subject: [PATCH 054/148] include/asm-x86/irq_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/irq_64.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
index 5006c6e..083d35a 100644
--- a/include/asm-x86/irq_64.h
+++ b/include/asm-x86/irq_64.h
@@ -31,10 +31,10 @@

#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */

-#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
+#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
#define NR_IRQ_VECTORS NR_IRQS

-static __inline__ int irq_canonicalize(int irq)
+static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
}
--
1.5.4.rc2

2008-03-23 09:00:26

by Joe Perches

Subject: [PATCH 050/148] include/asm-x86/io.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/io.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index 616c9f0..99694ce 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -11,7 +11,7 @@

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
-extern void __iomem * ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
--
1.5.4.rc2

2008-03-23 09:00:45

by Joe Perches

Subject: [PATCH 049/148] include/asm-x86/ioctls.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ioctls.h | 13 +++++++------
1 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index 93c894d..c0c338b 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -47,12 +47,13 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T',0x2A, struct termios2)
-#define TCSETS2 _IOW('T',0x2B, struct termios2)
-#define TCSETSW2 _IOW('T',0x2C, struct termios2)
-#define TCSETSF2 _IOW('T',0x2D, struct termios2)
-#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TCGETS2 _IOR('T', 0x2A, struct termios2)
+#define TCSETS2 _IOW('T', 0x2B, struct termios2)
+#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
+#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
+#define TIOCGPTN _IOR('T', 0x30, unsigned int)
+ /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */

#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
--
1.5.4.rc2
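
TCGETS2, TIOCGPTN and friends are built with the _IOR()/_IOW() encoders,
which pack the transfer direction, the 'T' type letter, the command number,
and sizeof(argument type) into a single request code. One real use of
TIOCGPTN from user space on Linux (error handling kept minimal):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd = posix_openpt(O_RDWR | O_NOCTTY);
        unsigned int ptn;

        if (fd < 0 || ioctl(fd, TIOCGPTN, &ptn) < 0) {
                perror("TIOCGPTN");
                return 1;
        }
        printf("slave is /dev/pts/%u\n", ptn);
        return 0;
}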

2008-03-23 09:01:07

by Joe Perches

Subject: [PATCH 009/148] include/asm-x86/bitops_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/bitops_64.h | 40 ++++++++++++++++++++--------------------
1 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index 63b439b..d133520 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -15,25 +15,25 @@ static inline long __scanbit(unsigned long val, unsigned long max)
return val;
}

-#define find_first_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(*(unsigned long *)addr,(size))) : \
- find_first_bit(addr,size)))
-
-#define find_first_zero_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- (__scanbit(~*(unsigned long *)addr,(size))) : \
- find_first_zero_bit(addr,size)))
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
+#define find_first_bit(addr, size) \
+ ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
+ ? (__scanbit(*(unsigned long *)(addr), (size))) \
+ : find_first_bit((addr), (size))))
+
+#define find_first_zero_bit(addr, size) \
+ ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
+ ? (__scanbit(~*(unsigned long *)(addr), (size))) \
+ : find_first_zero_bit((addr), (size))))
+
+static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
+ int len)
+{
+ unsigned long end = i + len;
while (i < end) {
- __set_bit(i, bitmap);
+ __set_bit(i, bitmap);
i++;
}
-}
+}

#ifdef __KERNEL__

@@ -51,10 +51,10 @@ static inline void set_bit_string(unsigned long *bitmap, unsigned long i,

#include <asm-generic/bitops/ext2-non-atomic.h>

-#define ext2_set_bit_atomic(lock,nr,addr) \
- test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr,addr) \
- test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>

--
1.5.4.rc2
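
Besides the comma spacing, this hunk parenthesizes the addr argument inside
the cast. Without the parentheses, passing an expression miscompiles
silently, because the cast binds tighter than the surrounding arithmetic. A
small demonstration with hypothetical macros:

#include <stdio.h>

#define DEREF_BAD(p)    (*(const int *)p)       /* cast binds to 'base' only */
#define DEREF_GOOD(p)   (*(const int *)(p))

int main(void)
{
        int v[2] = { 10, 20 };
        const char *base = (const char *)v;

        /* DEREF_BAD(base + sizeof(int)) expands to
         *      (*(const int *)base + sizeof(int))
         * i.e. v[0] + 4 == 14, not v[1]. */
        printf("bad:  %zu\n", (size_t)DEREF_BAD(base + sizeof(int)));
        printf("good: %d\n",  DEREF_GOOD(base + sizeof(int)));
        return 0;
}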

2008-03-23 09:01:30

by Joe Perches

Subject: [PATCH 045/148] include/asm-x86/ia32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ia32.h | 62 +++++++++++++++++++++++------------------------
1 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index aa97332..55d3abe 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -14,19 +14,19 @@

/* signal.h */
struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ compat_sigset_t sa_mask; /* A 32 bit mask */
};

struct old_sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ compat_old_sigset_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
};

typedef struct sigaltstack_ia32 {
@@ -65,7 +65,7 @@ struct stat64 {
long long st_size;
unsigned int st_blksize;

- long long st_blocks;/* Number 512-byte blocks allocated. */
+ long long st_blocks;/* Number 512-byte blocks allocated */

unsigned st_atime;
unsigned st_atime_nsec;
@@ -77,13 +77,13 @@ struct stat64 {
unsigned long long st_ino;
} __attribute__((packed));

-typedef struct compat_siginfo{
+typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;

union {
- int _pad[((128/sizeof(int)) - 3)];
+ int _pad[((128 / sizeof(int)) - 3)];

/* kill() */
struct {
@@ -129,28 +129,26 @@ typedef struct compat_siginfo{
} _sifields;
} compat_siginfo_t;

-struct sigframe32
-{
- u32 pretcode;
- int sig;
- struct sigcontext_ia32 sc;
- struct _fpstate_ia32 fpstate;
- unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+struct sigframe32 {
+ u32 pretcode;
+ int sig;
+ struct sigcontext_ia32 sc;
+ struct _fpstate_ia32 fpstate;
+ unsigned int extramask[_COMPAT_NSIG_WORDS-1];
};

-struct rt_sigframe32
-{
- u32 pretcode;
- int sig;
- u32 pinfo;
- u32 puc;
- compat_siginfo_t info;
- struct ucontext_ia32 uc;
- struct _fpstate_ia32 fpstate;
+struct rt_sigframe32 {
+ u32 pretcode;
+ int sig;
+ u32 pinfo;
+ u32 puc;
+ compat_siginfo_t info;
+ struct ucontext_ia32 uc;
+ struct _fpstate_ia32 fpstate;
};

struct ustat32 {
- __u32 f_tfree;
+ __u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
@@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif

#endif /* !CONFIG_IA32_SUPPORT */
-
-#endif
+
+#endif
--
1.5.4.rc2

2008-03-23 09:01:51

by Joe Perches

Subject: [PATCH 068/148] include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmu_context_64.h | 21 +++++++++++----------
1 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ad6dc82..ca44c71 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
- if (read_pda(mmu_state) == TLBSTATE_OK)
+ if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);

- if (unlikely(next->context.ldt != prev->context.ldt))
+ if (unlikely(next->context.ldt != prev->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (read_pda(active_mm) != next)
BUG();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
+ /* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}

-#define deactivate_mm(tsk,mm) do { \
- load_gs_index(0); \
- asm volatile("movl %0,%%fs"::"r"(0)); \
-} while(0)
+#define deactivate_mm(tsk, mm) \
+do { \
+ load_gs_index(0); \
+ asm volatile("movl %0,%%fs"::"r"(0)); \
+} while (0)

-#define activate_mm(prev, next) \
- switch_mm((prev),(next),NULL)
+#define activate_mm(prev, next) \
+ switch_mm((prev), (next), NULL)


#endif
--
1.5.4.rc2
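
deactivate_mm() gains the canonical do { ... } while (0) wrapper. The idiom
makes a multi-statement macro behave as a single statement, so it composes
with if/else; for example (hypothetical macros):

#include <stdio.h>

#define LOG_TWICE_BAD(s)        { puts(s); puts(s); }
#define LOG_TWICE(s)            do { puts(s); puts(s); } while (0)

int main(void)
{
        int cond = 0;

        /* With LOG_TWICE_BAD the expansion ends in '};', which closes
         * the if statement early and orphans the else:
         *
         *      if (cond)
         *              LOG_TWICE_BAD("yes");   <- would not compile
         *      else
         *              puts("no");
         */
        if (cond)
                LOG_TWICE("yes");
        else
                puts("no");
        return 0;
}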

2008-03-23 09:02:16

by Joe Perches

Subject: [PATCH 071/148] include/asm-x86/mmzone_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mmzone_32.h | 34 ++++++++++++++++++----------------
1 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index b9f5be2..e3a1390 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -18,7 +18,7 @@ extern struct pglist_data *node_data[];
#include <asm/srat.h>
#endif

-extern int get_memcfg_numa_flat(void );
+extern int get_memcfg_numa_flat(void);
/*
* This allows any one NUMA architecture to be compiled
* for, and still fall back to the flat function if it
@@ -107,34 +107,36 @@ static inline int pfn_valid(int pfn)
/*
* Following are macros that are specific to this numa platform.
*/
-#define reserve_bootmem(addr, size, flags) \
+#define reserve_bootmem(addr, size, flags) \
reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
-#define alloc_bootmem(x) \
- __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
+#define alloc_bootmem(x) \
+ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
+ __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
+#define alloc_bootmem_pages(x) \
+ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
+ __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
({ \
- struct pglist_data __maybe_unused \
- *__alloc_bootmem_node__pgdat = (pgdat); \
+ struct pglist_data *__alloc_bootmem_node__pgdat = (pgdat) \
+ __maybe_unused; \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
- __pa(MAX_DMA_ADDRESS)); \
+ __pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_pages_node(pgdat, x) \
({ \
- struct pglist_data __maybe_unused \
- *__alloc_bootmem_node__pgdat = (pgdat); \
+ struct pglist_data *__alloc_bootmem_node__pgdat = (pgdat) \
+ __maybe_unused; \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
- __pa(MAX_DMA_ADDRESS)); \
+ __pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_low_pages_node(pgdat, x) \
({ \
- struct pglist_data __maybe_unused \
- *__alloc_bootmem_node__pgdat = (pgdat); \
+ struct pglist_data *__alloc_bootmem_node__pgdat = (pgdat) \
+ __maybe_unused; \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
})
#endif /* CONFIG_NEED_MULTIPLE_NODES */
--
1.5.4.rc2

2008-03-23 09:03:09

by Joe Perches

Subject: [PATCH 093/148] include/asm-x86/pgtable-2level.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgtable-2level.h | 18 +++++++++++-------
1 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 701404f..46bc52c 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
native_set_pte(ptep, pte);
}

-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+ unsigned long addr,
pte_t *ptep, pte_t pte)
{
native_set_pte(ptep, pte);
@@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp)
native_set_pmd(pmdp, __pmd(0));
}

-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *xp)
{
*xp = native_make_pte(0);
}
@@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
*/
#define PTE_FILE_MAX_BITS 29

-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5))

-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \
+ (((off) >> 5) << 8) + _PAGE_FILE })

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })

--
1.5.4.rc2
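
__swp_entry() and its decoders pack a 5-bit swap type and a page offset into
one word, skipping bit 0 so the entry can never look like a present PTE. The
encoding is plain bit arithmetic, runnable in user space:

#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)           (((x).val >> 1) & 0x1f)
#define __swp_offset(x)         ((x).val >> 8)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })

int main(void)
{
        swp_entry_t e = __swp_entry(3UL, 12345UL);

        printf("type=%lu offset=%lu\n", __swp_type(e), __swp_offset(e));
        return 0;
}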

2008-03-23 09:02:44

by Joe Perches

Subject: [PATCH 126/148] include/asm-x86/termios.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/termios.h | 9 +++++----
1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index f729563..7cff8bf 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -54,10 +54,11 @@ struct termio {
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- *(unsigned short *) &(termios)->x = __tmp; \
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) \
+{ \
+ unsigned short __tmp; \
+ get_user(__tmp, &(termio)->x); \
+ *(unsigned short *)&(termios)->x = __tmp; \
}

static inline int user_termio_to_kernel_termios(struct ktermios *termios,
--
1.5.4.rc2

2008-03-23 09:03:28

by Joe Perches

Subject: [PATCH 090/148] include/asm-x86/pda.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pda.h | 80 +++++++++++++++++++++++++-----------------------
1 files changed, 42 insertions(+), 38 deletions(-)

diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index b364d68..62b7349 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -55,34 +55,36 @@ extern struct x8664_pda _proxy_pda;

#define pda_offset(field) offsetof(struct x8664_pda, field)

-#define pda_to_op(op, field, val) do { \
- typedef typeof(_proxy_pda.field) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- case 4: \
- asm(op "l %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i" (pda_offset(field))); \
- break; \
- case 8: \
- asm(op "q %1,%%gs:%c2": \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- } while (0)
+#define pda_to_op(op, field, val) \
+do { \
+ typedef typeof(_proxy_pda.field) T__; \
+ if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 2: \
+ asm(op "w %1,%%gs:%c2" : \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 4: \
+ asm(op "l %1,%%gs:%c2" : \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i" (pda_offset(field))); \
+ break; \
+ case 8: \
+ asm(op "q %1,%%gs:%c2": \
+ "+m" (_proxy_pda.field) : \
+ "ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ default: \
+ __bad_pda_field(); \
+ } \
+} while (0)

-#define pda_from_op(op,field) ({ \
+#define pda_from_op(op, field) \
+({ \
typeof(_proxy_pda.field) ret__; \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
@@ -90,23 +92,24 @@ extern struct x8664_pda _proxy_pda;
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
case 4: \
asm(op "l %%gs:%c1,%0": \
"=r" (ret__): \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
case 8: \
asm(op "q %%gs:%c1,%0": \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
- break; \
+ break; \
default: \
__bad_pda_field(); \
- } \
- ret__; })
+ } \
+ ret__; \
+})

#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
@@ -115,12 +118,13 @@ extern struct x8664_pda _proxy_pda;
#define or_pda(field, val) pda_to_op("or", field, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field) ({ \
- int old__; \
- asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (_proxy_pda.field) \
- : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
- old__; \
+#define test_and_clear_bit_pda(bit, field) \
+({ \
+ int old__; \
+ asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (_proxy_pda.field) \
+ : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
+ old__; \
})

#endif
--
1.5.4.rc2
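
pda_to_op()/pda_from_op() dispatch on sizeof(field) to pick the w/l/q
instruction suffix, and the "if (0) { T__ tmp__; tmp__ = (val); }" arm is a
compile-time type check that emits no code. That trick in isolation
(typeof is a GNU extension):

#include <stdio.h>

/* The if (0) arm never runs and is optimized away entirely; it exists
 * only so the compiler type-checks 'val' against typeof(var). */
#define CHECKED_ASSIGN(var, val)                                \
do {                                                            \
        typedef typeof(var) T__;                                \
        if (0) { T__ tmp__; tmp__ = (val); (void)tmp__; }       \
        (var) = (val);                                          \
} while (0)

int main(void)
{
        long x;

        CHECKED_ASSIGN(x, 42);  /* fine */
        /* CHECKED_ASSIGN(x, "no"); -- would warn at compile time */
        printf("%ld\n", x);
        return 0;
}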

2008-03-23 09:03:48

by Joe Perches

Subject: [PATCH 077/148] include/asm-x86/mtrr.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/mtrr.h | 64 ++++++++++++++++++++++-------------------------
1 files changed, 30 insertions(+), 34 deletions(-)

diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index ee17229..7877f46 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -28,8 +28,7 @@

#define MTRR_IOCTL_BASE 'M'

-struct mtrr_sentry
-{
+struct mtrr_sentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
@@ -41,8 +40,7 @@ struct mtrr_sentry
will break. */

#ifdef __i386__
-struct mtrr_gentry
-{
+struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
@@ -51,8 +49,7 @@ struct mtrr_gentry

#else /* __i386__ */

-struct mtrr_gentry
-{
+struct mtrr_gentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int regnum; /* Register number */
@@ -89,12 +86,12 @@ struct mtrr_gentry
extern u8 mtrr_type_lookup(u64 addr, u64 end);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
-extern int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, bool increment);
-extern int mtrr_add_page (unsigned long base, unsigned long size,
- unsigned int type, bool increment);
-extern int mtrr_del (int reg, unsigned long base, unsigned long size);
-extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
+extern int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_del(int reg, unsigned long base, unsigned long size);
+extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
@@ -109,23 +106,21 @@ static inline u8 mtrr_type_lookup(u64 addr, u64 end)
}
#define mtrr_save_fixed_ranges(arg) do {} while (0)
#define mtrr_save_state() do {} while (0)
-static __inline__ int mtrr_add (unsigned long base, unsigned long size,
- unsigned int type, bool increment)
+static inline int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
+static inline int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment)
{
return -ENODEV;
}
-static __inline__ int mtrr_del (int reg, unsigned long base,
- unsigned long size)
+static inline int mtrr_del(int reg, unsigned long base, unsigned long size)
{
return -ENODEV;
}
-static __inline__ int mtrr_del_page (int reg, unsigned long base,
- unsigned long size)
+static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
return -ENODEV;
}
@@ -133,7 +128,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
{
return 0;
}
-static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+{
+}

#define mtrr_ap_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
@@ -142,15 +139,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

-struct mtrr_sentry32
-{
+struct mtrr_sentry32 {
compat_ulong_t base; /* Base address */
compat_uint_t size; /* Size of region */
compat_uint_t type; /* Type of region */
};

-struct mtrr_gentry32
-{
+struct mtrr_gentry32 {
compat_ulong_t regnum; /* Register number */
compat_uint_t base; /* Base address */
compat_uint_t size; /* Size of region */
@@ -159,16 +154,17 @@ struct mtrr_gentry32

#define MTRR_IOCTL_BASE 'M'

-#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
-#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
-#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
-#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
-#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
-#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
-#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
-#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
+#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
+#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
+#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
+#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_PAGE_ENTRY \
+ _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */

#endif /* __KERNEL__ */
--
1.5.4.rc2
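
The mtrr_sentry/mtrr_gentry structures and the MTRRIOC* numbers are the
user-visible MTRR ABI. A sketch of how that ioctl interface is driven from
user space (the framebuffer address is hypothetical; needs root and a kernel
with /proc/mtrr):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>

int main(void)
{
        struct mtrr_sentry s = {
                .base = 0xd0000000,     /* hypothetical framebuffer */
                .size = 0x01000000,     /* 16 MiB */
                .type = MTRR_TYPE_WRCOMB,
        };
        int fd = open("/proc/mtrr", O_WRONLY);

        if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &s) < 0) {
                perror("mtrr");
                return 1;
        }
        close(fd);
        return 0;
}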

2008-03-23 09:04:12

by Joe Perches

Subject: [PATCH 088/148] include/asm-x86/pci-direct.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pci-direct.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 6823fa4..5b21485 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -4,7 +4,7 @@
#include <linux/types.h>

/* Direct PCI access. This is used for PCI accesses in early boot before
- the PCI subsystem works. */
+ the PCI subsystem works. */

extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
--
1.5.4.rc2

2008-03-23 09:04:36

by Joe Perches

Subject: [PATCH 076/148] include/asm-x86/msr.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/msr.h | 81 +++++++++++++++++++++++++-----------------------
1 files changed, 42 insertions(+), 39 deletions(-)

diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3ca29eb..2c698a2 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -16,8 +16,8 @@
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
- asm volatile (".byte 0x0f,0x01,0xf9"
- : "=a" (low), "=d" (high), "=c" (*aux));
+ asm volatile(".byte 0x0f,0x01,0xf9"
+ : "=a" (low), "=d" (high), "=c" (*aux));
return low | ((u64)high >> 32);
}

@@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
*/
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high) unsigned low, high
-#define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32))
+#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
@@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %3,%0 ; jmp 1b\n\t"
".previous\n\t"
- _ASM_EXTABLE(2b,3b)
+ _ASM_EXTABLE(2b, 3b)
: "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), "i" (-EFAULT));
return EAX_EDX_VAL(val, low, high);
@@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %4,%0 ; jmp 1b\n\t"
".previous\n\t"
- _ASM_EXTABLE(2b,3b)
+ _ASM_EXTABLE(2b, 3b)
: "=a" (err)
: "c" (msr), "0" (low), "d" (high),
- "i" (-EFAULT));
+ "i" (-EFAULT));
return err;
}

@@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter)
* pointer indirection), this allows gcc to optimize better
*/

-#define rdmsr(msr,val1,val2) \
- do { \
- u64 __val = native_read_msr(msr); \
- (val1) = (u32)__val; \
- (val2) = (u32)(__val >> 32); \
- } while(0)
+#define rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (val1) = (u32)__val; \
+ (val2) = (u32)(__val >> 32); \
+} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
native_write_msr(msr, low, high);
}

-#define rdmsrl(msr,val) \
- ((val) = native_read_msr(msr))
+#define rdmsrl(msr, val) \
+ ((val) = native_read_msr((msr)))

#define wrmsrl(msr, val) \
- native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+ native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -141,14 +141,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
}

/* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2) \
- ({ \
- int __err; \
- u64 __val = native_read_msr_safe(msr, &__err); \
- (*p1) = (u32)__val; \
- (*p2) = (u32)(__val >> 32); \
- __err; \
- })
+#define rdmsr_safe(msr, p1, p2) \
+({ \
+ int __err; \
+ u64 __val = native_read_msr_safe((msr), &__err); \
+ (*p1) = (u32)__val; \
+ (*p2) = (u32)(__val >> 32); \
+ __err; \
+})

#define rdtscl(low) \
((low) = (u32)native_read_tsc())
@@ -156,35 +156,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
#define rdtscll(val) \
((val) = native_read_tsc())

-#define rdpmc(counter,low,high) \
- do { \
- u64 _l = native_read_pmc(counter); \
- (low) = (u32)_l; \
- (high) = (u32)(_l >> 32); \
- } while(0)
+#define rdpmc(counter, low, high) \
+do { \
+ u64 _l = native_read_pmc((counter)); \
+ (low) = (u32)_l; \
+ (high) = (u32)(_l >> 32); \
+} while (0)

-#define rdtscp(low, high, aux) \
- do { \
- unsigned long long _val = native_read_tscp(&(aux)); \
- (low) = (u32)_val; \
- (high) = (u32)(_val >> 32); \
- } while (0)
+#define rdtscp(low, high, aux) \
+do { \
+ unsigned long long _val = native_read_tscp(&(aux)); \
+ (low) = (u32)_val; \
+ (high) = (u32)(_val >> 32); \
+} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

#endif /* !CONFIG_PARAVIRT */


-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+ (u32)((val) >> 32))

-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))

-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -195,7 +197,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
wrmsr(msr_no, l, h);
}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
+ u32 *l, u32 *h)
{
return rdmsr_safe(msr_no, l, h);
}
--
1.5.4.rc2
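
All the rdmsr/rdpmc/rdtscp macros above follow one convention: the 64-bit
value travels as two u32 halves (eax low, edx high) and is split or
recombined with shifts. The arithmetic on its own:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t msr_val = 0x123456789abcdef0ULL;

        /* rdmsr-style split: eax gets the low half, edx the high half. */
        uint32_t low  = (uint32_t)msr_val;
        uint32_t high = (uint32_t)(msr_val >> 32);

        /* wrmsrl-style recombination. */
        uint64_t back = ((uint64_t)high << 32) | low;

        printf("low=%08x high=%08x back=%016llx\n",
               low, high, (unsigned long long)back);
        return 0;
}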

2008-03-23 09:05:31

by Joe Perches

Subject: [PATCH 086/148] include/asm-x86/parport.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/parport.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 019cbca..3c4ffeb 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,10 +1,10 @@
#ifndef _ASM_X86_PARPORT_H
#define _ASM_X86_PARPORT_H

-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
- return parport_pc_find_isa_ports (autoirq, autodma);
+ return parport_pc_find_isa_ports(autoirq, autodma);
}

#endif /* _ASM_X86_PARPORT_H */
--
1.5.4.rc2

2008-03-23 09:04:58

by Joe Perches

Subject: [PATCH 091/148] include/asm-x86/percpu.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/percpu.h | 104 ++++++++++++++++++++++++----------------------
1 files changed, 54 insertions(+), 50 deletions(-)

diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 0dec00f..736fc3b 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -85,58 +85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
* don't give an lvalue though). */
extern void __bad_percpu_size(void);

-#define percpu_to_op(op,var,val) \
- do { \
- typedef typeof(var) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- case 2: \
- asm(op "w %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- case 4: \
- asm(op "l %1,"__percpu_seg"%0" \
- : "+m" (var) \
- :"ri" ((T__)val)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- } while (0)
-
-#define percpu_from_op(op,var) \
- ({ \
- typeof(var) ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm(op "b "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- case 2: \
- asm(op "w "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- case 4: \
- asm(op "l "__percpu_seg"%1,%0" \
- : "=r" (ret__) \
- : "m" (var)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- ret__; })
+#define percpu_to_op(op, var, val) \
+do { \
+ typedef typeof(var) T__; \
+ if (0) { \
+ T__ tmp__; \
+ tmp__ = (val); \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ case 2: \
+ asm(op "w %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ case 4: \
+ asm(op "l %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
+#define percpu_from_op(op, var) \
+({ \
+ typeof(var) ret__; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ ret__; \
+})

#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
+#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */
#endif /* _ASM_X86_PERCPU_H_ */
--
1.5.4.rc2
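
The percpu_to_op()/percpu_from_op() asm templates are assembled from
adjacent string literals — the op, a size suffix, and __percpu_seg — which
the compiler concatenates into a single template string. The mechanism in
miniature:

#include <stdio.h>

#define SEG             "%%gs:"         /* stand-in for __percpu_seg */
#define TMPL(op)        op "l " SEG "%1,%0"

int main(void)
{
        /* Adjacent string literals concatenate at compile time, so
         * TMPL("mov") is the single template "movl %%gs:%1,%0"
         * (%% becomes a literal % when gcc emits the asm). */
        puts(TMPL("mov"));
        return 0;
}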

2008-03-23 09:05:55

by Joe Perches

Subject: [PATCH 092/148] include/asm-x86/pgalloc.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgalloc.h | 32 ++++++++++++++++++++++++--------
1 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 91e4641..c316e1b 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -8,14 +8,30 @@
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
-static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
- unsigned long start, unsigned long count) {}
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
-static inline void paravirt_release_pte(unsigned long pfn) {}
-static inline void paravirt_release_pmd(unsigned long pfn) {}
-static inline void paravirt_release_pud(unsigned long pfn) {}
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+}
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+}
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn,
+ unsigned long clonepfn,
+ unsigned long start,
+ unsigned long count)
+{
+}
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+}
+static inline void paravirt_release_pte(unsigned long pfn)
+{
+}
+static inline void paravirt_release_pmd(unsigned long pfn)
+{
+}
+static inline void paravirt_release_pud(unsigned long pfn)
+{
+}
#endif

/*
--
1.5.4.rc2

2008-03-23 09:06:20

by Joe Perches

Subject: [PATCH 130/148] include/asm-x86/tlbflush.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/tlbflush.h | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 3998709..0c0674d 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -32,7 +32,7 @@ static inline void __native_flush_tlb_global(void)

static inline void __native_flush_tlb_single(unsigned long addr)
{
- __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
@@ -134,8 +134,7 @@ void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
#define TLBSTATE_LAZY 2

#ifdef CONFIG_X86_32
-struct tlb_state
-{
+struct tlb_state {
struct mm_struct *active_mm;
int state;
char __cacheline_padding[L1_CACHE_BYTES-8];
--
1.5.4.rc2

2008-03-23 09:06:40

by Joe Perches

Subject: [PATCH 096/148] include/asm-x86/pgtable_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgtable_64.h | 141 ++++++++++++++++++++++-------------------
1 files changed, 76 insertions(+), 65 deletions(-)

diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9bd551e..a3bbf87 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -52,14 +52,18 @@ extern void paging_init(void);

#ifndef __ASSEMBLY__

-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pud_ERROR(e) \
- printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x) (!pgd_val(x))
#define pud_none(x) (!pud_val(x))
@@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
return native_make_pte(xchg(&xp->pte, 0));
#else
- /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */
+ /* native_local_ptep_get_and_clear,
+ but duplicated because of cyclic dependency */
pte_t ret = *xp;
native_pte_clear(NULL, 0, xp);
return ret;
@@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
*pgdp = pgd;
}

-static inline void native_pgd_clear(pgd_t * pgd)
+static inline void native_pgd_clear(pgd_t *pgd)
{
native_set_pgd(pgd, native_make_pgd(0));
}
@@ -128,15 +133,15 @@ static inline void native_pgd_clear(pgd_t * pgd)

#endif /* !__ASSEMBLY__ */

-#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))


-#define MAXMEM _AC(0x3fffffffffff, UL)
+#define MAXMEM _AC(0x00003fffffffffff, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
@@ -163,18 +168,18 @@ static inline unsigned long pmd_bad(pmd_t pmd)
~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
}

-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_none(x) (!pte_val((x)))
+#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
+#define pte_page(x) pfn_to_page(pte_pfn((x)))
+#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

/*
* Macro to mark a page protection value as "uncacheable".
*/
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-
+#define pgprot_noncached(prot) \
+ (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))

/*
* Conversion functions: convert a page and protection to a page entry,
@@ -184,77 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd)
/*
* Level 4 access.
*/
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
-#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
+#define pgd_page_vaddr(pgd) \
+ ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
+#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+#define pud_page_vaddr(pud) \
+ ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
+#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgd, address) \
+ ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
+#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)

static inline int pud_large(pud_t pte)
{
- return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
- (_PAGE_PSE|_PAGE_PRESENT);
+ return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
}

/* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
- pmd_index(address))
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
+ pmd_index(address))
+#define pmd_none(x) (!pmd_val((x)))
+#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
+#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
+#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
+ _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
+
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
- pte_index(address))
+ pte_index((address)))

/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
+#define pte_unmap_nested(pte) /* NOP */

-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)

extern int direct_gbpages;

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \
+ ((offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })

-extern int kern_addr_valid(unsigned long addr);
+extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -267,8 +276,10 @@ extern void cleanup_highmap(void);

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define kc_offset_to_vaddr(o) \
- (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
+#define kc_offset_to_vaddr(o) \
+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \
+ ? ((o) | ~__VIRTUAL_MASK) \
+ : (o))

#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */
--
1.5.4.rc2
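
PMD_MASK, PUD_MASK and PGDIR_MASK follow the usual power-of-two pattern:
SIZE = 1 << SHIFT and MASK = ~(SIZE - 1), so addr & MASK rounds an address
down to a SIZE boundary. For example:

#include <stdio.h>

#define PMD_SHIFT       21                      /* 2 MiB regions */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long addr = 0x40234567UL;

        /* Rounds down to the containing 2 MiB boundary. */
        printf("%#lx -> %#lx\n", addr, addr & PMD_MASK);   /* 0x40200000 */
        return 0;
}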

2008-03-23 09:06:56

by Joe Perches

Subject: [PATCH 089/148] include/asm-x86/pci.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pci.h | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 0659543..30bbde0 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -8,14 +8,13 @@
#include <asm/scatterlist.h>
#include <asm/io.h>

-
#ifdef __KERNEL__

struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
#ifdef CONFIG_X86_64
- void* iommu; /* IOMMU private data */
+ void *iommu; /* IOMMU private data */
#endif
};

@@ -54,7 +53,7 @@ extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_CARDBUS_IO 0x4000

void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
+struct pci_bus *pcibios_scan_root(int bus);

void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
@@ -64,7 +63,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+ enum pci_mmap_state mmap_state,
+ int write_combine);


#ifdef CONFIG_PCI
--
1.5.4.rc2

2008-03-23 09:07:32

by Joe Perches

Subject: [PATCH 095/148] include/asm-x86/pgtable-3level.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgtable-3level.h | 48 ++++++++++++++++++++++---------------
1 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 1d763ee..8b4a9d4 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -8,22 +8,26 @@
* Copyright (C) 1999 Ingo Molnar <[email protected]>
*/

-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
+ __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016Lx).\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016Lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
return pud_val(pud) == 0;
}
+
static inline int pud_bad(pud_t pud)
{
return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}
+
static inline int pud_present(pud_t pud)
{
return pud_val(pud) & _PAGE_PRESENT;
@@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
* we are justified in merely clearing the PTE present bit, followed
* by a set. The ordering here is important.
*/
-static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+static inline void native_set_pte_present(struct mm_struct *mm,
+ unsigned long addr,
pte_t *ptep, pte_t pte)
{
ptep->pte_low = 0;
@@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long ad

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
- set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
+
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
+
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
- set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
@@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud)
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
*/
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
ptep->pte_low = 0;
smp_wmb();
@@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp)
* current pgd to avoid unnecessary TLB flushes.
*/
pgd = read_cr3();
- if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+ if (__pa(pudp) >= pgd && __pa(pudp) <
+ (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
write_cr3(pgd);
}

-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))

-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))


/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
- pmd_index(address))
+#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+ pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
@@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte)
* put the 32 bits of offset into the high part.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
+#define pgoff_to_pte(off) \
+ ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS 32

/* Encode and de-code a swap entry */
--
1.5.4.rc2
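
A quick gloss on the ordering the hunks above are careful to preserve:
with PAE a pte is 64 bits wide while the CPU stores only 32 bits at a
time, so these helpers either go through set_64bit() or clear the low
word (which holds _PAGE_PRESENT) before touching the high word. A
minimal sketch of the two-store ordering, assuming the pte_high/pte_low
layout of this header; it is illustrative, not part of the patch:

        /* Illustrative sketch only. Writing a 64-bit PTE as two 32-bit
         * stores: kill the present bit first so a concurrent hardware
         * page-table walk never sees a half-updated entry. */
        static inline void sketch_set_pte(pte_t *ptep, pte_t pte)
        {
                ptep->pte_low = 0;             /* entry now not present */
                smp_wmb();                     /* high half before low half */
                ptep->pte_high = pte.pte_high;
                smp_wmb();
                ptep->pte_low = pte.pte_low;   /* becomes visible last */
        }

set_64bit() closes the window altogether by looping on cmpxchg8b, which
is why the _atomic variants above funnel through it.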

2008-03-23 09:07:56

by Joe Perches

Subject: [PATCH 121/148] include/asm-x86/suspend_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/suspend_64.h | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index 2eb92cb..dc3262b 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -9,8 +9,7 @@
#include <asm/desc.h>
#include <asm/i387.h>

-static inline int
-arch_prepare_suspend(void)
+static inline int arch_prepare_suspend(void)
{
return 0;
}
@@ -25,7 +24,7 @@ arch_prepare_suspend(void)
*/
struct saved_context {
struct pt_regs regs;
- u16 ds, es, fs, gs, ss;
+ u16 ds, es, fs, gs, ss;
unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4, cr8;
unsigned long efer;
--
1.5.4.rc2
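
For orientation, the saved_context fields being lined up above are
filled by the 64-bit suspend path with direct moves from the segment
and control registers. A hedged sketch of the segment half, using a
hypothetical helper name; the kernel's real code lives in the
suspend/resume support, not in this header:

        /* Hypothetical sketch: capture the segment registers into the
         * struct saved_context fields shown above. The control
         * registers (cr0..cr8) are read in the same spirit with
         * mov-from-%crN sequences. */
        static inline void sketch_save_segments(struct saved_context *ctxt)
        {
                asm volatile("movw %%ds, %0" : "=m" (ctxt->ds));
                asm volatile("movw %%es, %0" : "=m" (ctxt->es));
                asm volatile("movw %%fs, %0" : "=m" (ctxt->fs));
                asm volatile("movw %%gs, %0" : "=m" (ctxt->gs));
                asm volatile("movw %%ss, %0" : "=m" (ctxt->ss));
        }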

2008-03-23 09:08:21

by Joe Perches

Subject: [PATCH 112/148] include/asm-x86/sigcontext.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/sigcontext.h | 7 ++++---
1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index d743947..2f9c884 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -79,7 +79,7 @@ struct sigcontext {
unsigned long flags;
unsigned long sp_at_signal;
unsigned short ss, __ssh;
- struct _fpstate __user * fpstate;
+ struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
@@ -107,7 +107,7 @@ struct sigcontext {
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
- struct _fpstate __user * fpstate;
+ struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
@@ -121,7 +121,8 @@ struct sigcontext {
struct _fpstate {
__u16 cwd;
__u16 swd;
- __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */
+ __u16 twd; /* Note this is not the same as the
+ 32bit/x87/FSAVE twd */
__u16 fop;
__u64 rip;
__u64 rdp;
--
1.5.4.rc2
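
The reflowed twd comment deserves a gloss: the 64-bit _fpstate mirrors
the FXSAVE image, whose tag word is abridged to one valid/empty bit per
x87 register, while the legacy 32-bit FSAVE tag word spends two bits
per register. A rough sketch of the expansion under the simplifying
assumption that every valid register maps to tag 00 and every empty one
to 11; the helper name is hypothetical:

        /* Hypothetical helper, illustrative only: widen an FXSAVE-style
         * abridged tag word (1 bit per register, 1 = valid) into the
         * 2-bits-per-register FSAVE encoding (00 = valid, 11 = empty). */
        static unsigned short sketch_ftw_to_twd(unsigned char ftw)
        {
                unsigned short twd = 0;
                int i;

                for (i = 0; i < 8; i++)
                        if (!(ftw & (1 << i)))
                                twd |= 3 << (i * 2);
                return twd;
        }

The real conversion also has to inspect the register contents to tell
zero and special operands apart, which is exactly why the comment warns
that the two twd words are not interchangeable.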

2008-03-23 09:08:39

by Joe Perches

Subject: [PATCH 113/148] include/asm-x86/signal.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/signal.h | 48 +++++++++++++++++++++++-----------------------
1 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index aee7eca..f15186d 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -185,61 +185,61 @@ typedef struct sigaltstack {

#define __HAVE_ARCH_SIG_BITOPS

-#define sigaddset(set,sig) \
- (__builtin_constantp(sig) ? \
- __const_sigaddset((set),(sig)) : \
- __gen_sigaddset((set),(sig)))
+#define sigaddset(set,sig) \
+ (__builtin_constantp(sig) \
+ ? __const_sigaddset((set), (sig)) \
+ : __gen_sigaddset((set), (sig)))

-static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
+static inline void __gen_sigaddset(sigset_t *set, int _sig)
{
- __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+ asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}

-static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
+static inline void __const_sigaddset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
}

-#define sigdelset(set,sig) \
- (__builtin_constant_p(sig) ? \
- __const_sigdelset((set),(sig)) : \
- __gen_sigdelset((set),(sig)))
+#define sigdelset(set, sig) \
+ (__builtin_constant_p(sig) \
+ ? __const_sigdelset((set), (sig)) \
+ : __gen_sigdelset((set), (sig)))


-static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
+static inline void __gen_sigdelset(sigset_t *set, int _sig)
{
- __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+ asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}

-static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
+static inline void __const_sigdelset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
}

-static __inline__ int __const_sigismember(sigset_t *set, int _sig)
+static inline int __const_sigismember(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
}

-static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+static inline int __gen_sigismember(sigset_t *set, int _sig)
{
int ret;
- __asm__("btl %2,%1\n\tsbbl %0,%0"
- : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+ asm("btl %2,%1\n\tsbbl %0,%0"
+ : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
return ret;
}

-#define sigismember(set,sig) \
- (__builtin_constant_p(sig) ? \
- __const_sigismember((set),(sig)) : \
- __gen_sigismember((set),(sig)))
+#define sigismember(set, sig) \
+ (__builtin_constant_p(sig) \
+ ? __const_sigismember((set), (sig)) \
+ : __gen_sigismember((set), (sig)))

-static __inline__ int sigfindinword(unsigned long word)
+static inline int sigfindinword(unsigned long word)
{
- __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
+ asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
return word;
}

--
1.5.4.rc2
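
All three macro families above (sigaddset, sigdelset, sigismember) use
the same dispatch idiom: __builtin_constant_p() lets gcc decide at
compile time whether the signal number is a constant, picking a
foldable C expression when it is and an inline-asm bit instruction when
it is not. A self-contained i386-flavoured sketch of the pattern, with
made-up names:

        /* Sketch of constant-vs-runtime dispatch (gcc-specific).
         * __builtin_constant_p() is resolved at compile time, so only
         * one arm of the conditional survives in the generated code. */
        static inline void bit_set_gen(unsigned long *word, int bit)
        {
                asm("btsl %1,%0" : "+m" (*word) : "Ir" (bit) : "cc");
        }

        static inline void bit_set_const(unsigned long *word, int bit)
        {
                *word |= 1UL << bit;
        }

        #define bit_set(word, bit)                      \
                (__builtin_constant_p(bit)              \
                 ? bit_set_const((word), (bit))         \
                 : bit_set_gen((word), (bit)))

Note in passing that the quoted sigaddset spells the builtin
__builtin_constantp, missing the final underscore; that pre-existing
typo is left untouched by a formatting-only patch.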

2008-03-23 09:08:58

by Joe Perches

[permalink] [raw]
Subject: [PATCH 102/148] include/asm-x86/ptrace.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/ptrace.h | 7 ++++---
1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index bc44246..e779f2b 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -140,7 +140,8 @@ extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);

#ifdef CONFIG_X86_32
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code);
#else
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif
@@ -169,8 +170,8 @@ static inline int user_mode(struct pt_regs *regs)
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
- return ((regs->cs & SEGMENT_RPL_MASK) |
- (regs->flags & VM_MASK)) >= USER_RPL;
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & VM_MASK)) >=
+ USER_RPL;
#else
return user_mode(regs);
#endif
--
1.5.4.rc2
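
The user_mode_vm() hunk is a small branch-avoidance trick worth
spelling out: a vm86 task must count as user mode whatever RPL its
saved cs carries, so the VM flag is OR-ed into the RPL bits and a
single comparison against USER_RPL covers both cases. A sketch with the
conventional x86 constants written out; the SK_ values are stated here
for illustration, not taken from the patch:

        /* Illustrative constants: the low two selector bits are the
         * RPL, ring 3 is user, and EFLAGS.VM marks virtual-8086 mode. */
        #define SK_SEGMENT_RPL_MASK     0x3
        #define SK_USER_RPL             0x3
        #define SK_VM_MASK              0x00020000

        static inline int sketch_user_mode_vm(unsigned long cs,
                                              unsigned long flags)
        {
                /* Any nonzero VM bit makes the OR >= SK_USER_RPL, so
                 * vm86 tasks pass regardless of their RPL. */
                return ((cs & SK_SEGMENT_RPL_MASK) |
                        (flags & SK_VM_MASK)) >= SK_USER_RPL;
        }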

2008-03-23 09:09:28

by Joe Perches

Subject: [PATCH 094/148] include/asm-x86/pgtable_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/pgtable_32.h | 102 ++++++++++++++++++++++--------------------
1 files changed, 53 insertions(+), 49 deletions(-)

diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 82ba08f..e9c8c3f 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -40,13 +40,13 @@ void paging_init(void);
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE-1))
+# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))

/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
@@ -55,21 +55,22 @@ void paging_init(void);
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + \
- 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
+ & ~(VMALLOC_OFFSET - 1))
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+ & PMD_MASK)

#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif

/*
@@ -86,16 +87,16 @@ extern unsigned long pg0[];
#define pte_hidden(x) ((x).pte_low & (_PAGE_HIDDEN))

/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x) (!(unsigned long)pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_none(x) (!(unsigned long)pmd_val((x)))
+#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)

extern int pmd_bad(pmd_t pmd);

-#define pmd_bad_v1(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pmd_bad_v2(x) ((pmd_val(x) \
- & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \
- != _KERNPG_TABLE)
-
+#define pmd_bad_v1(x) \
+ (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
+#define pmd_bad_v2(x) \
+ (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \
+ _PAGE_PSE | _PAGE_NX)))

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

@@ -106,17 +107,18 @@ extern int pmd_bad(pmd_t pmd);
#endif

/*
- * Macro to mark a page protection value as "uncacheable". On processors which do not support
- * it, this is a no-op.
+ * Macro to mark a page protection value as "uncacheable".
+ * On processors which do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
- ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+#define pgprot_noncached(prot) \
+ ((boot_cpu_data.x86 > 3) \
+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
+ : (prot))

/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/*
@@ -125,20 +127,20 @@ extern int pmd_bad(pmd_t pmd);
* this macro returns the index of the entry in the pgd page which would
* control the given virtual address
*/
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index((addr))

/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

static inline int pud_large(pud_t pud) { return 0; }

@@ -148,8 +150,8 @@ static inline int pud_large(pud_t pud) { return 0; }
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
-#define pmd_index(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_index(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -157,43 +159,45 @@ static inline int pud_large(pud_t pud) { return 0; }
* this macro returns the index of the entry in the pte page which would
* control the given virtual address
*/
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))

-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))

#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
+ pte_index((address)))
+#define pte_offset_map_nested(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, vaddr, ptep); \
- __flush_tlb_one(vaddr); \
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+ __flush_tlb_one((vaddr)); \
} while (0)

/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)

void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);
@@ -222,7 +226,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
#define kern_addr_valid(kaddr) (0)
#endif

-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* _I386_PGTABLE_H */
--
1.5.4.rc2
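
Most of the macros touched above are plain index arithmetic: a 32-bit
virtual address decomposes into a page-directory index and a page-table
index (plus a middle level under PAE). A worked sketch with the classic
two-level values, PGDIR_SHIFT of 22 and PAGE_SHIFT of 12; the SK_
constants are assumptions about the non-PAE configuration, not lines
from this patch:

        /* Two-level i386 sketch: 1024 entries per level, 4 KB pages. */
        #define SK_PAGE_SHIFT   12
        #define SK_PGDIR_SHIFT  22
        #define SK_PTRS_PER_PGD 1024
        #define SK_PTRS_PER_PTE 1024

        #define sk_pgd_index(addr) \
                (((addr) >> SK_PGDIR_SHIFT) & (SK_PTRS_PER_PGD - 1))
        #define sk_pte_index(addr) \
                (((addr) >> SK_PAGE_SHIFT) & (SK_PTRS_PER_PTE - 1))

        /* Example: address 0xc0101000 lands in pgd slot 0x300 and
         * pte slot 0x101. */

With PAE (pgtable-3level) the same game is played across three levels,
which is why PMD_SIZE/PMD_MASK only show up in the PAE branch above.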

2008-03-23 09:09:48

by Joe Perches

Subject: [PATCH 129/148] include/asm-x86/thread_info.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/thread_info.h | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 407b88c..9c93845 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
-extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+extern int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
--
1.5.4.rc2

2008-03-23 09:10:18

by Joe Perches

Subject: [PATCH 148/148] include/asm-x86/xor_64.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <[email protected]>
---
include/asm-x86/xor_64.h | 294 +++++++++++++++++++++++-----------------------
1 files changed, 149 insertions(+), 145 deletions(-)

diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 1eee7fc..24957e3 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -24,20 +24,23 @@
*/

/*
- * x86-64 changes / gcc fixes from Andi Kleen.
+ * x86-64 changes / gcc fixes from Andi Kleen.
* Copyright 2002 Andi Kleen, SuSE Labs.
*
* This hasn't been optimized for the hammer yet, but there are likely
* no advantages to be gotten from x86-64 here anyways.
*/

-typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
+typedef struct {
+ unsigned long a, b;
+} __attribute__((aligned(16))) xmm_store_t;

-/* Doesn't use gcc to save the XMM registers, because there is no easy way to
+/* Doesn't use gcc to save the XMM registers, because there is no easy way to
tell it to do a clts before the register saving. */
-#define XMMS_SAVE do { \
+#define XMMS_SAVE \
+do { \
preempt_disable(); \
- asm volatile ( \
+ asm volatile( \
"movq %%cr0,%0 ;\n\t" \
"clts ;\n\t" \
"movups %%xmm0,(%1) ;\n\t" \
@@ -47,10 +50,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
: "=&r" (cr0) \
: "r" (xmm_save) \
: "memory"); \
-} while(0)
+} while (0)

-#define XMMS_RESTORE do { \
- asm volatile ( \
+#define XMMS_RESTORE \
+do { \
+ asm volatile( \
"sfence ;\n\t" \
"movups (%1),%%xmm0 ;\n\t" \
"movups 0x10(%1),%%xmm1 ;\n\t" \
@@ -61,72 +65,72 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
: "r" (cr0), "r" (xmm_save) \
: "memory"); \
preempt_enable(); \
-} while(0)
+} while (0)

#define OFFS(x) "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")"
#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
-#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
-#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
-#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
-#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
-#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
-#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
-#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
+#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"


static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
- unsigned int lines = bytes >> 8;
+ unsigned int lines = bytes >> 8;
unsigned long cr0;
xmm_store_t xmm_save[4];

XMMS_SAVE;

- asm volatile (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
- LD(i,0) \
- LD(i+1,1) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
PF1(i) \
- PF1(i+2) \
- LD(i+2,2) \
- LD(i+3,3) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ PF1(i + 2) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
" decl %[cnt] ; jnz 1b"
: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
- : [inc] "r" (256UL)
- : "memory");
+ : [inc] "r" (256UL)
+ : "memory");

XMMS_RESTORE;
}
@@ -141,52 +145,52 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ PF2(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
" decl %[cnt] ; jnz 1b"
: [cnt] "+r" (lines),
[p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
: [inc] "r" (256UL)
- : "memory");
+ : "memory");
XMMS_RESTORE;
}

@@ -195,64 +199,64 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
+ xmm_store_t xmm_save[4];
unsigned long cr0;

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
PF3(i) \
- PF3(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ PF3(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
" decl %[cnt] ; jnz 1b"
: [cnt] "+c" (lines),
[p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
: [inc] "r" (256UL)
- : "memory" );
+ : "memory" );

XMMS_RESTORE;
}
@@ -261,70 +265,70 @@ static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
- unsigned int lines = bytes >> 8;
+ unsigned int lines = bytes >> 8;
xmm_store_t xmm_save[4];
unsigned long cr0;

XMMS_SAVE;

- __asm__ __volatile__ (
+ asm volatile(
#undef BLOCK
#define BLOCK(i) \
PF1(i) \
- PF1(i+2) \
- LD(i,0) \
- LD(i+1,1) \
- LD(i+2,2) \
- LD(i+3,3) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
PF2(i) \
- PF2(i+2) \
- XO1(i,0) \
- XO1(i+1,1) \
- XO1(i+2,2) \
- XO1(i+3,3) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
PF3(i) \
- PF3(i+2) \
- XO2(i,0) \
- XO2(i+1,1) \
- XO2(i+2,2) \
- XO2(i+3,3) \
+ PF3(i + 2) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
PF4(i) \
- PF4(i+2) \
- PF0(i+4) \
- PF0(i+6) \
- XO3(i,0) \
- XO3(i+1,1) \
- XO3(i+2,2) \
- XO3(i+3,3) \
- XO4(i,0) \
- XO4(i+1,1) \
- XO4(i+2,2) \
- XO4(i+3,3) \
- ST(i,0) \
- ST(i+1,1) \
- ST(i+2,2) \
- ST(i+3,3) \
+ PF4(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ XO4(i + 1, 1) \
+ XO4(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \


PF0(0)
PF0(2)

" .align 32 ;\n"
- " 1: ;\n"
+ " 1: ;\n"

BLOCK(0)
BLOCK(4)
BLOCK(8)
BLOCK(12)

- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " addq %[inc], %[p5] ;\n"
+ " addq %[inc], %[p1] ;\n"
+ " addq %[inc], %[p2] ;\n"
+ " addq %[inc], %[p3] ;\n"
+ " addq %[inc], %[p4] ;\n"
+ " addq %[inc], %[p5] ;\n"
" decl %[cnt] ; jnz 1b"
: [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
[p5] "+r" (p5)
: [inc] "r" (256UL)
: "memory");
@@ -333,18 +337,18 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}

static struct xor_block_template xor_block_sse = {
- .name = "generic_sse",
- .do_2 = xor_sse_2,
- .do_3 = xor_sse_3,
- .do_4 = xor_sse_4,
- .do_5 = xor_sse_5,
+ .name = "generic_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
};

#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_sse); \
- } while (0)
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_sse); \
+} while (0)

/* We force the use of the SSE xor block because it can write around L2.
We may also be able to load into the L1 only depending on how the cpu
--
1.5.4.rc2
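
On the XMMS_SAVE/XMMS_RESTORE pairing reworked above: kernel code may
not casually use SSE registers, so these routines disable preemption,
clear CR0.TS by hand (hence the raw asm instead of letting gcc allocate
XMM registers, as the comment explains), spill xmm0-xmm3 to a local
buffer, and undo all of it afterwards. A hypothetical usage sketch; in
the kernel these routines are reached through xor_speed() and the
xor_blocks() machinery rather than called directly:

        /* Hypothetical sketch: XOR two 4 KB, 16-byte-aligned buffers.
         * xor_sse_2() consumes 256 bytes per loop iteration and leaves
         * the result in its first argument. */
        static unsigned long dst[512] __attribute__((aligned(16)));
        static unsigned long src[512] __attribute__((aligned(16)));

        static void sketch_xor_pages(void)
        {
                xor_sse_2(sizeof(dst), dst, src);   /* dst[i] ^= src[i] */
        }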

2008-03-23 09:12:32

by Al Viro

Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Sun, Mar 23, 2008 at 01:03:24AM -0700, Joe Perches wrote:
>
> Signed-off-by: Joe Perches <[email protected]>

An obvious corollary to this highly inspired series of patches:

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
deleted file mode 100755
index 2a7cef9..0000000
--- a/scripts/checkpatch.pl
+++ /dev/null
@@ -1,1931 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2001, Dave Jones. <[email protected]> (the file handling bit)
-# (c) 2005, Joel Schopp <[email protected]> (the ugly bit)
-# (c) 2007, Andy Whitcroft <[email protected]> (new conditions, test suite, etc)
-# Licensed under the terms of the GNU GPL License version 2
-
-use strict;
-
-my $P = $0;
-$P =~ s@.*/@@g;
-
-my $V = '0.15';
-
-use Getopt::Long qw(:config no_auto_abbrev);
-
-my $quiet = 0;
-my $tree = 1;
-my $chk_signoff = 1;
-my $chk_patch = 1;
-my $tst_type = 0;
-my $emacs = 0;
-my $terse = 0;
-my $file = 0;
-my $check = 0;
-my $summary = 1;
-my $mailback = 0;
-my $summary_file = 0;
-my $root;
-my %debug;
-GetOptions(
- 'q|quiet+' => \$quiet,
- 'tree!' => \$tree,
- 'signoff!' => \$chk_signoff,
- 'patch!' => \$chk_patch,
- 'emacs!' => \$emacs,
- 'terse!' => \$terse,
- 'file!' => \$file,
- 'subjective!' => \$check,
- 'strict!' => \$check,
- 'root=s' => \$root,
- 'summary!' => \$summary,
- 'mailback!' => \$mailback,
- 'summary-file!' => \$summary_file,
-
- 'debug=s' => \%debug,
- 'test-type!' => \$tst_type,
-) or exit;
-
-my $exit = 0;
-
-if ($#ARGV < 0) {
- print "usage: $P [options] patchfile\n";
- print "version: $V\n";
- print "options: -q => quiet\n";
- print " --no-tree => run without a kernel tree\n";
- print " --terse => one line per report\n";
- print " --emacs => emacs compile window format\n";
- print " --file => check a source file\n";
- print " --strict => enable more subjective tests\n";
- print " --root => path to the kernel tree root\n";
- print " --no-summary => suppress the per-file summary\n";
- print " --summary-file => include the filename in summary\n";
- exit(1);
-}
-
-my $dbg_values = 0;
-my $dbg_possible = 0;
-for my $key (keys %debug) {
- eval "\${dbg_$key} = '$debug{$key}';"
-}
-
-if ($terse) {
- $emacs = 1;
- $quiet++;
-}
-
-if ($tree) {
- if (defined $root) {
- if (!top_of_kernel_tree($root)) {
- die "$P: $root: --root does not point at a valid tree\n";
- }
- } else {
- if (top_of_kernel_tree('.')) {
- $root = '.';
- } elsif ($0 =~ m@(.*)/scripts/[^/]*$@ &&
- top_of_kernel_tree($1)) {
- $root = $1;
- }
- }
-
- if (!defined $root) {
- print "Must be run from the top-level dir. of a kernel tree\n";
- exit(2);
- }
-}
-
-my $emitted_corrupt = 0;
-
-our $Ident = qr{[A-Za-z_][A-Za-z\d_]*};
-our $Storage = qr{extern|static|asmlinkage};
-our $Sparse = qr{
- __user|
- __kernel|
- __force|
- __iomem|
- __must_check|
- __init_refok|
- __kprobes
- }x;
-our $Attribute = qr{
- const|
- __read_mostly|
- __kprobes|
- __(?:mem|cpu|dev|)(?:initdata|init)
- }x;
-our $Inline = qr{inline|__always_inline|noinline};
-our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
-our $Lval = qr{$Ident(?:$Member)*};
-
-our $Constant = qr{(?:[0-9]+|0x[0-9a-fA-F]+)[UL]*};
-our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
-our $Operators = qr{
- <=|>=|==|!=|
- =>|->|<<|>>|<|>|!|~|
- &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%
- }x;
-
-our $NonptrType;
-our $Type;
-our $Declare;
-
-our @typeList = (
- qr{void},
- qr{char},
- qr{short},
- qr{int},
- qr{long},
- qr{unsigned},
- qr{float},
- qr{double},
- qr{bool},
- qr{long\s+int},
- qr{long\s+long},
- qr{long\s+long\s+int},
- qr{(?:__)?(?:u|s|be|le)(?:8|16|32|64)},
- qr{struct\s+$Ident},
- qr{union\s+$Ident},
- qr{enum\s+$Ident},
- qr{${Ident}_t},
- qr{${Ident}_handler},
- qr{${Ident}_handler_fn},
-);
-
-sub build_types {
- my $all = "(?: \n" . join("|\n ", @typeList) . "\n)";
- $NonptrType = qr{
- \b
- (?:const\s+)?
- (?:unsigned\s+)?
- (?:
- $all|
- (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)
- )
- (?:\s+$Sparse|\s+const)*
- \b
- }x;
- $Type = qr{
- \b$NonptrType\b
- (?:\s*\*+\s*const|\s*\*+|(?:\s*\[\s*\])+)?
- (?:\s+$Inline|\s+$Sparse|\s+$Attribute)*
- }x;
- $Declare = qr{(?:$Storage\s+)?$Type};
-}
-build_types();
-
-$chk_signoff = 0 if ($file);
-
-my @dep_includes = ();
-my @dep_functions = ();
-my $removal = "Documentation/feature-removal-schedule.txt";
-if ($tree && -f "$root/$removal") {
- open(REMOVE, "<$root/$removal") ||
- die "$P: $removal: open failed - $!\n";
- while (<REMOVE>) {
- if (/^Check:\s+(.*\S)/) {
- for my $entry (split(/[, ]+/, $1)) {
- if ($entry =~ m@include/(.*)@) {
- push(@dep_includes, $1);
-
- } elsif ($entry !~ m@/@) {
- push(@dep_functions, $entry);
- }
- }
- }
- }
-}
-
-my @rawlines = ();
-my @lines = ();
-my $vname;
-for my $filename (@ARGV) {
- if ($file) {
- open(FILE, "diff -u /dev/null $filename|") ||
- die "$P: $filename: diff failed - $!\n";
- } else {
- open(FILE, "<$filename") ||
- die "$P: $filename: open failed - $!\n";
- }
- if ($filename eq '-') {
- $vname = 'Your patch';
- } else {
- $vname = $filename;
- }
- while (<FILE>) {
- chomp;
- push(@rawlines, $_);
- }
- close(FILE);
- if (!process($filename)) {
- $exit = 1;
- }
- @rawlines = ();
- @lines = ();
-}
-
-exit($exit);
-
-sub top_of_kernel_tree {
- my ($root) = @_;
-
- my @tree_check = (
- "COPYING", "CREDITS", "Kbuild", "MAINTAINERS", "Makefile",
- "README", "Documentation", "arch", "include", "drivers",
- "fs", "init", "ipc", "kernel", "lib", "scripts",
- );
-
- foreach my $check (@tree_check) {
- if (! -e $root . '/' . $check) {
- return 0;
- }
- }
- return 1;
-}
-
-sub expand_tabs {
- my ($str) = @_;
-
- my $res = '';
- my $n = 0;
- for my $c (split(//, $str)) {
- if ($c eq "\t") {
- $res .= ' ';
- $n++;
- for (; ($n % 8) != 0; $n++) {
- $res .= ' ';
- }
- next;
- }
- $res .= $c;
- $n++;
- }
-
- return $res;
-}
-sub copy_spacing {
- my ($str) = @_;
-
- my $res = '';
- for my $c (split(//, $str)) {
- if ($c eq "\t") {
- $res .= $c;
- } else {
- $res .= ' ';
- }
- }
-
- return $res;
-}
-
-sub line_stats {
- my ($line) = @_;
-
- # Drop the diff line leader and expand tabs
- $line =~ s/^.//;
- $line = expand_tabs($line);
-
- # Pick the indent from the front of the line.
- my ($white) = ($line =~ /^(\s*)/);
-
- return (length($line), length($white));
-}
-
-sub sanitise_line {
- my ($line) = @_;
-
- my $res = '';
- my $l = '';
-
- my $quote = '';
- my $qlen = 0;
-
- foreach my $c (split(//, $line)) {
- # The second backslash of a pair is not a "quote".
- if ($l eq "\\" && $c eq "\\") {
- $c = 'X';
- }
- if ($l ne "\\" && ($c eq "'" || $c eq '"')) {
- if ($quote eq '') {
- $quote = $c;
- $res .= $c;
- $l = $c;
- $qlen = 0;
- next;
- } elsif ($quote eq $c) {
- $quote = '';
- }
- }
- if ($quote eq "'" && $qlen > 1) {
- $quote = '';
- }
- if ($quote && $c ne "\t") {
- $res .= "X";
- $qlen++;
- } else {
- $res .= $c;
- }
-
- $l = $c;
- }
-
- # Clear out the comments.
- while ($res =~ m@(/\*.*?\*/)@g) {
- substr($res, $-[1], $+[1] - $-[1]) = $; x ($+[1] - $-[1]);
- }
- if ($res =~ m@(/\*.*)@) {
- substr($res, $-[1], $+[1] - $-[1]) = $; x ($+[1] - $-[1]);
- }
- if ($res =~ m@^.(.*\*/)@) {
- substr($res, $-[1], $+[1] - $-[1]) = $; x ($+[1] - $-[1]);
- }
-
- # The pathname on a #include may be surrounded by '<' and '>'.
- if ($res =~ /^.#\s*include\s+\<(.*)\>/) {
- my $clean = 'X' x length($1);
- $res =~ s@\<.*\>@<$clean>@;
-
- # The whole of a #error is a string.
- } elsif ($res =~ /^.#\s*(?:error|warning)\s+(.*)\b/) {
- my $clean = 'X' x length($1);
- $res =~ s@(#\s*(?:error|warning)\s+).*@$1$clean@;
- }
-
- return $res;
-}
-
-sub ctx_statement_block {
- my ($linenr, $remain, $off) = @_;
- my $line = $linenr - 1;
- my $blk = '';
- my $soff = $off;
- my $coff = $off - 1;
-
- my $loff = 0;
-
- my $type = '';
- my $level = 0;
- my $p;
- my $c;
- my $len = 0;
-
- my $remainder;
- while (1) {
- #warn "CSB: blk<$blk>\n";
- # If we are about to drop off the end, pull in more
- # context.
- if ($off >= $len) {
- for (; $remain > 0; $line++) {
- next if ($lines[$line] =~ /^-/);
- $remain--;
- $loff = $len;
- $blk .= $lines[$line] . "\n";
- $len = length($blk);
- $line++;
- last;
- }
- # Bail if there is no further context.
- #warn "CSB: blk<$blk> off<$off> len<$len>\n";
- if ($off >= $len) {
- last;
- }
- }
- $p = $c;
- $c = substr($blk, $off, 1);
- $remainder = substr($blk, $off);
-
- #warn "CSB: c<$c> type<$type> level<$level>\n";
- # Statement ends at the ';' or a close '}' at the
- # outermost level.
- if ($level == 0 && $c eq ';') {
- last;
- }
-
- # An else is really a conditional as long as its not else if
- if ($level == 0 && (!defined($p) || $p =~ /(?:\s|\})/) &&
- $remainder =~ /(else)(?:\s|{)/ &&
- $remainder !~ /else\s+if\b/) {
- $coff = $off + length($1);
- }
-
- if (($type eq '' || $type eq '(') && $c eq '(') {
- $level++;
- $type = '(';
- }
- if ($type eq '(' && $c eq ')') {
- $level--;
- $type = ($level != 0)? '(' : '';
-
- if ($level == 0 && $coff < $soff) {
- $coff = $off;
- }
- }
- if (($type eq '' || $type eq '{') && $c eq '{') {
- $level++;
- $type = '{';
- }
- if ($type eq '{' && $c eq '}') {
- $level--;
- $type = ($level != 0)? '{' : '';
-
- if ($level == 0) {
- last;
- }
- }
- $off++;
- }
- if ($off == $len) {
- $line++;
- $remain--;
- }
-
- my $statement = substr($blk, $soff, $off - $soff + 1);
- my $condition = substr($blk, $soff, $coff - $soff + 1);
-
- #warn "STATEMENT<$statement>\n";
- #warn "CONDITION<$condition>\n";
-
- #print "off<$off> loff<$loff>\n";
-
- return ($statement, $condition,
- $line, $remain + 1, $off - $loff + 1, $level);
-}
-
-sub statement_lines {
- my ($stmt) = @_;
-
- # Strip the diff line prefixes and rip blank lines at start and end.
- $stmt =~ s/(^|\n)./$1/g;
- $stmt =~ s/^\s*//;
- $stmt =~ s/\s*$//;
-
- my @stmt_lines = ($stmt =~ /\n/g);
-
- return $#stmt_lines + 2;
-}
-
-sub statement_rawlines {
- my ($stmt) = @_;
-
- my @stmt_lines = ($stmt =~ /\n/g);
-
- return $#stmt_lines + 2;
-}
-
-sub statement_block_size {
- my ($stmt) = @_;
-
- $stmt =~ s/(^|\n)./$1/g;
- $stmt =~ s/^\s*{//;
- $stmt =~ s/}\s*$//;
- $stmt =~ s/^\s*//;
- $stmt =~ s/\s*$//;
-
- my @stmt_lines = ($stmt =~ /\n/g);
- my @stmt_statements = ($stmt =~ /;/g);
-
- my $stmt_lines = $#stmt_lines + 2;
- my $stmt_statements = $#stmt_statements + 1;
-
- if ($stmt_lines > $stmt_statements) {
- return $stmt_lines;
- } else {
- return $stmt_statements;
- }
-}
-
-sub ctx_statement_full {
- my ($linenr, $remain, $off) = @_;
- my ($statement, $condition, $level);
-
- my (@chunks);
-
- # Grab the first conditional/block pair.
- ($statement, $condition, $linenr, $remain, $off, $level) =
- ctx_statement_block($linenr, $remain, $off);
- #print "F: c<$condition> s<$statement>\n";
- push(@chunks, [ $condition, $statement ]);
- if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
- return ($level, $linenr, @chunks);
- }
-
- # Pull in the following conditional/block pairs and see if they
- # could continue the statement.
- for (;;) {
- ($statement, $condition, $linenr, $remain, $off, $level) =
- ctx_statement_block($linenr, $remain, $off);
- #print "C: c<$condition> s<$statement> remain<$remain>\n";
- last if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:else|do)\b/s));
- #print "C: push\n";
- push(@chunks, [ $condition, $statement ]);
- }
-
- return ($level, $linenr, @chunks);
-}
-
-sub ctx_block_get {
- my ($linenr, $remain, $outer, $open, $close, $off) = @_;
- my $line;
- my $start = $linenr - 1;
- my $blk = '';
- my @o;
- my @c;
- my @res = ();
-
- my $level = 0;
- for ($line = $start; $remain > 0; $line++) {
- next if ($rawlines[$line] =~ /^-/);
- $remain--;
-
- $blk .= $rawlines[$line];
- foreach my $c (split(//, $rawlines[$line])) {
- ##print "C<$c>L<$level><$open$close>O<$off>\n";
- if ($off > 0) {
- $off--;
- next;
- }
-
- if ($c eq $close && $level > 0) {
- $level--;
- last if ($level == 0);
- } elsif ($c eq $open) {
- $level++;
- }
- }
-
- if (!$outer || $level <= 1) {
- push(@res, $rawlines[$line]);
- }
-
- last if ($level == 0);
- }
-
- return ($level, @res);
-}
-sub ctx_block_outer {
- my ($linenr, $remain) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
- return @r;
-}
-sub ctx_block {
- my ($linenr, $remain) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
- return @r;
-}
-sub ctx_statement {
- my ($linenr, $remain, $off) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
- return @r;
-}
-sub ctx_block_level {
- my ($linenr, $remain) = @_;
-
- return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
-}
-sub ctx_statement_level {
- my ($linenr, $remain, $off) = @_;
-
- return ctx_block_get($linenr, $remain, 0, '(', ')', $off);
-}
-
-sub ctx_locate_comment {
- my ($first_line, $end_line) = @_;
-
- # Catch a comment on the end of the line itself.
- my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*$@);
- return $current_comment if (defined $current_comment);
-
- # Look through the context and try and figure out if there is a
- # comment.
- my $in_comment = 0;
- $current_comment = '';
- for (my $linenr = $first_line; $linenr < $end_line; $linenr++) {
- my $line = $rawlines[$linenr - 1];
- #warn " $line\n";
- if ($linenr == $first_line and $line =~ m@^.\s*\*@) {
- $in_comment = 1;
- }
- if ($line =~ m@/\*@) {
- $in_comment = 1;
- }
- if (!$in_comment && $current_comment ne '') {
- $current_comment = '';
- }
- $current_comment .= $line . "\n" if ($in_comment);
- if ($line =~ m@\*/@) {
- $in_comment = 0;
- }
- }
-
- chomp($current_comment);
- return($current_comment);
-}
-sub ctx_has_comment {
- my ($first_line, $end_line) = @_;
- my $cmt = ctx_locate_comment($first_line, $end_line);
-
- ##print "LINE: $rawlines[$end_line - 1 ]\n";
- ##print "CMMT: $cmt\n";
-
- return ($cmt ne '');
-}
-
-sub cat_vet {
- my ($vet) = @_;
- my ($res, $coded);
-
- $res = '';
- while ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {
- $res .= $1;
- if ($2 ne '') {
- $coded = sprintf("^%c", unpack('C', $2) + 64);
- $res .= $coded;
- }
- }
- $res =~ s/$/\$/;
-
- return $res;
-}
-
-my $av_preprocessor = 0;
-my $av_pending;
-my @av_paren_type;
-
-sub annotate_reset {
- $av_preprocessor = 0;
- $av_pending = '_';
- @av_paren_type = ('E');
-}
-
-sub annotate_values {
- my ($stream, $type) = @_;
-
- my $res;
- my $cur = $stream;
-
- print "$stream\n" if ($dbg_values > 1);
-
- while (length($cur)) {
- print " <" . join('', @av_paren_type) .
- "> <$type> " if ($dbg_values > 1);
- if ($cur =~ /^(\s+)/o) {
- print "WS($1)\n" if ($dbg_values > 1);
- if ($1 =~ /\n/ && $av_preprocessor) {
- $type = pop(@av_paren_type);
- $av_preprocessor = 0;
- }
-
- } elsif ($cur =~ /^($Type)/) {
- print "DECLARE($1)\n" if ($dbg_values > 1);
- $type = 'T';
-
- } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) {
- print "DEFINE($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
- $av_pending = 'N';
-
- } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) {
- print "PRE_START($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
-
- push(@av_paren_type, $type);
- push(@av_paren_type, $type);
- $type = 'N';
-
- } elsif ($cur =~ /^(#\s*(?:else|elif))/o) {
- print "PRE_RESTART($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
-
- push(@av_paren_type, $av_paren_type[$#av_paren_type]);
-
- $type = 'N';
-
- } elsif ($cur =~ /^(#\s*(?:endif))/o) {
- print "PRE_END($1)\n" if ($dbg_values > 1);
-
- $av_preprocessor = 1;
-
- # Assume all arms of the conditional end as this
- # one does, and continue as if the #endif was not here.
- pop(@av_paren_type);
- push(@av_paren_type, $type);
- $type = 'N';
-
- } elsif ($cur =~ /^(\\\n)/o) {
- print "PRECONT($1)\n" if ($dbg_values > 1);
-
- } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
- print "SIZEOF($1)\n" if ($dbg_values > 1);
- if (defined $2) {
- $av_pending = 'V';
- }
- $type = 'N';
-
- } elsif ($cur =~ /^(if|while|typeof|__typeof__|for)\b/o) {
- print "COND($1)\n" if ($dbg_values > 1);
- $av_pending = 'N';
- $type = 'N';
-
- } elsif ($cur =~/^(return|case|else)/o) {
- print "KEYWORD($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~ /^(\()/o) {
- print "PAREN('$1')\n" if ($dbg_values > 1);
- push(@av_paren_type, $av_pending);
- $av_pending = '_';
- $type = 'N';
-
- } elsif ($cur =~ /^(\))/o) {
- my $new_type = pop(@av_paren_type);
- if ($new_type ne '_') {
- $type = $new_type;
- print "PAREN('$1') -> $type\n"
- if ($dbg_values > 1);
- } else {
- print "PAREN('$1')\n" if ($dbg_values > 1);
- }
-
- } elsif ($cur =~ /^($Ident)\(/o) {
- print "FUNC($1)\n" if ($dbg_values > 1);
- $av_pending = 'V';
-
- } elsif ($cur =~ /^($Ident|$Constant)/o) {
- print "IDENT($1)\n" if ($dbg_values > 1);
- $type = 'V';
-
- } elsif ($cur =~ /^($Assignment)/o) {
- print "ASSIGN($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~/^(;|{|})/) {
- print "END($1)\n" if ($dbg_values > 1);
- $type = 'E';
-
- } elsif ($cur =~ /^(;|\?|:|\[)/o) {
- print "CLOSE($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~ /^($Operators)/o) {
- print "OP($1)\n" if ($dbg_values > 1);
- if ($1 ne '++' && $1 ne '--') {
- $type = 'N';
- }
-
- } elsif ($cur =~ /(^.)/o) {
- print "C($1)\n" if ($dbg_values > 1);
- }
- if (defined $1) {
- $cur = substr($cur, length($1));
- $res .= $type x length($1);
- }
- }
-
- return $res;
-}
-
-sub possible {
- my ($possible, $line) = @_;
-
- #print "CHECK<$possible>\n";
- if ($possible !~ /^(?:$Storage|$Type|DEFINE_\S+)$/ &&
- $possible ne 'goto' && $possible ne 'return' &&
- $possible ne 'struct' && $possible ne 'enum' &&
- $possible ne 'case' && $possible ne 'else' &&
- $possible ne 'typedef') {
- warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
- push(@typeList, $possible);
- build_types();
- }
-}
-
-my $prefix = '';
-
-sub report {
- my $line = $prefix . $_[0];
-
- $line = (split('\n', $line))[0] . "\n" if ($terse);
-
- push(our @report, $line);
-}
-sub report_dump {
- our @report;
-}
-sub ERROR {
- report("ERROR: $_[0]\n");
- our $clean = 0;
- our $cnt_error++;
-}
-sub WARN {
- report("WARNING: $_[0]\n");
- our $clean = 0;
- our $cnt_warn++;
-}
-sub CHK {
- if ($check) {
- report("CHECK: $_[0]\n");
- our $clean = 0;
- our $cnt_chk++;
- }
-}
-
-sub process {
- my $filename = shift;
-
- my $linenr=0;
- my $prevline="";
- my $prevrawline="";
- my $stashline="";
- my $stashrawline="";
-
- my $length;
- my $indent;
- my $previndent=0;
- my $stashindent=0;
-
- our $clean = 1;
- my $signoff = 0;
- my $is_patch = 0;
-
- our @report = ();
- our $cnt_lines = 0;
- our $cnt_error = 0;
- our $cnt_warn = 0;
- our $cnt_chk = 0;
-
- # Trace the real file/line as we go.
- my $realfile = '';
- my $realline = 0;
- my $realcnt = 0;
- my $here = '';
- my $in_comment = 0;
- my $comment_edge = 0;
- my $first_line = 0;
-
- my $prev_values = 'E';
-
- # suppression flags
- my $suppress_ifbraces = 0;
-
- # Pre-scan the patch sanitizing the lines.
- # Pre-scan the patch looking for any __setup documentation.
- #
- my @setup_docs = ();
- my $setup_docs = 0;
- my $line;
- foreach my $rawline (@rawlines) {
- # Standardise the strings and chars within the input to
- # simplify matching.
- $line = sanitise_line($rawline);
- push(@lines, $line);
-
- ##print "==>$rawline\n";
- ##print "-->$line\n";
-
- if ($line=~/^\+\+\+\s+(\S+)/) {
- $setup_docs = 0;
- if ($1 =~ m@Documentation/kernel-parameters.txt$@) {
- $setup_docs = 1;
- }
- next;
- }
-
- if ($setup_docs && $line =~ /^\+/) {
- push(@setup_docs, $line);
- }
- }
-
- $prefix = '';
-
- foreach my $line (@lines) {
- $linenr++;
-
- my $rawline = $rawlines[$linenr - 1];
-
-#extract the filename as it passes
- if ($line=~/^\+\+\+\s+(\S+)/) {
- $realfile=$1;
- $realfile =~ s@^[^/]*/@@;
- $in_comment = 0;
- next;
- }
-#extract the line range in the file after the patch is applied
- if ($line=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
- $is_patch = 1;
- $first_line = $linenr + 1;
- $in_comment = 0;
- $realline=$1-1;
- if (defined $2) {
- $realcnt=$3+1;
- } else {
- $realcnt=1+1;
- }
- annotate_reset();
- $prev_values = 'E';
-
- $suppress_ifbraces = $linenr - 1;
- next;
- }
-
-# track the line number as we move through the hunk, note that
-# new versions of GNU diff omit the leading space on completely
-# blank context lines so we need to count that too.
- if ($line =~ /^( |\+|$)/) {
- $realline++;
- $realcnt-- if ($realcnt != 0);
-
- # Guestimate if this is a continuing comment. Run
- # the context looking for a comment "edge". If this
- # edge is a close comment then we must be in a comment
- # at context start.
- if ($linenr == $first_line) {
- my $edge;
- for (my $ln = $first_line; $ln < ($linenr + $realcnt); $ln++) {
- ($edge) = ($rawlines[$ln - 1] =~ m@(/\*|\*/)@);
- last if (defined $edge);
- }
- if (defined $edge && $edge eq '*/') {
- $in_comment = 1;
- }
- }
-
- # Guestimate if this is a continuing comment. If this
- # is the start of a diff block and this line starts
- # ' *' then it is very likely a comment.
- if ($linenr == $first_line and $rawline =~ m@^.\s* \*(?:\s|$)@) {
- $in_comment = 1;
- }
-
- # Find the last comment edge on _this_ line.
- $comment_edge = 0;
- while (($rawline =~ m@(/\*|\*/)@g)) {
- if ($1 eq '/*') {
- $in_comment = 1;
- } else {
- $in_comment = 0;
- }
- $comment_edge = 1;
- }
-
- # Measure the line length and indent.
- ($length, $indent) = line_stats($rawline);
-
- # Track the previous line.
- ($prevline, $stashline) = ($stashline, $line);
- ($previndent, $stashindent) = ($stashindent, $indent);
- ($prevrawline, $stashrawline) = ($stashrawline, $rawline);
-
- #warn "ic<$in_comment> ce<$comment_edge> line<$line>\n";
-
- } elsif ($realcnt == 1) {
- $realcnt--;
- }
-
-#make up the handle for any error we report on this line
- $here = "#$linenr: " if (!$file);
- $here = "#$realline: " if ($file);
- $here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
-
- my $hereline = "$here\n$rawline\n";
- my $herecurr = "$here\n$rawline\n";
- my $hereprev = "$here\n$prevrawline\n$rawline\n";
-
- $prefix = "$filename:$realline: " if ($emacs && $file);
- $prefix = "$filename:$linenr: " if ($emacs && !$file);
- $cnt_lines++ if ($realcnt != 0);
-
-#check the patch for a signoff:
- if ($line =~ /^\s*signed-off-by:/i) {
- # This is a signoff, if ugly, so do not double report.
- $signoff++;
- if (!($line =~ /^\s*Signed-off-by:/)) {
- WARN("Signed-off-by: is the preferred form\n" .
- $herecurr);
- }
- if ($line =~ /^\s*signed-off-by:\S/i) {
- WARN("need space after Signed-off-by:\n" .
- $herecurr);
- }
- }
-
-# Check for wrappage within a valid hunk of the file
- if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
- ERROR("patch seems to be corrupt (line wrapped?)\n" .
- $herecurr) if (!$emitted_corrupt++);
- }
-
-# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
- if (($realfile =~ /^$/ || $line =~ /^\+/) &&
- !($rawline =~ m/^(
- [\x09\x0A\x0D\x20-\x7E] # ASCII
- | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte
- | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs
- | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte
- | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates
- | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3
- | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15
- | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16
- )*$/x )) {
- ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $herecurr);
- }
-
-#ignore lines being removed
- if ($line=~/^-/) {next;}
-
-# check we are in a valid source file if not then ignore this hunk
- next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/);
-
-#trailing whitespace
- if ($line =~ /^\+.*\015/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- ERROR("DOS line endings\n" . $herevet);
-
- } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- ERROR("trailing whitespace\n" . $herevet);
- }
-#80 column limit
- if ($line =~ /^\+/ && !($prevrawline=~/\/\*\*/) && $length > 80) {
- WARN("line over 80 characters\n" . $herecurr);
- }
-
-# check for adding lines without a newline.
- if ($line =~ /^\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\ No newline at end of file/) {
- WARN("adding a line without newline at end of file\n" . $herecurr);
- }
-
-# check we are in a valid source file *.[hc] if not then ignore this hunk
- next if ($realfile !~ /\.[hc]$/);
-
-# at the beginning of a line any tabs must come first and anything
-# more than 8 must use tabs.
- if ($rawline =~ /^\+\s* \t\s*\S/ ||
- $rawline =~ /^\+\s* \s*/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- ERROR("use tabs not spaces\n" . $herevet);
- }
-
-# check for RCS/CVS revision markers
- if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
- WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
- }
-
-# The rest of our checks refer specifically to C style
-# only apply those _outside_ comments. Only skip
-# lines in the middle of comments.
- next if (!$comment_edge && $in_comment);
-
-# Check for potential 'bare' types
- if ($realcnt) {
- my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
- $s =~ s/\n./ /g;
- $s =~ s/{.*$//;
-
- # Ignore goto labels.
- if ($s =~ /$Ident:\*$/) {
-
- # Ignore functions being called
- } elsif ($s =~ /^.\s*$Ident\s*\(/) {
-
- # definitions in global scope can only start with types
- } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) {
- possible($1, $s);
-
- # declarations always start with types
- } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) {
- possible($1, $s);
- }
-
- # any (foo ... *) is a pointer cast, and foo is a type
- while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) {
- possible($1, $s);
- }
-
- # Check for any sort of function declaration.
- # int foo(something bar, other baz);
- # void (*store_gdt)(x86_descr_ptr *);
- if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) {
- my ($name_len) = length($1);
-
- my $ctx = $s;
- substr($ctx, 0, $name_len + 1) = '';
- $ctx =~ s/\)[^\)]*$//;
-
- for my $arg (split(/\s*,\s*/, $ctx)) {
- if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/ || $arg =~ /^($Ident)$/) {
-
- possible($1, $s);
- }
- }
- }
-
- }
-
-#
-# Checks which may be anchored in the context.
-#
-
-# Check for switch () and associated case and default
-# statements should be at the same indent.
- if ($line=~/\bswitch\s*\(.*\)/) {
- my $err = '';
- my $sep = '';
- my @ctx = ctx_block_outer($linenr, $realcnt);
- shift(@ctx);
- for my $ctx (@ctx) {
- my ($clen, $cindent) = line_stats($ctx);
- if ($ctx =~ /^\+\s*(case\s+|default:)/ &&
- $indent != $cindent) {
- $err .= "$sep$ctx\n";
- $sep = '';
- } else {
- $sep = "[...]\n";
- }
- }
- if ($err ne '') {
- ERROR("switch and case should be at the same indent\n$hereline$err");
- }
- }
-
-# if/while/etc brace do not go on next line, unless defining a do while loop,
-# or if that brace on the next line is for something else
- if ($line =~ /\b(?:(if|while|for|switch)\s*\(|do\b|else\b)/ && $line !~ /^.#/) {
- my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
- my $ctx_ln = $linenr + $#ctx + 1;
- my $ctx_cnt = $realcnt - $#ctx - 1;
- my $ctx = join("\n", @ctx);
-
- # Skip over any removed lines in the context following statement.
- while ($ctx_cnt > 0 && $lines[$ctx_ln - 1] =~ /^-/) {
- $ctx_ln++;
- $ctx_cnt--;
- }
- ##warn "line<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>";
-
- if ($ctx !~ /{\s*/ && $ctx_cnt > 0 && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
- ERROR("That open brace { should be on the previous line\n" .
- "$here\n$ctx\n$lines[$ctx_ln - 1]");
- }
- if ($level == 0 && $ctx =~ /\)\s*\;\s*$/ && defined $lines[$ctx_ln - 1]) {
- my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
- if ($nindent > $indent) {
- WARN("Trailing semicolon indicates no statements, indent implies otherwise\n" .
- "$here\n$ctx\n$lines[$ctx_ln - 1]");
- }
- }
- }
-
- # Track the 'values' across context and added lines.
- my $opline = $line; $opline =~ s/^./ /;
- my $curr_values = annotate_values($opline . "\n", $prev_values);
- $curr_values = $prev_values . $curr_values;
- if ($dbg_values) {
- my $outline = $opline; $outline =~ s/\t/ /g;
- print "$linenr > .$outline\n";
- print "$linenr > $curr_values\n";
- }
- $prev_values = substr($curr_values, -1);
-
-#ignore lines not being added
- if ($line=~/^[^\+]/) {next;}
-
-# TEST: allow direct testing of the type matcher.
- if ($tst_type && $line =~ /^.$Declare$/) {
- ERROR("TEST: is type $Declare\n" . $herecurr);
- next;
- }
-
-# check for initialisation to aggregates open brace on the next line
- if ($prevline =~ /$Declare\s*$Ident\s*=\s*$/ &&
- $line =~ /^.\s*{/) {
- ERROR("That open brace { should be on the previous line\n" . $hereprev);
- }
-
-#
-# Checks which are anchored on the added line.
-#
-
-# check for malformed paths in #include statements (uses RAW line)
- if ($rawline =~ m{^.#\s*include\s+[<"](.*)[">]}) {
- my $path = $1;
- if ($path =~ m{//}) {
- ERROR("malformed #include filename\n" .
- $herecurr);
- }
- }
-
-# no C99 // comments
- if ($line =~ m{//}) {
- ERROR("do not use C99 // comments\n" . $herecurr);
- }
- # Remove C99 comments.
- $line =~ s@//.*@@;
- $opline =~ s@//.*@@;
-
-#EXPORT_SYMBOL should immediately follow its function closing }.
- if (($line =~ /EXPORT_SYMBOL.*\((.*)\)/) ||
- ($line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
- my $name = $1;
- if (($prevline !~ /^}/) &&
- ($prevline !~ /^\+}/) &&
- ($prevline !~ /^ }/) &&
- ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) &&
- ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) &&
- ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) {
- WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
- }
- }
-
-# check for external initialisers.
- if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL);/) {
- ERROR("do not initialise externals to 0 or NULL\n" .
- $herecurr);
- }
-# check for static initialisers.
- if ($line =~ /\s*static\s.*=\s*(0|NULL);/) {
- ERROR("do not initialise statics to 0 or NULL\n" .
- $herecurr);
- }
-
-# check for new typedefs, only function parameters and sparse annotations
-# make sense.
- if ($line =~ /\btypedef\s/ &&
- $line !~ /\btypedef\s+$Type\s+\(\s*\*?$Ident\s*\)\s*\(/ &&
- $line !~ /\b__bitwise(?:__|)\b/) {
- WARN("do not add new typedefs\n" . $herecurr);
- }
-
-# * goes on variable not on type
- if ($line =~ m{\($NonptrType(\*+)(?:\s+const)?\)}) {
- ERROR("\"(foo$1)\" should be \"(foo $1)\"\n" .
- $herecurr);
-
- } elsif ($line =~ m{\($NonptrType\s+(\*+)(?!\s+const)\s+\)}) {
- ERROR("\"(foo $1 )\" should be \"(foo $1)\"\n" .
- $herecurr);
-
- } elsif ($line =~ m{$NonptrType(\*+)(?:\s+(?:$Attribute|$Sparse))?\s+[A-Za-z\d_]+}) {
- ERROR("\"foo$1 bar\" should be \"foo $1bar\"\n" .
- $herecurr);
-
- } elsif ($line =~ m{$NonptrType\s+(\*+)(?!\s+(?:$Attribute|$Sparse))\s+[A-Za-z\d_]+}) {
- ERROR("\"foo $1 bar\" should be \"foo $1bar\"\n" .
- $herecurr);
- }
-
-# # no BUG() or BUG_ON()
-# if ($line =~ /\b(BUG|BUG_ON)\b/) {
-# print "Try to use WARN_ON & Recovery code rather than BUG() or BUG_ON()\n";
-# print "$herecurr";
-# $clean = 0;
-# }
-
- if ($line =~ /\bLINUX_VERSION_CODE\b/) {
- WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
- }
-
-# printk should use KERN_* levels. Note that follow-on printk's on the
-# same line do not need a level, so we use the current block context
-# to try and find and validate the current printk. In summary the current
-# printk includes all preceding printk's which have no newline on the end.
-# We assume the first bad printk is the one to report.
- if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
- my $ok = 0;
- for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
- #print "CHECK<$lines[$ln - 1]\n";
-			# we have a preceding printk if it ends
- # with "\n" ignore it, else it is to blame
- if ($lines[$ln - 1] =~ m{\bprintk\(}) {
- if ($rawlines[$ln - 1] !~ m{\\n"}) {
- $ok = 1;
- }
- last;
- }
- }
- if ($ok == 0) {
- WARN("printk() should include KERN_ facility level\n" . $herecurr);
- }
- }
-
-# function brace can't be on same line, except for #defines of do while,
-# or if closed on same line
- if (($line=~/$Type\s*[A-Za-z\d_]+\(.*\).*\s{/) and
- !($line=~/\#define.*do\s{/) and !($line=~/}/)) {
- ERROR("open brace '{' following function declarations go on the next line\n" . $herecurr);
- }
-
-# open braces for enum, union and struct go on the same line.
- if ($line =~ /^.\s*{/ &&
- $prevline =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?\s*$/) {
- ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
- }
-
-# check for spaces between functions and their parentheses.
- while ($line =~ /($Ident)\s+\(/g) {
- my $name = $1;
- my $ctx = substr($line, 0, $-[1]);
-
- # Ignore those directives where spaces _are_ permitted.
- if ($name =~ /^(?:if|for|while|switch|return|volatile|__volatile__|__attribute__|format|__extension__|Copyright|case|__asm__)$/) {
-
- # cpp #define statements have non-optional spaces, ie
- # if there is a space between the name and the open
- # parenthesis it is simply not a parameter group.
- } elsif ($ctx =~ /^.\#\s*define\s*$/) {
-
-			# If this whole thing ends with a type it's most
-			# likely a typedef for a function.
- } elsif ("$ctx$name" =~ /$Type$/) {
-
- } else {
- WARN("no space between function name and open parenthesis '('\n" . $herecurr);
- }
- }
-# Check operator spacing.
- if (!($line=~/\#\s*include/)) {
- my $ops = qr{
- <<=|>>=|<=|>=|==|!=|
- \+=|-=|\*=|\/=|%=|\^=|\|=|&=|
- =>|->|<<|>>|<|>|=|!|~|
- &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%
- }x;
- my @elements = split(/($ops|;)/, $opline);
- my $off = 0;
-
- my $blank = copy_spacing($opline);
-
- for (my $n = 0; $n < $#elements; $n += 2) {
- $off += length($elements[$n]);
-
- my $a = '';
- $a = 'V' if ($elements[$n] ne '');
- $a = 'W' if ($elements[$n] =~ /\s$/);
- $a = 'C' if ($elements[$n] =~ /$;$/);
- $a = 'B' if ($elements[$n] =~ /(\[|\()$/);
- $a = 'O' if ($elements[$n] eq '');
- $a = 'E' if ($elements[$n] eq '' && $n == 0);
-
- my $op = $elements[$n + 1];
-
- my $c = '';
- if (defined $elements[$n + 2]) {
- $c = 'V' if ($elements[$n + 2] ne '');
- $c = 'W' if ($elements[$n + 2] =~ /^\s/);
- $c = 'C' if ($elements[$n + 2] =~ /^$;/);
- $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
- $c = 'O' if ($elements[$n + 2] eq '');
- $c = 'E' if ($elements[$n + 2] =~ /\s*\\$/);
- } else {
- $c = 'E';
- }
-
-			# Pick up the preceding and succeeding characters.
- my $ca = substr($opline, 0, $off);
- my $cc = '';
- if (length($opline) >= ($off + length($elements[$n + 1]))) {
- $cc = substr($opline, $off + length($elements[$n + 1]));
- }
- my $cb = "$ca$;$cc";
-
- my $ctx = "${a}x${c}";
-
- my $at = "(ctx:$ctx)";
-
- my $ptr = substr($blank, 0, $off) . "^";
- my $hereptr = "$hereline$ptr\n";
-
- # Classify operators into binary, unary, or
- # definitions (* only) where they have more
- # than one mode.
- my $op_type = substr($curr_values, $off + 1, 1);
- my $op_left = substr($curr_values, $off, 1);
- my $is_unary;
- if ($op_type eq 'T') {
- $is_unary = 2;
- } elsif ($op_left eq 'V') {
- $is_unary = 0;
- } else {
- $is_unary = 1;
- }
- #if ($op eq '-' || $op eq '&' || $op eq '*') {
- # print "UNARY: <$op_left$op_type $is_unary $a:$op:$c> <$ca:$op:$cc> <$unary_ctx>\n";
- #}
-
- # Ignore operators passed as parameters.
- if ($op_type ne 'V' &&
- $ca =~ /\s$/ && $cc =~ /^\s*,/) {
-
-# # Ignore comments
-# } elsif ($op =~ /^$;+$/) {
-
- # ; should have either the end of line or a space or \ after it
- } elsif ($op eq ';') {
- if ($ctx !~ /.x[WEBC]/ &&
- $cc !~ /^\\/ && $cc !~ /^;/) {
- ERROR("need space after that '$op' $at\n" . $hereptr);
- }
-
- # // is a comment
- } elsif ($op eq '//') {
-
- # -> should have no spaces
- } elsif ($op eq '->') {
- if ($ctx =~ /Wx.|.xW/) {
- ERROR("no spaces around that '$op' $at\n" . $hereptr);
- }
-
- # , must have a space on the right.
- } elsif ($op eq ',') {
- if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
- ERROR("need space after that '$op' $at\n" . $hereptr);
- }
-
- # '*' as part of a type definition -- reported already.
- } elsif ($op eq '*' && $is_unary == 2) {
- #warn "'*' is part of type\n";
-
- # unary operators should have a space before and
- # none after. May be left adjacent to another
- # unary operator, or a cast
- } elsif ($op eq '!' || $op eq '~' ||
- ($is_unary && ($op eq '*' || $op eq '-' || $op eq '&'))) {
- if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
- ERROR("need space before that '$op' $at\n" . $hereptr);
- }
- if ($ctx =~ /.xW/) {
- ERROR("no space after that '$op' $at\n" . $hereptr);
- }
-
- # unary ++ and unary -- are allowed no space on one side.
- } elsif ($op eq '++' or $op eq '--') {
- if ($ctx !~ /[WOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
- ERROR("need space one side of that '$op' $at\n" . $hereptr);
- }
- if ($ctx =~ /WxB/ || ($ctx =~ /Wx./ && $cc =~ /^;/)) {
- ERROR("no space before that '$op' $at\n" . $hereptr);
- }
-
- # << and >> may either have or not have spaces both sides
- } elsif ($op eq '<<' or $op eq '>>' or
- $op eq '&' or $op eq '^' or $op eq '|' or
- $op eq '+' or $op eq '-' or
- $op eq '*' or $op eq '/' or
- $op eq '%')
- {
- if ($ctx !~ /VxV|WxW|VxE|WxE|VxO|Cx.|.xC/) {
- ERROR("need consistent spacing around '$op' $at\n" .
- $hereptr);
- }
-
- # All the others need spaces both sides.
- } elsif ($ctx !~ /[EWC]x[CWE]/) {
- # Ignore email addresses <foo@bar>
- if (!($op eq '<' && $cb =~ /$;\S+\@\S+>/) &&
- !($op eq '>' && $cb =~ /<\S+\@\S+$;/)) {
- ERROR("need spaces around that '$op' $at\n" . $hereptr);
- }
- }
- $off += length($elements[$n + 1]);
- }
- }
-
-# check for multiple assignments
- if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
- CHK("multiple assignments should be avoided\n" . $herecurr);
- }
-
-## # check for multiple declarations, allowing for a function declaration
-## # continuation.
-## if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
-## $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
-##
-## # Remove any bracketed sections to ensure we do not
-##		# falsely report the parameters of functions.
-## my $ln = $line;
-## while ($ln =~ s/\([^\(\)]*\)//g) {
-## }
-## if ($ln =~ /,/) {
-## WARN("declaring multiple variables together should be avoided\n" . $herecurr);
-## }
-## }
-
-#need space before brace following if, while, etc
- if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) ||
- $line =~ /do{/) {
- ERROR("need a space before the open brace '{'\n" . $herecurr);
- }
-
-# closing brace should have a space following it when it has anything
-# on the line
- if ($line =~ /}(?!(?:,|;|\)))\S/) {
- ERROR("need a space after that close brace '}'\n" . $herecurr);
- }
-
-# check spacing on square brackets
- if ($line =~ /\[\s/ && $line !~ /\[\s*$/) {
- ERROR("no space after that open square bracket '['\n" . $herecurr);
- }
- if ($line =~ /\s\]/) {
- ERROR("no space before that close square bracket ']'\n" . $herecurr);
- }
-
-# check spacing on parentheses
- if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
- $line !~ /for\s*\(\s+;/) {
- ERROR("no space after that open parenthesis '('\n" . $herecurr);
- }
- if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ &&
- $line !~ /for\s*\(.*;\s+\)/) {
- ERROR("no space before that close parenthesis ')'\n" . $herecurr);
- }
-
-#goto labels aren't indented, allow a single space however
- if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and
- !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) {
- WARN("labels should not be indented\n" . $herecurr);
- }
-
-# Need a space before open parenthesis after if, while etc
- if ($line=~/\b(if|while|for|switch)\(/) {
- ERROR("need a space before the open parenthesis '('\n" . $herecurr);
- }
-
-# Check for illegal assignment in if conditional.
- if ($line =~ /\bif\s*\(/) {
- my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
-
- if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) {
- ERROR("do not use assignment in if condition\n" . $herecurr);
- }
-
- # Find out what is on the end of the line after the
- # conditional.
- substr($s, 0, length($c)) = '';
- $s =~ s/\n.*//g;
- $s =~ s/$;//g; # Remove any comments
- if (length($c) && $s !~ /^\s*({|;|)\s*\\*\s*$/) {
- ERROR("trailing statements should be on next line\n" . $herecurr);
- }
- }
-
-# Check for bitwise tests written as boolean
- if ($line =~ /
- (?:
- (?:\[|\(|\&\&|\|\|)
- \s*0[xX][0-9]+\s*
- (?:\&\&|\|\|)
- |
- (?:\&\&|\|\|)
- \s*0[xX][0-9]+\s*
- (?:\&\&|\|\||\)|\])
- )/x)
- {
- WARN("boolean test with hexadecimal, perhaps just 1 \& or \|?\n" . $herecurr);
- }
-
-# if and else should not have general statements after it
- if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
- my $s = $1;
- $s =~ s/$;//g; # Remove any comments
- if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
- ERROR("trailing statements should be on next line\n" . $herecurr);
- }
- }
-
- # Check for }<nl>else {, these must be at the same
- # indent level to be relevant to each other.
- if ($prevline=~/}\s*$/ and $line=~/^.\s*else\s*/ and
- $previndent == $indent) {
- ERROR("else should follow close brace '}'\n" . $hereprev);
- }
-
- if ($prevline=~/}\s*$/ and $line=~/^.\s*while\s*/ and
- $previndent == $indent) {
- my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
-
- # Find out what is on the end of the line after the
- # conditional.
- substr($s, 0, length($c)) = '';
- $s =~ s/\n.*//g;
-
- if ($s =~ /^\s*;/) {
- ERROR("while should follow close brace '}'\n" . $hereprev);
- }
- }
-
-#studly caps, commented out until we figure out how to distinguish between use of existing and adding new
-# if (($line=~/[\w_][a-z\d]+[A-Z]/) and !($line=~/print/)) {
-# print "No studly caps, use _\n";
-# print "$herecurr";
-# $clean = 0;
-# }
-
-#no spaces allowed after \ in define
- if ($line=~/\#define.*\\\s$/) {
-		WARN("Whitespace after \\ makes next lines useless\n" . $herecurr);
- }
-
-#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
- if ($tree && $rawline =~ m{^.\#\s*include\s*\<asm\/(.*)\.h\>}) {
- my $checkfile = "$root/include/linux/$1.h";
- if (-f $checkfile && $1 ne 'irq.h') {
- CHK("Use #include <linux/$1.h> instead of <asm/$1.h>\n" .
- $herecurr);
- }
- }
-
-# multi-statement macros should be enclosed in a do while loop, grab the
-# first statement and ensure it's the whole macro if it's not enclosed
-# in a known good container
- if ($prevline =~ /\#define.*\\/ &&
- $prevline !~/(?:do\s+{|\(\{|\{)/ &&
- $line !~ /(?:do\s+{|\(\{|\{)/ &&
- $line !~ /^.\s*$Declare\s/) {
-		# Grab the first statement; if that is the entire macro
-		# it's ok. This may start either on the #define line
- # or the one below.
- my $ln = $linenr;
- my $cnt = $realcnt;
- my $off = 0;
-
- # If the macro starts on the define line start
- # grabbing the statement after the identifier
- $prevline =~ m{^(.#\s*define\s*$Ident(?:\([^\)]*\))?\s*)(.*)\\\s*$};
- ##print "1<$1> 2<$2>\n";
- if (defined $2 && $2 ne '') {
- $off = length($1);
- $ln--;
- $cnt++;
- while ($lines[$ln - 1] =~ /^-/) {
- $ln--;
- $cnt++;
- }
- }
- my @ctx = ctx_statement($ln, $cnt, $off);
- my $ctx_ln = $ln + $#ctx + 1;
- my $ctx = join("\n", @ctx);
-
- # Pull in any empty extension lines.
- while ($ctx =~ /\\$/ &&
- $lines[$ctx_ln - 1] =~ /^.\s*(?:\\)?$/) {
- $ctx .= $lines[$ctx_ln - 1];
- $ctx_ln++;
- }
-
- if ($ctx =~ /\\$/) {
- if ($ctx =~ /;/) {
- ERROR("Macros with multiple statements should be enclosed in a do - while loop\n" . "$here\n$ctx\n");
- } else {
- ERROR("Macros with complex values should be enclosed in parenthesis\n" . "$here\n$ctx\n");
- }
- }
- }
-
-# check for redundant bracing round if etc
- if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
- my ($level, $endln, @chunks) =
- ctx_statement_full($linenr, $realcnt, 1);
- #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
- #print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n";
- if ($#chunks > 0 && $level == 0) {
- my $allowed = 0;
- my $seen = 0;
-			my $herectx = $here . "\n";
- my $ln = $linenr - 1;
- for my $chunk (@chunks) {
- my ($cond, $block) = @{$chunk};
-
- $herectx .= "$rawlines[$ln]\n[...]\n";
- $ln += statement_rawlines($block) - 1;
-
- substr($block, 0, length($cond)) = '';
-
- $seen++ if ($block =~ /^\s*{/);
-
- #print "cond<$cond> block<$block> allowed<$allowed>\n";
- if (statement_lines($cond) > 1) {
- #print "APW: ALLOWED: cond<$cond>\n";
- $allowed = 1;
- }
- if ($block =~/\b(?:if|for|while)\b/) {
- #print "APW: ALLOWED: block<$block>\n";
- $allowed = 1;
- }
- if (statement_block_size($block) > 1) {
- #print "APW: ALLOWED: lines block<$block>\n";
- $allowed = 1;
- }
- }
- if ($seen && !$allowed) {
- WARN("braces {} are not necessary for any arm of this statement\n" . $herectx);
- }
- # Either way we have looked over this whole
- # statement and said what needs to be said.
- $suppress_ifbraces = $endln;
- }
- }
- if ($linenr > $suppress_ifbraces &&
- $line =~ /\b(if|while|for|else)\b/) {
- my ($level, $endln, @chunks) =
- ctx_statement_full($linenr, $realcnt, $-[0]);
-
- my $allowed = 0;
-
- # Check the pre-context.
- if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
- #print "APW: ALLOWED: pre<$1>\n";
- $allowed = 1;
- }
- # Check the condition.
- my ($cond, $block) = @{$chunks[0]};
- if (defined $cond) {
- substr($block, 0, length($cond)) = '';
- }
- if (statement_lines($cond) > 1) {
- #print "APW: ALLOWED: cond<$cond>\n";
- $allowed = 1;
- }
- if ($block =~/\b(?:if|for|while)\b/) {
- #print "APW: ALLOWED: block<$block>\n";
- $allowed = 1;
- }
- if (statement_block_size($block) > 1) {
- #print "APW: ALLOWED: lines block<$block>\n";
- $allowed = 1;
- }
- # Check the post-context.
- if (defined $chunks[1]) {
- my ($cond, $block) = @{$chunks[1]};
- if (defined $cond) {
- substr($block, 0, length($cond)) = '';
- }
- if ($block =~ /^\s*\{/) {
- #print "APW: ALLOWED: chunk-1 block<$block>\n";
- $allowed = 1;
- }
- }
- if ($level == 0 && $block =~ /^\s*\{/ && !$allowed) {
-			my $herectx = $here . "\n";
- my $end = $linenr + statement_rawlines($block) - 1;
-
- for (my $ln = $linenr - 1; $ln < $end; $ln++) {
-				$herectx .= $rawlines[$ln] . "\n";
- }
-
- WARN("braces {} are not necessary for single statement blocks\n" . $herectx);
- }
- }
-
-# don't include deprecated include files (uses RAW line)
- for my $inc (@dep_includes) {
- if ($rawline =~ m@\#\s*include\s*\<$inc>@) {
- ERROR("Don't use <$inc>: see Documentation/feature-removal-schedule.txt\n" . $herecurr);
- }
- }
-
-# don't use deprecated functions
- for my $func (@dep_functions) {
- if ($line =~ /\b$func\b/) {
- ERROR("Don't use $func(): see Documentation/feature-removal-schedule.txt\n" . $herecurr);
- }
- }
-
-# no volatiles please
- my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
- if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
- WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
- }
-
-# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
- if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
- ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
- }
-
-# warn about #if 0
- if ($line =~ /^.#\s*if\s+0\b/) {
- CHK("if this code is redundant consider removing it\n" .
- $herecurr);
- }
-
-# check for needless kfree() checks
- if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
- my $expr = $1;
- if ($line =~ /\bkfree\(\Q$expr\E\);/) {
-			WARN("kfree(NULL) is safe, this check is probably not required\n" . $hereprev);
- }
- }
-
-# warn about #ifdefs in C files
-# if ($line =~ /^.#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
-# print "#ifdef in C files should be avoided\n";
-# print "$herecurr";
-# $clean = 0;
-# }
-
-# warn about spacing in #ifdefs
- if ($line =~ /^.#\s*(ifdef|ifndef|elif)\s\s+/) {
- ERROR("exactly one space required after that #$1\n" . $herecurr);
- }
-
-# check for spinlock_t definitions without a comment.
- if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/) {
- my $which = $1;
- if (!ctx_has_comment($first_line, $linenr)) {
- CHK("$1 definition without comment\n" . $herecurr);
- }
- }
-# check for memory barriers without a comment.
- if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
- if (!ctx_has_comment($first_line, $linenr)) {
- CHK("memory barrier without comment\n" . $herecurr);
- }
- }
-# check for hardware specific defines
- if ($line =~ m@^.#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
- CHK("architecture specific defines should be avoided\n" . $herecurr);
- }
-
-# check the location of the inline attribute, that it is between
-# storage class and type.
- if ($line =~ /\b$Type\s+$Inline\b/ ||
- $line =~ /\b$Inline\s+$Storage\b/) {
- ERROR("inline keyword should sit between storage class and type\n" . $herecurr);
- }
-
-# Check for __inline__ and __inline, prefer inline
- if ($line =~ /\b(__inline__|__inline)\b/) {
- WARN("plain inline is preferred over $1\n" . $herecurr);
- }
-
-# check for new externs in .c files.
- if ($line =~ /^.\s*extern\s/ && ($realfile =~ /\.c$/)) {
- WARN("externs should be avoided in .c files\n" . $herecurr);
- }
-
-# checks for new __setup's
- if ($rawline =~ /\b__setup\("([^"]*)"/) {
- my $name = $1;
-
- if (!grep(/$name/, @setup_docs)) {
- CHK("__setup appears un-documented -- check Documentation/kernel-parameters.txt\n" . $herecurr);
- }
- }
-
-# check for pointless casting of kmalloc return
- if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
- WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
- }
-
-# check for gcc specific __FUNCTION__
- if ($line =~ /__FUNCTION__/) {
- WARN("__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr);
- }
- }
-
- # If we have no input at all, then there is nothing to report on
- # so just keep quiet.
- if ($#rawlines == -1) {
- exit(0);
- }
-
- # In mailback mode only produce a report in the negative, for
- # things that appear to be patches.
- if ($mailback && ($clean == 1 || !$is_patch)) {
- exit(0);
- }
-
-	# This is not a patch, and we are in 'no-patch' mode so
- # just keep quiet.
- if (!$chk_patch && !$is_patch) {
- exit(0);
- }
-
- if (!$is_patch) {
- ERROR("Does not appear to be a unified-diff format patch\n");
- }
- if ($is_patch && $chk_signoff && $signoff == 0) {
- ERROR("Missing Signed-off-by: line(s)\n");
- }
-
- print report_dump();
- if ($summary && !($clean == 1 && $quiet == 1)) {
- print "$filename " if ($summary_file);
- print "total: $cnt_error errors, $cnt_warn warnings, " .
- (($check)? "$cnt_chk checks, " : "") .
- "$cnt_lines lines checked\n";
- print "\n" if ($quiet == 0);
- }
-
- if ($clean == 1 && $quiet == 0) {
-		print "$vname has no obvious style problems and is ready for submission.\n";
- }
- if ($clean == 0 && $quiet == 0) {
- print "$vname has style problems, please review. If any of these errors\n";
- print "are false positives report them to the maintainer, see\n";
- print "CHECKPATCH in MAINTAINERS.\n";
- }
-
- return $clean;
-}
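
To make the checks in the hunk above concrete, here is a small
hypothetical C fragment (all names invented) annotated with the
complaints this version of the script would raise against it:

	struct widget { unsigned long flags; };
	#define WIDGET_LAZY	0x1
	int widget_probe(struct widget *w);

	static int widget_init(struct widget *w)
	{
		int ret = 0;	// "do not use C99 // comments"
		/* next line: "do not use assignment in if condition" */
		if ((ret = widget_probe(w)) != 0)
			return ret;
		/* next line: "need a space before the open parenthesis '('" */
		if(w->flags & WIDGET_LAZY)
			return 0;
		return ret;
	}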

2008-03-23 09:12:54

by Alexey Dobriyan

[permalink] [raw]
Subject: Re: [PATCH 120/148] include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only

On Sun, Mar 23, 2008 at 01:03:35AM -0700, Joe Perches wrote:
> --- a/include/asm-x86/suspend_32.h
> +++ b/include/asm-x86/suspend_32.h
> @@ -32,11 +32,11 @@ extern unsigned long saved_edi;
> static inline void acpi_save_register_state(unsigned long return_point)
> {
> saved_eip = return_point;
> - asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
> - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
> - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
> - asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
> - asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
> + asm volatile("movl %%esp,%0" : "=m" (saved_esp));
> + asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
> + asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
> + asm volatile("movl %%edi,%0" : "=m" (saved_edi));
> + asm volatile("movl %%esi,%0" : "=m" (saved_esi));

Since when "volatile" became a function?
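
(For the record: the space is purely cosmetic, even though dropping it
makes "volatile" read like a function name. In GNU C, "volatile" here
qualifies the asm statement rather than naming a function, so both
spellings are the same token sequence and generate identical code. A
minimal sketch, assuming a 32-bit target and a hypothetical variable:)

	static unsigned long my_saved_esp;

	static inline void save_esp_both_ways(void)
	{
		/* with the space before the parenthesis ... */
		asm volatile ("movl %%esp,%0" : "=m" (my_saved_esp));
		/* ... and without it: same statement, same code */
		asm volatile("movl %%esp,%0" : "=m" (my_saved_esp));
	}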

2008-03-23 09:13:50

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> Cleanup to standardize formatting of .h files

thanks Joe, this is a good first step. I'll keep the 148 patches
separated until it's in x86/testing, to ease bisection in the case of
any (unlikely) problems - but after that we'll probably combine them
into one single commit to not pollute the changelogs too much. In the
future you might want to put them into a git tree and ask me to pull
them, to not pollute lkml with 148 patches. (but, this is the least of
our problems really)

Ingo

2008-03-23 10:13:46

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 001/148] include/asm-x86/acpi.h: checkpatch cleanups - formatting only


Please do not post 148 patches all at once.

I and others have warned you specifically about this
several times in the past.

Please learn this or else we will need to do something
more forceful to keep you from abusing the list server
like this.

Send your patches in small chunks at a time and wait
for the review and other feedback, and then after a day
or two post another chunk.

Please let me know which part of this you don't understand.

2008-03-23 10:20:18

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Al Viro <[email protected]>
Date: Sun, 23 Mar 2008 08:52:10 +0000

> On Sun, Mar 23, 2008 at 01:03:24AM -0700, Joe Perches wrote:
> >
> > Signed-off-by: Joe Perches <[email protected]>
>
> An obvious corollary to this highly inspired series of patches:

Acked-by: David S. Miller <[email protected]>

I totally agree, checkpatch is just shit.

2008-03-23 11:16:57

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 120/148] include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only

On Sun, 2008-03-23 at 11:57 +0300, Alexey Dobriyan wrote:
> On Sun, Mar 23, 2008 at 01:03:35AM -0700, Joe Perches wrote:
> > --- a/include/asm-x86/suspend_32.h
> > +++ b/include/asm-x86/suspend_32.h
> > @@ -32,11 +32,11 @@ extern unsigned long saved_edi;
> > static inline void acpi_save_register_state(unsigned long return_point)
> > {
> > saved_eip = return_point;
> > - asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
> > - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
> > - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
> > - asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
> > - asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
> > + asm volatile("movl %%esp,%0" : "=m" (saved_esp));
> > + asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
> > + asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
> > + asm volatile("movl %%edi,%0" : "=m" (saved_edi));
> > + asm volatile("movl %%esi,%0" : "=m" (saved_esi));
>
> Since when did "volatile" become a function?

Agreed, this is crack.

Joe, please find something useful to contribute. Having a few cleanup
patches at the beginning of a useful series isn't an issue, but a
series like the one posted here is utterly annoying, and by the above
just plain stupid.

checkpatch.pl is a guide, not a replacement for common sense.

2008-03-23 12:06:20

by Jiri Slaby

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On 03/23/2008 11:20 AM, David Miller wrote:
> From: Al Viro <[email protected]>
> Date: Sun, 23 Mar 2008 08:52:10 +0000
>
>> On Sun, Mar 23, 2008 at 01:03:24AM -0700, Joe Perches wrote:
>>> Signed-off-by: Joe Perches <[email protected]>
>> An obvious corollary to this highly inspired series of patches:
>
> Acked-by: David S. Miller <[email protected]>
>
> I totally agree, checkpatch is just shit.

I disagree. It's just misuse in this case (like using Lindent on the whole tree).

2008-03-23 12:19:27

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Jiri Slaby <[email protected]>
Date: Sun, 23 Mar 2008 13:06:04 +0100

> On 03/23/2008 11:20 AM, David Miller wrote:
> > From: Al Viro <[email protected]>
> > Date: Sun, 23 Mar 2008 08:52:10 +0000
> >
> >> On Sun, Mar 23, 2008 at 01:03:24AM -0700, Joe Perches wrote:
> >>> Signed-off-by: Joe Perches <[email protected]>
> >> An obvious corollary to this highly inspired series of patches:
> >
> > Acked-by: David S. Miller <[email protected]>
> >
> > I totally agree, checkpatch is just shit.
>
> I disagree. It's just misuse in this case (like using Lindent on the whole tree).

Unlike sparse, this thing encourages the kind of behavior seen here.

And even worse, it becomes monkey see, monkey do.

There are mountains of more useful stuff to be working on (much of it
automated, but unlike checkpatch work doesn't result in crap) rather
than 148 patches of checkpatch vomit.

Fixing sparse warnings properly fixes real issues, whereas fixing
checkpatch stuff creates garbage 9 times out of 10.

2008-03-23 12:24:36

by Jiri Slaby

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On 03/23/2008 01:19 PM, David Miller wrote:
> There are mountains of more useful stuff to be working on (much of it
> automated, but unlike checkpatch work doesn't result in crap) rather
> than 148 patches of checkpatch vomit.
>
> Fixing sparse warnings properly fixes real issues, whereas fixing
> checkpatch stuff creates garbage 9 times out of 10.

Yes, I agree with you in this.

What I don't agree with is that it's useless. It may help track down some issues
in a freshly prepared patch (it's checkpatch, not checkcode, and it should be used
in that manner).

2008-03-23 12:30:36

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Jiri Slaby <[email protected]>
Date: Sun, 23 Mar 2008 13:24:23 +0100

> On 03/23/2008 01:19 PM, David Miller wrote:
> > There are mountains of more useful stuff to be working on (much of it
> > automated, but unlike checkpatch work doesn't result in crap) rather
> > than 148 patches of checkpatch vomit.
> >
> > Fixing sparse warnings properly fixes real issues, whereas fixing
> > checkpatch stuff creates garbage 9 times out of 10.
>
> Yes, I agree with you in this.
>
> What I don't agree with is that it's useless. It may help track down some issues
> in a freshly prepared patch (it's checkpatch, not checkcode, and it should be used
> in that manner).

I strongly disagree still.

Half the warnings I get when I have run checkpatch on things I've
written were crap.

It's an automaton, and that's why people like it. It tells you
exactly what to do, and people like to be able to turn their brains
off like that.

2008-03-23 12:49:16

by Jiri Slaby

[permalink] [raw]
Subject: checkpatch [was: include/asm-x86/serial.h: checkpatch cleanups - formatting only]

On 03/23/2008 01:30 PM, David Miller wrote:
> From: Jiri Slaby <[email protected]>
> Date: Sun, 23 Mar 2008 13:24:23 +0100
>
>> On 03/23/2008 01:19 PM, David Miller wrote:
>>> There are mountains of more useful stuff to be working on (much of it
>>> automated, but unlike checkpatch work doesn't result in crap) rather
>>> than 148 patches of checkpatch vomit.
>>>
>>> Fixing sparse warnings properly fixes real issues, whereas fixing
>>> checkpatch stuff creates garbage 9 times out of 10.
>> Yes, I agree with you in this.
>>
>> What I don't agree with is that it's useless. It may help track down some issues
>> in yet prepared patch (it's checkpatch, not checkcode and it should be used in
>> that manner).
>
> I strongly disagree still.
>
> Half the warnings I get when I have run checkpatch on things I've
> written were crap.

Could you be more concrete here? I often get only "more than 80 columns used
blah blah" in the header files, and this sucks, yes. Removing this check was
discussed on the list some time ago; it seems the result was to let it be.

> It's an automaton, and that's why people like it. It tells you
> exactly what to do, and people like to be able to turn their brains
> off like that.

It just spits out warnings/errors like a compiler or some static analyzer; maybe
I'm terribly missing something, but what exactly do you mind about the output?

2008-03-23 12:58:20

by Alan

[permalink] [raw]
Subject: Re: [PATCH 126/148] include/asm-x86/termios.h: checkpatch cleanups - formatting only

On Sun, 23 Mar 2008 01:03:41 -0700
Joe Perches <[email protected]> wrote:

>
> Signed-off-by: Joe Perches <[email protected]>

NAK - all serial/tty changes in this set.

Please leave the tty code alone for now (all of it). You are getting in
the way of far more important work, and the changes you make add no real
value, so they should be done when nothing else is happening.

2008-03-23 17:36:49

by Andi Kleen

[permalink] [raw]
Subject: Re: checkpatch [was: include/asm-x86/serial.h: checkpatch cleanups - formatting only]

Jiri Slaby <[email protected]> writes:
>
> It just spits out warnings/errors like a compiler or some static
> analyzer; maybe I'm terribly missing something, but what exactly do you
> mind about the output?

A good example is the trailing whitespace warnings in there. They
are just useless and a waste of time. That is something that really
should just be done automatically by maintainer scripts somewhere on
merge (and a lot of maintainers do it automatically), rather than
wasting the valuable time of a human.

There are also a couple of other pointless warnings. e.g. I dislike
some of the style warnings -- they are far too broad. Or there
can be good reasons to violate them occasionally. e.g. I would
occasionally rather not put a space around an operator than
split an expression to avoid going over 80 characters.
Or the EXPORT_SYMBOL warning. WTF is that good for? Or
the warning about if () ... else { ... }.
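
(For reference, the pattern that EXPORT_SYMBOL check -- visible in the
hunk quoted earlier in the thread -- tries to enforce is simply that the
export sits directly under the thing it exports; hypothetical function,
kernel context assumed:)

	int foo_count_widgets(void)
	{
		return 42;
	}
	EXPORT_SYMBOL(foo_count_widgets);	/* right after the closing brace */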

Also checkpatch.pl --file seems to be a good candidate for one
of the worst ideas ever merged (to be fair, it was difficult to see
in advance that it would become that bad -- but it did)

On the other hand a few of the pattern matches are useful.

To fix the worst problems of checkpatch.pl one could probably
do something like this:
- Get rid of --file
- Drop at least 50% of the style warnings (or make them
not trigger unless a special option is given) and review all
the others.
- Concentrate on the really useful things like checking
for deprecated functions and pattern matching for a few
common mistakes.

-Andi

2008-03-24 08:09:19

by Jiri Slaby

[permalink] [raw]
Subject: Re: checkpatch [was: include/asm-x86/serial.h: checkpatch cleanups - formatting only]

On 03/23/2008 06:36 PM, Andi Kleen wrote:
> Jiri Slaby <[email protected]> writes:
>> It just spits out warnings/errors like a compiler or some static
>> analyzer; maybe I'm terribly missing something, but what exactly do you
>> mind about the output?
>
> A good example are the trailing white space warnings in there. They
> are just useless and a waste of time. That is something that really
> should just be done automatically by maintainer scripts somewhere on
> merge (and a lot of maintainers do it automatically), not waste the
> valuable time of a human.

Agreed.

> There are also a couple of other pointless warnings. e.g. I dislike
> some of the style warnings -- they are far too broad. Or there
> can be good reasons to violate them occasionally. e.g. I would
> occasionally rather not put a space around an operator than
> split an expression to avoid going over 80 characters.

Me too; there will always be certain circumstances where the rules just don't
apply and you can't teach the scripts about them (turning off doubtful warnings
sounds reasonable anyway).

> Or the EXPORT_SYMBOL warning. WTF is that good for? Or
> the warning about if () ... else { ... }.

I understand your points, but most of it is a strict implementation of CodingStyle.
People who don't prefer the coding style described in that document won't like
checkpatch either.

> Also checkpatch.pl --file seems to be a good candidate for one
> of the worst ideas ever merged (to be fair, it was difficult to see
> in advance that it would become that bad -- but it did)

Definitely agree.

2008-03-24 15:13:48

by Jörn Engel

[permalink] [raw]
Subject: Re: checkpatch [was: include/asm-x86/serial.h: checkpatch cleanups - formatting only]

On Mon, 24 March 2008 09:09:00 +0100, Jiri Slaby wrote:
>
> I understand your points, but most of it is a strict implementation of
> CodingStyle. People who don't prefer the coding style described in that
> document won't like checkpatch either.

I used to like CodingStyle. Recently I've started to have doubts.
Basically, the file used to contain roughly this paraphrase:
"I know coding style is highly personly, but when possible I'd prefer to
receive patches in this style..."

Over time the particular style has been refined more and more and the
tone of the document became a little more like:
"And the Lord sayeth, thou shalt not diverge from His style."

And that is just going too far. Keeping the style roughly similar is
useful. Several of the rules in CodingStyle are about writing code in a
way that avoids bugs. Clearly useful. But if you go too far you end up
in religious zealotry.

Finding the right balance between following common rules and bending
them where appropriate is important - here as much as anywhere.

Jörn

--
The grand essentials of happiness are: something to do, something to
love, and something to hope for.
-- Allan K. Chalmers

2008-03-24 16:28:59

by Will Newton

[permalink] [raw]
Subject: Re: checkpatch [was: include/asm-x86/serial.h: checkpatch cleanups - formatting only]

On 23 Mar 2008 18:36:21 +0100, Andi Kleen <[email protected]> wrote:

> Also checkpatch.pl --file seems to be a good candidate for one
> of the worst ideas ever merged (to be fair, it was difficult to see
> in advance that it would become that bad -- but it did)

I have found checkpatch --file useful when taking large amounts of
out-of-tree code (a kernel arch subdir and a number of drivers) and
trying to get it into better shape for inclusion. In that situation it's
quite a good way of seeing how far from that goal I am. Running it on
existing in-tree code is more problematic.

2008-03-25 08:45:40

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> I strongly disagree still.
>
> Half the warnings I get when I have run checkpatch on things I've
> written were crap.

could you please give me a file name as an example that i could
double-check myself? Thanks,

Ingo

2008-03-25 08:52:03

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 001/148] include/asm-x86/acpi.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> Please do not post 148 patches all at once.

seconded - i asked Joe to offer such cleanups in a git tree. Given that
most of the changes in the patches are along the same pattern, posting
the script would have made more sense as well.

while we do have a growing code quality problem, spamming our lists with
trivialities is certainly not the answer.

Ingo

2008-03-25 09:00:52

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> Ingo's code-quality script totals for include/asm-x86
>
> Errors LOC
> Before: 1457 31320
> After: 252 31729
>
> Changes:

btw., i'd prefer to see more structural cleanups as well. For example,
to convert macros that generate code (i.e. just about everything except
constants) to inlines - say, in include/asm-x86/processor.h, to
convert get_debugreg/set_debugreg or task_pt_regs to inline functions.
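
A sketch of the kind of conversion meant here (illustrative only, not
the literal get_debugreg/set_debugreg definitions; 32-bit flavour, with
an invented name for the macro variant):

	/* before: a code-generating macro -- no type checking at all */
	#define SET_DR7(value) \
		asm volatile("movl %0,%%db7" : : "r" (value))

	/* after: an inline function -- same generated code, but the
	 * argument now has a real C type and is evaluated exactly once */
	static inline void set_dr7(unsigned long value)
	{
		asm volatile("movl %0,%%db7" : : "r" (value));
	}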

Ingo

2008-03-25 09:42:18

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Ingo Molnar <[email protected]>
Date: Tue, 25 Mar 2008 09:44:57 +0100

> * David Miller <[email protected]> wrote:
>
> > I strongly disagree still.
> >
> > Half the warnings I get when I have run checkpatch on things I've
> > written were crap.
>
> could you please give me a file name as an example that i could
> double-check myself? Thanks,

I can't because I pacified it to cut down the review noise
for the patch in question last time it happened.

I can tell you one more example of something it does that I strongly
disagree with: telling me how to document spinlocks in data
structures.

It wants a comment right above the spinlock_t member, but this
totally ignores that perhaps I put a huge comment explaining
the locking semantics elsewhere.

It's a black-and-white tool in a grey world; it just sucks. And I'd
be fine with that if people used it as a guide, but people, especially
kernel newbies, treat it as gospel and a way to contribute "useful"
patches. They aren't useful, they're crap. Fix a bug instead of this
automaton whitespace noise.

2008-03-25 10:49:35

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> > I disagree. It's just misuse in this case (like using Lindent on
> > the whole tree).
>
> Unlike sparse, this thing encourages the kind of behavior seen here.
>
> And even worse it becomes monkey see monkey do.
>
> There are mountains of more useful stuff to be working on (much of it
> automated, but unlike checkpatch work doesn't result in crap) rather
> than 148 patches of checkpatch vomit.

Joe should not have spammed lkml like this - but let me take up the
general issue of checkpatch that you touch upon - even if the incident
at hand puts checkpatch in an unfavorable light.

> Fixing sparse warnings properly fixes real issues, whereas fixing
> checkpatch stuff creates garbage 9 times out of 10.

actually, my experience has been that 99% of the arch/x86 sparse
annotations dont fix any "real" issue but remove a warning. The
remaining 1% still very much makes it worthwhile though (they can
prevent a security hole, a bad bug, endianness incompatibility, etc.),
so we've taken a large number of sparse annotations in arch/x86 in the
past few months - while fixing exactly zero "real" bugs in the process.

Same goes for checkpatch: almost no individual checkpatch change _looks_
worthwhile in isolation, but the combined effect very much shows and
avoids real bugs. (While Sparse is 'type functional' and checkpatch is
'style only' - still both avoid real bugs - see below why.)

Lets consider the "end result" that we are aiming for. One example of a
"checkpatch clean" codebase is the scheduler (kernel/sched*.[ch]):

$ code-quality kernel/sched*.[ch]

errors lines of code errors/KLOC
kernel/sched.c 0 8490 0
kernel/sched_debug.c 0 402 0
kernel/sched_fair.c 0 1475 0
kernel/sched_idletask.c 0 130 0
kernel/sched_rt.c 0 1341 0
kernel/sched_stats.h 0 238 0

The value of a "checkpatch clean" codebase is significant to me as a
maintainer. No matter where i look at the (rather sizable) scheduler
codebase, the style is uniform and changes all look the same and are
much easier to review. Since 2.6.22 we've managed to do about 500
changes to this 10 KLOC codebase, with a very low regression rate - that
is more than 10 times the rate of change of the kernel as a whole.

We couldnt achieve that without broad "visual uniformity". Human vision
is based on pattern matching, a brain capacity with a physical limit.
Reducing the complexity of that process, and helping the "flow of eye
movement" during review, is just as important as cleaning up the logic
of the code - it gives us better chances of finding a bad bug during review.

We here on lkml are all quite good at filtering out unnecessary visual
noise when reviewing patches and writing code, but i prefer to reserve
that brain capacity towards understanding the code and finding mistakes
in it.

So i minimize all visual distractions in my physical work environment (i
optimize the field of view i have at the monitor), i minimize visual
distractions in the editor i use (no GUI for example, just plain
fullscreen text view with no borders, no menus, etc.), and an important
part of that is that i also minimize all unnecessary distractions in the
_code_ itself that i maintain.

But if you look at the git log of the scheduler over the past 5 months,
you'll see a striking lack of trivial, checkpatch-generated "monkey"
patches.

Why? Because all the patches that are applied are checkpatch-clean from
the get go, so there's no need for trivialities. There were certainly
some checkpatch "trivialities" early in the process (despite the
scheduler being very clean to begin with), but the transients have
subsided meanwhile and what we have is a squeaky-clean codebase in
action. In this model of maintenance, checkpatch ends up being just a
'background force' that never truly comes to the surface to bother us
with explicit trivialities. In other words: there's _zero_ room for
"monkey patches".

Note that there are no "problems to development patches" caused by
scheduler cleanups either - because there are essentially _no_ cleanup
patches at all in the scheduler - almost all patches we apply to the
scheduler are clean.

arch/x86 is on a similar path:

errors LOC err/KLOC
-----------------------------
v2.6.24-rc1 arch/x86/ 8695 117423 74.0
v2.6.24-x86.git arch/x86/ [21 Nov 2007] 5190 117156 44.2
v2.6.24-x86.git arch/x86/ [18 Dec 2007] 4057 117213 34.6
v2.6.24-x86.git arch/x86/ [ 8 Jan 2008] 3650 117987 30.9
v2.6.24-x86.git arch/x86/ [ 4 Feb 2008] 3334 133542 24.9
v2.6.25-x86.git arch/x86/ [21 Feb 2008] 2724 136963 19.8
v2.6.25-x86.git arch/x86/ [ 1 Mar 2008] 2155 136404 15.7
v2.6.25-x86.git arch/x86/ [21 Mar 2008] 1979 137205 14.4

and once we reach a "zero" state, the flow of "explicit" checkpatch
patches comes to a virtual standstill - just like it did for the
scheduler. And we broke up the "please dont do this to my outstanding
development patches" Catch-22 (which is also a way too easy place for
lazy developers to hide behind) by doing backports/forward ports along
more intrusive cleanups.

On a more conceptual angle: "coding style", despite being entirely
"non-functional" (it does not affect the generated code), is still very
much an integral part of the code because source code is fundamentally
about "knowledge" - and extra style noise in knowledge can never
possibly increase the quality of that knowledge. There are strong links
between code correctness and typography/aesthetics.

So, in the specific example of the scheduler subsystem, i've only
observed advantages to checkpatch and zero downsides. Could anyone give
me _any_ objective reason why i shouldnt be using checkpatch for the
scheduler? More broadly, could anyone give me an objective reason why we
shouldnt be doing it for arch/x86? And even more broadly, could anyone
give me an objective reason why we shouldnt be doing it for all actively
maintained areas of the kernel?

Ingo

2008-03-25 11:12:12

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, 25 March 2008 11:48:41 +0100, Ingo Molnar wrote:
>
> So, in the specific example of the scheduler subsystem, i've only
> observed advantages to checkpatch and zero downsides. Could anyone give
> me _any_ objective reason why i shouldnt be using checkpatch for the
> scheduler? More broadly, could anyone give me an objective reason why we
> shouldnt be doing it for arch/x86? And even more broadly, could anyone
> give me an objective reason why we shouldnt be doing it for all actively
> maintained areas of the kernel?

Disagreement between checkpatch and a maintainer's preferred style. I've
had a patch that fixed a bug and - while in the region - "cleaned up"
the style for a single line. This line no longer matches the rest of
the file and creates the kind of visual distraction you complain about.

In short, for a file with an active maintainer, whatever the maintainer
prefers should be done. Doing a full checkpatch sweep against a
maintainer's wishes is madness; doing a partial "cleanup" is worse.
Of course when a maintainer likes checkpatch, as you do, there is no
disagreement to deal with. :)

Jörn

--
I don't understand it. Nobody does.
-- Richard P. Feynman

2008-03-25 12:24:46

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Jörn Engel <[email protected]> wrote:

> On Tue, 25 March 2008 11:48:41 +0100, Ingo Molnar wrote:
> >
> > So, in the specific example of the scheduler subsystem, i've only
> > observed advantages to checkpatch and zero downsides. Could anyone
> > give me _any_ objective reason why i shouldnt be using checkpatch
> > for the scheduler? More broadly, could anyone give me an objective
> > reason why we shouldnt be doing it for arch/x86? And even more
> > broadly, could anyone give me an objective reason why we shouldnt be
> > doing it for all actively maintained areas of the kernel?
>
> Disagreement between checkpatch and a maintainer's preferred style. I've
> had a patch that fixed a bug and - while in the region - "cleaned up"
> the style for a single line. This line no longer matches the rest of
> the file and creates the kind of visual distraction you complain
> about.

well, once a subsystem is "checkpatch-clean" (which my challenge above
obviously assumes), there's no such "partial style" problem. It
obviously also assumes that the maintainer agrees that having consistent
coding style across all of Linux is a long-term advantage.

The current visual inconsistency between subsystems makes the Linux
kernel appear rather unpleasant and unprofessional to new kernel
developers. This is not just embarrassing to us (we want to write the
best OS on the planet), it is also actively harmful: such a "consistent
style does not matter" stance turns away people who have taste and tends
to attract people who have no taste - which, i'm sure you'll agree,
results in a deadly spiral if it gets strong enough.

So to turn around the argument: could you give me any reason why
differing coding style between subsystems, _often in blatant violation
of Documentation/CodingStyle_, is somehow "good" for Linux in the long
run? I listed numerous first-hand advantages that style consistency
brings and i listed numerous disadvantages created by inconsistency. So
i'm waiting for the list of counter-arguments - there _must_ be some
objective ones, besides the obvious "kernel old-timers are lazy to
change their ways" argument =B-)

In my experience in almost all cases the "style disagreement" between a
subsystem maintainer and checkpatch is due to the _maintainer_ being
wrong about seemingly unimportant (to him) style details.

And that very much includes myself: i used to have such "disagreement"
with checkpatch errors and i used to be annoyed at the style nitpicking
of checkpatch. And yes, it takes a certain amount of time for a
long-time kernel hacker like me to realize that a lot of code i wrote in
the past needs a good clean-up ;-)

These style differences are certainly not "wrong enough" to
inconvenience or displace an active maintainer (and i never made that
point), but they are nevertheless a death by a thousand cuts that the
general kernel is suffering from right now, and i'd be a fool not to
point it out. These seemingly unimportant style details add up to a
hodgepodge of coding style that makes life difficult for people who have
to look at many different parts of the kernel that they _dont maintain_.

That's why i asked about specific examples that we can talk about - and
i gave specific examples and filenames. The checkpatch maintainer (Andy
Whitcroft) has certainly shown flexibility to fix false positives ASAP.

> Of course when a maintainer likes checkpatch, as you do, there is no
> disagreement to deal with. :)

note, all those patches are "Subject: x86: " patches, and 99% of them
are maintained by us x86 maintainers.

Ingo

2008-03-25 12:26:35

by Andi Kleen

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

Ingo Molnar <[email protected]> writes:
>
> On a more conceptual angle: "coding style", despite being entirely
> "non-functional" (it does not affect the generated code), is still very
> much an integral part of the code because source code is fundamentally
> about "knowledge" - and extra style noise in knowledge can never
> possibly increase the quality of that knowledge. There are strong links
> between code correctness and typography/aesthetics.

You assert that all the time, but it is just that: an assertion.
I assert that code style is only a small part of code correctness.
Also just an assertion. Who is more right? Probably the truth
is somewhere in between. At least I think it is nearer my position
than yours @)

Also, regarding the rules enforced by checkpatch, I think there is a wide
range in how much they impact readability: e.g. if someone uses
the wrong bracket style consistently, that is somewhat disrupting.
I agree.

But is trailing whitespace disrupting to code reading in any
way? Very doubtful.

Most rules are somewhere in between. They vary widely in how
much they impact readability.

Also, sometimes the rules conflict. Example: the 80-column rule
often conflicts with the "always space around operator" rule.
That is because expressions split over multiple lines are harder
to read than an expression on a single line (at least to me), and
I would rather trade a few missing spaces around operators
than have a multi-line expression.

It's always a trade-off, and checkpatch.pl is not very good at
(read: it doesn't really handle) trade-offs.
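
A hypothetical illustration of that particular conflict (invented,
loosely scheduler-flavoured names; assume 8-column tabs):

	static unsigned long
	calc_delta(unsigned long sum_exec_runtime, unsigned long weight,
		   unsigned long rem, unsigned long inv_weight,
		   unsigned long total_weight)
	{
		unsigned long delta_exec;

		/* one line, still under 80 columns, by dropping the
		 * spaces around '*' and '/': */
		delta_exec = (sum_exec_runtime*weight + rem*inv_weight)/total_weight;

		/* the same expression obeying both rules no longer fits
		 * on one line and must be split: */
		delta_exec = (sum_exec_runtime * weight + rem * inv_weight) /
			     total_weight;

		return delta_exec;
	}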

> So, in the specific example of the scheduler subsystem, i've only
> observed advantages to checkpatch and zero downsides. Could anyone give
> me _any_ objective reason why i shouldnt be using checkpatch for the
> scheduler? More broadly, could anyone give me an objective reason why we
> shouldnt be doing it for arch/x86? And even more broadly, could anyone
> give me an objective reason why we shouldnt be doing it for all actively
> maintained areas of the kernel?

For new code being added (like your CFS scheduler) it is fine, but for
old code it has the problem of conflicting with other, actually useful,
pending changes in the same areas. And doing merges into such changing
code bases is always somewhat error prone, because the people who do it
are also only human and can introduce subtle typos etc.

Strictly speaking, each such merge requires a whole new testing cycle,
and doing such a testing cycle just for someone's checkpatch changes is
really a waste of time and seriously impacts real progress.
The only saving grace is that it will hopefully only happen once
per file, but the point still holds. There are a lot of different files
in Linux, so it has the potential to be a serious problem.

That is an objective (not just a random assertion) reason against
doing extensive changes to existing files, as Joe's patchkit does.

I think it would be fair if people doing this at least asked first:
- Does anybody have pending changes against file X? (perhaps
also checking mm and linux-next)
then waited a bit, and if someone says he has pending changes, held off
on the reformatting until the pending changes get merged.

Or better, really only do it on new code.

-Andi

2008-03-25 13:06:24

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> I can tell you one more example of things I strongly disagree with
> that it does, for example, such as telling me how to document
> spinlocks in datastructures.
>
> It wants a comment right above the spinlock_t member, but this totally
> ignores that perhaps I put a huge comment explaining the locking
> semantics elsewhere.

firstly, this warning from checkpatch.pl is off by default.

There are 3 checkpatch warning categories: ERROR, WARNING, CHECK.
spinlock_t without a comment is in this third category and you wont even
see that warning unless you very explicitly do:

checkpatch.pl --subjective

Secondly, even about this "checkpatch.pl --subjective" check you are
wrong. As someone who had to decode (way!) too many lockdep backtraces
in various kernel code that i didnt author and didnt maintain, i can
tell you with very strong authority that even in this case it's a
minimum requirement to put a comment right at that lock:

/*
* Regarding the locking rules, see the big comment block above in
* this file:
*/

or:

/* See net/core/sock.c for the locking rules: */

_Way_ too many times do i have to wonder where the heck a given lock is
documented. You _wrote_ and maintain a good portion of that code, so to
you it's seemingly an annoyance and a nuisance. To everyone else, it's
must-have information. Locks are at the heart of kernel data structures;
not having at least a minimal pointer at them is really bad.
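
Concretely, the annotation that satisfies the check is tiny - a
hypothetical structure, assuming the usual kernel headers:

	struct foo_queue {
		/* protects pending and nr_pending; see the "Locking
		 * rules" block at the top of this file: */
		spinlock_t		lock;
		struct list_head	pending;
		unsigned int		nr_pending;
	};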

(sidenote: the scheduler has one deficiency there and i've fixed it in
my tree. this warning should be moved from the 'check' category into the
warning category.)

Ingo

2008-03-25 13:13:40

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, 25 March 2008 13:24:14 +0100, Ingo Molnar wrote:
>
> The current visual inconsistency between subsystems makes the Linux
> kernel appear rather unpleasant and unprofessional to new kernel
> developers. This is not just embarrasing to us (we want to write the
> best OS on the planet), it is also actively harmful: such a "consistent
> style does not matter" stance turns away people who have taste and tends
> to attract people who have no taste - which i'm sure you'll agree with
> results in a deadly spiral if it gets strong enough.

I disagree with that assertion. My favorite example of where
CodingStyle has gone too far is this:
for (i=0; i<10; i++)
While the official document demands four extra spaces, I _hate_ them.
Whitespace offers visual grouping. The lack of whitespace around the
binary operators emphasizes that one kind of grouping is stronger than
another. Ever since this binary operator testament was added to our
Holy Canon, I started violating the coding style on purpose. Imo this
is beyond silly.
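
(Side by side, the two forms in question; count_things() is a made-up
stand-in for the loop body:)

	void count_things(int i);

	static void walk(void)
	{
		int i;

		for (i=0; i<10; i++)		/* the compact form argued for above */
			count_things(i);

		for (i = 0; i < 10; i++)	/* the four extra spaces CodingStyle demands */
			count_things(i);
	}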

So do I have bad taste, and should I leave the kernel in favor of someone
else with better taste who is currently turned away by me? Maybe.
Show me that person and I'll consider gardening. Until then I'll
continue to violate the style. Just to spite the fundamentalist
movement.

> So to turn around the argument: could you give me any reason why
> differing coding style between subsystems, _often in blatant violation
> of Documentation/CodingStyle_, is somehow "good" for Linux in the long
> run? I listed numerous first-hand advantages that style consistency
> brings and i listed numerous disadvantages created by inconsistency. So
> i'm waiting for the list of counter-arguments - there _must_ be some
> objective ones, besides the obvious "kernel old-timers are lazy to
> change their ways" argument =B-)

When you reject useful patches based on "this is not our preferred
style", you piss people off. That is a significant reason why people
choose to spend their time elsewhere. In certain cases having people
abandon the kernel may be a net gain, in many it is a loss.

So unless you are willing to maintain every single driver in the kernel,
pissing all the maintainers off that happen to disagree with the
canonical style - if only in detail - is not a good recipe.

Don't get me wrong, I certainly see advantages in checkpatch and keeping
the style consistent. But there are limits, where the gains no longer
justify the cost. And the limits will never be clearly defined. Are
some variable names better than others - sure. Can you write a rule for
checkpatch to ensure good names - hardly. And yet, variable names are
part of the style.

> These style differences are certainly not "wrong enough" to
> inconvenience or displace an active maintainer (and i never made that
> point)

It seems we both agree then. And for the record, your mail could easily
be interpreted as if you had made that point. Thanks for clarifying
things.

Jörn

--
I don't understand it. Nobody does.
-- Richard P. Feynman

2008-03-25 13:17:51

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> > could you please give me a file name as an example that i could
> > double-check myself? Thanks,
>
> I can't because I pacified it to cut down the review noise for the
> patch in question last time it happened.

ok, so i'll have to come up with some hard data about code you maintain
i guess - please provide contrary data if you have it.

Using the "code-quality" script mentioned at:

http://people.redhat.com/mingo/x86.git/README

on the Sparc64 tree:

$ code-quality `find arch/sparc64/ include/asm-sparc64/ -name '*.c'` |
tee ~/quality-sparc.txt
$ sort -n -k 4 ~/quality-sparc.txt | tail -20

shows the following "20 worst files":

                                      errors   lines of code   errors/KLOC
arch/sparc64/kernel/sys_sparc32.c         48            1034          46.4
arch/sparc64/kernel/signal32.c            65            1392          46.6
arch/sparc64/kernel/sys_sunos32.c         73            1360          53.6
arch/sparc64/solaris/misc.c               43             787          54.6
arch/sparc64/solaris/socket.c             30             462          64.9
arch/sparc64/kernel/binfmt_aout32.c       34             420          80.9
arch/sparc64/oprofile/init.c               2              24          83.3
arch/sparc64/solaris/fs.c                 64             746          85.7
arch/sparc64/prom/devops.c                 4              42          95.2
arch/sparc64/prom/console.c                8              75         106.6
arch/sparc64/solaris/ioctl.c              89             826         107.7
arch/sparc64/kernel/cpu.c                 17             155         109.6
arch/sparc64/solaris/ipc.c                19             127         149.6
arch/sparc64/solaris/timod.c             157             977         160.6
arch/sparc64/kernel/sunos_ioctl32.c       49             276         177.5
arch/sparc64/solaris/signal.c             81             430         188.3
arch/sparc64/solaris/socksys.c            39             204         191.1
arch/sparc64/prom/tree.c                  58             300         193.3
arch/sparc64/math-emu/math.c             215             514         418.2
arch/sparc64/boot/piggyback.c             47             110         427.2

most of these are certainly legacy stuff that dont matter to active
maintenance anymore (nobody will ever change math-emu/math.c i hope),
but for example looking at:

scripts/checkpatch.pl --file arch/sparc64/kernel/cpu.c
[...]
total: 17 errors, 2 warnings, 154 lines checked

all 17 errors and both warnings are legitimate complaints and if i was
maintaining that file i'd accept any patch that cleans those problems
up, in a heartbeat.

And today's mainstream files might become tomorrow's legacy files. To
come up with a hypothetical example: had you applied checkpatch.pl on
all changes to this file in the past 15 years (checkpatch.pl has existed
for less than 5 years, so this is hypothetical), you'd have a
squeaky-clean file and no need for any annoying "monkey patches".

or take a look at the 65 errors that arch/sparc64/kernel/signal32.c
produces:

total: 65 errors, 33 warnings, 1391 lines checked

i've just checked all of the 65 errors and all look legitimate at first
sight and should be fixed.

even looking at "best of breed" arch/sparc64 code, arch/sparc64/mm/*.c
gives 34 errors:

scripts/checkpatch.pl --file arch/sparc64/mm/*.c | grep ERROR | wc -l
34

all of which seem legitimate complaints.

so the score right now: checkpatch versus DaveM 116:0 ;-)

i'm sure there are false positives in it too, but in my experience (from
the scheduler and from arch/x86) there's less than 1 false positive for
every 100 legitimate errors that checkpatch.pl finds. So for the 1500
errors reported by checkpatch.pl for the whole sparc64 tree:

                 errors   lines of code   errors/KLOC
arch/sparc64/      1460           49801          29.3

i estimate that there will be less than 15 false positive "ERROR" lines,
and my random sample of about 100 errors confirms that rate. That's a
vanishingly small rate IMO.

as a comparison, core kernel code has this 'style quality':

            errors   lines of code   errors/KLOC
kernel/        727          100364           7.2

in my experience, subsystems (of any significant size - i.e. above a few
KLOC) where errors/KLOC is "single digit" have very clean code in
general. Subsystems that have low-double-digit errors/KLOC are OK-ish,
subsystems with high-double-digit or triple-digit errors/KLOC are messy.

There can be fluctuations and artifacts, and obviously this is just
another (arbitrary) static metric that has no forced relationship with
real code quality - but in my experience it's surprisingly close to
reality - closer than any other code metric i've seen.
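
(For reference, a minimal sketch of what such a per-file tally could
look like - the real code-quality script lives at the README URL above
and may differ; this version counts raw lines rather than strict LOC:)

#!/bin/sh
# errors/KLOC per file - sketch only, not the actual code-quality script
for f in "$@"
do
	errs=`scripts/checkpatch.pl --file "$f" | grep -c '^ERROR'`
	loc=`wc -l < "$f"`
	awk -v e="$errs" -v l="$loc" -v f="$f" \
		'BEGIN { printf "%-40s %5d %8d %8.1f\n", f, e, l, e * 1000 / l }'
done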

Ingo

2008-03-25 13:38:42

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Jörn Engel <[email protected]> wrote:

> > The current visual inconsistency between subsystems makes the Linux
> > kernel appear rather unpleasant and unprofessional to new kernel
> > developers. This is not just embarrassing to us (we want to write the
> > best OS on the planet), it is also actively harmful: such a
> > "consistent style does not matter" stance turns away people who have
> > taste and tends to attract people who have no taste - which i'm sure
> > you'll agree results in a deadly spiral if it gets strong
> > enough.
>
> I disagree with that assertion. My favorite example of where
> CodingStyle has gone too far is this:
> for (i=0; i<10; i++)
> While the official document demands four extra spaces, I _hate_ them.
> Whitespace offers visual grouping. The lack of whitespace around the
> binary operators emphasizes that one kind of grouping is stronger than
> another. Ever since this binary operator testament was added to our
> Holy Canon, I started violating the coding style on purpose. Imo this
> is beyond silly.

you picked a borderline case without showing the full effects of your
choice of style - but still even in this example you are wrong i
believe. Look at how inconsistent this looks:

for (i=0; i<10; i++) {
        l = 10;
        if (k <= 10)
                k = 11;
}

(the inconsistent 'i=0' versus 'l = 10')

so in your style we'd have to write it as:

for (i=0; i<10; i++) {
        l=10;
        if (k<=10)
                k=11;
}

which, on one hand, looks unprofessional (in fixed width font), but on
the other hand, the literals and operators are way too "close" to each
other and operators are easily missed and mis-identified visually -
causing bugs.

For example the 'k' and the '<=' operator may look visually similar and
can be "blended", making it easy to skip over 'k=10' versus 'k<=10' -
while 'k = 10' clearly stands apart from 'k <= 10'. [and syntax
highlighting does not help with this particular problem]

in Documentation/CodingStyle form it looks like this:

for (i = 0; i < 10; i++) {
        l = 10;
        if (k <= 10)
                k = 11;
}

which is certainly reasonable and groups safely.

yes - you can have arguments one way or another, but there's nothing
worse than maintainers each going towards their own _arbitrary_ and
often clearly inferior coding style, which is inconsistent within the
same file.

I at least make the point that i'm trying to converge to
Documentation/CodingStyle. Is it arbitrary? Yes, to a fair degree, but
it certainly conveys a very strong, unambiguous sense of taste.

Ingo

2008-03-25 13:46:26

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Ingo Molnar <[email protected]> wrote:

> you picked a borderline case without showing the full effects of your
> choice of style - but still even in this example you are wrong i
> believe. [...]

and let me give an example with your very own code that you wrote
and maintain, drivers/mtd/devices/block2mtd.c:

                                   errors   lines of code   errors/KLOC
drivers/mtd/devices/block2mtd.c        10             490          20.4

that's pretty OK code, but not perfect, the 10 errors are:

ERROR: do not use C99 // comments
ERROR: need spaces around that '=' (ctx:VxV)
ERROR: need spaces around that '<' (ctx:VxV)
ERROR: do not use C99 // comments
ERROR: do not use C99 // comments
ERROR: do not use C99 // comments
ERROR: do not use C99 // comments
ERROR: do not use C99 // comments
ERROR: do not use C99 // comments
ERROR: do not initialise statics to 0 or NULL

so just because you disagreed with those 2 errors that relate to '=' and
'<' (and where according to CodingStyle checkpatch.pl is correct), you
disregarded the other 8 very valid complaints that checkpatch.pl had.
(the final one even negatively affects the size of the kernel)

and this is the experience i've had in general: the checkpatch.pl benefits
far outweigh the costs, even if you disagree with a particular rule of
checkpatch.pl. When you came to Linux you already had to change your
coding style quite radically, correct?

Ingo

2008-03-25 14:03:51

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Jörn Engel <[email protected]> wrote:

> > So to turn around the argument: could you give me any reason why
> > differing coding style between subsystems, _often in blatant
> > violation of Documentation/CodingStyle_, is somehow "good" for Linux
> > in the long run? I listed numerous first-hand advantages that style
> > consistency brings and i listed numerous disadvantages created by
> > inconsistency. So i'm waiting for the list of counter-arguments -
> > there _must_ be some objective ones, besides the obvious "kernel
> > old-timers are lazy to change their ways" argument =B-)
>
> When you reject useful patches based on "this is not our preferred
> style", you piss people off. [...]

we dont actually do that for newbies and newbies are in fact happy to
write cleaner code - so the rest of your argument which depends on this
premise fails. (Most of the time i fix it up silently myself or if a
style error comes in a pattern, i ask the person to send future patches
with that small detail fixed.)

my experience with checkpatch.pl is the exact opposite of what you fear:
it _widened_ the contributor base: a good number of newbies felt
encouraged that an objective tool reports an "error" in a file
that was written by otherwise "much more knowledgeable" kernel hackers.
checkpatch.pl is basically the "yes, really, you are right, this piece
of code in the Linux kernel is indeed crap" review tool that reinforces
newbies. It lowers the bar of entry to kernel hacking, and it does so
for exactly those pieces of code that we want newbies to be active on:
barely maintained source code.

Whoever is afraid of an "army of checkpatch wielding newbies" who'll
never rise above their newbie status fails to consider two important
factors: 1) checkpatch errors are a finite resource to feed on and they
dont re-grow in a well-maintained subsystem 2) they need to look back a
few years when they themselves were newbies and were in need of some
easy kernel projects just to familiarize themselves with the kernel and
its contribution environment. (My first-ever contribution to the Linux
kernel was a trivial patch.)

( i only remember one patch ever being rejected due to checkpatch
failures; it came from a kernel old-timer who sent absolutely horrible
patches and who _should have known better_. Kernel old-timers are
"multipliers", they write more code and influence more people's code,
so it's expected of them to write absolutely squeaky-clean code. )

so at least for the scheduler and for arch/x86 there's absolutely zero
friction between checkpatch.pl use and newbies - and if you look at the
nicely evolving arch/x86 contributor statistics you'll have to come to
the same conclusion i believe.

Ingo

2008-03-25 15:26:29

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 134/148] include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> + : "1" (addr), "g" ((long)(size)), \
> + "g" (current_thread_info()->addr_limit.seg)); \
> + flag;
> +})

hm, you apparently never built this on 64-bit x86? The above has a
trivial typo.

Ingo

2008-03-25 15:31:21

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 019/148] include/asm-x86/cpufeature.h: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> +#define X86_FEATURE_REP_GOOD _CF(3, 16) /* rep microcode works well
> + * on this CPU */

that is crap too ...

Ingo

2008-03-25 16:08:19

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, 25 March 2008 14:45:56 +0100, Ingo Molnar wrote:
>
> and let me give an example with your very own code that you wrote
> and maintain, drivers/mtd/devices/block2mtd.c:
>
>                                    errors   lines of code   errors/KLOC
> drivers/mtd/devices/block2mtd.c        10             490          20.4
>
> that's pretty OK code, but not perfect, the 10 errors are:
>
> ERROR: do not use C99 // comments
> ERROR: need spaces around that '=' (ctx:VxV)
> ERROR: need spaces around that '<' (ctx:VxV)
> ERROR: do not use C99 // comments
> ERROR: do not use C99 // comments
> ERROR: do not use C99 // comments
> ERROR: do not use C99 // comments
> ERROR: do not use C99 // comments
> ERROR: do not use C99 // comments
> ERROR: do not initialise statics to 0 or NULL

The last should and will be fixed. The // I don't really care about.
Send a patch if you do.

Going over my logfs patch, I found several things that are either false
positives or rather questionable in my book. <adds Andy to Cc:>


(foo*) should be (foo *)
What does that extra space gain us?


ERROR: no space before that close parenthesis ')'
#2565: FILE: fs/logfs/gc.c:294:
+ seg_ofs + sizeof(oh) < super->s_segsize; ) {

Actual code is this:
for (seg_ofs = LOGFS_SEGMENT_HEADERSIZE;
		seg_ofs + sizeof(oh) < super->s_segsize; ) {
The for() loop is missing one of its three terms. I assume this is one
of the effects of not having a perfect C parser.


ERROR: trailing statements should be on next line
#5000: FILE: fs/logfs/readwrite.c:203:
+ } else while (unlikely(TestSetPageLocked(page))) {

We have an explicit exception for "else if". "else while" makes imo
just as much (or as little) sense.


ERROR: need space after that ',' (ctx:VxV)
#5801: FILE: fs/logfs/readwrite.c:1004:
+ ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix,level);

One of those examples where a missing space is the lesser of two or
three evils.


ERROR: need consistent spacing around '|' (ctx:WxV)
#8293: FILE: fs/logfs/dev_mtd.c:376:
+ |SLAB_MEM_SPREAD|SLAB_PANIC),

I have no idea what this is about. Original code:
mtd_cache = kmem_cache_create("mtd_cache", sizeof(struct mtd_inode), 0,
		(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT
		 |SLAB_MEM_SPREAD|SLAB_PANIC),

Jörn

--
Victory in war is not repetitious.
-- Sun Tzu

2008-03-25 17:20:15

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, 2008-03-25 at 13:26 +0100, Andi Kleen wrote:
> Ingo Molnar <[email protected]> writes:
> >
> > On a more conceptual angle: "coding style", despite being entirely
> > "non-functional" (it does not affect the generated code), is still very
> > much an integral part of the code because source code is fundamentally
> > about "knowledge" - and extra style noise in knowledge can never
> > possibly increase the quality of that knowledge. There are strong links
> > between code correctness and typography/aesthetics.
>
> You assert that all the time, but it is just that: an assertion.
> I assert that code style is only a small part of code correctness.
> Also just an assertion. Who is more right? Probably the truth
> is somewhere in between. At least I think it is nearer my position
> than yours @)

I think someone with a consistent code style generates better code than
someone who can't even make up his mind on how to write it, let alone
what to write.

With Linux we have many people, many of us with a preferred style, most
of which will differ from the suggested style.

Much like natural languages, many of us don't speak English as our
native tongue - mine is Dutch, yours is German. Still we communicate with
each other using English, because that is the de-facto standard on LKML
[ and of course because my German truly sucks ;-) ].

Similarly for coding style, if we all stick to one style it's easier to
read (and hopefully fix) each other's code. All checkpatch.pl attempts to
do is help us do that.

Sure it sometimes complains about trivial things - but if truly trivial,
making the change isn't hard - or it might even be skipped in favour of
'better' taste. It's a guide, not hard rules; it suggests we inspect a
certain piece of code again, to see if we really intended to write it
that way.

Does it get things wrong? Yes, I do think so. The change in patch 120/148
is utter nonsense IMHO.

Checkpatch will also not complain about larger things that make a bigger
difference; for instance it will happily let you write a 200 line
function, even though the code would have been so much better in 10
smaller functions.

Does it help? Yes, I do think it does. If only people would apply common
sense along with it...

> Also regarding the rules enforced by checkpatch I think there is a wide
> range on how much they impact readability: e.g. if someone uses
> the wrong bracket style consistently that is somewhat disrupting.
> I agree.
>
> But is trailing white space disrupting to code reading in any
> way? Very doubtful.

The trailing and leading whitespace thingies are trivial to fix (and
quilt refresh --strip-trailing-whitespace does half of it already, so
anybody using quilt doesn't have any excuse for ever seeing that error
anyway).

There is something to be said for sending Linus a script that fixes up
the whole tree and being done with it. These are petty things indeed.

> Most rules are somewhere inbetween. They vary widely in how
> much they impact readability.
>
> Also sometimes the rules conflict. Example: the 80 column rule
> often conflicts with the "always space around operator" rule.
> That is because expressions split over multiple lines are harder
> to read than an expression on a single line (at least here) and at
> least I would rather trade a few missing spaces around operators
> than to have a multi-line expression.
>
> It's always a trade-off and checkpatch.pl is not very good
> (read it doesn't really handle) trade-offs.

Which is why it is a guide: a human using the tool can use his good
taste and discretion, choosing which suggestions to take and which to
politely ignore.

> > So, in the specific example of the scheduler subsystem, i've only
> > observed advantages to checkpatch and zero downsides. Could anyone give
> > me _any_ objective reason why i shouldnt be using checkpatch for the
> > scheduler? More broadly, could anyone give me an objective reason why we
> > shouldnt be doing it for arch/x86? And even more broadly, could anyone
> > give me an objective reason why we shouldnt be doing it for all actively
> > maintained areas of the kernel?
>
> For new code being added (like your CFS scheduler) it is fine, but for
> old code it has the problem of conflicting with other actually useful
> changes on the same areas which are pending. And doing merges into
> such changing code bases is always somewhat error prone because the people
> who do it are also only human and can apply subtle typos etc.

Sure, and I think that when there is actual work pending it's 'actively'
maintained and will eventually gravitate towards cleanliness anyway.

> Strictly seen each such merge requires a whole new testing cycle and
> doing such a testing cycle just for someone's checkpatch changes is
> really a waste of time and seriously impacting real progress.
> The only saving grace is that it will hopefully only happen once
> per file, but the point still holds. There are a lot of different files
> in Linux, so it has the potential to be a serious problem.

The good thing is that most of these checkpatch patches could be
validated by comparing object code of the affected translation units -
they're that trivial.

When generating 100% identical code, there is no issue.
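
(A sketch of such a per-object check - the file name below is purely
illustrative, and this assumes the object is built by the config at
hand:)

$ make drivers/mtd/devices/block2mtd.o
$ objdump -d drivers/mtd/devices/block2mtd.o > before.asm
$ patch -p1 < checkpatch-cleanup.diff
$ make drivers/mtd/devices/block2mtd.o
$ objdump -d drivers/mtd/devices/block2mtd.o > after.asm
$ diff -u before.asm after.asm          # empty diff == identical code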

> That is an objective (not just random assertion) reason against
> doing extensive changes of existing files like Joe's patchkit.

Building a single allyesconfig for x86_32 and x86_64 before and after
and getting identical binaries is pretty strong.

Although I think this patch series doesn't manage to accomplish it, if
only because it moves __LINE__ statements around.

> I think it would be fair at least if people doing this asked first at least:
> - Does anybody have pending changes against file X, perhaps
> also checking mm and linux-next
> and then wait a bit and if someone says he has pending changes then not do
> the reformatting until the pending changes get merged.

Sure, cleanups like this should never bother real work - good chance
that the real work already cleans up most issues anyway.

> Or better really only do it on new code.

It might be an incentive to touch (and get to know and appreciate and
eventually clean up or start maintaining) otherwise dead code. I don't
think we should stop such incentives (even if they are very small).

2008-03-25 17:25:21

by Andi Kleen

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

> > That is an objective (not just random assertion) reason against
> > doing extensive changes of existing files like Joe's patchkit.
>
> Building a single allyesconfig for x86_32 and x86_64 before and after
> and getting identical binaries is pretty strong.

checkpatch does not necessarily result in the same binaries. First
there is the build date and then there might be changes like
KERN_* prefixes added etc.

And there might be code which is not covered under a single configuration,
e.g. when both 32bit and 64bit x86 are changed.

-Andi

2008-03-25 18:23:27

by Paolo Ciarrocchi

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On 3/25/08, Andi Kleen <[email protected]> wrote:
> > > That is an objective (not just random assertion) reason against
> > > doing extensive changes of existing files like Joe's patchkit.
> >
> > Building a single allyesconfig for x86_32 and x86_64 before and after
> > and getting identical binaries is pretty strong.

true, but it's usually simpler to just compile the affected file.

> checkpatch does not necessarily result in the same binaries. First
> there is the build date and then there might be changes like
> KERN_* prefixes added etc.
>
> And there might be code which is not covered under a single configuration,
> e.g. when both 32bit and 64bit x86 is changed.

in the last series of coding style patches i sent to both ingo and
bart i worked as follows:
- worked on files with the agreement of the maintainer (or after he
asked me to do the cleanup)
- separated changes that modified the binary from the pure style changes.
- all the patches were compile tested and when possible a size/md5sum
verification was performed and added to the changelog.

i learned these "rules" from my mistakes and in the end it worked
well, so i think the problem is in how people are using the tool, not
in the tool itself.

ciao,
--
Paolo
http://paolo.ciarrocchi.googlepages.com/

2008-03-25 18:28:18

by Joe Perches

[permalink] [raw]
Subject: Re: [PATCH 019/148] include/asm-x86/cpufeature.h: checkpatch cleanups - formatting only

On Tue, 2008-03-25 at 16:30 +0100, Ingo Molnar wrote:
> > +#define X86_FEATURE_REP_GOOD _CF(3, 16) /* rep microcode works well
> > + * on this CPU */
> that is crap too ...

the _CF or the newly line-broken comment?

2008-03-25 18:30:25

by Joe Perches

[permalink] [raw]
Subject: Re: [PATCH 134/148] include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only

On Tue, 2008-03-25 at 16:25 +0100, Ingo Molnar wrote:
> hm, you apparently never built this on 64-bit x86?

Correct. x86(_32) defconfig and allyesconfig only.

> The above has a trivial typo.

Careless.
I'll set up an x86-64 cross-compiler.

Before building I did
s/__LINE__/0/g
to minimize the md5sum differences

and md5sum/diff and objdump -Dx/diff and inspected
the objects before and after.
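
(Roughly, as a sketch - to be run in both the clean and the patched
tree; the paths and list names are illustrative:)

$ find include/asm-x86 -name '*.h' | xargs sed -i 's/__LINE__/0/g'
$ make -s
$ find . -name '*.o' | sort | xargs md5sum > ../md5.before
  (apply the patches, rebuild, produce ../md5.after the same way)
$ diff ../md5.before ../md5.after
$ objdump -Dx foo.o > foo.dis           # then diff the disassembly of
                                        # any object that still differs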

2008-03-25 20:15:29

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 019/148] include/asm-x86/cpufeature.h: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> On Tue, 2008-03-25 at 16:30 +0100, Ingo Molnar wrote:
> > > +#define X86_FEATURE_REP_GOOD _CF(3, 16) /* rep microcode works well
> > > + * on this CPU */
> > that is crap too ...
>
> the _CF or the newly line-broken comment?

both :) Line-breaking in macros isnt done like that. And the _CF thing:

+#if defined _CF
+#undef _CF
+#endif
+#define _CF(word, bit) ((word) * 32 + (bit))

looks quite ugly - either we have such a macro in which case it should
be a generic define somewhere that doesnt override anything else, or we
shouldnt do it.

I also had to fix some other typos that broke the 64-bit build. I ended
up skipping the whole cpufeatures.h patch - could you please re-do and
re-send it?

Ingo

2008-03-25 20:17:40

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 134/148] include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only


* Joe Perches <[email protected]> wrote:

> On Tue, 2008-03-25 at 16:25 +0100, Ingo Molnar wrote:
> > hm, you apparently never built this on 64-bit x86?
>
> Correct. x86(_32) defconfig and allyesconfig only.
>
> > The above has a trivial typo.
>
> Careless.
> I'll set up an x86-64 cross-compiler.

yeah - 64-bit allyesconfig (with DEBUG_INFO disabled - it just slows
down the build) should trigger most of the problems.

nevertheless i have most of your other patches in x86.git/latest right
now, you can pick it up via:

http://people.redhat.com/mingo/x86.git/README

> Before building I did
> s/__LINE__/0/g
> to minimize the md5sum differences
>
> and md5sum/diff and objdump -Dx/diff and inspected
> the objects before and after.

that's a nice trick - i never figured out a good way to skip that type
of build difference.

Ingo

2008-03-25 23:10:11

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Ingo Molnar <[email protected]>
Date: Tue, 25 Mar 2008 14:17:08 +0100

> There can be fluctuations and artifacts, and obviously this is just
> another (arbitrary) static metric that has no forced relationship with
> real code quality - but in my experience it's surprisingly close to
> reality - closer than any other code metric i've seen.

And yet you used it to claim that the sparc64 port is an
unmaintainable pile of poo.

Thanks but no thanks, you just proved even more to me why
checkpatch is crap.

2008-03-25 23:11:54

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Ingo Molnar <[email protected]>
Date: Tue, 25 Mar 2008 14:45:56 +0100

> and let me give an example with your very own code that you wrote
> and maintain, drivers/mtd/devices/block2mtd.c:
>
>                                    errors   lines of code   errors/KLOC
> drivers/mtd/devices/block2mtd.c        10             490          20.4

Ingo, this is devolving into a "code I maintain is great, code you
maintain sucks, checkpatch says so" kind of discussion, please stop.

You're not making any friends by making your arguments this way.

2008-03-26 09:36:39

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, 25 March 2008 16:11:42 -0700, David Miller wrote:
> >                                    errors   lines of code   errors/KLOC
> > drivers/mtd/devices/block2mtd.c        10             490          20.4
>
> Ingo, this is devolving into a "code I maintain is great, code you
> maintain sucks, checkpatch says so" kind of discussion, please stop.
>
> You're not making any friends by making your arguments this way.

To be fair, block2mtd does suck, even conceptually. But that would
digress too far.

Jörn

--
Rules of Optimization:
Rule 1: Don't do it.
Rule 2 (for experts only): Don't do it yet.
-- M.A. Jackson

2008-03-26 09:53:24

by Andy Whitcroft

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, Mar 25, 2008 at 05:07:35PM +0100, Jörn Engel wrote:
> On Tue, 25 March 2008 14:45:56 +0100, Ingo Molnar wrote:
> >
> > and let me give an example with your very own code that you wrote
> > and maintain, drivers/mtd/devices/block2mtd.c:
> >
> >                                    errors   lines of code   errors/KLOC
> > drivers/mtd/devices/block2mtd.c        10             490          20.4
> >
> > that's pretty OK code, but not perfect, the 10 errors are:
> >
> > ERROR: do not use C99 // comments
> > ERROR: need spaces around that '=' (ctx:VxV)
> > ERROR: need spaces around that '<' (ctx:VxV)
> > ERROR: do not use C99 // comments
> > ERROR: do not use C99 // comments
> > ERROR: do not use C99 // comments
> > ERROR: do not use C99 // comments
> > ERROR: do not use C99 // comments
> > ERROR: do not use C99 // comments
> > ERROR: do not initialise statics to 0 or NULL
>
> The last should and will be fixed. The // I don't really care about.
> Send a patch if you do.
>
> Going over my logfs patch, I found several things that are either false
> positives or rather questionable in my book. <adds Andy to Cc:>
>
>
> (foo*) should be (foo *)
> What does that extra space gain us?

It really gains us nothing, however that is not really the point.
The point is that consistency is good: with the space is the more normal
'C' usage, without it for 'C++'; something to do with the implication that
(foo *) is a pointer to a foo (separate things), and (foo*) is a thing
of type pointer to foo (one thing) which is more object oriented.

The "norm" is with and so it makes sense to maintain it that way. A lot
of the layout and style choises are arbitrary, and disliked by many of us,
but we follow the style to maintain that common feel.

> ERROR: no space before that close parenthesis ')'
> #2565: FILE: fs/logfs/gc.c:294:
> + seg_ofs + sizeof(oh) < super->s_segsize; ) {
>
> Actual code is this:
> for (seg_ofs = LOGFS_SEGMENT_HEADERSIZE;
>		seg_ofs + sizeof(oh) < super->s_segsize; ) {
> The for() loop is missing one of its three terms. I assume this is one
> of the effects of not having a perfect C parser.
>

Yes that is a false positive. I'll have a look at fixing it.

> ERROR: trailing statements should be on next line
> #5000: FILE: fs/logfs/readwrite.c:203:
> + } else while (unlikely(TestSetPageLocked(page))) {
>
> We have an explicit exception for "else if". "else while" makes imo
> just as much (or as little) sense.

"else if" is at least creating an additional arm of the same control
structure. "else while" is mixing two different paradigms.

> ERROR: need space after that ',' (ctx:VxV)
> #5801: FILE: fs/logfs/readwrite.c:1004:
> + ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix,level);
>
> One of those examples where a missing space is the lesser of two or
> three evils.

Because to add the space would mean breaking the line to avoid exceeding
80 characters?

> ERROR: need consistent spacing around '|' (ctx:WxV)
> #8293: FILE: fs/logfs/dev_mtd.c:376:
> + |SLAB_MEM_SPREAD|SLAB_PANIC),

That is a false positive triggered by the ' |S' at the start of
the line. This one is fixed in the head of my tree. The more normal
form would be for that leading | to be on the end of the previous line,
and the exception for that was already there.

> I have no idea what this is about. Original code:
> mtd_cache = kmem_cache_create("mtd_cache", sizeof(struct mtd_inode), 0,
>		(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT
>		 |SLAB_MEM_SPREAD|SLAB_PANIC),
>

-apw

2008-03-26 10:15:38

by Ingo Molnar

[permalink] [raw]
Subject: [patch] bkl2mtd: cleanup


* Jörn Engel <[email protected]> wrote:

> > ERROR: do not initialise statics to 0 or NULL
>
> The last should and will be fixed. The // I don't really care about.
> Send a patch if you do.

sure, find the clean-up patch below. (Some of the changes are for just
tiny nuances, not mentioned in CodingStyle nor flagged by checkpatch.)

Ingo

------------------------>
Subject: bkl2mtd: cleanup
From: Ingo Molnar <[email protected]>
Date: Wed Mar 26 10:40:47 CET 2008

Before:
total: 10 errors, 4 warnings, 488 lines checked
After:
total: 0 errors, 0 warnings, 497 lines checked

No code changed, except the small shrink due to the
block2mtd_init_called initialization change:

drivers/mtd/devices/block2mtd.o:

   text    data     bss     dec     hex filename
   3097      16     124    3237     ca5 block2mtd.o.before
   3093      16     124    3233     ca1 block2mtd.o.after

md5 changes only due to the different section that the
block2mtd_init_called got into:

bacbb932ec90b514f8adf654afa29b44 block2mtd.o.before.asm
05e1252749f7294f45c79c44554ec6ba block2mtd.o.after.asm

Signed-off-by: Ingo Molnar <[email protected]>
---
drivers/mtd/devices/block2mtd.c | 151 +++++++++++++++++++++-------------------
1 file changed, 80 insertions(+), 71 deletions(-)

Index: linux/drivers/mtd/devices/block2mtd.c
===================================================================
--- linux.orig/drivers/mtd/devices/block2mtd.c
+++ linux/drivers/mtd/devices/block2mtd.c
@@ -1,6 +1,4 @@
/*
- * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
- *
* block2mtd.c - create an mtd from a block device
*
* Copyright (C) 2001,2002 Simon Evans <[email protected]>
@@ -20,7 +18,7 @@
#include <linux/mutex.h>
#include <linux/mount.h>

-#define VERSION "$Revision: 1.30 $"
+#define VERSION "Revision: 1.30"


#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
@@ -29,13 +27,13 @@

/* Info for the block device */
struct block2mtd_dev {
- struct list_head list;
- struct block_device *blkdev;
- struct mtd_info mtd;
- struct mutex write_mutex;
+ struct list_head list;
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ /* serializes writes with each other and also with erase: */
+ struct mutex write_mutex;
};

-
/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

@@ -49,11 +47,11 @@ static struct page *page_read(struct add
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
- struct page *page;
- int index = to >> PAGE_SHIFT; // page index
+ int index = to >> PAGE_SHIFT; /* page index */
int pages = len >> PAGE_SHIFT;
- u_long *p;
+ struct page *page;
u_long *max;
+ u_long *p;

while (pages) {
page = page_read(mapping, index);
@@ -63,7 +61,7 @@ static int _block2mtd_erase(struct block
return PTR_ERR(page);

max = page_address(page) + PAGE_SIZE;
- for (p=page_address(page); p<max; p++)
+ for (p = page_address(page); p < max; p++) {
if (*p != -1UL) {
lock_page(page);
memset(page_address(page), 0xff, PAGE_SIZE);
@@ -71,6 +69,7 @@ static int _block2mtd_erase(struct block
unlock_page(page);
break;
}
+ }

page_cache_release(page);
pages--;
@@ -78,6 +77,7 @@ static int _block2mtd_erase(struct block
}
return 0;
}
+
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct block2mtd_dev *dev = mtd->priv;
@@ -86,17 +86,21 @@ static int block2mtd_erase(struct mtd_in
int err;

instr->state = MTD_ERASING;
+
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
mutex_unlock(&dev->write_mutex);
+
if (err) {
ERROR("erase failed err = %d", err);
instr->state = MTD_ERASE_FAILED;
- } else
+ } else {
instr->state = MTD_ERASE_DONE;
+ }

instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+
return err;
}

@@ -105,13 +109,14 @@ static int block2mtd_read(struct mtd_inf
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- struct page *page;
int index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
+ struct page *page;
int cpylen;

if (from > mtd->size)
return -EINVAL;
+
if (from + len > mtd->size)
len = mtd->size - from;

@@ -120,9 +125,9 @@ static int block2mtd_read(struct mtd_inf

while (len) {
if ((offset + len) > PAGE_SIZE)
- cpylen = PAGE_SIZE - offset; // multiple pages
+ cpylen = PAGE_SIZE - offset; /* multiple pages */
else
- cpylen = len; // this page
+ cpylen = len; /* this page */
len = len - cpylen;

page = page_read(dev->blkdev->bd_inode->i_mapping, index);
@@ -145,22 +150,23 @@ static int block2mtd_read(struct mtd_inf


/* write data to the underlying device */
-static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
- loff_t to, size_t len, size_t *retlen)
+static int
+_block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, loff_t to,
+ size_t len, size_t *retlen)
{
- struct page *page;
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
- int index = to >> PAGE_SHIFT; // page index
- int offset = to & ~PAGE_MASK; // page offset
+ int index = to >> PAGE_SHIFT; /* page index */
+ int offset = to & ~PAGE_MASK; /* page offset */
+ struct page *page;
int cpylen;

if (retlen)
*retlen = 0;
while (len) {
if ((offset+len) > PAGE_SIZE)
- cpylen = PAGE_SIZE - offset; // multiple pages
+ cpylen = PAGE_SIZE - offset; /* multiple pages */
else
- cpylen = len; // this page
+ cpylen = len; /* this page */
len = len - cpylen;

page = page_read(mapping, index);
@@ -187,8 +193,8 @@ static int _block2mtd_write(struct block
return 0;
}

-
-static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+static int
+block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
@@ -198,12 +204,14 @@ static int block2mtd_write(struct mtd_in
return 0;
if (to >= mtd->size)
return -ENOSPC;
+
if (to + len > mtd->size)
len = mtd->size - to;

mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
mutex_unlock(&dev->write_mutex);
+
if (err > 0)
err = 0;
return err;
@@ -214,8 +222,8 @@ static int block2mtd_write(struct mtd_in
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
+
sync_blockdev(dev->blkdev);
- return;
}


@@ -253,14 +261,14 @@ static struct block2mtd_dev *add_device(
bdev = open_bdev_excl(devname, O_RDWR, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
-
- /* We might not have rootfs mounted at this point. Try
- to resolve the device name by other means. */
-
+ /*
+ * We might not have rootfs mounted at this point. Try
+ * to resolve the device name by other means.
+ */
dev_t devt = name_to_dev_t(devname);
- if (devt) {
+
+ if (devt)
bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
- }
}
#endif

@@ -286,18 +294,18 @@ static struct block2mtd_dev *add_device(

sprintf(dev->mtd.name, "block2mtd: %s", devname);

- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
- dev->mtd.erasesize = erase_size;
- dev->mtd.writesize = 1;
- dev->mtd.type = MTD_RAM;
- dev->mtd.flags = MTD_CAP_RAM;
- dev->mtd.erase = block2mtd_erase;
- dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
- dev->mtd.sync = block2mtd_sync;
- dev->mtd.read = block2mtd_read;
- dev->mtd.priv = dev;
- dev->mtd.owner = THIS_MODULE;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
+ dev->mtd.type = MTD_RAM;
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd.erase = block2mtd_erase;
+ dev->mtd.write = block2mtd_write;
+ dev->mtd.writev = default_mtd_writev;
+ dev->mtd.sync = block2mtd_sync;
+ dev->mtd.read = block2mtd_read;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;

if (add_mtd_device(&dev->mtd)) {
/* Device didnt get added, so free the entry */
@@ -311,11 +319,12 @@ static struct block2mtd_dev *add_device(

devinit_err:
block2mtd_free_device(dev);
+
return NULL;
}

-
-/* This function works similar to reguler strtoul. In addition, it
+/*
+ * This function works similar to reguler strtoul. In addition, it
* allows some suffixes for a more human-readable number format:
* ki, Ki, kiB, KiB - multiply result with 1024
* Mi, MiB - multiply result with 1024^2
@@ -324,6 +333,7 @@ devinit_err:
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
unsigned long result = simple_strtoul(cp, endp, base);
+
switch (**endp) {
case 'G' :
result *= 1024;
@@ -343,7 +353,6 @@ static int ustrtoul(const char *cp, char
return result;
}

-
static int parse_num(size_t *num, const char *token)
{
char *endp;
@@ -354,10 +363,10 @@ static int parse_num(size_t *num, const
return -EINVAL;

*num = n;
+
return 0;
}

-
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
@@ -365,25 +374,25 @@ static inline void kill_final_newline(ch
*newline = 0;
}

-
#define parse_err(fmt, args...) do { \
ERROR("block2mtd: " fmt "\n", ## args); \
return 0; \
} while (0)

+#define BLK2MTD_PARAM_SIZE (80 + 12) /* 80 for device, 12 for erase size */
+
#ifndef MODULE
-static int block2mtd_init_called = 0;
-static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
+static int block2mtd_init_called;
+static char block2mtd_paramline[BLK2MTD_PARAM_SIZE];
#endif

-
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ size_t erase_size = PAGE_SIZE;
+ char buf[BLK2MTD_PARAM_SIZE];
char *str = buf;
char *token[2];
char *name;
- size_t erase_size = PAGE_SIZE;
int i, ret;

if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -407,9 +416,8 @@ static int block2mtd_setup2(const char *

if (token[1]) {
ret = parse_num(&erase_size, token[1]);
- if (ret) {
+ if (ret)
parse_err("illegal erase size");
- }
}

add_device(name, erase_size);
@@ -423,34 +431,36 @@ static int block2mtd_setup(const char *v
#ifdef MODULE
return block2mtd_setup2(val);
#else
- /* If more parameters are later passed in via
- /sys/module/block2mtd/parameters/block2mtd
- and block2mtd_init() has already been called,
- we can parse the argument now. */
-
+ /*
+ * If more parameters are later passed in via
+ * /sys/module/block2mtd/parameters/block2mtd
+ * and block2mtd_init() has already been called,
+ * we can parse the argument now:
+ */
if (block2mtd_init_called)
return block2mtd_setup2(val);

- /* During early boot stage, we only save the parameters
- here. We must parse them later: if the param passed
- from kernel boot command line, block2mtd_setup() is
- called so early that it is not possible to resolve
- the device (even kmalloc() fails). Deter that work to
- block2mtd_setup2(). */
-
+ /*
+ * During early boot stage, we only save the parameters
+ * here. We must parse them later: if the param passed
+ * from kernel boot command line, block2mtd_setup() is
+ * called so early that it is not possible to resolve
+ * the device (even kmalloc() fails). Deter that work to
+ * block2mtd_setup2():
+ */
strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

return 0;
#endif
}

-
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");

static int __init block2mtd_init(void)
{
int ret = 0;
+
INFO("version " VERSION);

#ifndef MODULE
@@ -462,7 +472,6 @@ static int __init block2mtd_init(void)
return ret;
}

-
static void __devexit block2mtd_exit(void)
{
struct list_head *pos, *next;
@@ -470,6 +479,7 @@ static void __devexit block2mtd_exit(voi
/* Remove the MTD devices */
list_for_each_safe(pos, next, &blkmtd_device_list) {
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
+
block2mtd_sync(&dev->mtd);
del_mtd_device(&dev->mtd);
INFO("mtd%d: [%s] removed", dev->mtd.index,
@@ -479,7 +489,6 @@ static void __devexit block2mtd_exit(voi
}
}

-
module_init(block2mtd_init);
module_exit(block2mtd_exit);

2008-03-26 10:25:39

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> From: Ingo Molnar <[email protected]>
> Date: Tue, 25 Mar 2008 14:17:08 +0100
>
> > There can be fluctuations and artifacts, and obviously this is just
> > another (arbitrary) static metric that has no forced relationship with
> > real code quality - but in my experience it's surprisingly close to
> > reality - closer than any other code metric i've seen.
>
> And yet you used it to claim that the sparc64 port is an
> unmaintainable pile of poo.

i did not claim that at all - the checkpatch average of sparc64 is pretty
good:

                 errors   lines of code   errors/KLOC
arch/sparc64/      1457           49785          29.2

Sparc64 is one of the cleanest architectures, in terms of average
checkpatch.pl code-quality.

the list i generated is the '20 worst files' (out of 90 sparc64 files);
those are the files in need of cleanups (according to that metric) - and
a quick manual glance confirmed that impression.

Or is it your position that every single file in sparc64 is squeaky
clean and that each of the 1457 cleanliness problems that checkpatch.pl
reported there is bogus?

Ingo

2008-03-26 10:27:22

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Wed, 26 March 2008 09:52:49 +0000, Andy Whitcroft wrote:
> On Tue, Mar 25, 2008 at 05:07:35PM +0100, Jörn Engel wrote:
> >
> > (foo*) should be (foo *)
> > What does that extra space gain us?
>
> It really gains us nothing, however that is not really the point.
> The point is that consistency is good: with the space is the more normal
> 'C' usage, without it for 'C++'; something to do with the implication that
> (foo *) is a pointer to a foo (separate things), and (foo*) is a thing
> of type pointer to foo (one thing) which is more object oriented.
>
> The "norm" is with and so it makes sense to maintain it that way. A lot
> of the layout and style choises are arbitrary, and disliked by many of us,
> but we follow the style to maintain that common feel.

Then I'll happily ignore it. Not having the space gains me one column.
It is absolutely minimal, sure. But when the alternative is based on
pure whim...

> > ERROR: trailing statements should be on next line
> > #5000: FILE: fs/logfs/readwrite.c:203:
> > + } else while (unlikely(TestSetPageLocked(page))) {
> >
> > We have an explicit exception for "else if". "else while" makes imo
> > just as much (or as little) sense.
>
> "else if" is at least creating an additional arm of the same control
> structure. else while is mixing two different paradigms.

Fairly weak grounds to argue on. Not that mine are much stronger, I
just default to less (shorter, fewer indentations, etc.) when lacking a
reason to use more (characters, lines, indentations, etc.).

> > ERROR: need space after that ',' (ctx:VxV)
> > #5801: FILE: fs/logfs/readwrite.c:1004:
> > + ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix,level);
> >
> > One of those examples where a missing space is the lesser of two or
> > three evils.
>
> Because to add the space would mean breaking the line to avoid exceeding
> 80 characters?

Or breaking the line. Either of those choices sucks. Well, breaking
the line is often the lesser of those evils, but in this particular
function it looks worse to me - and I have to stare at it often enough
to care.

The best strategy usually is to rethink the code and reduce the
indentation, number of arguments or length of identifiers. I just don't
see a good way of doing that without resorting to
ret = logfs_segment_read(i, p, w->o, b, l);

Probably nothing checkpatch should worry about. Although I would have
been happy to have finer-grained options to enable/disable particular
warnings on the command line. Right now I commented out several lines
in checkpatch.pl.

Jörn

--
Joern's library part 7:
http://www.usenix.org/publications/library/proceedings/neworl/full_papers/mckusick.a

2008-03-26 10:40:00

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Ingo Molnar <[email protected]>
Date: Wed, 26 Mar 2008 11:25:14 +0100

> Or is it your position that every single file in sparc64 is squeaky
> clean and that each of the 1457 cleanliness problems that checkpatch.pl
> reported there is bogus?

No, but last time I checked sparc64 was one of the few arches
that build themselves with -Werror.

2008-03-26 10:48:40

by Al Viro

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup

On Wed, Mar 26, 2008 at 11:14:52AM +0100, Ingo Molnar wrote:
> /* Info for the block device */
> struct block2mtd_dev {
> - struct list_head list;
> - struct block_device *blkdev;
> - struct mtd_info mtd;
> - struct mutex write_mutex;
> + struct list_head list;
> + struct block_device *blkdev;
> + struct mtd_info mtd;
> + /* serializes writes with each other and also with erase: */
> + struct mutex write_mutex;
> };

Why the hell?

> -static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
> - loff_t to, size_t len, size_t *retlen)
> +static int
> +_block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, loff_t to,
> + size_t len, size_t *retlen)

That's actually worse... BTW, single-underscore-... for identifiers? Odd.

> -static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
> +static int
> +block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
> size_t *retlen, const u_char *buf)

Again, why split it that way?

> - dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
> - dev->mtd.erasesize = erase_size;
> - dev->mtd.writesize = 1;
> - dev->mtd.type = MTD_RAM;
> - dev->mtd.flags = MTD_CAP_RAM;
> - dev->mtd.erase = block2mtd_erase;
> - dev->mtd.write = block2mtd_write;
> - dev->mtd.writev = default_mtd_writev;
> - dev->mtd.sync = block2mtd_sync;
> - dev->mtd.read = block2mtd_read;
> - dev->mtd.priv = dev;
> - dev->mtd.owner = THIS_MODULE;
> + dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
> + dev->mtd.erasesize = erase_size;
> + dev->mtd.writesize = 1;
> + dev->mtd.type = MTD_RAM;
> + dev->mtd.flags = MTD_CAP_RAM;
> + dev->mtd.erase = block2mtd_erase;
> + dev->mtd.write = block2mtd_write;
> + dev->mtd.writev = default_mtd_writev;
> + dev->mtd.sync = block2mtd_sync;
> + dev->mtd.read = block2mtd_read;
> + dev->mtd.priv = dev;
> + dev->mtd.owner = THIS_MODULE;

Bogus.

> - if (ret) {
> + if (ret)
> parse_err("illegal erase size");
> - }
> }

BTW, I certainly wouldn't inflict that on patches; at some point in a
series the body of an if may shrink to one line only to get longer in
the next patch. Removing and restoring {} would only add noise.

OTOH, you've missed quite a few of my pet peeves, starting with
u_char in the quoted part...

2008-03-26 10:56:56

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* David Miller <[email protected]> wrote:

> From: Ingo Molnar <[email protected]>
> Date: Tue, 25 Mar 2008 14:45:56 +0100
>
> > and let me give an example with your very own code that you wrote
> > and maintain, drivers/mtd/devices/block2mtd.c:
> >
> >                                    errors   lines of code   errors/KLOC
> > drivers/mtd/devices/block2mtd.c        10             490          20.4
>
> Ingo, this is devolving into a "code I maintain is great, code you
> maintain sucks, checkpatch says so" kind of discussion, please stop.

Firstly, had you read my previous mails you'd realize that a score of
20.4 errors/KLOC is pretty clean code for a driver. (and a manual review
of drivers/mtd/devices/block2mtd.c confirmed that first quick
impression.)

Secondly, it was you who claimed that checkpatch.pl warnings were bogus;
i asked for hard data, which you refused to provide, so i had to come up
with examples to counter that.

What did you expect me to express my checkpatch.pl experience about, if
not about code that i maintain? How did you expect me to show you
that the warnings are correct, if not with code that you maintain? Why
should i talk about code that neither of us is directly interested in?
Do you want me to talk about FreeBSD, so that i dont hurt anyone's
feelings? Feel free to use my code as an example to make your points.

> You're not making any friends by making your arguments this way.

just to make it clear, i'm not here on lkml to "make friends", i'm here
to make Linux better. The two goals are not fundamentally incompatible
but sometimes they do clash ;-)

Ingo

2008-03-26 10:57:53

by Jörn Engel

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup

On Wed, 26 March 2008 10:48:21 +0000, Al Viro wrote:
>
> > -static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
> > - loff_t to, size_t len, size_t *retlen)
> > +static int
> > +_block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, loff_t to,
> > + size_t len, size_t *retlen)
>
> That's actually worse... BTW, single-underscore-... for identifiers? Odd.

Indeed. Must have been my fault.

> OTOH, you've missed quite a few of my pet peeves, starting with
> u_char in the quoted part...

A large-scale s/u_char/void/ over mtd read and write functions wouldn't
hurt, no doubt. Currently every other driver contains a cast to
(u_char*) or three for no gain afaics.

Jörn

--
Never argue with idiots - first they drag you down to their level,
then they beat you with experience.
-- unknown

2008-03-26 11:01:21

by Ingo Molnar

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup


* Al Viro <[email protected]> wrote:

> On Wed, Mar 26, 2008 at 11:14:52AM +0100, Ingo Molnar wrote:
> > /* Info for the block device */
> > struct block2mtd_dev {
> > - struct list_head list;
> > - struct block_device *blkdev;
> > - struct mtd_info mtd;
> > - struct mutex write_mutex;
> > + struct list_head list;
> > + struct block_device *blkdev;
> > + struct mtd_info mtd;
> > + /* serializes writes with each other and also with erase: */
> > + struct mutex write_mutex;
> > };
>
> Why the hell?

the vertical alignment? For the same reason some of the key VFS data
structures in include/linux/fs.h are aligned vertically:

struct inode
struct file
struct super_block
struct address_space
struct block_device

there are several advantages to aligning structure fields vertically
(and the same applies to bulk initializers of structures) - should i
list them?

Ingo

2008-03-26 11:03:20

by Ingo Molnar

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup


* Al Viro <[email protected]> wrote:

> > - if (ret) {
> > + if (ret)
> > parse_err("illegal erase size");
> > - }
> > }
>
> BTW, I certainly wouldn't inflict that on patches; at some point in
> series the body of if may shrink to one line only to get longer in the
> next patch. Removing and restoring {} would only add noise.

agreed, we dont enforce it strictly - it is the end result that must be
clean.

[ OTOH, when i create a patch series i personally always try to make
each patch clean, because one never knows when a later patch gets
delayed or dropped altogether. ]

Ingo

2008-03-26 11:09:39

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Tue, Mar 25, 2008 at 02:12:58PM +0100, Jörn Engel wrote:
> CodingStyle has gone too far is this:
> for (i=0; i<10; i++)

it's a very good argument why we need a consistent style. The above is
unreadable crap that hurts my eyes.

2008-03-26 11:10:44

by Ingo Molnar

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup


* Al Viro <[email protected]> wrote:

> > -static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
> > - loff_t to, size_t len, size_t *retlen)
> > +static int
> > +_block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, loff_t to,
> > + size_t len, size_t *retlen)
>
> That's actually worse... BTW, single-underscore-... for identifiers?
> Odd.

(yep, that's odd.)

> > -static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
> > +static int
> > +block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
> > size_t *retlen, const u_char *buf)
>
> Again, why split it that way?

these are really nuances, so unless you are interested in such nuances
nowhere found in CodingStyle, stop reading here :-)

i personally try to minimize the number and complexity of function
prototype patterns, while still trying to keep the linecount low. So if
a function prototype wants to be multi-line, it's not a "simple one-line
function prototype" anymore, so i use the same template for everything:

type
function_name(vars ...
more vars ...)
{

[ having the 'type' separately makes it easy to judge the return type of
a function (especially with syntax highlighting active). Aligned
variables are an efficient extension of the 'line' concept that does
not mix the function_name with the variables. ]

incidentally, a natural simplified variant of that is the following:

type
function_name(vars...)
{

which tends to stay cleanly 2-line and looks tidier and shorter than
the:

type function_name(vars...
more vars ...)
{

form. The preferred form is of course:

type function_name(vars...)
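
concretely - a compilable sketch, with invented helpers, of the three
forms side by side:

#include <stddef.h>

/* multi-line: return type on its own line, variables aligned */
static int
copy_words(unsigned long *dst, const unsigned long *src,
           size_t nwords)
{
        size_t i;

        for (i = 0; i < nwords; i++)
                dst[i] = src[i];
        return 0;
}

/* the simplified 2-line variant */
static size_t
word_count(size_t bytes)
{
        return bytes / sizeof(unsigned long);
}

/* the preferred one-line form */
static int is_empty(size_t n)
{
        return n == 0;
}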

Ingo

2008-03-26 11:14:32

by Jiri Slaby

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup

On 03/26/2008 12:10 PM, Ingo Molnar wrote:
> type
> function_name(vars...)
> {
>
> which tends to stay cleanly 2-line and looks tidier and shorter than
> the:
>
> type function_name(vars...
> more vars ...)
> {
>
> form. The preferred form is of course:
>
> type function_name(vars...)

Anyway I wouldn't mix that BSD style with this one in one particular file.
(Besides the fact that I dislike the BSD one.)

2008-03-26 11:23:36

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Jörn Engel <[email protected]> wrote:

> > > (foo*) should be (foo *)
> > > What does that extra space gain us?
> >
> > It really gains us nothing, however that is not really the point.
> > The point is that consistancy is good, with the space is the more
> > normal 'C' usage, without for 'C++'; something to do with the
> > implication that (foo *) is a pointer to a foo (separate things),
> > and (foo*) is a thing of type pointer to foo (one thing) which is
> > more object oriented.
> >
> > The "norm" is with and so it makes sense to maintain it that way. A
> > lot of the layout and style choises are arbitrary, and disliked by
> > many of us, but we follow the style to maintain that common feel.
>
> Then I'll happily ignore it. Not having the space gains me one
> column. It is absolutely minimal, sure. But when the alternative is
> based on pure whim...

you seem to be confused here. Consistency is not a 'whim', and it is
often about "arbitrary" choices that also have some ergonomic component
(but sometimes not). The following form:

if(x)
{
MyFunction1();
MyFunction2();
}

can be argued to be just as clean (and just as ergonomic) as:

if (x) {
my_function_1();
my_function_2();
}

if you use it consistently throughout the _whole_ codebase.

what does not make sense is to _mix_ different coding styles, especially
within the same source code block - which you do in that specific file.

or is it your argument that consistent coding style is bad? That
argument has been settled long ago with the creation of
Documentation/CodingStyle. All kernel code is supposed to follow that
style, unless the resulting line of code looks clearly _wrong_. Your
arguments seem to center around "hey, my way looks similarly good so
i'll do that instead because i'm the maintainer" - and that argument
does not fly. CodingStyle is definitely not gospel and common sense
should be applied, but _arbitrarily_ and _intentionally_ deviating from
it is considered bad manners and hurts Linux as a whole.

granted, especially in the driver space, where there's a lack of
maintenance resources, all such secondary rules are weaker - because a
somewhat quirky maintainer is still much better to Linux than no
maintainer. But the closer you get to the core kernel, the higher the
code quality stakes get, and the stricter (and the more consistent)
these style requirements become as well.

Ingo

2008-03-26 11:29:19

by Ingo Molnar

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only


* Christoph Hellwig <[email protected]> wrote:

> On Tue, Mar 25, 2008 at 02:12:58PM +0100, J??rn Engel wrote:
> > CodingStyle has gone too far is this:
> > for (i=0; i<10; i++)
>
> it's a very good argument why we need a consistant style. The above is
> unreadable crap that hurts my eyes.

same for me.

and PLEASE, folks, even if you _don't_ find that line unreadable, and
even if you have full power and control over your own subsystem that you
maintain and can NAK cleanup patches at whim, still _PLEASE_ follow the
Linux coding style, because inconsistency hurts the eyes of a
substantial proportion of kernel developers. Other folks might have to
fix bugs in your code, other folks might want to reuse your code or you
might go into a different subsystem and give maintainership to someone
else, etc. etc. Consistent coding style is one of the few concepts that
only has advantages and no disadvantages at all.

Ingo

2008-03-26 11:42:47

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Wed, 26 March 2008 12:23:11 +0100, Ingo Molnar wrote:
> * Jörn Engel <[email protected]> wrote:
>
> >
> > Then I'll happily ignore it. Not having the space gains me one
> > column. It is absolutely minimal, sure. But when the alternative is
> > based on pure whim...
>
> you seem to be confused here. Consistency is not a 'whim', and it is
> often just about "arbitrary" choices that also have some ergonomic
> component as well (but sometimes not). The following form:

I have an objective reason to prefer one over the other. Your only
reason is consistency. If consistency were everything, we might as well
let the often-abused 1000 monkeys type up a coding style document and
stick to that.

But most of our rules exist for reasons beyond mere consistency.
Following the rules, on average, makes the code better. In particular
the "less" rules (fewer lines, less indentation, etc.) result in more
code fitting any arbitrary editor window, which means more control flow
our minds can ponder without scrolling.
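
A standalone sketch (invented names) of one such "less" rule - an early
return drops an indentation level and a few lines versus the nested form:

static int nested_set(int *p)
{
        int ret = -1;

        if (p) {
                *p = 0;
                ret = 0;
        }
        return ret;
}

static int flat_set(int *p)
{
        if (!p)
                return -1;
        *p = 0;
        return 0;
}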

Do you have a non-consistency based reason to prefer the longer version?
If not, then we should settle on the short version, which does have a
minimal advantage.

Jörn

--
Joern's library part 13:
http://www.chip-architect.com/

2008-03-26 11:48:18

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Jörn Engel <[email protected]>
Date: Wed, 26 Mar 2008 12:41:42 +0100

> Do you have a non-consistency based reason to prefer the longer
> version?

Inconsistent spacing fools people's eyes and leads to bugs,
more often than not.

After 15 years of kernel development, I can remember at
least 10 or so multi-week-debugging sessions that could
have been curtailed had I not mis-read a poorly spaced
C statement.

It matters in practical terms, not just consistency terms,
trust me.
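
An invented example of this kind of mis-read: == binds tighter than &,
so the tightly spaced test below parses as flags & (0x4 == 0), which is
always 0, while the spacing suggests (flags & 0x4) == 0:

static int bit_clear_buggy(unsigned int flags)
{
        return flags&0x4 == 0;          /* parses as flags & (0x4 == 0) */
}

static int bit_clear(unsigned int flags)
{
        return (flags & 0x4) == 0;      /* what the eye expected */
}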

2008-03-26 11:59:22

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Wed, 26 March 2008 04:48:02 -0700, David Miller wrote:
>
> > Do you have a non-consistency based reason to prefer the longer
> > version?
>
> Inconstent spacing fools people's eyes and leads to bugs,
> more often than not.
>
> After 15 years of kernel development, I can remember at
> least 10 or so multi-week-debugging sessions that could
> have been curtailed had I not mis-read a poorly spaced
> C statement.
>
> It matters in practical terms, not just consistency terms,
> trust me.

Sure, I buy that. What I'm arguing here is why we have to be
consistently long instead of consistently short. CodingStyle seems to
be silent on the question. And a quick grep shows that while being in
the minority, I seem to be in a sizeable minority:
joern@Dublin:/usr/src/kernel/logfs$ sgrep ' \*)' .|wc
51687 346050 4213259
joern@Dublin:/usr/src/kernel/logfs$ sgrep '[^ ]\*)' .|wc
5838 33165 472462

What is the reason why (void *)foo is better than (void*)foo? Just the
fact that by random chance one of them became more common in our
codebase and the minority always has to give in?

Jörn

--
A victorious army first wins and then seeks battle.
-- Sun Tzu

2008-03-26 12:01:24

by David Miller

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

From: Jörn Engel <[email protected]>
Date: Wed, 26 Mar 2008 12:58:53 +0100

> Sure, I buy that. What I'm arguing here is why we have to be
> consistently long instead of consistently short. CodingStyle seems to
> be silent on the question.

Becauseitmoreeasilyallowsyoureyestoseethedifferentoperators.

2008-03-26 12:03:49

by Will Newton

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Wed, Mar 26, 2008 at 11:58 AM, Jörn Engel <[email protected]> wrote:

> What is the reason why (void *)foo is better than (void*)foo? Just that
> fact that by random chance one them them became more common in our
> codebase and the minority always has to give in?

It doesn't matter. This is the crux of all interminable coding style
discussions. There is no objectively best coding style. Pick one,
stick to it. That's all that matters.

Linux happens to have a particular coding style, GNU has another. When
I hack Linux code I use one style, when I hack GNU code I use another.
Arguing about it is a waste of time.

2008-03-26 14:18:46

by Jörn Engel

[permalink] [raw]
Subject: Re: [PATCH 109/148] include/asm-x86/serial.h: checkpatch cleanups - formatting only

On Wed, 26 March 2008 05:01:16 -0700, David Miller wrote:
>
> > Sure, I buy that. What I'm arguing here is why we have to be
> > consistently long instead of consistently short. CodingStyle seems to
> > be silent on the question.
>
> Becauseitmoreeasilyallowsyoureyestoseethedifferentoperators.

How many different operators are there in a cast? ;)

But you seem to be arguing the i=0 case. Took me a while to notice
that. Fair enough.

Jörn

--
Maintenance in other professions and of other articles is concerned with
the return of the item to its original state; in Software, maintenance
is concerned with moving an item away from its original state.
-- Les Belady

2008-03-26 16:32:13

by Joe Perches

[permalink] [raw]
Subject: Re: [patch] bkl2mtd: cleanup

On Wed, 2008-03-26 at 12:10 +0100, Ingo Molnar wrote:
> not a "simple one-line function prototype" anymore,
> so i use the same template for everything:
>
> type
> function_name(vars ...
> more vars ...)
> {

It seems the most common linux style keeps type and function
on the same line where possible and arguments on subsequent
lines when necessary.

type function_name(args...)

type function_name(args_to_col80,
more_args...)

Perhaps that should be your default template.
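
for instance (a compilable sketch, names invented):

#include <string.h>

static int demo_reset(int id)
{
        return id != 0;
}

static int demo_transfer(void *dst, const void *src, size_t len,
                         size_t offset, int flags)
{
        memcpy((char *)dst + offset, src, len);
        return flags;
}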

2008-03-30 04:10:25

by Oleg Verych

[permalink] [raw]
Subject: style of function definitions (Re: [patch] bkl2mtd: cleanup)

Ingo Molnar @ Wed, 26 Mar 2008 12:10:21 +0100
>
> * Al Viro <[email protected]> wrote:
>
[]
>> > -static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>> > +static int
>> > +block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>> > size_t *retlen, const u_char *buf)
>>
>> Again, why split it that way?
>
> these are really nuances, so unless you are interested in such nuances
> nowhere found in CodingStyle, stop reading here :-)

Nuances or not, there are all kinds of styles in Linux. A small journey
from a simple `grep` and a trivial multi-line `sed` to a big discovery
can be found here:

http://kernelnewbies.org/olecom#Function_definitions

No matter what the coding style is, the winner is "linux-2.6/fs/xfs",
and i doubt anyone can fix that :)

Maybe meta-patching by `sed` scripts in `git`? Just kidding.
--
-o--=O`C
#oo'L O
<___=E M

2008-03-30 05:31:42

by Jan Engelhardt

[permalink] [raw]
Subject: Re: style of function definitions (Re: [patch] bkl2mtd: cleanup)


On Sunday 2008-03-30 06:29, Oleg Verych wrote:
> []
>>>> -static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>>>> +static int
>>>> +block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>>>> size_t *retlen, const u_char *buf)
>>>
>>> Again, why split it that way?
>>
>> these are really nuances, so unless you are interested in such nuances
>> nowhere found in CodingStyle, stop reading here :-)
>
> Nuances, or not, there are all kinds of stuff in Linux. Very small
> journey from simple `grep` and trivial multi-line `sed` to a huge
> discovery can be found here:
>
> http://kernelnewbies.org/olecom#Function_definitions
>
> No matter what coding style is, the winner is "linux-2.6/fs/xfs", and
> i doubt anyone can fix that :)

They at least have an excuse of having a historical unix background :)

2008-03-31 03:37:54

by Oleg Verych

[permalink] [raw]
Subject: Re: style of function definitions (Re: [patch] bkl2mtd: cleanup)

Jan Engelhardt @ Sun, Mar 30, 2008 at 6:31 AM:

> > No matter what coding style is, the winner is "linux-2.6/fs/xfs", and
> > i doubt anyone can fix that :)
>
> They at least have an excuse of having a historical unix background :)

i.e. a zoo of proprietary versions, expensive, without reliable
implementations of basic tools and compilers, etc.

Historical UNIX(R) isn't that good at all. Now it's POSIX(R) holding all
new development back.

http://kernelnewbies.org/olecom#trailing_comments_crap
http://kernelnewbies.org/olecom#trailing_multiline_comments_crap

Q: why is every parameter on a new line?
A: thus we get a higher code line count

Q: why trailing comments on every parameter?
A: our C compiler has (had) an 8-character limit for variable names

Q: don't you think a stream editor can handle that?
A: our tools have no such thing

Q: you have multi-line trailing comments there; don't you know
they complicate line-oriented grep-like text processing?
why don't you write freely in a comment block before the function definition?
A: why do you care?
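
A standalone sketch (invented function) of the style being poked at -
one parameter per line, each with a trailing comment - next to the
compact equivalent:

static int
xfs_style_copy(
        char            *dst,   /* destination buffer */
        const char      *src,   /* source buffer */
        unsigned int    len)    /* bytes to copy */
{
        while (len--)
                *dst++ = *src++;
        return 0;
}

static int compact_copy(char *dst, const char *src, unsigned int len)
{
        while (len--)
                *dst++ = *src++;
        return 0;
}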

--
-o--=O`C
#oo'L O
<___=E M