2019-05-21 06:19:36

by Masahiro Yamada

Subject: [PATCH] powerpc/mm: mark more tlb functions as __always_inline

With CONFIG_OPTIMIZE_INLINING enabled, Laura Abbott reported an error
with gcc 9.1.1:

arch/powerpc/mm/book3s64/radix_tlb.c: In function '_tlbiel_pid':
arch/powerpc/mm/book3s64/radix_tlb.c:104:2: warning: asm operand 3 probably doesn't match constraints
104 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
| ^~~
arch/powerpc/mm/book3s64/radix_tlb.c:104:2: error: impossible constraint in 'asm'

Fixing _tlbiel_pid() is enough to address the warning above, but I
inlined more functions to fix all potential issues.

To meet the 'i' (immediate) constraint for the asm operands, functions
propagating 'ric' must always be inlined.
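
As background, here is a minimal, self-contained sketch of the constraint
problem (the identifiers and the asm body are illustrative, not taken from
this patch, and it assumes an optimizing build, as the kernel always uses):

/* as provided by <linux/compiler_types.h> in a kernel build */
#define __always_inline inline __attribute__((__always_inline__))

/*
 * "i" demands a compile-time immediate, so 'ric' must be constant-folded
 * into the asm.  That only happens when this wrapper is inlined into a
 * caller passing a literal; with plain 'inline' and
 * CONFIG_OPTIMIZE_INLINING the compiler may emit an out-of-line copy,
 * where 'ric' is a runtime value and the constraint cannot be satisfied.
 */
static __always_inline void flush_with_ric(unsigned long ric)
{
	asm volatile("# tlb flush stub, ric=%0" : : "i" (ric) : "memory");
}

static void example_caller(void)
{
	flush_with_ric(2);	/* the literal propagates into the "i" operand */
}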

Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
Reported-by: Laura Abbott <[email protected]>
Signed-off-by: Masahiro Yamada <[email protected]>
---

arch/powerpc/mm/book3s64/hash_native.c | 8 +++--
arch/powerpc/mm/book3s64/radix_tlb.c | 44 +++++++++++++++-----------
2 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index aaa28fd918fe..bc2c35c0d2b1 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -60,9 +60,11 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
* tlbiel instruction for hash, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
- unsigned int pid,
- unsigned int ric, unsigned int prs)
+static __always_inline void tlbiel_hash_set_isa300(unsigned int set,
+ unsigned int is,
+ unsigned int pid,
+ unsigned int ric,
+ unsigned int prs)
{
unsigned long rb;
unsigned long rs;
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 4d841369399f..91c4242c1be3 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -29,9 +29,11 @@
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
- unsigned int pid,
- unsigned int ric, unsigned int prs)
+static __always_inline void tlbiel_radix_set_isa300(unsigned int set,
+ unsigned int is,
+ unsigned int pid,
+ unsigned int ric,
+ unsigned int prs)
{
unsigned long rb;
unsigned long rs;
@@ -150,8 +152,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

-static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
- unsigned long ric)
+static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+ unsigned long ric)
{
unsigned long rb,rs,prs,r;

@@ -167,8 +169,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
}


-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

@@ -183,8 +185,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

-static inline void __tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

@@ -199,8 +201,10 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

-static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_lpid_va(unsigned long va,
+ unsigned long lpid,
+ unsigned long ap,
+ unsigned long ric)
{
unsigned long rb,rs,prs,r;

@@ -239,7 +243,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;

@@ -341,7 +345,8 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

-static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+static __always_inline void _tlbiel_lpid_guest(unsigned long lpid,
+ unsigned long ric)
{
int set;

@@ -381,8 +386,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

-static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);

@@ -413,8 +418,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

-static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);

@@ -424,8 +429,9 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

-static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ unsigned long psize,
+ unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);

--
2.17.1



2019-05-21 06:54:58

by Christophe Leroy

Subject: Re: [PATCH] powerpc/mm: mark more tlb functions as __always_inline



On 21/05/2019 at 08:16, Masahiro Yamada wrote:
> With CONFIG_OPTIMIZE_INLINING enabled, Laura Abbott reported an error
> with gcc 9.1.1:
>
> arch/powerpc/mm/book3s64/radix_tlb.c: In function '_tlbiel_pid':
> arch/powerpc/mm/book3s64/radix_tlb.c:104:2: warning: asm operand 3 probably doesn't match constraints
> 104 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
> | ^~~
> arch/powerpc/mm/book3s64/radix_tlb.c:104:2: error: impossible constraint in 'asm'
>
> Fixing _tlbiel_pid() is enough to address the warning above, but I
> inlined more functions to fix all potential issues.
>
> To meet the 'i' (immediate) constraint for the asm operands, functions
> propagating 'ric' must always be inlined.
>
> Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
> Reported-by: Laura Abbott <[email protected]>
> Signed-off-by: Masahiro Yamada <[email protected]>
> ---
>
> arch/powerpc/mm/book3s64/hash_native.c | 8 +++--
> arch/powerpc/mm/book3s64/radix_tlb.c | 44 +++++++++++++++-----------
> 2 files changed, 30 insertions(+), 22 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
> index aaa28fd918fe..bc2c35c0d2b1 100644
> --- a/arch/powerpc/mm/book3s64/hash_native.c
> +++ b/arch/powerpc/mm/book3s64/hash_native.c
> @@ -60,9 +60,11 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
> * tlbiel instruction for hash, set invalidation
> * i.e., r=1 and is=01 or is=10 or is=11
> */
> -static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
> - unsigned int pid,
> - unsigned int ric, unsigned int prs)
> +static __always_inline void tlbiel_hash_set_isa300(unsigned int set,
> + unsigned int is,
> + unsigned int pid,
> + unsigned int ric,
> + unsigned int prs)

Please don't split the line more than it already is.

powerpc accepts lines up to 90 chars, see arch/powerpc/tools/checkpatch.pl

> {
> unsigned long rb;
> unsigned long rs;
> diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
> index 4d841369399f..91c4242c1be3 100644
> --- a/arch/powerpc/mm/book3s64/radix_tlb.c
> +++ b/arch/powerpc/mm/book3s64/radix_tlb.c
> @@ -29,9 +29,11 @@
> * tlbiel instruction for radix, set invalidation
> * i.e., r=1 and is=01 or is=10 or is=11
> */
> -static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
> - unsigned int pid,
> - unsigned int ric, unsigned int prs)
> +static __always_inline void tlbiel_radix_set_isa300(unsigned int set,
> + unsigned int is,
> + unsigned int pid,
> + unsigned int ric,
> + unsigned int prs)

Please don't split the line more than it already is.

> {
> unsigned long rb;
> unsigned long rs;
> @@ -150,8 +152,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
> trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> - unsigned long ric)
> +static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> + unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -167,8 +169,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> }
>
>
> -static inline void __tlbiel_va(unsigned long va, unsigned long pid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
> + unsigned long ap, unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -183,8 +185,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
> trace_tlbie(0, 1, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbie_va(unsigned long va, unsigned long pid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
> + unsigned long ap, unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -199,8 +201,10 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
> trace_tlbie(0, 0, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbie_lpid_va(unsigned long va,
> + unsigned long lpid,
> + unsigned long ap,
> + unsigned long ric)

Please don't split the line more than it already is.


> {
> unsigned long rb,rs,prs,r;
>
> @@ -239,7 +243,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
> /*
> * We use 128 set in radix mode and 256 set in hpt mode.
> */
> -static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
> +static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
> {
> int set;
>
> @@ -341,7 +345,8 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
> asm volatile("eieio; tlbsync; ptesync": : :"memory");
> }
>
> -static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
> +static __always_inline void _tlbiel_lpid_guest(unsigned long lpid,
> + unsigned long ric)

Please don't split the line more than it already is.

> {
> int set;
>
> @@ -381,8 +386,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
> __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
> }
>
> -static inline void _tlbiel_va(unsigned long va, unsigned long pid,
> - unsigned long psize, unsigned long ric)
> +static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
> + unsigned long psize, unsigned long ric)
> {
> unsigned long ap = mmu_get_ap(psize);
>
> @@ -413,8 +418,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
> __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
> }
>
> -static inline void _tlbie_va(unsigned long va, unsigned long pid,
> - unsigned long psize, unsigned long ric)
> +static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
> + unsigned long psize, unsigned long ric)
> {
> unsigned long ap = mmu_get_ap(psize);
>
> @@ -424,8 +429,9 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
> asm volatile("eieio; tlbsync; ptesync": : :"memory");
> }
>
> -static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
> - unsigned long psize, unsigned long ric)
> +static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
> + unsigned long psize,
> + unsigned long ric)

Please don't split the line more than it already is.

> {
> unsigned long ap = mmu_get_ap(psize);
>
>

Christophe

2019-05-21 07:26:38

by Joe Perches

Subject: Re: [PATCH] powerpc/mm: mark more tlb functions as __always_inline

On Tue, 2019-05-21 at 08:53 +0200, Christophe Leroy wrote:
> powerpc accepts lines up to 90 chars, see arch/powerpc/tools/checkpatch.pl

arch/powerpc/tools/checkpatch.sh



2019-05-21 07:30:49

by Masahiro Yamada

Subject: Re: [PATCH] powerpc/mm: mark more tlb functions as __always_inline

On Tue, May 21, 2019 at 3:54 PM Christophe Leroy
<[email protected]> wrote:
>
>
>
> On 21/05/2019 at 08:16, Masahiro Yamada wrote:
> > With CONFIG_OPTIMIZE_INLINING enabled, Laura Abbott reported an error
> > with gcc 9.1.1:
> >
> > arch/powerpc/mm/book3s64/radix_tlb.c: In function '_tlbiel_pid':
> > arch/powerpc/mm/book3s64/radix_tlb.c:104:2: warning: asm operand 3 probably doesn't match constraints
> > 104 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
> > | ^~~
> > arch/powerpc/mm/book3s64/radix_tlb.c:104:2: error: impossible constraint in 'asm'
> >
> > Fixing _tlbiel_pid() is enough to address the warning above, but I
> > inlined more functions to fix all potential issues.
> >
> > To meet the 'i' (immediate) constraint for the asm operands, functions
> > propagating 'ric' must always be inlined.
> >
> > Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
> > Reported-by: Laura Abbott <[email protected]>
> > Signed-off-by: Masahiro Yamada <[email protected]>
> > ---
> >
> > arch/powerpc/mm/book3s64/hash_native.c | 8 +++--
> > arch/powerpc/mm/book3s64/radix_tlb.c | 44 +++++++++++++++-----------
> > 2 files changed, 30 insertions(+), 22 deletions(-)
> >
> > diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
> > index aaa28fd918fe..bc2c35c0d2b1 100644
> > --- a/arch/powerpc/mm/book3s64/hash_native.c
> > +++ b/arch/powerpc/mm/book3s64/hash_native.c
> > @@ -60,9 +60,11 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
> > * tlbiel instruction for hash, set invalidation
> > * i.e., r=1 and is=01 or is=10 or is=11
> > */
> > -static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
> > - unsigned int pid,
> > - unsigned int ric, unsigned int prs)
> > +static __always_inline void tlbiel_hash_set_isa300(unsigned int set,
> > + unsigned int is,
> > + unsigned int pid,
> > + unsigned int ric,
> > + unsigned int prs)
>
> Please don't split the line more than it already is.
>
> powerpc accepts lines up to 90 chars, see arch/powerpc/tools/checkpatch.pl

Ugh, I did not know this. Horrible.

The Linux coding style should be global in the kernel tree.
No subsystem should adopt its own coding style.


--
Best Regards
Masahiro Yamada

2019-05-21 07:44:00

by Michael Ellerman

Subject: Re: [PATCH] powerpc/mm: mark more tlb functions as __always_inline

Masahiro Yamada <[email protected]> writes:
> On Tue, May 21, 2019 at 3:54 PM Christophe Leroy
> <[email protected]> wrote:
>> On 21/05/2019 at 08:16, Masahiro Yamada wrote:
>> > With CONFIG_OPTIMIZE_INLINING enabled, Laura Abbott reported an error
>> > with gcc 9.1.1:
>> >
>> > arch/powerpc/mm/book3s64/radix_tlb.c: In function '_tlbiel_pid':
>> > arch/powerpc/mm/book3s64/radix_tlb.c:104:2: warning: asm operand 3 probably doesn't match constraints
>> > 104 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
>> > | ^~~
>> > arch/powerpc/mm/book3s64/radix_tlb.c:104:2: error: impossible constraint in 'asm'
>> >
>> > Fixing _tlbiel_pid() is enough to address the warning above, but I
>> > inlined more functions to fix all potential issues.
>> >
>> > To meet the 'i' (immediate) constraint for the asm operands, functions
>> > propagating 'ric' must always be inlined.
>> >
>> > Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
>> > Reported-by: Laura Abbott <[email protected]>
>> > Signed-off-by: Masahiro Yamada <[email protected]>
>> > ---
>> >
>> > arch/powerpc/mm/book3s64/hash_native.c | 8 +++--
>> > arch/powerpc/mm/book3s64/radix_tlb.c | 44 +++++++++++++++-----------
>> > 2 files changed, 30 insertions(+), 22 deletions(-)
>> >
>> > diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
>> > index aaa28fd918fe..bc2c35c0d2b1 100644
>> > --- a/arch/powerpc/mm/book3s64/hash_native.c
>> > +++ b/arch/powerpc/mm/book3s64/hash_native.c
>> > @@ -60,9 +60,11 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
>> > * tlbiel instruction for hash, set invalidation
>> > * i.e., r=1 and is=01 or is=10 or is=11
>> > */
>> > -static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
>> > - unsigned int pid,
>> > - unsigned int ric, unsigned int prs)
>> > +static __always_inline void tlbiel_hash_set_isa300(unsigned int set,
>> > + unsigned int is,
>> > + unsigned int pid,
>> > + unsigned int ric,
>> > + unsigned int prs)
>>
>> Please don't split the line more than it already is.
>>
>> powerpc accepts lines up to 90 chars, see arch/powerpc/tools/checkpatch.pl
>
> Ugh, I did not know this. Horrible.
>
> The Linux coding style should be global in the kernel tree.
> No subsystem should adopt its own coding style.

Well that ship sailed long ago.

But we don't have our own coding style; we just don't enforce 80 columns
rigidly. There are cases where a slightly longer line (up to ~90) is
preferable to a split line.

In a case like this, with a long attribute and function name, I think this
is probably the least-worst option:

static __always_inline
void tlbiel_hash_set_isa300(unsigned int set, unsigned int is, unsigned int pid,
                            unsigned int ric, unsigned int prs)
{
...

cheers

2019-05-21 08:00:22

by Joe Perches

Subject: Re: [PATCH] powerpc/mm: mark more tlb functions as __always_inline

On Tue, 2019-05-21 at 16:27 +0900, Masahiro Yamada wrote:
> On Tue, May 21, 2019 at 3:54 PM Christophe Leroy
> > powerpc accepts lines up to 90 chars, see arch/powerpc/tools/checkpatch.pl
>
> Ugh, I did not know this. Horrible.
>
> The Linux coding style should be global in the kernel tree.
> No subsystem should adopt its own coding style.

I don't see a problem with arch/<foo> using 90-column lines.

There are other subsystem-specific variations, like the net/ comment style:

/* multiline comments without initial blank comment lines
 * look like this...
 */

If there were arch-specific drivers with style variations
in, say, drivers/net, then that might be more of an issue.