2017-09-06 03:45:39

by Boris Ostrovsky

Subject: SME/32-bit regression

It appears there is a regression for 32-bit kernels due to SME changes.

I bisected my particular problem (Xen PV guest) to
21729f81ce8ae76a6995681d40e16f7ce8075db4 but I also saw pmd_clear_bad()
errors on baremetal. This seems to be caused by sme_me_mask being an
unsigned long as opposed to phys_addr_t (the actual problem is that
__PHYSICAL_MASK is truncated). When I declare it as u64 and drop the
unsigned long cast in __sme_set()/__sme_clr(), the problem goes away.
(This presumably won't work for non-PAE, which I haven't tried.)
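
[For illustration only, not part of the original mail: a minimal
userspace sketch of the truncation described above, assuming a 32-bit
target where unsigned long is 32 bits and PAE pagetable entries are 64
bits. The __sme_clr_* names are hypothetical stand-ins for the kernel
macros, not the kernel code itself.]

/*
 * Sketch: how casting through unsigned long truncates a 64-bit
 * PAE pagetable entry when unsigned long is only 32 bits wide.
 * Build with e.g. gcc -m32 to reproduce.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;		/* a PAE pte is 64 bits wide */

static unsigned long sme_me_mask;	/* 0 here, but the cast still narrows */

/* Pre-fix shape: the cast drops the upper 32 bits of x. */
#define __sme_clr_broken(x)	((unsigned long)(x) & ~sme_me_mask)

/* Post-fix shape: no narrowing cast, mask widened to 64 bits. */
#define __sme_clr_fixed(x)	((x) & ~(uint64_t)sme_me_mask)

int main(void)
{
	pteval_t pte = 0x100000067ULL;	/* physical frame above 4 GiB */

	/* With -m32 this prints 0x67: the high physical bits are lost,
	 * the kind of damage that makes pmd_clear_bad() fire. */
	printf("broken: %#llx\n", (unsigned long long)__sme_clr_broken(pte));

	/* Prints 0x100000067: the entry survives intact. */
	printf("fixed:  %#llx\n", (unsigned long long)__sme_clr_fixed(pte));
	return 0;
}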


-boris


2017-09-06 09:26:30

by Borislav Petkov

Subject: Re: SME/32-bit regression

On Tue, Sep 05, 2017 at 11:45:07PM -0400, Boris Ostrovsky wrote:
> It appears there is a regression for 32-bit kernels due to SME changes.
>
> I bisected my particular problem

It being? Doesn't boot, splats?

> (Xen PV guest) to
> 21729f81ce8ae76a6995681d40e16f7ce8075db4 but I also saw pmd_clear_bad()
> errors on baremetal. This seems to be caused by sme_me_mask being an
> unsigned long as opposed to phys_addr_t (the actual problem is that
> __PHYSICAL_MASK is truncated). When I declare it as u64 and drop the unsigned
> long cast in __sme_set()/__sme_clr(), the problem goes away. (This presumably
> won't work for non-PAE, which I haven't tried.)

Right, so I think we should do this because those macros should not have
any effect on !CONFIG_AMD_MEM_ENCRYPT setups.

---
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..823eec6ba951 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -35,6 +35,7 @@ static inline unsigned long sme_get_me_mask(void)
return sme_me_mask;
}

+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
@@ -42,6 +43,10 @@ static inline unsigned long sme_get_me_mask(void)
*/
#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif

#endif /* __ASSEMBLY__ */




--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

2017-09-06 09:45:15

by Borislav Petkov

Subject: Re: SME/32-bit regression

Btw,

Tom, pls remind me again, why didn't we make sme_me_mask u64?

Because this is the most logical type for a memory encryption mask which
covers 64 bits... In any case, the below builds fine here.

---
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fcf1f7c..6a77c63540f7 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@

#ifdef CONFIG_AMD_MEM_ENCRYPT

-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;

void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

#else /* !CONFIG_AMD_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

static inline void __init sme_early_encrypt(resource_size_t paddr,
unsigned long size) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd09269757..3fcc8e01683b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
* reside in the .data section so as not to be zeroed out when the .bss
* section is later cleared.
*/
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..265a9cd21cb4 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@

#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */

@@ -30,18 +30,23 @@ static inline bool sme_active(void)
return !!sme_me_mask;
}

-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
{
return sme_me_mask;
}

+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
* entries).
*/
-#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif

#endif /* __ASSEMBLY__ */


--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

2017-09-06 13:55:18

by Boris Ostrovsky

Subject: Re: SME/32-bit regression

On 09/06/2017 05:26 AM, Borislav Petkov wrote:
> On Tue, Sep 05, 2017 at 11:45:07PM -0400, Boris Ostrovsky wrote:
>> It appears there is a regression for 32-bit kernels due to SME changes.
>>
>> I bisected my particular problem
> It being? Doesn't boot, splats?

Xen guest crashes very early, before a splat can be generated.

>
>> (Xen PV guest) to
>> 21729f81ce8ae76a6995681d40e16f7ce8075db4 but I also saw pmd_clear_bad()
>> errors on baremetal. This seems to be caused by sme_me_mask being an
>> unsigned long as opposed to phys_addr_t (the actual problem is that
>> __PHYSICAL_MASK is truncated). When I declare it as u64 and drop the unsigned
>> long cast in __sme_set()/__sme_clr(), the problem goes away. (This presumably
>> won't work for non-PAE, which I haven't tried.)
> Right, so I think we should do this because those macros should not have
> any effect on !CONFIG_AMD_MEM_ENCRYPT setups.

This won't help though if kernel is built with SME support.

-boris

>
> ---
> diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
> index 1255f09f5e42..823eec6ba951 100644
> --- a/include/linux/mem_encrypt.h
> +++ b/include/linux/mem_encrypt.h
> @@ -35,6 +35,7 @@ static inline unsigned long sme_get_me_mask(void)
> return sme_me_mask;
> }
>
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> /*
> * The __sme_set() and __sme_clr() macros are useful for adding or removing
> * the encryption mask from a value (e.g. when dealing with pagetable
> @@ -42,6 +43,10 @@ static inline unsigned long sme_get_me_mask(void)
> */
> #define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
> #define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
> +#else
> +#define __sme_set(x) (x)
> +#define __sme_clr(x) (x)
> +#endif
>
> #endif /* __ASSEMBLY__ */
>
>
>
>

2017-09-06 13:57:56

by Thomas Gleixner

Subject: Re: SME/32-bit regression

On Wed, 6 Sep 2017, Boris Ostrovsky wrote:
> On 09/06/2017 05:26 AM, Borislav Petkov wrote:
> > On Tue, Sep 05, 2017 at 11:45:07PM -0400, Boris Ostrovsky wrote:
> >> It appears there is a regression for 32-bit kernels due to SME changes.
> >>
> >> I bisected my particular problem
> > It being? Doesn't boot, splats?
>
> Xen guest crashes very early, before a splat can be generated.
>
> >
> >> (Xen PV guest) to
> >> 21729f81ce8ae76a6995681d40e16f7ce8075db4 but I also saw pmd_clear_bad()
> >> errors on baremetal. This seems to be caused by sme_me_mask being an
> >> unsigned long as opposed to phys_addr_t (the actual problem is that
> >> __PHYSICAL_MASK is truncated). When I declare it as u64 and drop the unsigned
> >> long cast in __sme_set()/__sme_clr(), the problem goes away. (This presumably
> >> won't work for non-PAE, which I haven't tried.)
> > Right, so I think we should do this because those macros should not have
> > any effect on !CONFIG_AMD_MEM_ENCRYPT setups.
>
> This won't help though if kernel is built with SME support.

Which is not the case for 32-bit. SME depends on 64-bit.
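
[For reference: the dependency Thomas points at lives in
arch/x86/Kconfig. Roughly this stanza in kernels of that era, quoted
from memory rather than from the thread, so treat the exact wording as
approximate:]

config AMD_MEM_ENCRYPT
	bool "AMD Secure Memory Encryption (SME) support"
	depends on X86_64 && CPU_SUP_AMD
	---help---
	  Say yes to enable support for the encryption of system memory.
	  This requires an AMD processor that supports Secure Memory
	  Encryption (SME).

So a 32-bit (X86_32) build can never have CONFIG_AMD_MEM_ENCRYPT set,
and the !CONFIG_AMD_MEM_ENCRYPT stubs are the ones that matter there.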

2017-09-06 14:04:04

by Boris Ostrovsky

Subject: Re: SME/32-bit regression

On 09/06/2017 09:57 AM, Thomas Gleixner wrote:
> On Wed, 6 Sep 2017, Boris Ostrovsky wrote:
>> On 09/06/2017 05:26 AM, Borislav Petkov wrote:
>>> On Tue, Sep 05, 2017 at 11:45:07PM -0400, Boris Ostrovsky wrote:
>>>> It appears there is a regression for 32-bit kernels due to SME changes.
>>>>
>>>> I bisected my particular problem
>>> It being? Doesn't boot, splats?
>> Xen guest crashes very early, before a splat can be generated.
>>
>>>> (Xen PV guest) to
>>>> 21729f81ce8ae76a6995681d40e16f7ce8075db4 but I also saw pmd_clear_bad()
>>>> errors on baremetal. This seems to be caused by sme_me_mask being an
>>>> unsigned long as opposed to phys_addr_t (the actual problem is that
>>>> __PHYSICAL_MASK is truncated). When I declare it as u64 and drop the unsigned
>>>> long cast in __sme_set()/__sme_clr(), the problem goes away. (This presumably
>>>> won't work for non-PAE, which I haven't tried.)
>>> Right, so I think we should do this because those macros should not have
>>> any effect on !CONFIG_AMD_MEM_ENCRYPT setups.
>> This won't help though if kernel is built with SME support.
> Which is not the case for 32bit. SME depends on 64bit

Oh, OK, I didn't realize that.

-boris


2017-09-06 16:44:24

by Borislav Petkov

Subject: Re: SME/32-bit regression

On Wed, Sep 06, 2017 at 04:30:23PM +0000, Lendacky, Thomas wrote:
> Sorry for the top post, I'm on holiday and don't have access to a good
> email client... I went with unsigned long to match all the page table
> related declarations. If changing to u64 doesn't generate any warnings
> or other issues then I'm good with that.

Ok, no worries. Lemme run the smoke-tests on it and test it to see
everything else still works.

Thx.

--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

2017-09-06 18:07:00

by Brijesh Singh

Subject: Re: SME/32-bit regression



On 09/06/2017 11:44 AM, Borislav Petkov wrote:
> On Wed, Sep 06, 2017 at 04:30:23PM +0000, Lendacky, Thomas wrote:
>> Sorry for the top post, I'm on holiday and don't have access to a good
>> email client... I went with unsigned long to match all the page table
>> related declarations. If changing to u64 doesn't generate any warnings
>> or other issues then I'm good with that.
>
> Ok, no worries. Lemme run the smoke-tests on it and test it to see
> everything else still works.
>

I did the following quick run with your patch and everything seems to be
working okay

64-bit build:
-------
1) Baremetal SME *enabled* - System boots fine
a) 32-bit guest launch : successful (KVM HV)
b) 64-bit guest launch : successful (KVM HV)
c) 64-bit SEV guest launch : successful (KVM HV)

2) Baremetal SME *disabled* - System boots fine
a) 32-bit guest launch : successful (KVM HV)
b) 64-bit guest launch : successful (KVM HV)
c) 64-bit SEV guest launch : successful (KVM HV)

32-bit build
----------
I am installing 32-bit distro to verify 32-bit baremetal boot and will
report my findings soon.

-Brijesh

2017-09-06 18:19:59

by Borislav Petkov

Subject: Re: SME/32-bit regression

On Wed, Sep 06, 2017 at 01:06:50PM -0500, Brijesh Singh wrote:
> I did the following quick run with your patch and everything seems to be
> working okay
>
> 64-bit build:
> -------
> 1) Baremetal SME *enabled* - System boots fine
> a) 32-bit guest launch : successful (KVM HV)
> b) 64-bit guest launch : successful (KVM HV)
> c) 64-bit SEV guest launch : successful (KVM HV)
>
> 2) Baremetal SME *disabled* - System boots fine
> a) 32-bit guest launch : successful (KVM HV)
> b) 64-bit guest launch : successful (KVM HV)
> c) 64-bit SEV guest launch : successful (KVM HV)
>
> 32-bit build
> ----------
> I am installing 32-bit distro to verify 32-bit baremetal boot and will
> report my findings soon.

Thanks Brijesh, that's awesome!

I'll add your Tested-by once you're done testing successfully.

:-)

--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

2017-09-06 21:04:21

by Boris Ostrovsky

Subject: Re: SME/32-bit regression

On 09/06/2017 02:19 PM, Borislav Petkov wrote:
> On Wed, Sep 06, 2017 at 01:06:50PM -0500, Brijesh Singh wrote:
>> I did the following quick run with your patch and everything seems to be
>> working okay
>>
>> 64-bit build:
>> -------
>> 1) Baremetal SME *enabled* - System boots fine
>> a) 32-bit guest launch : successful (KVM HV)
>> b) 64-bit guest launch : successful (KVM HV)
>> c) 64-bit SEV guest launch : successful (KVM HV)
>>
>> 2) Baremetal SME *disabled* - System boots fine
>> a) 32-bit guest launch : successful (KVM HV)
>> b) 64-bit guest launch : successful (KVM HV)
>> c) 64-bit SEV guest launch : successful (KVM HV)
>>
>> 32-bit build
>> ----------
>> I am installing 32-bit distro to verify 32-bit baremetal boot and will
>> report my findings soon.
> Thanks Brijesh, that's awesome!
>
> I'll add your Tested-by once you're done testing successfully.


You can have my Tested-by (mostly Xen but some baremetal too).

-boris

2017-09-07 00:26:52

by Brijesh Singh

Subject: Re: SME/32-bit regression



On 09/06/2017 04:03 PM, Boris Ostrovsky wrote:
> On 09/06/2017 02:19 PM, Borislav Petkov wrote:
>> On Wed, Sep 06, 2017 at 01:06:50PM -0500, Brijesh Singh wrote:
>>> I did the following quick run with your patch and everything seems to be
>>> working okay
>>>
>>> 64-bit build:
>>> -------
>>> 1) Baremetal SME *enabled* - System boots fine
>>> a) 32-bit guest launch : successful (KVM HV)
>>> b) 64-bit guest launch : successful (KVM HV)
>>> c) 64-bit SEV guest launch : successful (KVM HV)
>>>
>>> 2) Baremetal SME *disabled* - System boots fine
>>> a) 32-bit guest launch : successful (KVM HV)
>>> b) 64-bit guest launch : successful (KVM HV)
>>> c) 64-bit SEV guest launch : successful (KVM HV)
>>>
>>> 32-bit build
>>> ----------
>>> I am installing 32-bit distro to verify 32-bit baremetal boot and will
>>> report my findings soon.
>> Thanks Brijesh, that's awesome!
>>
>> I'll add your Tested-by once you're done testing successfully.
>

32-bit seems to be working well - thanks

-Brijesh

>
> You can have my Tested-by (mostly Xen but some baremetal too).
>
> -boris
>

2017-09-07 09:38:50

by Borislav Petkov

Subject: [PATCH] x86/mm: Make the SME mask a u64

On Wed, Sep 06, 2017 at 07:26:40PM -0500, Brijesh Singh wrote:
> 32-bit seems to be working well - thanks

Thanks Boris and Brijesh for testing!

---
From: Borislav Petkov <[email protected]>

The SME encryption mask is for masking 64-bit pagetable entries. It
being an unsigned long works fine on X86_64, but on 32-bit builds it
truncates bits, leading to Xen guests crashing very early.

And regardless, the whole SME mask handling shouldn't have leaked into
32-bit because SME is an X86_64-only feature. So, first make the mask u64.
And then, add trivial 32-bit versions of the __sme_* macros so that
nothing happens there.

Reported-and-tested-by: Boris Ostrovsky <[email protected]>
Tested-by: Brijesh Singh <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Fixes: 21729f81ce8a ("x86/mm: Provide general kernel support for memory encryption")
---
arch/x86/include/asm/mem_encrypt.h | 4 ++--
arch/x86/mm/mem_encrypt.c | 2 +-
include/linux/mem_encrypt.h | 13 +++++++++----
3 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fcf1f7c..6a77c63540f7 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@

#ifdef CONFIG_AMD_MEM_ENCRYPT

-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;

void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

#else /* !CONFIG_AMD_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

static inline void __init sme_early_encrypt(resource_size_t paddr,
unsigned long size) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd09269757..3fcc8e01683b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
* reside in the .data section so as not to be zeroed out when the .bss
* section is later cleared.
*/
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..265a9cd21cb4 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@

#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */

@@ -30,18 +30,23 @@ static inline bool sme_active(void)
return !!sme_me_mask;
}

-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
{
return sme_me_mask;
}

+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
* entries).
*/
-#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif

#endif /* __ASSEMBLY__ */

--
2.13.0

--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

Subject: [tip:x86/urgent] x86/mm: Make the SME mask a u64

Commit-ID: 21d9bb4a05bac50fb4f850517af4030baecd00f6
Gitweb: http://git.kernel.org/tip/21d9bb4a05bac50fb4f850517af4030baecd00f6
Author: Borislav Petkov <[email protected]>
AuthorDate: Thu, 7 Sep 2017 11:38:37 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Thu, 7 Sep 2017 11:53:11 +0200

x86/mm: Make the SME mask a u64

The SME encryption mask is for masking 64-bit pagetable entries. It
being an unsigned long works fine on X86_64, but on 32-bit builds it
truncates bits, leading to Xen guests crashing very early.

And regardless, the whole SME mask handling shouldn't have leaked into
32-bit because SME is an X86_64-only feature. So, first make the mask u64.
And then, add trivial 32-bit versions of the __sme_* macros so that
nothing happens there.

Reported-and-tested-by: Boris Ostrovsky <[email protected]>
Tested-by: Brijesh Singh <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Acked-by: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas <[email protected]>
Fixes: 21729f81ce8a ("x86/mm: Provide general kernel support for memory encryption")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/mem_encrypt.h | 4 ++--
arch/x86/mm/mem_encrypt.c | 2 +-
include/linux/mem_encrypt.h | 13 +++++++++----
3 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fc..6a77c63 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@

#ifdef CONFIG_AMD_MEM_ENCRYPT

-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;

void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

#else /* !CONFIG_AMD_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

static inline void __init sme_early_encrypt(resource_size_t paddr,
unsigned long size) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd092..3fcc8e0 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
* reside in the .data section so as not to be zeroed out when the .bss
* section is later cleared.
*/
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09..265a9cd 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@

#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */

-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL

#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */

@@ -30,18 +30,23 @@ static inline bool sme_active(void)
return !!sme_me_mask;
}

-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
{
return sme_me_mask;
}

+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
* entries).
*/
-#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif

#endif /* __ASSEMBLY__ */