2021-06-03 18:17:39

by Borislav Petkov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

From f1e9f051c86b09fe660f49b0307bc7c6cec5e6f4 Mon Sep 17 00:00:00 2001
From: Borislav Petkov <[email protected]>
Date: Thu, 3 Jun 2021 20:03:31 +0200
Subject: Convert sme_active()

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 9c80c68d75b5..1bb9f22629fc 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,7 +50,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
void __init mem_encrypt_init(void);

void __init sev_es_init_vc_handling(void);
-bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);

@@ -75,7 +74,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }

static inline void sev_es_init_vc_handling(void) { }
-static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
static inline bool sev_es_active(void) { return false; }

diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index c078b0d3ab0e..1d88232146ab 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -387,7 +387,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
- sme_active());
+ protected_guest_has(VM_HOST_MEM_ENCRYPT));

#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index c2cfa5e7c152..ce6f2b9a05c7 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -49,7 +49,7 @@ int __init pci_swiotlb_detect_4gb(void)
* buffers are allocated and used for devices that do not support
* the addressing range required for the encryption mask.
*/
- if (sme_active())
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT))
swiotlb = 1;

return swiotlb;
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 01a224fdb897..3aa2658ced52 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1409,6 +1409,11 @@ bool sev_protected_guest_has(unsigned long flag)
case VM_MEM_ENCRYPT:
case VM_MEM_ENCRYPT_ACTIVE:
return true;
+ case VM_HOST_MEM_ENCRYPT:
+ return sme_me_mask && !sev_active();
+ default:
+ WARN_ON_ONCE(1);
+ return false;
}

return false;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 667bba74e4c8..50ed2a768844 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -703,7 +703,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
if (flags & MEMREMAP_DEC)
return false;

- if (sme_active()) {
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT)) {
if (memremap_is_setup_data(phys_addr, size) ||
memremap_is_efi_data(phys_addr, size))
return false;
@@ -729,7 +729,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,

encrypted_prot = true;

- if (sme_active()) {
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT)) {
if (early_memremap_is_setup_data(phys_addr, size) ||
memremap_is_efi_data(phys_addr, size))
encrypted_prot = false;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 49d11bb6e02a..9b0cdac895ca 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -145,7 +145,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
struct boot_params *boot_data;
unsigned long cmdline_paddr;

- if (!sme_active())
+ if (!protected_guest_has(VM_HOST_MEM_ENCRYPT))
return;

/* Get the command line address before unmapping the real_mode_data */
@@ -165,7 +165,7 @@ void __init sme_map_bootdata(char *real_mode_data)
struct boot_params *boot_data;
unsigned long cmdline_paddr;

- if (!sme_active())
+ if (!protected_guest_has(VM_HOST_MEM_ENCRYPT))
return;

__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -365,7 +365,7 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
/*
* SME and SEV are very similar but they are not the same, so there are
* times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this. When a
+ * protected_guest_has(VM_HOST_MEM_ENCRYPT) and sev_active() functions are used for this. When a
* distinction isn't needed, the mem_encrypt_active() function can be used.
*
* The trampoline code is a good example for this requirement. Before
@@ -378,11 +378,6 @@ bool sev_active(void)
{
return sev_status & MSR_AMD64_SEV_ENABLED;
}
-
-bool sme_active(void)
-{
- return sme_me_mask && !sev_active();
-}
EXPORT_SYMBOL_GPL(sev_active);

/* Needs to be called from non-instrumentable code */
@@ -405,7 +400,7 @@ bool amd_force_dma_unencrypted(struct device *dev)
* device does not support DMA to addresses that include the
* encryption mask.
*/
- if (sme_active()) {
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT)) {
u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
dev->bus_dma_limit);
@@ -446,7 +441,7 @@ static void print_mem_encrypt_feature_info(void)
pr_info("AMD Memory Encryption Features active:");

/* Secure Memory Encryption */
- if (sme_active()) {
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT)) {
/*
* SME is mutually exclusive with any of the SEV
* features below.
diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
index da94fc2e9b56..286357956762 100644
--- a/arch/x86/mm/mem_encrypt_common.c
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -15,7 +15,7 @@
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
- if (sev_active() || sme_active())
+ if (sev_active() || protected_guest_has(VM_HOST_MEM_ENCRYPT))
return amd_force_dma_unencrypted(dev);

if (protected_guest_has(VM_MEM_ENCRYPT))
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a9639f663d25..a92b49aa0d73 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
+#include <linux/protected_guest.h>

#include <asm/setup.h>
#include <asm/sections.h>
@@ -287,7 +288,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
unsigned long pgtable_area_len;
unsigned long decrypted_base;

- if (!sme_active())
+ if (!protected_guest_has(VM_HOST_MEM_ENCRYPT))
return;

/*
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 2e1c1bec0f9e..7f9a708986a3 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -42,7 +42,7 @@ void __init reserve_real_mode(void)
static void sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
- if (sme_active())
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT))
th->flags |= TH_FLAGS_SME_ACTIVE;

if (sev_es_active()) {
@@ -79,7 +79,7 @@ static void __init setup_real_mode(void)
* decrypted memory in order to bring up other processors
* successfully. This is not needed for SEV.
*/
- if (sme_active())
+ if (protected_guest_has(VM_HOST_MEM_ENCRYPT))
set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

memcpy(base, real_mode_blob, size);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index d006724f4dc2..3c2365f13cc3 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -965,7 +965,7 @@ static bool copy_device_table(void)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (sme_active() && is_kdump_kernel())
+ old_devtb = (protected_guest_has(VM_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
dev_table_size)
: memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -3022,7 +3022,7 @@ static int __init amd_iommu_init(void)

static bool amd_iommu_sme_check(void)
{
- if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+ if (!protected_guest_has(VM_HOST_MEM_ENCRYPT) || (boot_cpu_data.x86 != 0x17))
return true;

/* For Fam17h, a specific level of support is required */

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette


2021-06-04 22:03:33

by Tom Lendacky

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On 6/3/21 1:15 PM, Borislav Petkov wrote:
> From f1e9f051c86b09fe660f49b0307bc7c6cec5e6f4 Mon Sep 17 00:00:00 2001
> From: Borislav Petkov <[email protected]>
> Date: Thu, 3 Jun 2021 20:03:31 +0200
> Subject: Convert sme_active()
>
> */
> - if (sme_active())
> + if (protected_guest_has(VM_HOST_MEM_ENCRYPT))
> swiotlb = 1;

I still feel this is confusing. SME is a host/bare-metal technology, so
calling protected_guest_has() seems odd and using VM_HOST_MEM_ENCRYPT,
where I assume VM is short for virtual machine, also seems odd.

How about just protected_os_has()? Then you could have
- HOST_MEM_ENCRYPT for host memory encryption
- GUEST_MEM_ENCRYPT for guest memory encryption
- MEM_ENCRYPT for either host or guest memory encryption.

The first is analogous to sme_active(), the second to sev_active() and the
third to mem_encrypt_active(). Just my opinion, though...
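
Roughly, the mapping I have in mind (just a sketch; the function name and
flag values are made up, nothing that exists yet):

	/* Hypothetical sketch, not an existing API. */
	#define HOST_MEM_ENCRYPT	1	/* analogous to sme_active()         */
	#define GUEST_MEM_ENCRYPT	2	/* analogous to sev_active()         */
	#define MEM_ENCRYPT		3	/* analogous to mem_encrypt_active() */

	static bool protected_os_has(unsigned long flag)
	{
		switch (flag) {
		case HOST_MEM_ENCRYPT:
			return sme_me_mask && !sev_active();
		case GUEST_MEM_ENCRYPT:
			return sev_active();
		case MEM_ENCRYPT:
			return mem_encrypt_active();
		}

		return false;
	}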

>
> return swiotlb;
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index 01a224fdb897..3aa2658ced52 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -1409,6 +1409,11 @@ bool sev_protected_guest_has(unsigned long flag)
> case VM_MEM_ENCRYPT:
> case VM_MEM_ENCRYPT_ACTIVE:
> return true;
> + case VM_HOST_MEM_ENCRYPT:
> + return sme_me_mask && !sev_active();
> + default:
> + WARN_ON_ONCE(1);
> + return false;

I don't think you want a WARN_ON_ONCE() here. The code will be written to
work with either SEV or TDX, so we shouldn't warn on a check for a TDX
supported feature when running on AMD (or vice-versa).

Thanks,
Tom

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction



On 6/4/21 3:01 PM, Tom Lendacky wrote:
>> */
>> - if (sme_active())
>> + if (protected_guest_has(VM_HOST_MEM_ENCRYPT))
>> swiotlb = 1;
> I still feel this is confusing. SME is a host/bare-metal technology, so
> calling protected_guest_has() seems odd and using VM_HOST_MEM_ENCRYPT,
> where I assume VM is short for virtual machine, also seems odd.
>
> How about just protected_os_has()? Then you could have
> - HOST_MEM_ENCRYPT for host memory encryption
> - GUEST_MEM_ENCRYPT for guest memory encryption
> - MEM_ENCRYPT for either host or guest memory encryption.
>
> The first is analogous to sme_active(), the second to sev_active() and the
> third to mem_encrypt_active(). Just my opinion, though...
>

I am not sure whether OS makes sense here. But I am fine with it if
that is the maintainer's choice.

Another option could be protected_boot_has()?

--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer

2021-06-04 22:20:36

by Borislav Petkov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On Fri, Jun 04, 2021 at 05:01:31PM -0500, Tom Lendacky wrote:
> The first is analogous to sme_active(), the second to sev_active() and the
> third to mem_encrypt_active(). Just my opinion, though...

Yeah, or cc_has() where "cc" means "confidential computing". Or "coco"...

Yeah, no good idea yet.

> I don't think you want a WARN_ON_ONCE() here. The code will be written to
> work with either SEV or TDX, so we shouldn't warn on a check for a TDX
> supported feature when running on AMD (or vice-versa).

That's an AMD-specific path so it would warn only when a flag is used
which is unknown/unused yet on AMD.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2021-06-04 23:32:34

by Tom Lendacky

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On 6/4/21 5:15 PM, Borislav Petkov wrote:
> On Fri, Jun 04, 2021 at 05:01:31PM -0500, Tom Lendacky wrote:
>> The first is analogous to sme_active(), the second to sev_active() and the
>> third to mem_encrypt_active(). Just my opinion, though...
>
> Yeah, or cc_has() where "cc" means "confidential computing". Or "coco"...
>
> Yeah, no good idea yet.
>
>> I don't think you want a WARN_ON_ONCE() here. The code will be written to
>> work with either SEV or TDX, so we shouldn't warn on a check for a TDX
>> supported feature when running on AMD (or vice-versa).
>
> That's an AMD-specific path so it would warn only when a flag is used
> which is unknown/unused yet on AMD.

But the check can happen on Intel or AMD. We have lots of checks for
sme_active() in common code that are executed on Intel today, but they
just return false. It's the same principle: you don't want to WARN on
those, just return false. E.g.:

/* some common code path */
if (cc_has(XYZ))
do_y();

If Intel has XYZ but AMD does not, you don't want to WARN, just return false.

Thanks,
Tom

>

2021-06-05 11:08:07

by Borislav Petkov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On Fri, Jun 04, 2021 at 06:31:03PM -0500, Tom Lendacky wrote:
> If Intel has XYZ but AMD does not, you don't want to WARN, just return false.

Aha, *now*, I see what you mean. Ok, so the reason why I added the
WARN is to sanity-check whether we're handling all possible VM_* or
PROT_GUEST_* flags properly and whether we're missing some. As a
debugging help. It'll get removed before applying, I guess.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction



On 6/5/21 4:03 AM, Borislav Petkov wrote:
> Aha, *now*, I see what you mean. Ok, so the reason why I added the
> WARN is to sanity-check whether we're handling all possible VM_* or
> PROT_GUEST_* flags properly and whether we're missing some. As a
> debugging help. It'll get removed before applying, I guess.

Borislav/Tom,

Any consensus on the function name and flag prefix?

Currently suggested function names are:

cc_has() or protected_guest_has() or prot_guest_has() or protected_boot_has()

For the flag prefix, either PR_GUEST_* or CC_*.

I am planning to submit another version of this patch with the suggested
fixes. If we can reach some consensus on function and flag names, I can
include them in it. If not, I will submit the next version without any
renames.

Please let me know your comments.

BTW, my choice is protected_guest_has() or cc_has().

--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer

2021-06-05 20:15:08

by Borislav Petkov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On Sat, Jun 05, 2021 at 11:12:57AM -0700, Kuppuswamy, Sathyanarayanan wrote:
> cc_has() or protected_guest_has() or prot_guest_has() or protected_boot_has()

Even if I still think it is not optimal, prot_guest_has() seems to be the
best of what we have, because protected_guest_has() together with the flag
becomes just too long to scan at a quick glance. And if you have to do two
tests, you'd have to break the line.
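
E.g. (made-up flag names, just to show the wrapping):

	if (protected_guest_has(PR_GUEST_MEM_ENCRYPT) &&
	    protected_guest_has(PR_GUEST_UNROLL_STRING_IO))
		...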

> For the flag prefix, either PR_GUEST_* or CC_*.

PR_GUEST_* sounds ok to me.

The "cc" prefix stuff is nice and short but it doesn't say what it means
because it is simply too short. And code readability is very important.

I'd say.

Still open for better suggestions though.

Thx.

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

2021-06-07 19:57:53

by Kirill A. Shutemov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On Thu, Jun 03, 2021 at 08:15:46PM +0200, Borislav Petkov wrote:
> From f1e9f051c86b09fe660f49b0307bc7c6cec5e6f4 Mon Sep 17 00:00:00 2001
> From: Borislav Petkov <[email protected]>
> Date: Thu, 3 Jun 2021 20:03:31 +0200
> Subject: Convert sme_active()
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 9c80c68d75b5..1bb9f22629fc 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -50,7 +50,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
> void __init mem_encrypt_init(void);
>
> void __init sev_es_init_vc_handling(void);
> -bool sme_active(void);
> bool sev_active(void);
> bool sev_es_active(void);
>
> @@ -75,7 +74,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
> static inline void __init sme_enable(struct boot_params *bp) { }
>
> static inline void sev_es_init_vc_handling(void) { }
> -static inline bool sme_active(void) { return false; }
> static inline bool sev_active(void) { return false; }
> static inline bool sev_es_active(void) { return false; }
>
> diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
> index c078b0d3ab0e..1d88232146ab 100644
> --- a/arch/x86/kernel/machine_kexec_64.c
> +++ b/arch/x86/kernel/machine_kexec_64.c
> @@ -387,7 +387,7 @@ void machine_kexec(struct kimage *image)
> (unsigned long)page_list,
> image->start,
> image->preserve_context,
> - sme_active());
> + protected_guest_has(VM_HOST_MEM_ENCRYPT));
>
> #ifdef CONFIG_KEXEC_JUMP
> if (image->preserve_context)

I think conversions like this are wrong: relocate_kernel(), which got
called here, only knows how to deal with SME, not how to handle some
generic case.

(After a quick check, it looks like all conversions in the patch are wrong
for the same reason.)

If code is written to handle a specific technology, we need to stick with
a check that makes it clear. Trying to make it sound generic only leads to
confusion.

Also, we have host memory encryption that doesn't require any of this
code: TME makes the encryption transparent to the OS.

Maybe it's better to take a conservative path: keep a check specific until
we find it can serve more than one HW feature?

--
Kirill A. Shutemov

2021-06-07 20:15:59

by Borislav Petkov

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction

On Mon, Jun 07, 2021 at 10:55:44PM +0300, Kirill A. Shutemov wrote:
> I think conversions like this are wrong: relocate_kernel(), which got
> called here, only knows how to deal with SME, not how to handle some
> generic case.

What do you mean wrong? Wrong for TDX?

If so, then that can be

protected_guest_has(SME)

or so, which would be false on Intel.

And this patch was only a mechanical conversion to see how it would look.

> If code is written to handle a specific technology we need to stick
> with a check that makes it clear. Trying to make sound generic only
> leads to confusion.

Sure, fine by me.

And I don't want a zoo of a gazillion small checking functions per
technology. sev_<something>, tdx_<something>, yadda yadda.

So stuff better be unified. Even if you'd have vendor-specific defines
you hand into that function - and you will have such - it is still much
saner than what it turns into with the AMD side of things.
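
IOW, even a vendor-specific ask would still funnel through the one helper,
something like (hypothetical flag in an arch-specific range):

	#define PR_GUEST_SEV_ES		0x01	/* made-up arch-specific flag */

	if (prot_guest_has(PR_GUEST_SEV_ES))
		/* do SEV-ES-only setup */

rather than sprinkling sev_es_<something>() and tdx_<something>() checks
all over the tree.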

--
Regards/Gruss,
Boris.

https://people.kernel.org/tglx/notes-about-netiquette

Subject: Re: [RFC v2-fix-v2 1/1] x86: Introduce generic protected guest abstraction



On 6/7/21 1:14 PM, Borislav Petkov wrote:
> On Mon, Jun 07, 2021 at 10:55:44PM +0300, Kirill A. Shutemov wrote:
>> I think conversions like this are wrong: relocate_kernel(), which got
>> called here, only knows how to deal with SME, not how to handle some
>> generic case.
>
> What do you mean wrong? Wrong for TDX?
>
> If so, then that can be
>
> protected_guest_has(SME)
>
> or so, which would be false on Intel.

I agree. Since most of the code changed in this patch is
not applicable to TDX, it might need product-specific or
new function-specific flags.

>
> And this patch was only a mechanical conversion to see how it would look.
>
>> If code is written to handle a specific technology, we need to stick
>> with a check that makes it clear. Trying to make it sound generic only
>> leads to confusion.
>
> Sure, fine by me.
>
> And I don't want a zoo of a gazillion small checking functions per
> technology. sev_<something>, tdx_<something>, yadda yadda.
>
> So stuff better be unified. Even if you'd have vendor-specific defines
> you hand into that function - and you will have such - it is still much
> saner than what it turns into with the AMD side of things.

Agree. Currently we share code with AMD SEV in the memory encryption
support and the string I/O handling code, so defining a common flag for
such code is useful.

>

--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer

Subject: [RFC v2-fix-v3 1/1] x86: Introduce generic protected guest abstraction

Add a generic way to check if we run with an encrypted guest, without
requiring x86-specific ifdefs. This can then be used in
non-architecture-specific code.

prot_guest_has() is used to check for protected guest feature
flags.

Originally-by: Andi Kleen <[email protected]>
Signed-off-by: Kuppuswamy Sathyanarayanan <[email protected]>
---

Changes since RFC v2-fix-v2:
* Renamed protected_guest_has() to prot_guest_has().
* Changed flag prefix from VM_ to PR_GUEST_
* Merged Borislav's AMD implementation fix.

 arch/x86/include/asm/sev.h      |  3 +++
 arch/x86/include/asm/tdx.h      |  7 ++++++
 arch/x86/kernel/sev.c           | 15 +++++++++++++
 arch/x86/kernel/tdx.c           | 15 +++++++++++++
 arch/x86/mm/mem_encrypt.c       |  1 +
 include/linux/protected_guest.h | 38 +++++++++++++++++++++++++++++++++
 6 files changed, 79 insertions(+)
 create mode 100644 include/linux/protected_guest.h

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index fa5cd05d3b5b..e9b0b93a3157 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -81,12 +81,15 @@ static __always_inline void sev_es_nmi_complete(void)
__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
+bool sev_protected_guest_has(unsigned long flag);
+
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
+static inline bool sev_protected_guest_has(unsigned long flag) { return false; }
#endif

#endif
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index f0c1912837c8..cbfe7479f2a3 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -71,6 +71,8 @@ u64 __tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
u64 __tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15,
struct tdx_hypercall_output *out);

+bool tdx_protected_guest_has(unsigned long flag);
+
#else // !CONFIG_INTEL_TDX_GUEST

static inline bool is_tdx_guest(void)
@@ -80,6 +82,11 @@ static inline bool is_tdx_guest(void)

static inline void tdx_early_init(void) { };

+static inline bool tdx_protected_guest_has(unsigned long flag)
+{
+ return false;
+}
+
#endif /* CONFIG_INTEL_TDX_GUEST */

#ifdef CONFIG_INTEL_TDX_GUEST_KVM
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 651b81cd648e..16e5c5f25e6f 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -19,6 +19,7 @@
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/protected_guest.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
@@ -1493,3 +1494,17 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
while (true)
halt();
}
+
+bool sev_protected_guest_has(unsigned long flag)
+{
+ switch (flag) {
+ case PR_GUEST_MEM_ENCRYPT:
+ case PR_GUEST_MEM_ENCRYPT_ACTIVE:
+ case PR_GUEST_UNROLL_STRING_IO:
+ case PR_GUEST_HOST_MEM_ENCRYPT:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sev_protected_guest_has);
diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index 17725646eb30..111f15c05e24 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -7,6 +7,7 @@
#include <asm/vmx.h>

#include <linux/cpu.h>
+#include <linux/protected_guest.h>

/* TDX Module call Leaf IDs */
#define TDINFO 1
@@ -75,6 +76,20 @@ bool is_tdx_guest(void)
}
EXPORT_SYMBOL_GPL(is_tdx_guest);

+bool tdx_protected_guest_has(unsigned long flag)
+{
+ switch (flag) {
+ case PR_GUEST_MEM_ENCRYPT:
+ case PR_GUEST_MEM_ENCRYPT_ACTIVE:
+ case PR_GUEST_UNROLL_STRING_IO:
+ case PR_GUEST_SHARED_MAPPING_INIT:
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(tdx_protected_guest_has);
+
static void tdg_get_info(void)
{
u64 ret;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ff08dc463634..d0026bce47df 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -20,6 +20,7 @@
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
+#include <linux/protected_guest.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
diff --git a/include/linux/protected_guest.h b/include/linux/protected_guest.h
new file mode 100644
index 000000000000..adfa62e2615e
--- /dev/null
+++ b/include/linux/protected_guest.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_PROTECTED_GUEST_H
+#define _LINUX_PROTECTED_GUEST_H 1
+
+#include <asm/processor.h>
+#include <asm/tdx.h>
+#include <asm/sev.h>
+
+/* Protected Guest Feature Flags (leave 0-0xff for arch specific flags) */
+
+/* Support for guest encryption */
+#define PR_GUEST_MEM_ENCRYPT 0x100
+/* Encryption support is active */
+#define PR_GUEST_MEM_ENCRYPT_ACTIVE 0x101
+/* Support for unrolled string IO */
+#define PR_GUEST_UNROLL_STRING_IO 0x102
+/* Support for host memory encryption */
+#define PR_GUEST_HOST_MEM_ENCRYPT 0x103
+/* Support for shared mapping initialization (after early init) */
+#define PR_GUEST_SHARED_MAPPING_INIT 0x104
+
+#if defined(CONFIG_INTEL_TDX_GUEST) || defined(CONFIG_AMD_MEM_ENCRYPT)
+
+static inline bool prot_guest_has(unsigned long flag)
+{
+ if (is_tdx_guest())
+ return tdx_protected_guest_has(flag);
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return sev_protected_guest_has(flag);
+
+ return false;
+}
+
+#else
+static inline bool prot_guest_has(unsigned long flag) { return false; }
+#endif
+
+#endif /* _LINUX_PROTECTED_GUEST_H */
--
2.25.1
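
For reference, a caller in non-architecture-specific code would then look
something like this (hypothetical example; the caller and helper names
below are made up):

	#include <linux/protected_guest.h>

	void example_driver_init(void)
	{
		/* True for SEV and TDX guests, false on bare metal. */
		if (prot_guest_has(PR_GUEST_MEM_ENCRYPT))
			init_shared_buffers();	/* made-up helper */
	}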