2023-05-30 12:19:35

by Alexander Shishkin

Subject: [PATCH] x86/sev: Move sev_setup_arch() to mem_encrypt.c

Since commit 4d96f9109109b ("x86/sev: Replace occurrences of
sev_active() with cc_platform_has()"), the SWIOTLB bounce buffer size
adjustment and restricted virtio memory setting also inadvertently apply
to TDX, which just happens to be what we want.

To reflect this, move the corresponding code to generic mem_encrypt.c.
No functional changes intended.

Signed-off-by: Alexander Shishkin <[email protected]>
---
arch/x86/include/asm/mem_encrypt.h | 11 ++++++++--
arch/x86/kernel/setup.c | 2 +-
arch/x86/mm/mem_encrypt.c | 34 ++++++++++++++++++++++++++++++
arch/x86/mm/mem_encrypt_amd.c | 34 ------------------------------
4 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index b7126701574c..4283063c1e1c 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -37,7 +37,6 @@ void __init sme_map_bootdata(char *real_mode_data);
void __init sme_unmap_bootdata(char *real_mode_data);

void __init sme_early_init(void);
-void __init sev_setup_arch(void);

void __init sme_encrypt_kernel(struct boot_params *bp);
void __init sme_enable(struct boot_params *bp);
@@ -67,7 +66,6 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
static inline void __init sme_unmap_bootdata(char *real_mode_data) { }

static inline void __init sme_early_init(void) { }
-static inline void __init sev_setup_arch(void) { }

static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
@@ -92,6 +90,15 @@ void __init mem_encrypt_init(void);

void add_encrypt_protection_map(void);

+#ifdef CONFIG_X86_MEM_ENCRYPT
+
+void __init mem_encrypt_setup_arch(void);
+
+#else /* !CONFIG_X86_MEM_ENCRYPT */
+
+static inline void __init mem_encrypt_setup_arch(void) { }
+
+#endif /* CONFIG_X86_MEM_ENCRYPT */
/*
* The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
* writing to or comparing values from the cr3 register. Having the
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16babff771bd..e2aa1d5b37a9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1121,7 +1121,7 @@ void __init setup_arch(char **cmdline_p)
* Needs to run after memblock setup because it needs the physical
* memory size.
*/
- sev_setup_arch();
+ mem_encrypt_setup_arch();

efi_fake_memmap();
efi_find_mirror();
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9f27e14e185f..c290c55b632b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -12,6 +12,7 @@
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
+#include <linux/virtio_anchor.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
@@ -86,3 +87,36 @@ void __init mem_encrypt_init(void)

print_mem_encrypt_feature_info();
}
+
+void __init mem_encrypt_setup_arch(void)
+{
+ phys_addr_t total_mem = memblock_phys_mem_size();
+ unsigned long size;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ return;
+
+ /*
+ * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
+ * Kernel uses SWIOTLB to make this happen without changing device
+ * drivers. However, depending on the workload being run, the
+ * default 64MB of SWIOTLB may not be enough and SWIOTLB may
+ * run out of buffers for DMA, resulting in I/O errors and/or
+ * performance degradation especially with high I/O workloads.
+ *
+ * Adjust the default size of SWIOTLB using a percentage of guest
+ * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
+ * memory is allocated from low memory, ensure that the adjusted size
+ * is within the limits of low available memory.
+ *
+ * The percentage of guest memory used here for SWIOTLB buffers
+ * approximates the earlier static adjustment, which used 64MB for
+ * <1G and ~128M to 256M for 1G-to-4G, i.e., roughly 6%.
+ */
+ size = total_mem * 6 / 100;
+ size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
+ swiotlb_adjust_size(size);
+
+ /* Set restricted memory access for virtio. */
+ virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
+}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index e0b51c09109f..3b95e6fdf160 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -215,40 +215,6 @@ void __init sme_map_bootdata(char *real_mode_data)
__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

-void __init sev_setup_arch(void)
-{
- phys_addr_t total_mem = memblock_phys_mem_size();
- unsigned long size;
-
- if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
- return;
-
- /*
- * For SEV, all DMA has to occur via shared/unencrypted pages.
- * SEV uses SWIOTLB to make this happen without changing device
- * drivers. However, depending on the workload being run, the
- * default 64MB of SWIOTLB may not be enough and SWIOTLB may
- * run out of buffers for DMA, resulting in I/O errors and/or
- * performance degradation especially with high I/O workloads.
- *
- * Adjust the default size of SWIOTLB for SEV guests using
- * a percentage of guest memory for SWIOTLB buffers.
- * Also, as the SWIOTLB bounce buffer memory is allocated
- * from low memory, ensure that the adjusted size is within
- * the limits of low available memory.
- *
- * The percentage of guest memory used here for SWIOTLB buffers
- * is more of an approximation of the static adjustment which
- * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
- */
- size = total_mem * 6 / 100;
- size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
- swiotlb_adjust_size(size);
-
- /* Set restricted memory access for virtio. */
- virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
-}
-
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
unsigned long pfn = 0;
--
2.39.2
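
For a concrete feel for the sizing heuristic above, here is a minimal
user-space sketch of the same computation (a sketch only: IO_TLB_DEFAULT_SIZE
and SZ_1G are hardcoded to their upstream values of 64MB and 1GB, and the
in-kernel swiotlb_adjust_size() additionally rounds to slab granularity,
which this skips):

#include <stdio.h>
#include <stdint.h>

#define MB(x)                   ((uint64_t)(x) << 20)
#define IO_TLB_DEFAULT_SIZE     MB(64)          /* upstream default */
#define SZ_1G                   MB(1024)

/* Mirrors: size = total_mem * 6 / 100, clamped to [64MB, 1GB]. */
static uint64_t swiotlb_size_for(uint64_t total_mem)
{
        uint64_t size = total_mem * 6 / 100;    /* 6% of guest memory */

        if (size < IO_TLB_DEFAULT_SIZE)         /* clamp_val() equivalent */
                size = IO_TLB_DEFAULT_SIZE;
        if (size > SZ_1G)
                size = SZ_1G;
        return size;
}

int main(void)
{
        uint64_t mems[] = { MB(512), MB(1024), MB(4096), MB(32768) };

        for (unsigned i = 0; i < sizeof(mems) / sizeof(mems[0]); i++)
                printf("%6llu MB guest -> %4llu MB SWIOTLB\n",
                       (unsigned long long)(mems[i] >> 20),
                       (unsigned long long)(swiotlb_size_for(mems[i]) >> 20));
        return 0;
}

So a 1G guest stays at the 64MB floor, a 4G guest gets ~245MB, and guests
above roughly 17G hit the 1G cap.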



2023-05-30 19:04:15

by Alison Schofield

Subject: Re: [PATCH] x86/sev: Move sev_setup_arch() to mem_encrypt.c

On Tue, May 30, 2023 at 03:17:28PM +0300, Alexander Shishkin wrote:
> Since commit 4d96f9109109b ("x86/sev: Replace occurrences of
> sev_active() with cc_platform_has()"), the SWIOTLB bounce buffer size
> adjustment and restricted virtio memory setting also inadvertently apply
> to TDX, which just happens to be what we want.

Hi Alexander,

Can you offer more context on how this inadvertently applies?

One bit below...


>
> To reflect this, move the corresponding code to generic mem_encrypt.c.
> No functional changes intended.
>
> Signed-off-by: Alexander Shishkin <[email protected]>
> ---
> arch/x86/include/asm/mem_encrypt.h | 11 ++++++++--
> arch/x86/kernel/setup.c | 2 +-
> arch/x86/mm/mem_encrypt.c | 34 ++++++++++++++++++++++++++++++
> arch/x86/mm/mem_encrypt_amd.c | 34 ------------------------------
> 4 files changed, 44 insertions(+), 37 deletions(-)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index b7126701574c..4283063c1e1c 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -37,7 +37,6 @@ void __init sme_map_bootdata(char *real_mode_data);
> void __init sme_unmap_bootdata(char *real_mode_data);
>
> void __init sme_early_init(void);
> -void __init sev_setup_arch(void);
>
> void __init sme_encrypt_kernel(struct boot_params *bp);
> void __init sme_enable(struct boot_params *bp);
> @@ -67,7 +66,6 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
> static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
>
> static inline void __init sme_early_init(void) { }
> -static inline void __init sev_setup_arch(void) { }
>
> static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
> static inline void __init sme_enable(struct boot_params *bp) { }
> @@ -92,6 +90,15 @@ void __init mem_encrypt_init(void);
>
> void add_encrypt_protection_map(void);
>
> +#ifdef CONFIG_X86_MEM_ENCRYPT
> +
> +void __init mem_encrypt_setup_arch(void);
> +
> +#else /* !CONFIG_X86_MEM_ENCRYPT */
> +
> +static inline void __init mem_encrypt_setup_arch(void) { }
> +
> +#endif /* CONFIG_X86_MEM_ENCRYPT */
> /*
> * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
> * writing to or comparing values from the cr3 register. Having the
> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
> index 16babff771bd..e2aa1d5b37a9 100644
> --- a/arch/x86/kernel/setup.c
> +++ b/arch/x86/kernel/setup.c
> @@ -1121,7 +1121,7 @@ void __init setup_arch(char **cmdline_p)
> * Needs to run after memblock setup because it needs the physical
> * memory size.
> */
> - sev_setup_arch();
> + mem_encrypt_setup_arch();
>
> efi_fake_memmap();
> efi_find_mirror();
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 9f27e14e185f..c290c55b632b 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -12,6 +12,7 @@
> #include <linux/swiotlb.h>
> #include <linux/cc_platform.h>
> #include <linux/mem_encrypt.h>
> +#include <linux/virtio_anchor.h>

It looks like this #include can be removed from mem_encrypt_amd.c

Alison

>
> /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
> bool force_dma_unencrypted(struct device *dev)
> @@ -86,3 +87,36 @@ void __init mem_encrypt_init(void)
>
> print_mem_encrypt_feature_info();
> }
> +
> +void __init mem_encrypt_setup_arch(void)
> +{
> + phys_addr_t total_mem = memblock_phys_mem_size();
> + unsigned long size;
> +
> + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
> + return;
> +
> + /*
> + * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
> + * Kernel uses SWIOTLB to make this happen without changing device
> + * drivers. However, depending on the workload being run, the
> + * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> + * run out of buffers for DMA, resulting in I/O errors and/or
> + * performance degradation especially with high I/O workloads.
> + *
> + * Adjust the default size of SWIOTLB using a percentage of guest
> + * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
> + * memory is allocated from low memory, ensure that the adjusted size
> + * is within the limits of low available memory.
> + *
> + * The percentage of guest memory used here for SWIOTLB buffers
> + * approximates the earlier static adjustment, which used 64MB for
> + * <1G and ~128M to 256M for 1G-to-4G, i.e., roughly 6%.
> + */
> + size = total_mem * 6 / 100;
> + size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
> + swiotlb_adjust_size(size);
> +
> + /* Set restricted memory access for virtio. */
> + virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
> +}
> diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
> index e0b51c09109f..3b95e6fdf160 100644
> --- a/arch/x86/mm/mem_encrypt_amd.c
> +++ b/arch/x86/mm/mem_encrypt_amd.c
> @@ -215,40 +215,6 @@ void __init sme_map_bootdata(char *real_mode_data)
> __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
> }
>
> -void __init sev_setup_arch(void)
> -{
> - phys_addr_t total_mem = memblock_phys_mem_size();
> - unsigned long size;
> -
> - if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
> - return;
> -
> - /*
> - * For SEV, all DMA has to occur via shared/unencrypted pages.
> - * SEV uses SWIOTLB to make this happen without changing device
> - * drivers. However, depending on the workload being run, the
> - * default 64MB of SWIOTLB may not be enough and SWIOTLB may
> - * run out of buffers for DMA, resulting in I/O errors and/or
> - * performance degradation especially with high I/O workloads.
> - *
> - * Adjust the default size of SWIOTLB for SEV guests using
> - * a percentage of guest memory for SWIOTLB buffers.
> - * Also, as the SWIOTLB bounce buffer memory is allocated
> - * from low memory, ensure that the adjusted size is within
> - * the limits of low available memory.
> - *
> - * The percentage of guest memory used here for SWIOTLB buffers
> - * is more of an approximation of the static adjustment which
> - * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
> - */
> - size = total_mem * 6 / 100;
> - size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
> - swiotlb_adjust_size(size);
> -
> - /* Set restricted memory access for virtio. */
> - virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
> -}
> -
> static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
> {
> unsigned long pfn = 0;
> --
> 2.39.2
>
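
For context on that include: linux/virtio_anchor.h is a tiny header that
only publishes the callback hook the moved code installs. A paraphrased
sketch of its CONFIG_VIRTIO_ANCHOR=y half (from memory, not a verbatim copy):

/* Paraphrased sketch of include/linux/virtio_anchor.h, not verbatim. */
struct virtio_device;

/* Consulted by virtio core when validating a device's features. */
extern bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev);

/* Always returns true, making VIRTIO_F_ACCESS_PLATFORM mandatory. */
bool virtio_require_restricted_mem_acc(struct virtio_device *dev);

static inline void virtio_set_mem_acc_cb(bool (*func)(struct virtio_device *))
{
        virtio_check_mem_acc_cb = func;
}

With the callback installed, virtio core refuses devices that do not offer
VIRTIO_F_ACCESS_PLATFORM, so virtio DMA goes through the DMA API (and thus
SWIOTLB bounce buffers) rather than raw guest physical addresses.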

2023-06-09 17:36:28

by Alexander Shishkin

Subject: Re: [PATCH] x86/sev: Move sev_setup_arch() to mem_encrypt.c

Alison Schofield <[email protected]> writes:

> On Tue, May 30, 2023 at 03:17:28PM +0300, Alexander Shishkin wrote:
>> Since commit 4d96f9109109b ("x86/sev: Replace occurrences of
>> sev_active() with cc_platform_has()"), the SWIOTLB bounce buffer size
>> adjustment and restricted virtio memory setting also inadvertently apply
>> to TDX, which just happens to be what we want.
>
> Hi Alexander,

Hi Alison,

> Can you offer more context on how this inadvertently applies?

Yes, the code uses the cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) condition
to gate both the bounce buffer sizing and the restricted virtio memory
setting, and that attribute is also set for TDX guests. I've added a bit
about this to v2 [0].
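
Roughly, the dispatch looks like this (a from-memory sketch of
arch/x86/coco/core.c, not a verbatim quote):

/* Sketch of the cc_platform_has() vendor dispatch, from memory. */
bool cc_platform_has(enum cc_attr attr)
{
        switch (cc_vendor) {
        case CC_VENDOR_AMD:
                return amd_cc_platform_has(attr);   /* SEV guests */
        case CC_VENDOR_INTEL:
                return intel_cc_platform_has(attr); /* TDX guests */
        default:
                return false;
        }
}

intel_cc_platform_has() reports CC_ATTR_GUEST_MEM_ENCRYPT for TDX, so the
check fires for both vendors.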

> One bit below...
>> --- a/arch/x86/mm/mem_encrypt.c
>> +++ b/arch/x86/mm/mem_encrypt.c
>> @@ -12,6 +12,7 @@
>> #include <linux/swiotlb.h>
>> #include <linux/cc_platform.h>
>> #include <linux/mem_encrypt.h>
>> +#include <linux/virtio_anchor.h>
>
> It looks like this #include can be removed from mem_encrypt_amd.c

Good catch! This is also addressed in v2.

[0] https://lore.kernel.org/lkml/[email protected]/

Thanks,
--
Alex