When a device requires unencrypted memory and the context does not allow
blocking, memory must be returned from the atomic coherent pools.
This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and only
CONFIG_DMA_COHERENT_POOL is required. This will be used for
CONFIG_AMD_MEM_ENCRYPT in a subsequent patch.
Keep all memory in these pools unencrypted. If set_memory_decrypted()
fails, the memory is not added to the pool. If adding memory to the
genpool fails and the subsequent set_memory_encrypted() also fails, there
is no alternative other than leaking the memory.
Signed-off-by: David Rientjes <[email protected]>
---
kernel/dma/direct.c | 46 ++++++++++++++++++++++++++++++++++++++-------
kernel/dma/pool.c | 27 +++++++++++++++++++++++---
2 files changed, 63 insertions(+), 10 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a834ee22f8ff..07ecc5c4d134 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+ unsigned long attrs)
+{
+ if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+ return false;
+ if (gfpflags_allow_blocking(gfp))
+ return false;
+ if (force_dma_unencrypted(dev))
+ return true;
+ if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+ return false;
+ if (dma_alloc_need_uncached(dev, attrs))
+ return true;
+ return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+ unsigned long attrs)
+{
+ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+ return true;
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev))
+ return false;
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+ return true;
+ return false;
+}
+
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp, unsigned long attrs)
{
@@ -125,9 +158,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;
- if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_alloc_need_uncached(dev, attrs) &&
- !gfpflags_allow_blocking(gfp)) {
+ if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
if (!ret)
return NULL;
@@ -204,6 +235,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);
+ /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+ if (dma_should_free_from_pool(dev, attrs) &&
+ dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+ return;
+
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
@@ -211,10 +247,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
return;
}
- if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
- return;
-
if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 9e2da17ed17b..cf052314d9e4 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -7,6 +7,7 @@
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
+#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
@@ -53,22 +54,42 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
arch_dma_prep_coherent(page, pool_size);
+#ifdef CONFIG_DMA_DIRECT_REMAP
addr = dma_common_contiguous_remap(page, pool_size,
pgprot_dmacoherent(PAGE_KERNEL),
__builtin_return_address(0));
if (!addr)
goto free_page;
-
+#else
+ addr = page_to_virt(page);
+#endif
+ /*
+ * Memory in the atomic DMA pools must be unencrypted, the pools do not
+ * shrink so no re-encryption occurs in dma_direct_free_pages().
+ */
+ ret = set_memory_decrypted((unsigned long)page_to_virt(page),
+ 1 << order);
+ if (ret)
+ goto remove_mapping;
ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
pool_size, NUMA_NO_NODE);
if (ret)
- goto remove_mapping;
+ goto encrypt_mapping;
return 0;
+encrypt_mapping:
+ ret = set_memory_encrypted((unsigned long)page_to_virt(page),
+ 1 << order);
+ if (WARN_ON_ONCE(ret)) {
+ /* Decrypt succeeded but encrypt failed, purposely leak */
+ goto out;
+ }
remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
dma_common_free_remap(addr, pool_size);
-free_page:
+#endif
+free_page: __maybe_unused
if (!dma_release_from_contiguous(NULL, page, 1 << order))
__free_pages(page, order);
out:
The subject should say something like "atomic unencrypted allocations..",
as many other atomic allocations are fine. Which brings up that with
the code in this patch we can't really support architectures that need
the atomic pools both for uncached remapping for just some devices
and for unencrypted memory for others. We don't have such an architecture
right now, and I hope we don't grow one, but we probably need a little
safeguard with a BUILD_BUG_ON if both options are set, something like the
untested sketch below. I can send an incremental patch for that if that
is ok with you.
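
Completely untested, and CONFIG_AMD_MEM_ENCRYPT is only a stand-in here for
whatever config ends up selecting force_dma_unencrypted(), but the safeguard
I have in mind is roughly this, dropped into dma_atomic_pool_init():

	/*
	 * The atomic pools are either remapped uncached or kept unencrypted
	 * in the direct mapping, not both at the same time, so refuse to
	 * build a configuration that would need both.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		     IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT));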