
Commit 9087c37

tlendacky authored and Christoph Hellwig committed
dma-direct: Force unencrypted DMA under SME for certain DMA masks
If a device doesn't support DMA to a physical address that includes the encryption bit (currently bit 47, so 48-bit DMA), then the DMA must occur to unencrypted memory. SWIOTLB is used to satisfy that requirement if an IOMMU is not active (enabled or configured in passthrough mode).

However, commit fafadcd ("swiotlb: don't dip into swiotlb pool for coherent allocations") modified the coherent allocation support in SWIOTLB to use the DMA direct coherent allocation support. When an IOMMU is not active, this resulted in dma_alloc_coherent() failing for devices that didn't support DMA addresses that included the encryption bit.

Addressing this requires changes to the force_dma_unencrypted() function in kernel/dma/direct.c. Since the function is now non-trivial and SME/SEV specific, update the DMA direct support to add an arch override for the force_dma_unencrypted() function. The arch override is selected when CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides in the arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when either SEV is active or SME is active and the device does not support DMA to physical addresses that include the encryption bit.

Fixes: fafadcd ("swiotlb: don't dip into swiotlb pool for coherent allocations")
Suggested-by: Christoph Hellwig <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Acked-by: Thomas Gleixner <[email protected]>
[hch: moved the force_dma_unencrypted declaration to dma-mapping.h, fold the s390 fix from Halil Pasic]
Signed-off-by: Christoph Hellwig <[email protected]>
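The mask comparison at the heart of this fix can be modeled outside the kernel. The following is a standalone userspace sketch, not kernel code: it assumes the encryption bit is bit 47 (as the message notes for current hardware) and uses __builtin_ctzll() in place of the kernel's __ffs64(), both of which return the index of the lowest set bit.

#include <stdio.h>
#include <stdint.h>

/* Same construction as the kernel's DMA_BIT_MASK() macro. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        /* Assumed: the SME encryption bit is bit 47 on current hardware. */
        uint64_t sme_me_mask = 1ULL << 47;

        /*
         * The widest DMA mask that still cannot reach the encryption
         * bit: every address it covers lies below bit 47.
         */
        uint64_t dma_enc_mask = DMA_BIT_MASK(__builtin_ctzll(sme_me_mask));

        /* A 32-bit-only device can never address the encryption bit... */
        printf("32-bit device forced unencrypted: %s\n",
               DMA_BIT_MASK(32) <= dma_enc_mask ? "yes" : "no");

        /* ...while a 48-bit device can, so encrypted DMA works. */
        printf("48-bit device forced unencrypted: %s\n",
               DMA_BIT_MASK(48) <= dma_enc_mask ? "yes" : "no");
        return 0;
}

A device whose mask cannot reach the encryption bit must be steered to unencrypted memory; a 48-bit or wider device can address encrypted pages directly.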
1 parent: 9637d51 · commit: 9087c37

File tree

7 files changed: +54 −13 lines

arch/s390/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -189,6 +189,7 @@ config S390
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
 	select HAVE_NMI
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select SWIOTLB
 	select GENERIC_ALLOCATOR

arch/s390/mm/init.c

Lines changed: 6 additions & 1 deletion

@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -161,6 +161,11 @@ bool sev_active(void)
 	return is_prot_virt_guest();
 }
 
+bool force_dma_unencrypted(struct device *dev)
+{
+	return sev_active();
+}
+
 /* protected virtualization */
 static void pv_init(void)
 {

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -1528,6 +1528,7 @@ config AMD_MEM_ENCRYPT
 	depends on X86_64 && CPU_SUP_AMD
 	select DYNAMIC_PHYSICAL_MASK
 	select ARCH_USE_MEMREMAP_PROT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	---help---
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory

arch/x86/mm/mem_encrypt.c

Lines changed: 30 additions & 0 deletions

@@ -15,6 +15,10 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_mask);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
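The device-side limit above is derived with min_not_zero(), which picks the smaller of the two masks while treating an unset (zero) bus_dma_mask as "no limit". A minimal userspace sketch of that semantic (the real macro lives in <linux/kernel.h>; the values below are purely illustrative):

#include <stdio.h>
#include <stdint.h>

static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

int main(void)
{
        /* 64-bit coherent mask, bus capped at 40 bits -> 40-bit limit. */
        printf("%#llx\n", (unsigned long long)
               min_not_zero(~0ULL, (1ULL << 40) - 1));

        /* No bus limit set (0): the coherent mask wins. */
        printf("%#llx\n", (unsigned long long)
               min_not_zero((1ULL << 32) - 1, 0));
        return 0;
}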

include/linux/dma-direct.h

Lines changed: 9 additions & 0 deletions

@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
 /*
  * If memory encryption is supported, phys_to_dma will set the memory encryption
  * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
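The #ifdef/stub split means generic code can call force_dma_unencrypted() unconditionally: architectures that select ARCH_HAS_FORCE_DMA_UNENCRYPTED link in a real implementation, everyone else gets a constant-false inline that the compiler folds away. A standalone demo of the pattern, with hypothetical names (DEMO_ARCH_HAS_FORCE_DMA_UNENCRYPTED and the struct device stand-in are invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct device { unsigned long long coherent_dma_mask; };

#ifdef DEMO_ARCH_HAS_FORCE_DMA_UNENCRYPTED
/* "Arch" implementation, e.g. an SME/SEV-style check. */
bool force_dma_unencrypted(struct device *dev)
{
        return dev->coherent_dma_mask < (1ULL << 48);
}
#else
/* Default: no memory encryption, so the branch is dead code. */
static inline bool force_dma_unencrypted(struct device *dev)
{
        (void)dev;
        return false;
}
#endif

int main(void)
{
        struct device d = { .coherent_dma_mask = 0xffffffffULL };

        /* Generic code calls the predicate with no #ifdef of its own. */
        printf("force unencrypted: %s\n",
               force_dma_unencrypted(&d) ? "yes" : "no");
        return 0;
}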

kernel/dma/Kconfig

Lines changed: 3 additions & 0 deletions

@@ -48,6 +48,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
 config ARCH_HAS_DMA_MMAP_PGPROT
 	bool
 
+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	bool
+
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool

kernel/dma/direct.c

Lines changed: 4 additions & 12 deletions

@@ -23,14 +23,6 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-	return sev_active();
-}
-
 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
 	if (!dev->dma_mask) {
@@ -46,7 +38,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		return __phys_to_dma(dev, phys);
 	return phys_to_dma(dev, phys);
 }
@@ -67,7 +59,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
 		dma_mask = dev->bus_dma_mask;
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
 		*phys_mask = dma_to_phys(dev, dma_mask);
@@ -159,7 +151,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted()) {
+	if (force_dma_unencrypted(dev)) {
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
 		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
 	} else {
@@ -192,7 +184,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
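From a driver's point of view, the failure this commit addresses looks roughly like the hedged sketch below (a hypothetical example_probe(), not part of the commit): a device capped at 32-bit DMA on an SME-active system with no IOMMU. Before the fix, the dma_alloc_coherent() call could fail; with the arch override, the buffer is allocated and mapped unencrypted.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int example_probe(struct device *dev)
{
        dma_addr_t handle;
        void *buf;

        /* Device hardware can only generate 32-bit DMA addresses. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return -EIO;

        /*
         * On an SME-active system without an IOMMU, this is the call
         * that used to fail; force_dma_unencrypted() now routes it to
         * memory that has been set unencrypted.
         */
        buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... program the device with 'handle', use 'buf' ... */

        dma_free_coherent(dev, PAGE_SIZE, buf, handle);
        return 0;
}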
