
Commit 3de18c8

Merge branch 'stable/for-linus-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "A new feature called restricted DMA pools. It allows SWIOTLB to
  utilize per-device (or per-platform) allocated memory pools instead
  of using the global one.

  The first big user of this is ARM Confidential Computing where the
  memory for DMA operations can be set per platform"

* 'stable/for-linus-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb: (23 commits)
  swiotlb: use depends on for DMA_RESTRICTED_POOL
  of: restricted dma: Don't fail device probe on rmem init failure
  of: Move of_dma_set_restricted_buffer() into device.c
  powerpc/svm: Don't issue ultracalls if !mem_encrypt_active()
  s390/pv: fix the forcing of the swiotlb
  swiotlb: Free tbl memory in swiotlb_exit()
  swiotlb: Emit diagnostic in swiotlb_exit()
  swiotlb: Convert io_default_tlb_mem to static allocation
  of: Return success from of_dma_set_restricted_buffer() when !OF_ADDRESS
  swiotlb: add overflow checks to swiotlb_bounce
  swiotlb: fix implicit debugfs declarations
  of: Add plumbing for restricted DMA pool
  dt-bindings: of: Add restricted DMA pool
  swiotlb: Add restricted DMA pool initialization
  swiotlb: Add restricted DMA alloc/free support
  swiotlb: Refactor swiotlb_tbl_unmap_single
  swiotlb: Move alloc_size to swiotlb_find_slots
  swiotlb: Use is_swiotlb_force_bounce for swiotlb data bouncing
  swiotlb: Update is_swiotlb_active to add a struct device argument
  swiotlb: Update is_swiotlb_buffer to add a struct device argument
  ...
2 parents 1472690 + f3c4b13 commit 3de18c8
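
The common thread of the series is that the SWIOTLB helpers gain a struct device argument so they can consult a per-device pool (dev->dma_io_tlb_mem) instead of the single global table. As a minimal sketch only, simplified from include/linux/swiotlb.h as of this merge (most fields and bookkeeping elided):

/* Sketch only -- abridged from include/linux/swiotlb.h after this merge. */
struct io_tlb_mem {
        phys_addr_t start;
        phys_addr_t end;
        bool force_bounce;
        /* ... slot bookkeeping, debugfs, locking elided ... */
};

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        /* A device with a restricted pool checks its own region, not a global one. */
        return mem && paddr >= mem->start && paddr < mem->end;
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->force_bounce;
}

device_initialize() (see the drivers/base/core.c hunk below) points dma_io_tlb_mem at the default global table, so devices without a restricted pool behave exactly as before.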

16 files changed: +469 −136 lines changed


Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt

Lines changed: 33 additions & 3 deletions
@@ -51,6 +51,23 @@ compatible (optional) - standard definition
           used as a shared pool of DMA buffers for a set of devices. It can
           be used by an operating system to instantiate the necessary pool
           management subsystem if necessary.
+        - restricted-dma-pool: This indicates a region of memory meant to be
+          used as a pool of restricted DMA buffers for a set of devices. The
+          memory region would be the only region accessible to those devices.
+          When using this, the no-map and reusable properties must not be set,
+          so the operating system can create a virtual mapping that will be used
+          for synchronization. The main purpose for restricted DMA is to
+          mitigate the lack of DMA access control on systems without an IOMMU,
+          which could result in the DMA accessing the system memory at
+          unexpected times and/or unexpected addresses, possibly leading to data
+          leakage or corruption. The feature on its own provides a basic level
+          of protection against the DMA overwriting buffer contents at
+          unexpected times. However, to protect against general data leakage and
+          system memory corruption, the system needs to provide way to lock down
+          the memory access, e.g., MPU. Note that since coherent allocation
+          needs remapping, one must set up another device coherent pool by
+          shared-dma-pool and use dma_alloc_from_dev_coherent instead for atomic
+          coherent allocation.
         - vendor specific string in the form <vendor>,[<device>-]<usage>
 no-map (optional) - empty property
     - Indicates the operating system must not create a virtual mapping

@@ -85,10 +102,11 @@ memory-region-names (optional) - a list of names, one for each corresponding
 
 Example
 -------
-This example defines 3 contiguous regions are defined for Linux kernel:
+This example defines 4 contiguous regions for Linux kernel:
 one default of all device drivers (named linux,cma@72000000 and 64MiB in size),
-one dedicated to the framebuffer device (named framebuffer@78000000, 8MiB), and
-one for multimedia processing (named multimedia-memory@77000000, 64MiB).
+one dedicated to the framebuffer device (named framebuffer@78000000, 8MiB),
+one for multimedia processing (named multimedia-memory@77000000, 64MiB), and
+one for restricted dma pool (named restricted_dma_reserved@0x50000000, 64MiB).
 
 / {
 	#address-cells = <1>;

@@ -120,6 +138,11 @@ one for multimedia processing (named multimedia-memory@77000000, 64MiB).
 			compatible = "acme,multimedia-memory";
 			reg = <0x77000000 0x4000000>;
 		};
+
+		restricted_dma_reserved: restricted_dma_reserved {
+			compatible = "restricted-dma-pool";
+			reg = <0x50000000 0x4000000>;
+		};
 	};
 
 	/* ... */

@@ -138,4 +161,11 @@ one for multimedia processing (named multimedia-memory@77000000, 64MiB).
 		memory-region = <&multimedia_reserved>;
 		/* ... */
 	};
+
+	pcie_device: pcie_device@0,0 {
+		reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
+		       0x83010000 0x0 0x00100000 0x0 0x00100000>;
+		memory-region = <&restricted_dma_reserved>;
+		/* ... */
+	};
 };
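
As a usage-level sketch (not part of the diff above): once a device's memory-region phandle points at a restricted-dma-pool node such as restricted_dma_reserved, the driver keeps using the ordinary streaming DMA API and the bounce through the restricted region happens underneath it. The probe function and buffer names below (my_drv_probe, buf) are illustrative only.

/* Illustrative driver-side sketch; my_drv_probe() is not part of this series. */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static int my_drv_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        dma_addr_t dma;
        void *buf;

        buf = kmalloc(SZ_4K, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * With a restricted pool attached to @dev, this mapping is
         * force-bounced: the address handed to the hardware lies inside
         * the reserved region, never in general system memory.
         */
        dma = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                kfree(buf);
                return -ENOMEM;
        }

        /* ... program the device with @dma ... */

        dma_unmap_single(dev, dma, SZ_4K, DMA_TO_DEVICE);
        kfree(buf);
        return 0;
}

Per the binding note above, atomic coherent allocations are a separate concern: they would need a device coherent pool declared with shared-dma-pool (served via dma_alloc_from_dev_coherent) rather than the restricted pool.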

arch/powerpc/platforms/pseries/svm.c

Lines changed: 6 additions & 0 deletions
@@ -63,6 +63,9 @@ void __init svm_swiotlb_init(void)
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
+	if (!mem_encrypt_active())
+		return 0;
+
 	if (!PAGE_ALIGNED(addr))
 		return -EINVAL;
 
@@ -73,6 +76,9 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
+	if (!mem_encrypt_active())
+		return 0;
+
 	if (!PAGE_ALIGNED(addr))
 		return -EINVAL;

arch/s390/mm/init.c

Lines changed: 1 addition & 1 deletion
@@ -187,9 +187,9 @@ static void pv_init(void)
 		return;
 
 	/* make sure bounce buffers are shared */
+	swiotlb_force = SWIOTLB_FORCE;
 	swiotlb_init(1);
 	swiotlb_update_mem_attributes();
-	swiotlb_force = SWIOTLB_FORCE;
 }
 
 void __init mem_init(void)

drivers/base/core.c

Lines changed: 4 additions & 0 deletions
@@ -27,6 +27,7 @@
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
+#include <linux/swiotlb.h>
 #include <linux/sysfs.h>
 #include <linux/dma-map-ops.h> /* for dma_default_coherent */
 
@@ -2851,6 +2852,9 @@ void device_initialize(struct device *dev)
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 	dev->dma_coherent = dma_default_coherent;
 #endif
+#ifdef CONFIG_SWIOTLB
+	dev->dma_io_tlb_mem = &io_tlb_default_mem;
+#endif
 }
 EXPORT_SYMBOL_GPL(device_initialize);

drivers/gpu/drm/i915/gem/i915_gem_internal.c

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
 	max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
-	if (is_swiotlb_active()) {
+	if (is_swiotlb_active(obj->base.dev->dev)) {
 		unsigned int max_segment;
 
 		max_segment = swiotlb_max_segment();

drivers/gpu/drm/nouveau/nouveau_ttm.c

Lines changed: 1 addition & 1 deletion
@@ -276,7 +276,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	}
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-	need_swiotlb = is_swiotlb_active();
+	need_swiotlb = is_swiotlb_active(dev->dev);
 #endif
 
 	ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,

drivers/iommu/dma-iommu.c

Lines changed: 6 additions & 6 deletions
@@ -506,7 +506,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
 
 	__iommu_dma_unmap(dev, dma_addr, size);
 
-	if (unlikely(is_swiotlb_buffer(phys)))
+	if (unlikely(is_swiotlb_buffer(dev, phys)))
 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
@@ -577,7 +577,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
 	}
 
 	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
 		swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
 	return iova;
 }
@@ -784,7 +784,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_cpu(phys, size, dir);
 
-	if (is_swiotlb_buffer(phys))
+	if (is_swiotlb_buffer(dev, phys))
 		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
@@ -797,7 +797,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	if (is_swiotlb_buffer(phys))
+	if (is_swiotlb_buffer(dev, phys))
 		swiotlb_sync_single_for_device(dev, phys, size, dir);
 
 	if (!dev_is_dma_coherent(dev))
@@ -818,7 +818,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 		if (!dev_is_dma_coherent(dev))
 			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 
-		if (is_swiotlb_buffer(sg_phys(sg)))
+		if (is_swiotlb_buffer(dev, sg_phys(sg)))
 			swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
 						    sg->length, dir);
 	}
@@ -835,7 +835,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		return;
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (is_swiotlb_buffer(sg_phys(sg)))
+		if (is_swiotlb_buffer(dev, sg_phys(sg)))
 			swiotlb_sync_single_for_device(dev, sg_phys(sg),
 						       sg->length, dir);

drivers/of/device.c

Lines changed: 40 additions & 0 deletions
@@ -5,6 +5,7 @@
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/of_iommu.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/dma-direct.h> /* for bus_dma_region */
 #include <linux/dma-map-ops.h>
 #include <linux/init.h>
@@ -52,6 +53,42 @@ int of_device_add(struct platform_device *ofdev)
 	return device_add(&ofdev->dev);
 }
 
+static void
+of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
+{
+	struct device_node *node, *of_node = dev->of_node;
+	int count, i;
+
+	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
+		return;
+
+	count = of_property_count_elems_of_size(of_node, "memory-region",
+						sizeof(u32));
+	/*
+	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
+	 * the OF node having DMA configuration.
+	 */
+	if (count <= 0) {
+		of_node = np;
+		count = of_property_count_elems_of_size(
+			of_node, "memory-region", sizeof(u32));
+	}
+
+	for (i = 0; i < count; i++) {
+		node = of_parse_phandle(of_node, "memory-region", i);
+		/*
+		 * There might be multiple memory regions, but only one
+		 * restricted-dma-pool region is allowed.
+		 */
+		if (of_device_is_compatible(node, "restricted-dma-pool") &&
+		    of_device_is_available(node))
+			break;
+	}
+
+	if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
+}
+
 /**
  * of_dma_configure_id - Setup DMA configuration
  * @dev:	Device to apply DMA configuration
@@ -165,6 +202,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 
 	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
 
+	if (!iommu)
+		of_dma_set_restricted_buffer(dev, np);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(of_dma_configure_id);

drivers/pci/xen-pcifront.c

Lines changed: 1 addition & 1 deletion
@@ -699,7 +699,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
 
 	spin_unlock(&pcifront_dev_lock);
 
-	if (!err && !is_swiotlb_active()) {
+	if (!err && !is_swiotlb_active(&pdev->xdev->dev)) {
 		err = pci_xen_swiotlb_init_late();
 		if (err)
 			dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");

drivers/xen/swiotlb-xen.c

Lines changed: 4 additions & 4 deletions
@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	 * in our domain. Therefore _only_ check address within our domain.
 	 */
 	if (pfn_valid(PFN_DOWN(paddr)))
-		return is_swiotlb_buffer(paddr);
+		return is_swiotlb_buffer(dev, paddr);
 	return 0;
 }
 
@@ -164,7 +164,7 @@ int __ref xen_swiotlb_init(void)
 	int rc = -ENOMEM;
 	char *start;
 
-	if (io_tlb_default_mem != NULL) {
+	if (io_tlb_default_mem.nslabs) {
 		pr_warn("swiotlb buffer already initialized\n");
 		return -EEXIST;
 	}
@@ -374,7 +374,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size, true) &&
 	    !range_straddles_page_boundary(phys, size) &&
 		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-		swiotlb_force != SWIOTLB_FORCE)
+		!is_swiotlb_force_bounce(dev))
 		goto done;
 
 	/*
@@ -547,7 +547,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
+	return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
