Skip to content

Commit 4491b85

Browse files
committed
Merge tag 'dma-mapping-6.12-2024-09-24' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - sort out a few issues with the direct calls to iommu-dma (Christoph
   Hellwig, Leon Romanovsky)

* tag 'dma-mapping-6.12-2024-09-24' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: report unlimited DMA addressing in IOMMU DMA path
  iommu/dma: remove most stubs in iommu-dma.h
  dma-mapping: fix vmap and mmap of noncontiougs allocations
2 parents db78436 + b348b6d commit 4491b85

File tree

4 files changed: +62 −147 lines changed

drivers/iommu/dma-iommu.c

Lines changed: 33 additions & 0 deletions
@@ -1038,6 +1038,21 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
+/*
+ * This is the actual return value from the iommu_dma_alloc_noncontiguous.
+ *
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmaping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+	struct sg_table sgt;
+	struct page **pages;
+};
+#define sgt_handle(sgt) \
+	container_of((sgt), struct dma_sgt_handle, sgt)
+
 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
@@ -1066,6 +1081,24 @@ void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	kfree(sh);
 }
 
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+}
+
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+		return -ENXIO;
+	return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
+}
+
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir)
 {

include/linux/dma-map-ops.h

Lines changed: 0 additions & 19 deletions
@@ -24,11 +24,6 @@ struct dma_map_ops {
 			gfp_t gfp);
 	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
 			dma_addr_t dma_handle, enum dma_data_direction dir);
-	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
-			enum dma_data_direction dir, gfp_t gfp,
-			unsigned long attrs);
-	void (*free_noncontiguous)(struct device *dev, size_t size,
-			struct sg_table *sgt, enum dma_data_direction dir);
 	int (*mmap)(struct device *, struct vm_area_struct *,
 			void *, dma_addr_t, size_t, unsigned long attrs);
 
@@ -206,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_GLOBAL_POOL */
 
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
-	struct sg_table sgt;
-	struct page **pages;
-};
-#define sgt_handle(sgt) \
-	container_of((sgt), struct dma_sgt_handle, sgt)
-
 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);

include/linux/iommu-dma.h

Lines changed: 14 additions & 100 deletions
@@ -14,6 +14,13 @@ static inline bool use_dma_iommu(struct device *dev)
 {
 	return dev->dma_iommu;
 }
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs);
@@ -44,6 +51,12 @@ struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+	vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt);
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir);
 void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
@@ -52,104 +65,5 @@ void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir);
 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir);
-#else
-static inline bool use_dma_iommu(struct device *dev)
-{
-	return false;
-}
-static inline dma_addr_t iommu_dma_map_page(struct device *dev,
-		struct page *page, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	return DMA_MAPPING_ERROR;
-}
-static inline void iommu_dma_unmap_page(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-}
-static inline int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	return -EINVAL;
-}
-static inline void iommu_dma_unmap_sg(struct device *dev,
-		struct scatterlist *sg, int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-}
-static inline void *iommu_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-	return NULL;
-}
-static inline int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	return -EINVAL;
-}
-static inline int iommu_dma_get_sgtable(struct device *dev,
-		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
-		size_t size, unsigned long attrs)
-{
-	return -EINVAL;
-}
-static inline unsigned long iommu_dma_get_merge_boundary(struct device *dev)
-{
-	return 0;
-}
-static inline size_t iommu_dma_opt_mapping_size(void)
-{
-	return 0;
-}
-static inline size_t iommu_dma_max_mapping_size(struct device *dev)
-{
-	return 0;
-}
-static inline void iommu_dma_free(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
-}
-static inline dma_addr_t iommu_dma_map_resource(struct device *dev,
-		phys_addr_t phys, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	return DMA_MAPPING_ERROR;
-}
-static inline void iommu_dma_unmap_resource(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-}
-static inline struct sg_table *
-iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
-		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
-{
-	return NULL;
-}
-static inline void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
-		struct sg_table *sgt, enum dma_data_direction dir)
-{
-}
-static inline void iommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction dir)
-{
-}
-static inline void iommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void iommu_dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
-{
-}
-static inline void iommu_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
-{
-}
-#endif /* CONFIG_IOMMU_DMA */
+
 #endif /* _LINUX_IOMMU_DMA_H */

kernel/dma/mapping.c

Lines changed: 15 additions & 28 deletions
@@ -569,6 +569,10 @@ u64 dma_get_required_mask(struct device *dev)
 
 	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_required_mask(dev);
+
+	if (use_dma_iommu(dev))
+		return DMA_BIT_MASK(32);
+
 	if (ops->get_required_mask)
 		return ops->get_required_mask(dev);
 
@@ -750,17 +754,14 @@ static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct sg_table *sgt;
 
 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
 		return NULL;
 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
 		return NULL;
 
-	if (ops && ops->alloc_noncontiguous)
-		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
-	else if (use_dma_iommu(dev))
+	if (use_dma_iommu(dev))
 		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
 	else
 		sgt = alloc_single_sgt(dev, size, dir, gfp);
@@ -786,13 +787,10 @@ static void free_single_sgt(struct device *dev, size_t size,
 void dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
-	if (ops && ops->free_noncontiguous)
-		ops->free_noncontiguous(dev, size, sgt, dir);
-	else if (use_dma_iommu(dev))
+
+	if (use_dma_iommu(dev))
 		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
 	else
 		free_single_sgt(dev, size, sgt, dir);
@@ -802,37 +800,26 @@ EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
 void *dma_vmap_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	if (ops && ops->alloc_noncontiguous)
-		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+	if (use_dma_iommu(dev))
+		return iommu_dma_vmap_noncontiguous(dev, size, sgt);
+
 	return page_address(sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
 
 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous)
-		vunmap(vaddr);
+	if (use_dma_iommu(dev))
+		iommu_dma_vunmap_noncontiguous(dev, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
 
 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 		size_t size, struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous) {
-		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		if (vma->vm_pgoff >= count ||
-		    vma_pages(vma) > count - vma->vm_pgoff)
-			return -ENXIO;
-		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
-	}
+	if (use_dma_iommu(dev))
+		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
@@ -926,7 +913,7 @@ bool dma_addressing_limited(struct device *dev)
 			dma_get_required_mask(dev))
 		return true;
 
-	if (unlikely(ops))
+	if (unlikely(ops) || use_dma_iommu(dev))
 		return false;
 	return !dma_direct_all_ram_mapped(dev);
 }

0 commit comments

Comments
 (0)