@@ -28,10 +28,46 @@
 
 #include "i915_drv.h"
 
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+	struct mm_struct *mm;
+	unsigned long pfn;
+	pgprot_t prot;
+
+	struct sgt_iter sgt;
+	resource_size_t iobase;
+};
 
 #define use_dma(io) ((io) != -1)
 
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+	if (use_dma(r->iobase))
+		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+	else
+		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	if (GEM_WARN_ON(!r->sgt.sgp))
+		return -EINVAL;
+
+	/* Special PTEs are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte,
+		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+	r->pfn++; /* track insertions in case we need to unwind later */
+
+	r->sgt.curr += PAGE_SIZE;
+	if (r->sgt.curr >= r->sgt.max)
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+	return 0;
+}
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -46,7 +82,12 @@ int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase)
 {
-	unsigned long pfn, len, remapped = 0;
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.prot = vma->vm_page_prot,
+		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.iobase = iobase,
+	};
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
 	if (!use_dma(iobase))
 		flush_cache_range(vma, addr, size);
 
-	do {
-		if (use_dma(iobase)) {
-			if (!sg_dma_len(sgl))
-				break;
-			pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
-			len = sg_dma_len(sgl);
-		} else {
-			pfn = page_to_pfn(sg_page(sgl));
-			len = sgl->length;
-		}
-
-		err = remap_pfn_range(vma, addr + remapped, pfn, len,
-				      vma->vm_page_prot);
-		if (err)
-			break;
-		remapped += len;
-	} while ((sgl = __sg_next(sgl)));
-
-	if (err)
-		zap_vma_ptes(vma, addr, remapped);
-	return err;
+	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
 }
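
The shape of the conversion: instead of calling remap_pfn_range() once per scatterlist segment, remap_io_sg() now makes a single apply_to_page_range() pass over [addr, addr + size), and the remap_sg() callback installs one special PTE per page via pte_mkspecial(), so no struct page is ever referenced. Each successful insertion bumps r.pfn, which is why the error path can zap exactly the populated range with zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT). For reference, the iterator state the new helpers consume is declared in the driver's i915_scatterlist.h and looks roughly like this (paraphrased for orientation, not part of this patch):

	struct sgt_iter {
		struct scatterlist *sgp;	/* current scatterlist element */
		union {
			unsigned long pfn;	/* base page frame, CPU-page walk */
			dma_addr_t dma;		/* base bus address, DMA walk */
		};
		unsigned int curr;		/* byte offset within the element */
		unsigned int max;		/* length of the element in bytes */
	};

__sgt_iter() primes this state for a fresh element and __sg_next() steps to the following one, which is exactly what remap_sg() does once curr reaches max.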
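
For context, a caller uses this to populate an entire VMA in one shot, typically from a CPU fault handler. A minimal sketch of such a caller, with hypothetical handler and object names rather than the driver's actual code:

	static vm_fault_t my_fault(struct vm_fault *vmf)	/* hypothetical */
	{
		struct vm_area_struct *area = vmf->vma;
		struct my_obj *obj = area->vm_private_data;	/* hypothetical type */
		int err;

		/* iobase == -1 selects the CPU-page branch in sgt_pfn() */
		err = remap_io_sg(area, area->vm_start,
				  area->vm_end - area->vm_start,
				  obj->pages->sgl, -1);

		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
	}

Passing a real resource_size_t iobase instead of -1 flips every helper onto the DMA path: sgt_pfn() then derives the PFN from the element's DMA address plus iobase, mirroring the use_dma() branch of the deleted loop.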