@@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
36
36
return 0 ;
37
37
}
38
38
39
- FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS (drm_fbdev_dma ,
40
- drm_fb_helper_damage_range ,
41
- drm_fb_helper_damage_area );
42
-
43
39
static int drm_fbdev_dma_fb_mmap (struct fb_info * info , struct vm_area_struct * vma )
44
40
{
45
41
struct drm_fb_helper * fb_helper = info -> par ;
46
- struct drm_framebuffer * fb = fb_helper -> fb ;
47
- struct drm_gem_dma_object * dma = drm_fb_dma_get_gem_obj (fb , 0 );
48
42
49
- if (!dma -> map_noncoherent )
50
- vma -> vm_page_prot = pgprot_writecombine (vma -> vm_page_prot );
51
-
52
- return fb_deferred_io_mmap (info , vma );
43
+ return drm_gem_prime_mmap (fb_helper -> buffer -> gem , vma );
53
44
}
54
45
55
46
static void drm_fbdev_dma_fb_destroy (struct fb_info * info )
@@ -70,13 +61,40 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
70
61
}
71
62
72
63
static const struct fb_ops drm_fbdev_dma_fb_ops = {
64
+ .owner = THIS_MODULE ,
65
+ .fb_open = drm_fbdev_dma_fb_open ,
66
+ .fb_release = drm_fbdev_dma_fb_release ,
67
+ __FB_DEFAULT_DMAMEM_OPS_RDWR ,
68
+ DRM_FB_HELPER_DEFAULT_OPS ,
69
+ __FB_DEFAULT_DMAMEM_OPS_DRAW ,
70
+ .fb_mmap = drm_fbdev_dma_fb_mmap ,
71
+ .fb_destroy = drm_fbdev_dma_fb_destroy ,
72
+ };
73
+
74
/*
 * Generate the deferred-I/O read/write and drawing fb helpers
 * (drm_fbdev_dma_defio_*), routing damage through the DRM fb-helper
 * range/area damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
77
+
78
+ static int drm_fbdev_dma_deferred_fb_mmap (struct fb_info * info , struct vm_area_struct * vma )
79
+ {
80
+ struct drm_fb_helper * fb_helper = info -> par ;
81
+ struct drm_framebuffer * fb = fb_helper -> fb ;
82
+ struct drm_gem_dma_object * dma = drm_fb_dma_get_gem_obj (fb , 0 );
83
+
84
+ if (!dma -> map_noncoherent )
85
+ vma -> vm_page_prot = pgprot_writecombine (vma -> vm_page_prot );
86
+
87
+ return fb_deferred_io_mmap (info , vma );
88
+ }
89
+
90
+ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
73
91
.owner = THIS_MODULE ,
74
92
.fb_open = drm_fbdev_dma_fb_open ,
75
93
.fb_release = drm_fbdev_dma_fb_release ,
76
94
__FB_DEFAULT_DEFERRED_OPS_RDWR (drm_fbdev_dma ),
77
95
DRM_FB_HELPER_DEFAULT_OPS ,
78
96
__FB_DEFAULT_DEFERRED_OPS_DRAW (drm_fbdev_dma ),
79
- .fb_mmap = drm_fbdev_dma_fb_mmap ,
97
+ .fb_mmap = drm_fbdev_dma_deferred_fb_mmap ,
80
98
.fb_destroy = drm_fbdev_dma_fb_destroy ,
81
99
};
82
100
@@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
89
107
{
90
108
struct drm_client_dev * client = & fb_helper -> client ;
91
109
struct drm_device * dev = fb_helper -> dev ;
110
+ bool use_deferred_io = false;
92
111
struct drm_client_buffer * buffer ;
93
112
struct drm_gem_dma_object * dma_obj ;
94
113
struct drm_framebuffer * fb ;
@@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
111
130
112
131
fb = buffer -> fb ;
113
132
133
+ /*
134
+ * Deferred I/O requires struct page for framebuffer memory,
135
+ * which is not guaranteed for all DMA ranges. We thus only
136
+ * install deferred I/O if we have a framebuffer that requires
137
+ * it.
138
+ */
139
+ if (fb -> funcs -> dirty )
140
+ use_deferred_io = true;
141
+
114
142
ret = drm_client_buffer_vmap (buffer , & map );
115
143
if (ret ) {
116
144
goto err_drm_client_buffer_delete ;
@@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
130
158
131
159
drm_fb_helper_fill_info (info , fb_helper , sizes );
132
160
133
- info -> fbops = & drm_fbdev_dma_fb_ops ;
161
+ if (use_deferred_io )
162
+ info -> fbops = & drm_fbdev_dma_deferred_fb_ops ;
163
+ else
164
+ info -> fbops = & drm_fbdev_dma_fb_ops ;
134
165
135
166
/* screen */
136
167
info -> flags |= FBINFO_VIRTFB ; /* system memory */
@@ -144,14 +175,28 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
144
175
}
145
176
info -> fix .smem_len = info -> screen_size ;
146
177
147
- /* deferred I/O */
148
- fb_helper -> fbdefio .delay = HZ / 20 ;
149
- fb_helper -> fbdefio .deferred_io = drm_fb_helper_deferred_io ;
178
+ /*
179
+ * Only set up deferred I/O if the screen buffer supports
180
+ * it. If this disagrees with the previous test for ->dirty,
181
+ * mmap on the /dev/fb file might not work correctly.
182
+ */
183
+ if (!is_vmalloc_addr (info -> screen_buffer ) && info -> fix .smem_start ) {
184
+ unsigned long pfn = info -> fix .smem_start >> PAGE_SHIFT ;
150
185
151
- info -> fbdefio = & fb_helper -> fbdefio ;
152
- ret = fb_deferred_io_init (info );
153
- if (ret )
154
- goto err_drm_fb_helper_release_info ;
186
+ if (drm_WARN_ON (dev , !pfn_to_page (pfn )))
187
+ use_deferred_io = false;
188
+ }
189
+
190
+ /* deferred I/O */
191
+ if (use_deferred_io ) {
192
+ fb_helper -> fbdefio .delay = HZ / 20 ;
193
+ fb_helper -> fbdefio .deferred_io = drm_fb_helper_deferred_io ;
194
+
195
+ info -> fbdefio = & fb_helper -> fbdefio ;
196
+ ret = fb_deferred_io_init (info );
197
+ if (ret )
198
+ goto err_drm_fb_helper_release_info ;
199
+ }
155
200
156
201
return 0 ;
157
202
0 commit comments