Skip to content

Commit 3603996

Browse files
author
Thomas Zimmermann
committed
drm/fbdev-dma: Add shadow buffering for deferred I/O
DMA areas are not necessarily backed by struct page, so we cannot rely on it for deferred I/O. Allocate a shadow buffer for drivers that require deferred I/O and use it as framebuffer memory. Fixes driver errors about being "Unable to handle kernel NULL pointer dereference at virtual address" or "Unable to handle kernel paging request at virtual address". The patch splits drm_fbdev_dma_driver_fbdev_probe() into an initial allocation, which creates the DMA-backed buffer object, and a tail that sets up the fbdev data structures. There is a tail function for direct memory mappings and a tail function for deferred I/O with the shadow buffer. It is no longer possible to use deferred I/O without a shadow buffer. It can be re-added if there exists a reliable test for usable struct page in the allocated DMA-backed buffer object. Signed-off-by: Thomas Zimmermann <[email protected]> Reported-by: Nuno Gonçalves <[email protected]> Closes: https://lore.kernel.org/dri-devel/CAEXMXLR55DziAMbv_+2hmLeH-jP96pmit6nhs6siB22cpQFr9w@mail.gmail.com/ Tested-by: Nuno Gonçalves <[email protected]> Fixes: 5ab9144 ("drm/tiny/ili9225: Use fbdev-dma") Cc: Thomas Zimmermann <[email protected]> Cc: <[email protected]> # v6.11+ Reviewed-by: Simona Vetter <[email protected]> Reviewed-by: Javier Martinez Canillas <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 01f1d77 commit 3603996

File tree

1 file changed

+155
-62
lines changed

1 file changed

+155
-62
lines changed

drivers/gpu/drm/drm_fbdev_dma.c

Lines changed: 155 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
// SPDX-License-Identifier: MIT
22

33
#include <linux/fb.h>
4+
#include <linux/vmalloc.h>
45

56
#include <drm/drm_drv.h>
67
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
7071
.fb_destroy = drm_fbdev_dma_fb_destroy,
7172
};
7273

73-
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
74+
/*
 * Generate the default deferred-I/O read/write/draw helpers for the
 * shadowed framebuffer, routing damage tracking through the DRM fb
 * helper's range/area damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
7677

77-
static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
78+
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
7879
{
7980
struct drm_fb_helper *fb_helper = info->par;
80-
struct drm_framebuffer *fb = fb_helper->fb;
81-
struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
81+
void *shadow = info->screen_buffer;
82+
83+
if (!fb_helper->dev)
84+
return;
8285

83-
if (!dma->map_noncoherent)
84-
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
86+
if (info->fbdefio)
87+
fb_deferred_io_cleanup(info);
88+
drm_fb_helper_fini(fb_helper);
89+
vfree(shadow);
8590

86-
return fb_deferred_io_mmap(info, vma);
91+
drm_client_buffer_vunmap(fb_helper->buffer);
92+
drm_client_framebuffer_delete(fb_helper->buffer);
93+
drm_client_release(&fb_helper->client);
94+
drm_fb_helper_unprepare(fb_helper);
95+
kfree(fb_helper);
8796
}
8897

89-
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
98+
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
9099
.owner = THIS_MODULE,
91100
.fb_open = drm_fbdev_dma_fb_open,
92101
.fb_release = drm_fbdev_dma_fb_release,
93-
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
102+
FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
94103
DRM_FB_HELPER_DEFAULT_OPS,
95-
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
96-
.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
97-
.fb_destroy = drm_fbdev_dma_fb_destroy,
104+
.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
98105
};
99106

100107
/*
101108
* struct drm_fb_helper
102109
*/
103110

111+
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
112+
struct drm_clip_rect *clip,
113+
struct iosys_map *dst)
114+
{
115+
struct drm_framebuffer *fb = fb_helper->fb;
116+
size_t offset = clip->y1 * fb->pitches[0];
117+
size_t len = clip->x2 - clip->x1;
118+
unsigned int y;
119+
void *src;
120+
121+
switch (drm_format_info_bpp(fb->format, 0)) {
122+
case 1:
123+
offset += clip->x1 / 8;
124+
len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
125+
break;
126+
case 2:
127+
offset += clip->x1 / 4;
128+
len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
129+
break;
130+
case 4:
131+
offset += clip->x1 / 2;
132+
len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
133+
break;
134+
default:
135+
offset += clip->x1 * fb->format->cpp[0];
136+
len *= fb->format->cpp[0];
137+
break;
138+
}
139+
140+
src = fb_helper->info->screen_buffer + offset;
141+
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
142+
143+
for (y = clip->y1; y < clip->y2; y++) {
144+
iosys_map_memcpy_to(dst, 0, src, len);
145+
iosys_map_incr(dst, fb->pitches[0]);
146+
src += fb->pitches[0];
147+
}
148+
}
149+
150+
static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
151+
struct drm_clip_rect *clip)
152+
{
153+
struct drm_client_buffer *buffer = fb_helper->buffer;
154+
struct iosys_map dst;
155+
156+
/*
157+
* For fbdev emulation, we only have to protect against fbdev modeset
158+
* operations. Nothing else will involve the client buffer's BO. So it
159+
* is sufficient to acquire struct drm_fb_helper.lock here.
160+
*/
161+
mutex_lock(&fb_helper->lock);
162+
163+
dst = buffer->map;
164+
drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
165+
166+
mutex_unlock(&fb_helper->lock);
167+
168+
return 0;
169+
}
104170
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
105171
struct drm_clip_rect *clip)
106172
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
112178
return 0;
113179

114180
if (helper->fb->funcs->dirty) {
181+
ret = drm_fbdev_dma_damage_blit(helper, clip);
182+
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
183+
return ret;
184+
115185
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
116186
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
117187
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
128198
* struct drm_fb_helper
129199
*/
130200

201+
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
202+
struct drm_fb_helper_surface_size *sizes)
203+
{
204+
struct drm_device *dev = fb_helper->dev;
205+
struct drm_client_buffer *buffer = fb_helper->buffer;
206+
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
207+
struct drm_framebuffer *fb = fb_helper->fb;
208+
struct fb_info *info = fb_helper->info;
209+
struct iosys_map map = buffer->map;
210+
211+
info->fbops = &drm_fbdev_dma_fb_ops;
212+
213+
/* screen */
214+
info->flags |= FBINFO_VIRTFB; /* system memory */
215+
if (dma_obj->map_noncoherent)
216+
info->flags |= FBINFO_READS_FAST; /* signal caching */
217+
info->screen_size = sizes->surface_height * fb->pitches[0];
218+
info->screen_buffer = map.vaddr;
219+
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
220+
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
221+
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
222+
}
223+
info->fix.smem_len = info->screen_size;
224+
225+
return 0;
226+
}
227+
228+
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
229+
struct drm_fb_helper_surface_size *sizes)
230+
{
231+
struct drm_client_buffer *buffer = fb_helper->buffer;
232+
struct fb_info *info = fb_helper->info;
233+
size_t screen_size = buffer->gem->size;
234+
void *screen_buffer;
235+
int ret;
236+
237+
/*
238+
* Deferred I/O requires struct page for framebuffer memory,
239+
* which is not guaranteed for all DMA ranges. We thus create
240+
* a shadow buffer in system memory.
241+
*/
242+
screen_buffer = vzalloc(screen_size);
243+
if (!screen_buffer)
244+
return -ENOMEM;
245+
246+
info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
247+
248+
/* screen */
249+
info->flags |= FBINFO_VIRTFB; /* system memory */
250+
info->flags |= FBINFO_READS_FAST; /* signal caching */
251+
info->screen_buffer = screen_buffer;
252+
info->fix.smem_len = screen_size;
253+
254+
fb_helper->fbdefio.delay = HZ / 20;
255+
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
256+
257+
info->fbdefio = &fb_helper->fbdefio;
258+
ret = fb_deferred_io_init(info);
259+
if (ret)
260+
goto err_vfree;
261+
262+
return 0;
263+
264+
err_vfree:
265+
vfree(screen_buffer);
266+
return ret;
267+
}
268+
131269
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
132270
struct drm_fb_helper_surface_size *sizes)
133271
{
134272
struct drm_client_dev *client = &fb_helper->client;
135273
struct drm_device *dev = fb_helper->dev;
136-
bool use_deferred_io = false;
137274
struct drm_client_buffer *buffer;
138-
struct drm_gem_dma_object *dma_obj;
139275
struct drm_framebuffer *fb;
140276
struct fb_info *info;
141277
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
152288
sizes->surface_height, format);
153289
if (IS_ERR(buffer))
154290
return PTR_ERR(buffer);
155-
dma_obj = to_drm_gem_dma_obj(buffer->gem);
156291

157292
fb = buffer->fb;
158293

159-
/*
160-
* Deferred I/O requires struct page for framebuffer memory,
161-
* which is not guaranteed for all DMA ranges. We thus only
162-
* install deferred I/O if we have a framebuffer that requires
163-
* it.
164-
*/
165-
if (fb->funcs->dirty)
166-
use_deferred_io = true;
167-
168294
ret = drm_client_buffer_vmap(buffer, &map);
169295
if (ret) {
170296
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
185311

186312
drm_fb_helper_fill_info(info, fb_helper, sizes);
187313

188-
if (use_deferred_io)
189-
info->fbops = &drm_fbdev_dma_deferred_fb_ops;
314+
if (fb->funcs->dirty)
315+
ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
190316
else
191-
info->fbops = &drm_fbdev_dma_fb_ops;
192-
193-
/* screen */
194-
info->flags |= FBINFO_VIRTFB; /* system memory */
195-
if (dma_obj->map_noncoherent)
196-
info->flags |= FBINFO_READS_FAST; /* signal caching */
197-
info->screen_size = sizes->surface_height * fb->pitches[0];
198-
info->screen_buffer = map.vaddr;
199-
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
200-
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
201-
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
202-
}
203-
info->fix.smem_len = info->screen_size;
204-
205-
/*
206-
* Only set up deferred I/O if the screen buffer supports
207-
* it. If this disagrees with the previous test for ->dirty,
208-
* mmap on the /dev/fb file might not work correctly.
209-
*/
210-
if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
211-
unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
212-
213-
if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
214-
use_deferred_io = false;
215-
}
216-
217-
/* deferred I/O */
218-
if (use_deferred_io) {
219-
fb_helper->fbdefio.delay = HZ / 20;
220-
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
221-
222-
info->fbdefio = &fb_helper->fbdefio;
223-
ret = fb_deferred_io_init(info);
224-
if (ret)
225-
goto err_drm_fb_helper_release_info;
226-
}
317+
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
318+
if (ret)
319+
goto err_drm_fb_helper_release_info;
227320

228321
return 0;
229322

0 commit comments

Comments
 (0)