@@ -17,90 +17,53 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 {
 	struct drm_device *dev = exynos_gem->base.dev;
-	unsigned long attr;
-	unsigned int nr_pages;
-	struct sg_table sgt;
-	int ret = -ENOMEM;
+	unsigned long attr = 0;
 
 	if (exynos_gem->dma_addr) {
 		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
 		return 0;
 	}
 
-	exynos_gem->dma_attrs = 0;
-
 	/*
 	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
 	 * region will be allocated else physically contiguous
 	 * as possible.
 	 */
 	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+		attr |= DMA_ATTR_FORCE_CONTIGUOUS;
 
 	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
	 * else cachable mapping.
	 */
 	if (exynos_gem->flags & EXYNOS_BO_WC ||
 	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
+		attr |= DMA_ATTR_WRITE_COMBINE;
 	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	exynos_gem->dma_attrs |= attr;
-	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-
-	nr_pages = exynos_gem->size >> PAGE_SHIFT;
+		attr |= DMA_ATTR_NON_CONSISTENT;
 
-	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-			GFP_KERNEL | __GFP_ZERO);
-	if (!exynos_gem->pages) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
-		return -ENOMEM;
-	}
+	/* FBDev emulation requires kernel mapping */
+	if (!kvmap)
+		attr |= DMA_ATTR_NO_KERNEL_MAPPING;
 
+	exynos_gem->dma_attrs = attr;
 	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
 					     &exynos_gem->dma_addr, GFP_KERNEL,
 					     exynos_gem->dma_attrs);
 	if (!exynos_gem->cookie) {
 		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
-		goto err_free;
-	}
-
-	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
-				    exynos_gem->dma_addr, exynos_gem->size,
-				    exynos_gem->dma_attrs);
-	if (ret < 0) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
-		goto err_dma_free;
-	}
-
-	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
-					     nr_pages)) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
-		ret = -EINVAL;
-		goto err_sgt_free;
+		return -ENOMEM;
 	}
 
-	sg_free_table(&sgt);
+	if (kvmap)
+		exynos_gem->kvaddr = exynos_gem->cookie;
 
 	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
 	return 0;
-
-err_sgt_free:
-	sg_free_table(&sgt);
-err_dma_free:
-	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
-		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
-	kvfree(exynos_gem->pages);
-
-	return ret;
 }
 
 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
 	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
-
-	kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
 
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
-					     unsigned long size)
+					     unsigned long size,
+					     bool kvmap)
 {
 	struct exynos_drm_gem *exynos_gem;
 	int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
 	/* set memory type and cache attribute from user side. */
 	exynos_gem->flags = flags;
 
-	ret = exynos_drm_alloc_buf(exynos_gem);
+	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
 	if (ret < 0) {
 		drm_gem_object_release(&exynos_gem->base);
 		kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct exynos_drm_gem *exynos_gem;
 	int ret;
 
-	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
 	if (IS_ERR(exynos_gem))
 		return PTR_ERR(exynos_gem);
 
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	else
 		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
-	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
 	if (IS_ERR(exynos_gem)) {
 		dev_warn(dev->dev, "FB allocation failed.\n");
 		return PTR_ERR(exynos_gem);
@@ -442,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-	int npages;
+	struct drm_device *drm_dev = obj->dev;
+	struct sg_table *sgt;
+	int ret;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
 
-	npages = exynos_gem->size >> PAGE_SHIFT;
+	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+				    exynos_gem->dma_addr, exynos_gem->size,
+				    exynos_gem->dma_attrs);
+	if (ret) {
+		DRM_ERROR("failed to get sgtable, %d\n", ret);
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
 
-	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+	return sgt;
 }
 
 struct drm_gem_object *
@@ -455,8 +430,6 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct sg_table *sgt)
 {
 	struct exynos_drm_gem *exynos_gem;
-	int npages;
-	int ret;
 
 	if (sgt->nents < 1)
 		return ERR_PTR(-EINVAL);
@@ -482,26 +455,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 	}
 
 	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
-	if (IS_ERR(exynos_gem)) {
-		ret = PTR_ERR(exynos_gem);
-		return ERR_PTR(ret);
-	}
-
-	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
-
-	npages = exynos_gem->size >> PAGE_SHIFT;
-	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!exynos_gem->pages) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
-					       npages);
-	if (ret < 0)
-		goto err_free_large;
-
-	exynos_gem->sgt = sgt;
+	if (IS_ERR(exynos_gem))
+		return ERR_CAST(exynos_gem);
 
 	/*
	 * Buffer has been mapped as contiguous into DMA address space,
@@ -513,14 +468,9 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 	else
 		exynos_gem->flags |= EXYNOS_BO_CONTIG;
 
+	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+	exynos_gem->sgt = sgt;
 	return &exynos_gem->base;
-
-err_free_large:
-	kvfree(exynos_gem->pages);
-err:
-	drm_gem_object_release(&exynos_gem->base);
-	kfree(exynos_gem);
-	return ERR_PTR(ret);
 }
 
 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
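
Note on the new kvmap parameter: after this patch, DMA_ATTR_NO_KERNEL_MAPPING is set unless the caller passes kvmap = true, so a buffer only gets a CPU-visible address (exynos_gem->kvaddr) when explicitly requested. A minimal sketch of the two call patterns, based only on the callers visible in this diff; the fbdev consumer line is an assumption for illustration, not part of this patch:

	/* ioctl and dumb-buffer paths: userspace mmaps the buffer itself,
	 * so no kernel virtual address is requested. */
	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);

	/* fbdev emulation: the console draws through the CPU, so it must
	 * request a kernel mapping and use exynos_gem->kvaddr afterwards. */
	exynos_gem = exynos_drm_gem_create(dev, flags, size, true);
	if (!IS_ERR(exynos_gem))
		fbi->screen_buffer = exynos_gem->kvaddr;	/* hypothetical consumer */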