@@ -24,40 +24,79 @@
 MODULE_IMPORT_NS("DMA_BUF");
 
 static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
 {
 	struct amdxdna_client *client = abo->client;
 	struct amdxdna_dev *xdna = client->xdna;
 	struct amdxdna_mem *mem = &abo->mem;
+	struct amdxdna_gem_obj *heap;
 	u64 offset;
 	u32 align;
 	int ret;
 
+	mutex_lock(&client->mm_lock);
+
+	heap = client->dev_heap;
+	if (!heap) {
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+		XDNA_ERR(xdna, "Invalid dev heap userptr");
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	if (mem->size == 0 || mem->size > heap->mem.size) {
+		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+			 mem->size, heap->mem.size);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
 	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
-	ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
 					 mem->size, align,
 					 0, DRM_MM_INSERT_BEST);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-		return ret;
+		goto unlock_out;
 	}
 
 	mem->dev_addr = abo->mm_node.start;
-	offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
-	mem->userptr = abo->dev_heap->mem.userptr + offset;
-	mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
-	mem->nr_pages = mem->size >> PAGE_SHIFT;
-
-	if (use_vmap) {
-		mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
-		if (!mem->kva) {
-			XDNA_ERR(xdna, "Failed to vmap");
-			drm_mm_remove_node(&abo->mm_node);
-			return -EFAULT;
-		}
-	}
+	offset = mem->dev_addr - heap->mem.dev_addr;
+	mem->userptr = heap->mem.userptr + offset;
+	mem->kva = heap->mem.kva + offset;
 
-	return 0;
+	drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+	mutex_unlock(&client->mm_lock);
+
+	return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+	mutex_destroy(&abo->lock);
+	kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+	struct amdxdna_gem_obj *heap;
+
+	mutex_lock(&abo->client->mm_lock);
+
+	drm_mm_remove_node(&abo->mm_node);
+
+	heap = abo->client->dev_heap;
+	drm_gem_object_put(to_gobj(heap));
+
+	mutex_unlock(&abo->client->mm_lock);
 }
 
 static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
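The rework above concentrates the heap bookkeeping in amdxdna_gem_heap_alloc()/amdxdna_gem_heap_free(): a dev BO takes the client mm_lock, carves its range out of the heap's drm_mm range manager, derives userptr and kva by offsetting into the heap's existing mappings (instead of vmap()ing a private page list), and holds a reference on the heap object for its lifetime. A minimal, self-contained sketch of the drm_mm carve/release pattern this relies on; my_carve and its locals are illustrative names, not driver symbols:

#include <drm/drm_mm.h>

/* Manage [base, base + heap_size) and carve one sub-range out of it,
 * the way amdxdna_gem_heap_alloc() carves dev BOs out of the dev heap.
 */
static u64 my_carve(u64 base, u64 heap_size, u64 size, u32 align)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};
	u64 dev_addr = 0;

	drm_mm_init(&mm, base, heap_size);

	if (!drm_mm_insert_node_generic(&mm, &node, size, align,
					0, DRM_MM_INSERT_BEST)) {
		dev_addr = node.start;		/* allocated device address */
		drm_mm_remove_node(&node);	/* release the sub-range */
	}

	drm_mm_takedown(&mm);	/* legal only once every node is removed */
	return dev_addr;
}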
@@ -213,6 +252,20 @@ static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
 	return ret;
 }
 
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+	if (abo->pinned)
+		amdxdna_gem_unpin(abo);
+
+	amdxdna_gem_heap_free(abo);
+	drm_gem_object_release(gobj);
+	amdxdna_gem_destroy_obj(abo);
+}
+
 static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
 				struct vm_area_struct *vma)
 {
@@ -374,19 +427,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 	if (abo->pinned)
 		amdxdna_gem_unpin(abo);
 
-	if (abo->type == AMDXDNA_BO_DEV) {
-		mutex_lock(&abo->client->mm_lock);
-		drm_mm_remove_node(&abo->mm_node);
-		mutex_unlock(&abo->client->mm_lock);
-
-		vunmap(abo->mem.kva);
-		drm_gem_object_put(to_gobj(abo->dev_heap));
-		drm_gem_object_release(gobj);
-		mutex_destroy(&abo->lock);
-		kfree(abo);
-		return;
-	}
-
 	if (abo->type == AMDXDNA_BO_DEV_HEAP)
 		drm_mm_takedown(&abo->mm);
 
@@ -402,7 +442,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 }
 
 static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
-	.free = amdxdna_gem_obj_free,
+	.free = amdxdna_gem_dev_obj_free,
 };
 
 static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -527,6 +567,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 			  struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	struct drm_gem_shmem_object *shmem;
 	struct amdxdna_gem_obj *abo;
@@ -553,18 +594,26 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 
 	shmem->map_wc = false;
 	abo = to_xdna_obj(&shmem->base);
-
 	abo->type = AMDXDNA_BO_DEV_HEAP;
 	abo->client = client;
 	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
 	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
 
+	ret = drm_gem_vmap(to_gobj(abo), &map);
+	if (ret) {
+		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+		goto release_obj;
+	}
+	abo->mem.kva = map.vaddr;
+
 	client->dev_heap = abo;
 	drm_gem_object_get(to_gobj(abo));
 	mutex_unlock(&client->mm_lock);
 
 	return abo;
 
+release_obj:
+	drm_gem_object_put(to_gobj(abo));
 mm_unlock:
 	mutex_unlock(&client->mm_lock);
 	return ERR_PTR(ret);
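With the heap vmapped once at creation, every dev BO's kva is just the heap kva plus an offset, and freeing a dev BO no longer needs a vunmap() of its own. A hedged sketch of the iosys_map vmap pattern the patch uses (error handling trimmed; my_map_bo is a hypothetical helper, and drm_gem_vunmap() with the same map is the matching release):

#include <linux/err.h>
#include <linux/iosys-map.h>
#include <drm/drm_gem.h>

static void *my_map_bo(struct drm_gem_object *gobj)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	int ret;

	ret = drm_gem_vmap(gobj, &map);	/* fills map.vaddr for system memory */
	if (ret)
		return ERR_PTR(ret);

	return map.vaddr;	/* undo later with drm_gem_vunmap(gobj, &map) */
}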
@@ -573,69 +622,43 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 struct amdxdna_gem_obj *
 amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
 			 struct amdxdna_drm_create_bo *args,
-			 struct drm_file *filp, bool use_vmap)
+			 struct drm_file *filp)
 {
 	struct amdxdna_client *client = filp->driver_priv;
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	size_t aligned_sz = PAGE_ALIGN(args->size);
-	struct amdxdna_gem_obj *abo, *heap;
+	struct amdxdna_gem_obj *abo;
 	int ret;
 
-	mutex_lock(&client->mm_lock);
-	heap = client->dev_heap;
-	if (!heap) {
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
-	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
-		XDNA_ERR(xdna, "Invalid dev heap userptr");
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
-	if (args->size > heap->mem.size) {
-		XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
-			 args->size, heap->mem.size);
-		ret = -EINVAL;
-		goto mm_unlock;
-	}
-
 	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
-	if (IS_ERR(abo)) {
-		ret = PTR_ERR(abo);
-		goto mm_unlock;
-	}
+	if (IS_ERR(abo))
+		return abo;
+
 	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
 	abo->type = AMDXDNA_BO_DEV;
 	abo->client = client;
-	abo->dev_heap = heap;
-	ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+	ret = amdxdna_gem_heap_alloc(abo);
 	if (ret) {
 		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-		goto mm_unlock;
+		amdxdna_gem_destroy_obj(abo);
+		return ERR_PTR(ret);
 	}
 
-	drm_gem_object_get(to_gobj(heap));
 	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
 
 	return abo;
-
-mm_unlock:
-	mutex_unlock(&client->mm_lock);
-	return ERR_PTR(ret);
 }
 
 static struct amdxdna_gem_obj *
 amdxdna_drm_create_cmd_bo(struct drm_device *dev,
 			  struct amdxdna_drm_create_bo *args,
 			  struct drm_file *filp)
 {
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
 	struct drm_gem_shmem_object *shmem;
 	struct amdxdna_gem_obj *abo;
-	struct iosys_map map;
 	int ret;
 
 	if (args->size > XDNA_MAX_CMD_BO_SIZE) {
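After the rework, amdxdna_drm_alloc_dev_bo() follows the standard kernel ERR_PTR unwind shape: propagate an encoded errno straight from the allocator, and on a later failure undo only what this function itself did before re-encoding the error. A self-contained sketch of that shape, with hypothetical my_thing/my_setup/my_create stand-ins:

#include <linux/err.h>
#include <linux/slab.h>

struct my_thing { int configured; };

static int my_setup(struct my_thing *t)
{
	t->configured = 1;
	return 0;
}

static struct my_thing *my_create(void)
{
	struct my_thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
	int ret;

	if (!t)
		return ERR_PTR(-ENOMEM);

	ret = my_setup(t);
	if (ret) {
		kfree(t);	/* undo only this function's own allocation */
		return ERR_PTR(ret);
	}

	return t;	/* callers test with IS_ERR()/PTR_ERR() */
}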
@@ -692,7 +715,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
 		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
 		break;
 	case AMDXDNA_BO_DEV:
-		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
 		break;
 	case AMDXDNA_BO_CMD:
 		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
@@ -724,20 +747,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
 	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
 	int ret;
 
+	if (abo->type == AMDXDNA_BO_DEV)
+		abo = abo->client->dev_heap;
+
 	if (is_import_bo(abo))
 		return 0;
 
-	switch (abo->type) {
-	case AMDXDNA_BO_SHMEM:
-	case AMDXDNA_BO_DEV_HEAP:
-		ret = drm_gem_shmem_pin(&abo->base);
-		break;
-	case AMDXDNA_BO_DEV:
-		ret = drm_gem_shmem_pin(&abo->dev_heap->base);
-		break;
-	default:
-		ret = -EOPNOTSUPP;
-	}
+	ret = drm_gem_shmem_pin(&abo->base);
 
 	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
 	return ret;
@@ -747,9 +763,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 {
 	int ret;
 
-	if (abo->type == AMDXDNA_BO_DEV)
-		abo = abo->dev_heap;
-
 	mutex_lock(&abo->lock);
 	ret = amdxdna_gem_pin_nolock(abo);
 	mutex_unlock(&abo->lock);
@@ -759,12 +772,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 
 void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
 {
+	if (abo->type == AMDXDNA_BO_DEV)
+		abo = abo->client->dev_heap;
+
 	if (is_import_bo(abo))
 		return;
 
-	if (abo->type == AMDXDNA_BO_DEV)
-		abo = abo->dev_heap;
-
 	mutex_lock(&abo->lock);
 	drm_gem_shmem_unpin(&abo->base);
 	mutex_unlock(&abo->lock);
@@ -855,10 +868,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
 
 	if (is_import_bo(abo))
 		drm_clflush_sg(abo->base.sgt);
-	else if (abo->type == AMDXDNA_BO_DEV)
-		drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
-	else
+	else if (abo->mem.kva)
+		drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+	else if (abo->base.pages)
 		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+	else
+		drm_WARN(&xdna->ddev, 1, "Can not get flush memory");
 
 	amdxdna_gem_unpin(abo);
 
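The sync path now picks a flush primitive by what backing the BO actually exposes rather than by BO type: an sg_table for dma-buf imports, a kernel mapping (which lets it flush only the requested byte range) for heap-backed BOs, or a page array as the fallback. A hedged sketch of the three <drm/drm_cache.h> helpers and the granularity each operates at; my_flush and its parameters are illustrative:

#include <linux/scatterlist.h>
#include <drm/drm_cache.h>

static void my_flush(struct sg_table *sgt, void *kva, unsigned long offset,
		     unsigned long size, struct page **pages,
		     unsigned long npages)
{
	if (sgt)
		drm_clflush_sg(sgt);			/* scatter-gather list */
	else if (kva)
		drm_clflush_virt_range(kva + offset, size); /* byte sub-range */
	else if (pages)
		drm_clflush_pages(pages, npages);	/* whole pages */
}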