@@ -278,12 +278,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
 	int r;
 
-	*id = vm->reserved_vmid[vmhub];
+	*id = id_mgr->reserved;
 	if ((*id)->owner != vm->immediate.fence_context ||
 	    !amdgpu_vmid_compatible(*id, job) ||
 	    (*id)->flushed_updates < updates ||
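This first hunk is the consumer side of the rework: amdgpu_vmid_grab_reserved() now looks up the per-hub ID manager and reads the single shared id_mgr->reserved instead of chasing a per-VM pointer. Read together with the hunks below, the patch implies roughly the following field changes; this is a sketch inferred from the code in this diff, not the exact declarations in the amdgpu headers:

	/* Sketch only: names follow this patch, exact types and
	 * placement in the headers may differ. */
	struct amdgpu_vmid_mgr {
		struct mutex		lock;
		unsigned		num_ids;
		struct list_head	ids_lru;
		struct amdgpu_vmid	*reserved;		/* the one reserved ID per hub */
		unsigned int		reserved_use_count;	/* how many VMs share it */
	};

	/* In struct amdgpu_vm, reserved_vmid[] drops from a per-VM
	 * struct amdgpu_vmid * to a plain "do I hold a reservation" flag: */
	bool reserved_vmid[AMDGPU_MAX_VMHUBS];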
@@ -462,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
-	struct amdgpu_vmid_mgr *id_mgr;
-	struct amdgpu_vmid *idle;
-	int r = 0;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-	id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	mutex_lock(&id_mgr->lock);
 	if (vm->reserved_vmid[vmhub])
 		goto unlock;
-	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-	    AMDGPU_VM_MAX_RESERVED_VMID) {
-		DRM_ERROR("Over limitation of reserved vmid\n");
-		atomic_dec(&id_mgr->reserved_vmid_num);
-		r = -EINVAL;
-		goto unlock;
+
+	++id_mgr->reserved_use_count;
+	if (!id_mgr->reserved) {
+		struct amdgpu_vmid *id;
+
+		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+				      list);
+		/* Remove from normal round robin handling */
+		list_del_init(&id->list);
+		id_mgr->reserved = id;
 	}
-	/* Select the first entry VMID */
-	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-	list_del_init(&idle->list);
-	vm->reserved_vmid[vmhub] = idle;
-	mutex_unlock(&id_mgr->lock);
+	vm->reserved_vmid[vmhub] = true;
 
-	return 0;
 unlock:
 	mutex_unlock(&id_mgr->lock);
-	return r;
+	return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
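After this hunk the allocation path can no longer fail: the AMDGPU_VM_MAX_RESERVED_VMID limit, its atomic counter and the -EINVAL path are gone, so the function unconditionally returns 0. The first VM to reserve pulls the head of ids_lru out of round-robin rotation and parks it in id_mgr->reserved; every later VM only bumps reserved_use_count under id_mgr->lock. A minimal userspace sketch of this claim-on-first-use pattern (hypothetical names; a pthread mutex stands in for the kernel mutex and a singly linked list for list_head):

	#include <pthread.h>
	#include <stddef.h>

	struct resource { struct resource *next; };	/* plays amdgpu_vmid */

	struct mgr {					/* plays amdgpu_vmid_mgr */
		pthread_mutex_t lock;
		struct resource *free_list;		/* plays ids_lru */
		struct resource *reserved;
		unsigned int reserved_use_count;
	};

	/* First caller detaches one resource from normal rotation and
	 * parks it; later callers only increment the use count.
	 * Assumes free_list is non-empty, just as the kernel code
	 * assumes ids_lru is never empty here. */
	static void mgr_alloc_reserved(struct mgr *m)
	{
		pthread_mutex_lock(&m->lock);
		++m->reserved_use_count;
		if (!m->reserved) {
			/* "list_first_entry" + "list_del_init" */
			m->reserved = m->free_list;
			m->free_list = m->reserved->next;
			m->reserved->next = NULL;
		}
		pthread_mutex_unlock(&m->lock);
	}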
@@ -496,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		list_add(&vm->reserved_vmid[vmhub]->list,
-			 &id_mgr->ids_lru);
-		vm->reserved_vmid[vmhub] = NULL;
-		atomic_dec(&id_mgr->reserved_vmid_num);
+	if (vm->reserved_vmid[vmhub] &&
+	    !--id_mgr->reserved_use_count) {
+		/* give the reserved ID back to normal round robin */
+		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
 	}
+	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
 
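Two details in this release path are easy to miss: the use count is only decremented when this VM actually holds a reservation (the && short-circuits before --reserved_use_count), and the ID goes back onto the round-robin LRU only when the last user drops out, while the per-VM flag is cleared unconditionally. Continuing the hypothetical sketch above (this sketch also clears the parked pointer so a later alloc re-claims from the list; the hunk shown leaves id_mgr->reserved untouched):

	/* Last user hands the parked resource back to normal rotation. */
	static void mgr_free_reserved(struct mgr *m, bool *holds_reservation)
	{
		pthread_mutex_lock(&m->lock);
		if (*holds_reservation && !--m->reserved_use_count) {
			/* "list_add": back onto the free list */
			m->reserved->next = m->free_list;
			m->free_list = m->reserved;
			m->reserved = NULL;
		}
		*holds_reservation = false;
		pthread_mutex_unlock(&m->lock);
	}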
@@ -568,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
 	mutex_init(&id_mgr->lock);
 	INIT_LIST_HEAD(&id_mgr->ids_lru);
-	atomic_set(&id_mgr->reserved_vmid_num, 0);
+	id_mgr->reserved_use_count = 0;
 
 	/* manage only VMIDs not used by KFD */
 	id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;