@@ -278,12 +278,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
 	int r;
 
-	*id = vm->reserved_vmid[vmhub];
+	*id = id_mgr->reserved;
 	if ((*id)->owner != vm->immediate.fence_context ||
 	    !amdgpu_vmid_compatible(*id, job) ||
 	    (*id)->flushed_updates < updates ||
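This hunk reads the reserved ID from the per-hub ID manager instead of a per-VM pointer, so it relies on fields the commit adds in the headers, which this excerpt does not show. A minimal sketch of their assumed shape, trimmed to just what the hunks below touch (the real structs in amdgpu_ids.h and amdgpu_vm.h carry more members, and the array bound here is a placeholder):

/* Assumed header changes backing this diff; names trimmed, not verbatim. */
#include <stdbool.h>

#define AMDGPU_MAX_VMHUBS 3	/* value assumed for the sketch */

struct amdgpu_vmid;		/* opaque here */

struct amdgpu_vmid_mgr {
	/* ... existing members (lock, num_ids, ids_lru, ids[]) elided ... */
	struct amdgpu_vmid *reserved;		/* one ID shared by all reservers */
	unsigned int reserved_use_count;	/* VMs currently holding it */
};

struct amdgpu_vm {
	/* was: struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; */
	bool reserved_vmid[AMDGPU_MAX_VMHUBS];
};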
@@ -462,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
-	struct amdgpu_vmid_mgr *id_mgr;
-	struct amdgpu_vmid *idle;
-	int r = 0;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-	id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	mutex_lock(&id_mgr->lock);
 	if (vm->reserved_vmid[vmhub])
 		goto unlock;
-	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-	    AMDGPU_VM_MAX_RESERVED_VMID) {
-		DRM_ERROR("Over limitation of reserved vmid\n");
-		atomic_dec(&id_mgr->reserved_vmid_num);
-		r = -EINVAL;
-		goto unlock;
+
+	++id_mgr->reserved_use_count;
+	if (!id_mgr->reserved) {
+		struct amdgpu_vmid *id;
+
+		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+				      list);
+		/* Remove from normal round robin handling */
+		list_del_init(&id->list);
+		id_mgr->reserved = id;
 	}
-	/* Select the first entry VMID */
-	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-	list_del_init(&idle->list);
-	vm->reserved_vmid[vmhub] = idle;
-	mutex_unlock(&id_mgr->lock);
+	vm->reserved_vmid[vmhub] = true;
 
-	return 0;
 unlock:
 	mutex_unlock(&id_mgr->lock);
-	return r;
+	return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
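Note the control-flow change in amdgpu_vmid_alloc_reserved(): the first reserver pulls a single ID off the LRU and caches it as id_mgr->reserved, while every later reserver merely increments reserved_use_count and shares that same ID. The AMDGPU_VM_MAX_RESERVED_VMID limit and its -EINVAL error path go away, so the function now always returns 0 and the r variable is gone entirely; a standalone sketch of this refcounting pattern follows the last hunk below.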
@@ -496,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		list_add(&vm->reserved_vmid[vmhub]->list,
-			 &id_mgr->ids_lru);
-		vm->reserved_vmid[vmhub] = NULL;
-		atomic_dec(&id_mgr->reserved_vmid_num);
+	if (vm->reserved_vmid[vmhub] &&
+	    !--id_mgr->reserved_use_count) {
+		/* give the reserved ID back to normal round robin */
+		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
 	}
+	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
 
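The free path mirrors the allocation: each VM unconditionally clears its reserved_vmid flag, but the cached ID is only handed back to the round-robin LRU once the last reservation drops, i.e. when --reserved_use_count reaches zero. This hunk leaves the id_mgr->reserved pointer itself untouched.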
@@ -568,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
 	mutex_init(&id_mgr->lock);
 	INIT_LIST_HEAD(&id_mgr->ids_lru);
-	atomic_set(&id_mgr->reserved_vmid_num, 0);
+	id_mgr->reserved_use_count = 0;
 
 	/* manage only VMIDs not used by KFD */
 	id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
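Taken together, reservation becomes a per-hub refcount around a single cached ID, which is why the init hunk can replace the atomic counter with a plain unsigned: all accesses now happen under id_mgr->lock. Below is a compilable userspace sketch of the same pattern, for illustration only: every name here (vmid_mgr, mgr_alloc_reserved(), the toy singly linked LRU) is a stand-in rather than the driver's API, and unlike the hunks above the sketch also clears the reserved pointer on the final free so it stays self-consistent without the rest of the driver:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct vmid {
	int hw_id;
	struct vmid *next;	/* toy LRU: singly linked list */
};

struct vmid_mgr {
	struct vmid *lru_head;		/* round-robin pool */
	struct vmid *reserved;		/* single shared reserved ID */
	unsigned int reserved_use_count;
};

/* First reserver pulls an ID out of the LRU; later reservers share it. */
static void mgr_alloc_reserved(struct vmid_mgr *mgr)
{
	++mgr->reserved_use_count;
	if (!mgr->reserved) {
		struct vmid *id = mgr->lru_head;

		assert(id);			/* pool must not be empty */
		mgr->lru_head = id->next;	/* remove from round robin */
		id->next = NULL;
		mgr->reserved = id;
	}
}

/* Last reserver gives the ID back to the round-robin pool. */
static void mgr_free_reserved(struct vmid_mgr *mgr)
{
	if (!--mgr->reserved_use_count) {
		mgr->reserved->next = mgr->lru_head;
		mgr->lru_head = mgr->reserved;
		mgr->reserved = NULL;	/* sketch-only: drop the cache */
	}
}

int main(void)
{
	struct vmid ids[2] = { { .hw_id = 8, .next = &ids[1] },
			       { .hw_id = 9, .next = NULL } };
	struct vmid_mgr mgr = { .lru_head = &ids[0] };

	mgr_alloc_reserved(&mgr);	/* VM A: pulls hw_id 8 from the pool */
	mgr_alloc_reserved(&mgr);	/* VM B: shares the same hw_id 8 */
	printf("reserved hw_id=%d users=%u\n",
	       mgr.reserved->hw_id, mgr.reserved_use_count);

	mgr_free_reserved(&mgr);	/* VM A done; ID stays reserved */
	mgr_free_reserved(&mgr);	/* VM B done; ID back in the pool */
	printf("pool head hw_id=%d\n", mgr.lru_head->hw_id);
	return 0;
}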