 #include "amdgpu_atomfirmware.h"
 #include "atom.h"
 
-static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
-                                             uint32_t num_pages);
-
 static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
 {
         return container_of(man, struct amdgpu_vram_mgr, manager);
@@ -189,7 +186,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
         spin_lock_init(&mgr->lock);
         INIT_LIST_HEAD(&mgr->reservations_pending);
         INIT_LIST_HEAD(&mgr->reserved_pages);
-        INIT_LIST_HEAD(&mgr->backup_pages);
 
         /* Add the two VRAM-related sysfs files */
         ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
@@ -230,11 +226,6 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
                 drm_mm_remove_node(&rsv->mm_node);
                 kfree(rsv);
         }
-
-        list_for_each_entry_safe(rsv, temp, &mgr->backup_pages, node) {
-                drm_mm_remove_node(&rsv->mm_node);
-                kfree(rsv);
-        }
         drm_mm_takedown(&mgr->mm);
         spin_unlock(&mgr->lock);
 
@@ -306,14 +297,12 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
                         continue;
 
                 dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
-                        rsv->mm_node.start << PAGE_SHIFT, rsv->mm_node.size);
+                        rsv->mm_node.start, rsv->mm_node.size);
 
                 vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
                 atomic64_add(vis_usage, &mgr->vis_usage);
                 atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
                 list_move(&rsv->node, &mgr->reserved_pages);
-
-                amdgpu_vram_mgr_free_backup_pages(mgr, rsv->mm_node.size);
         }
 }
 
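Side note on the dev_dbg change in the hunk above: the drm_mm nodes in this manager are sized in pages, so dropping the `<< PAGE_SHIFT` means the message now reports the reservation start as a page index rather than a byte offset. A minimal illustrative helper showing the relationship (hypothetical, not part of the patch; assumes page-granular nodes):

/* Hypothetical helper, for illustration only: converts a page-granular
 * drm_mm node start back to the byte offset the old message printed. */
static inline u64 vram_rsv_start_bytes(const struct drm_mm_node *node)
{
        return node->start << PAGE_SHIFT;       /* pages -> bytes */
}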
@@ -330,7 +319,6 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
                                   uint64_t start, uint64_t size)
 {
         struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-        struct amdgpu_device *adev = to_amdgpu_device(mgr);
         struct amdgpu_vram_reservation *rsv;
 
         rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
@@ -341,94 +329,14 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
         rsv->mm_node.start = start >> PAGE_SHIFT;
         rsv->mm_node.size = size >> PAGE_SHIFT;
 
-        dev_dbg(adev->dev, "Pending Reservation: 0x%llx\n", start);
-
         spin_lock(&mgr->lock);
-        list_add_tail(&rsv->node, &mgr->reservations_pending);
+        list_add_tail(&mgr->reservations_pending, &rsv->node);
         amdgpu_vram_mgr_do_reserve(man);
         spin_unlock(&mgr->lock);
 
         return 0;
 }
 
-static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
-                                             uint32_t num_pages)
-{
-        struct amdgpu_device *adev = to_amdgpu_device(mgr);
-        struct amdgpu_vram_reservation *rsv;
-        uint32_t i;
-        uint64_t vis_usage = 0, total_usage = 0;
-
-        if (num_pages > mgr->num_backup_pages) {
-                dev_warn(adev->dev, "No enough backup pages\n");
-                return -EINVAL;
-        }
-
-        for (i = 0; i < num_pages; i++) {
-                rsv = list_first_entry(&mgr->backup_pages,
-                                       struct amdgpu_vram_reservation, node);
-                vis_usage += amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
-                total_usage += (rsv->mm_node.size << PAGE_SHIFT);
-                drm_mm_remove_node(&rsv->mm_node);
-                list_del(&rsv->node);
-                kfree(rsv);
-                mgr->num_backup_pages--;
-        }
-
-        atomic64_sub(total_usage, &mgr->usage);
-        atomic64_sub(vis_usage, &mgr->vis_usage);
-
-        return 0;
-}
-
-int amdgpu_vram_mgr_reserve_backup_pages(struct ttm_resource_manager *man,
-                                         uint32_t num_pages)
-{
-        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-        struct amdgpu_device *adev = to_amdgpu_device(mgr);
-        struct amdgpu_vram_reservation *rsv;
-        struct drm_mm *mm = &mgr->mm;
-        uint32_t i;
-        int ret = 0;
-        uint64_t vis_usage, total_usage;
-
-        for (i = 0; i < num_pages; i++) {
-                rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
-                if (!rsv) {
-                        ret = -ENOMEM;
-                        goto pro_end;
-                }
-
-                INIT_LIST_HEAD(&rsv->node);
-
-                ret = drm_mm_insert_node(mm, &rsv->mm_node, 1);
-                if (ret) {
-                        dev_err(adev->dev, "failed to reserve backup page %d, ret 0x%x\n", i, ret);
-                        kfree(rsv);
-                        goto pro_end;
-                }
-
-                vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
-                total_usage = (rsv->mm_node.size << PAGE_SHIFT);
-
-                spin_lock(&mgr->lock);
-                atomic64_add(vis_usage, &mgr->vis_usage);
-                atomic64_add(total_usage, &mgr->usage);
-                list_add_tail(&rsv->node, &mgr->backup_pages);
-                mgr->num_backup_pages++;
-                spin_unlock(&mgr->lock);
-        }
-
-pro_end:
-        if (ret) {
-                spin_lock(&mgr->lock);
-                amdgpu_vram_mgr_free_backup_pages(mgr, mgr->num_backup_pages);
-                spin_unlock(&mgr->lock);
-        }
-
-        return ret;
-}
-
 /**
  * amdgpu_vram_mgr_query_page_status - query the reservation status
  *
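A note on the reserve-range hunk: the kernel's `list_add_tail(new, head)` from <linux/list.h> takes the entry being queued first and the list head second, so the pre-patch call queued `&rsv->node` onto `&mgr->reservations_pending`; the replacement `+` line passes those two arguments in the reverse order, which is worth double-checking. A minimal sketch of the conventional usage (standalone illustration, assuming standard list semantics; the `demo_` name is hypothetical):

#include <linux/list.h>

/* Queue a reservation node at the tail of the pending list.
 * Convention: new entry first, list head second. */
static void demo_queue_reservation(struct list_head *pending_head,
                                   struct list_head *rsv_node)
{
        list_add_tail(rsv_node, pending_head);
}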