@@ -367,6 +367,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
 	mutex_unlock(&drm->dmem->mutex);
 }
 
+/*
+ * Evict all pages mapping a chunk.
+ */
+static void
+nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+{
+	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
+	unsigned long *src_pfns, *dst_pfns;
+	dma_addr_t *dma_addrs;
+	struct nouveau_fence *fence;
+
+	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+
+	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+			npages);
+
+	for (i = 0; i < npages; i++) {
+		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
+			struct page *dpage;
+
+			/*
+			 * __GFP_NOFAIL because the GPU is going away and there
+			 * is nothing sensible we can do if we can't copy the
+			 * data back.
+			 */
+			dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+			nouveau_dmem_copy_one(chunk->drm,
+					migrate_pfn_to_page(src_pfns[i]), dpage,
+					&dma_addrs[i]);
+		}
+	}
+
+	nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+	migrate_device_pages(src_pfns, dst_pfns, npages);
+	nouveau_dmem_fence_done(&fence);
+	migrate_device_finalize(src_pfns, dst_pfns, npages);
+	kfree(src_pfns);
+	kfree(dst_pfns);
+	for (i = 0; i < npages; i++)
+		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+	kfree(dma_addrs);
+}
+
 void
 nouveau_dmem_fini(struct nouveau_drm *drm)
 {
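
The function added above follows the four-step eviction protocol for device-private
memory: migrate_device_range() collects and isolates every device page backing the
chunk's PFN range, the driver copies each page flagged MIGRATE_PFN_MIGRATE into a
freshly allocated system page, migrate_device_pages() switches the CPU mappings over
to the destination pages, and migrate_device_finalize() unlocks and frees the source
pages. A minimal, driver-agnostic sketch of that skeleton follows; my_dev_copy_to_ram()
is a hypothetical stand-in for the driver's DMA copy (nouveau uses
nouveau_dmem_copy_one() plus a fence, as in the hunk above), and the kvcalloc()
allocations use __GFP_NOFAIL here since eviction has no sensible error path.

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical driver-specific DMA copy; not a real API. */
static void my_dev_copy_to_ram(struct page *spage, struct page *dpage);

static void evict_device_range(unsigned long start_pfn, unsigned long npages)
{
	unsigned long i, *src_pfns, *dst_pfns;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);

	/* 1. Isolate the device-private pages backing [start_pfn, start_pfn + npages). */
	migrate_device_range(src_pfns, start_pfn, npages);

	/* 2. Back each migrating page with system RAM and copy the data out. */
	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
		my_dev_copy_to_ram(migrate_pfn_to_page(src_pfns[i]), dpage);
	}

	/* 3. Repoint the CPU page tables at the destination pages. */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	/* 4. Unlock and free the now-unused device-private source pages. */
	migrate_device_finalize(src_pfns, dst_pfns, npages);

	kvfree(src_pfns);
	kvfree(dst_pfns);
}
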
@@ -378,8 +424,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
 	mutex_lock(&drm->dmem->mutex);
 
 	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+		nouveau_dmem_evict_chunk(chunk);
 		nouveau_bo_unpin(chunk->bo);
 		nouveau_bo_ref(NULL, &chunk->bo);
+		WARN_ON(chunk->callocated);
 		list_del(&chunk->list);
 		memunmap_pages(&chunk->pagemap);
 		release_mem_region(chunk->pagemap.range.start,
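
Two details of the nouveau_dmem_fini() hunk above are worth noting. The eviction runs
first in the loop body, while chunk->bo is still pinned, because nouveau_dmem_copy_one()
DMAs the data out of the VRAM that backs the chunk; unpinning before evicting would pull
the backing store out from under the copy. The WARN_ON(chunk->callocated) is a leak
check: callocated counts device pages currently allocated from the chunk and is expected
to drop back to zero as migrate_device_finalize() frees the source pages via the
pagemap's page_free callback, so a non-zero value here means a device-private page
survived eviction. In outline (an annotated restatement of the hunk, not new code):

	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_dmem_evict_chunk(chunk);	/* copy out while the BO is pinned */
		nouveau_bo_unpin(chunk->bo);		/* VRAM backing may now go away */
		nouveau_bo_ref(NULL, &chunk->bo);
		WARN_ON(chunk->callocated);		/* eviction should have freed every page */
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);	/* safe: no device pages remain in use */
		/* ... memory region released as before ... */
	}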