@@ -56,6 +56,8 @@ enum nouveau_aper {
56
56
/*
 * DMA-engine callback that queues a copy of @npages pages from
 * (src aperture, src_addr) to (dst aperture, dst_addr).
 * Returns 0 when the commands were queued, negative errno otherwise.
 */
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
/*
 * DMA-engine callback that queues a zero-fill of @length bytes at
 * dst_addr in the given destination aperture (used when a migrating
 * page has no backing source page to copy from).
 * Returns 0 when the commands were queued, negative errno otherwise.
 */
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper, u64 dst_addr);
59
61
60
62
struct nouveau_dmem_chunk {
61
63
struct list_head list ;
@@ -67,6 +69,7 @@ struct nouveau_dmem_chunk {
67
69
68
70
/*
 * Per-device migration state: the hardware copy/clear entry points
 * (selected per copy-engine class in nouveau_dmem_migrate_init()) and
 * the channel their commands are submitted on.
 */
struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;	/* copy pages between apertures */
	nouveau_clear_page_t clear_func;	/* zero-fill a destination page */
	struct nouveau_channel *chan;		/* channel commands are queued on */
};
72
75
@@ -436,6 +439,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
436
439
return 0 ;
437
440
}
438
441
442
/*
 * Queue a zero-fill of @length bytes at physical address @dst_addr in
 * @dst_aper on the migration channel's copy engine.  Rather than
 * reading a source buffer, the engine's remap feature is used to
 * replicate the (zeroed) remap constant registers into the destination.
 *
 * Only queues the commands; completion is handled by the caller via the
 * channel's fencing.  Returns 0 on success, the RING_SPACE() error if
 * no ring space could be reserved, or -EINVAL for an unsupported
 * destination aperture.
 */
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
		     enum nouveau_aper dst_aper, u64 dst_addr)
{
	struct nouveau_channel *chan = drm->dmem->migrate.chan;
	u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
	/* Remap control: write constants A and B as two 4-byte components
	 * per element, i.e. 8 bytes of output per transfer unit. */
	u32 remap = (4 << 0) /* DST_X_CONST_A */ |
		    (5 << 4) /* DST_Y_CONST_B */ |
		    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
		    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
	int ret;

	/* Reserve ring space for all of the methods emitted below. */
	ret = RING_SPACE(chan, 12);
	if (ret)
		return ret;

	/* Select the destination aperture (0x0264: 0 = VRAM, 1 = host).
	 * NOTE(review): method/value pairing mirrors the destination setup
	 * in nvc0b5_migrate_copy() — confirm against the class headers. */
	switch (dst_aper) {
	case NOUVEAU_APER_VRAM:
		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
		break;
	case NOUVEAU_APER_HOST:
		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
		break;
	default:
		return -EINVAL;
	}
	launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

	/* 0x0700..0x0708: remap constants A and B (both zero) and the
	 * remap control word — this is what makes the engine write zeros. */
	BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
	OUT_RING(chan, 0);
	OUT_RING(chan, 0);
	OUT_RING(chan, remap);
	/* 0x0408: destination physical address, high then low 32 bits. */
	BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
	OUT_RING(chan, upper_32_bits(dst_addr));
	OUT_RING(chan, lower_32_bits(dst_addr));
	/* 0x0418: transfer length in 8-byte units (remap emits two 4-byte
	 * components per element, hence length >> 3). */
	BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
	OUT_RING(chan, length >> 3);
	/* 0x0300: LAUNCH_DMA — kick off the queued transfer. */
	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
	OUT_RING(chan, launch_dma);
	return 0;
}
487
+
439
488
static int
440
489
nouveau_dmem_migrate_init (struct nouveau_drm * drm )
441
490
{
@@ -445,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
445
494
case VOLTA_DMA_COPY_A :
446
495
case TURING_DMA_COPY_A :
447
496
drm -> dmem -> migrate .copy_func = nvc0b5_migrate_copy ;
497
+ drm -> dmem -> migrate .clear_func = nvc0b5_migrate_clear ;
448
498
drm -> dmem -> migrate .chan = drm -> ttm .chan ;
449
499
return 0 ;
450
500
default :
@@ -487,21 +537,28 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
487
537
unsigned long paddr ;
488
538
489
539
spage = migrate_pfn_to_page (src );
490
- if (!spage || ! (src & MIGRATE_PFN_MIGRATE ))
540
+ if (!(src & MIGRATE_PFN_MIGRATE ))
491
541
goto out ;
492
542
493
543
dpage = nouveau_dmem_page_alloc_locked (drm );
494
544
if (!dpage )
495
545
goto out ;
496
546
497
- * dma_addr = dma_map_page (dev , spage , 0 , PAGE_SIZE , DMA_BIDIRECTIONAL );
498
- if (dma_mapping_error (dev , * dma_addr ))
499
- goto out_free_page ;
500
-
501
547
paddr = nouveau_dmem_page_addr (dpage );
502
- if (drm -> dmem -> migrate .copy_func (drm , 1 , NOUVEAU_APER_VRAM ,
503
- paddr , NOUVEAU_APER_HOST , * dma_addr ))
504
- goto out_dma_unmap ;
548
+ if (spage ) {
549
+ * dma_addr = dma_map_page (dev , spage , 0 , page_size (spage ),
550
+ DMA_BIDIRECTIONAL );
551
+ if (dma_mapping_error (dev , * dma_addr ))
552
+ goto out_free_page ;
553
+ if (drm -> dmem -> migrate .copy_func (drm , page_size (spage ),
554
+ NOUVEAU_APER_VRAM , paddr , NOUVEAU_APER_HOST , * dma_addr ))
555
+ goto out_dma_unmap ;
556
+ } else {
557
+ * dma_addr = DMA_MAPPING_ERROR ;
558
+ if (drm -> dmem -> migrate .clear_func (drm , page_size (dpage ),
559
+ NOUVEAU_APER_VRAM , paddr ))
560
+ goto out_free_page ;
561
+ }
505
562
506
563
* pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
507
564
((paddr >> PAGE_SHIFT ) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT );
@@ -528,7 +585,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
528
585
for (i = 0 ; addr < args -> end ; i ++ ) {
529
586
args -> dst [i ] = nouveau_dmem_migrate_copy_one (drm , args -> src [i ],
530
587
dma_addrs + nr_dma , pfns + i );
531
- if (args -> dst [ i ] )
588
+ if (! dma_mapping_error ( drm -> dev -> dev , dma_addrs [ nr_dma ]) )
532
589
nr_dma ++ ;
533
590
addr += PAGE_SIZE ;
534
591
}
0 commit comments