#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

+#define NORMALIZE_VCN_REG_OFFSET(offset) \
+		(offset & 0x1FFFF)
+
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
@@ -1375,6 +1378,43 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
				    regUVD_RB_WPTR);
}

+static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+					      uint32_t val, uint32_t mask)
+{
+	/* For VF, only local offsets should be used */
+	if (amdgpu_sriov_vf(ring->adev))
+		reg = NORMALIZE_VCN_REG_OFFSET(reg);
+
+	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, mask);
+	amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+	/* For VF, only local offsets should be used */
+	if (amdgpu_sriov_vf(ring->adev))
+		reg = NORMALIZE_VCN_REG_OFFSET(reg);
+
+	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, val);
+}
+
+static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					      unsigned int vmid, uint64_t pd_addr)
+{
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+	/* wait for reg writes */
+	vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
+					  vmid * hub->ctx_addr_distance,
+					  lower_32_bits(pd_addr), 0xffffffff);
+}
+
static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
@@ -1421,7 +1461,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
@@ -1430,8 +1470,8 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
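Note on the change: under SR-IOV, the VF must not emit absolute register offsets into the encode ring, so these v4.0.3-specific ring callbacks mask each offset down to the instance-local aperture before packing it into the ring packet. A minimal standalone sketch of just that masking step, not part of the patch (the sample offsets below are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Same mask as the patch: keep only the low 17 bits of a register
 * offset, i.e. make it local to the VCN instance's register aperture. */
#define NORMALIZE_VCN_REG_OFFSET(offset) \
		(offset & 0x1FFFF)

int main(void)
{
	/* Hypothetical absolute register offsets, for illustration only. */
	uint32_t offsets[] = { 0x48300, 0x1fb00, 0x00123 };

	for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("0x%05x -> local 0x%05x\n",
		       (unsigned)offsets[i],
		       (unsigned)NORMALIZE_VCN_REG_OFFSET(offsets[i]));

	return 0;
}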