@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask = 0;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
-	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
-	amdgpu_ring_write(ring, ref_and_mask); /* reference */
-	amdgpu_ring_write(ring, ref_and_mask); /* mask */
-	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+	if (ring->me > 1) {
+		amdgpu_asic_flush_hdp(adev, ring);
+	} else {
+		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+				  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+				  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+		amdgpu_ring_write(ring, ref_and_mask); /* reference */
+		amdgpu_ring_write(ring, ref_and_mask); /* mask */
+		amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+				  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+	}
 }
 
 /**
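
For context, the hunk replaces an unconditional POLL_REGMEM-based HDP flush with an instance check: SDMA instances 0 and 1 keep the packet path (the engine polls the NBIO hdp_flush done register until the requested bit matches), while higher instances fall back to amdgpu_asic_flush_hdp(), a direct MMIO flush. Below is a minimal user-space sketch of that dispatch, assuming the NBIO block only exposes per-instance done/req bits for SDMA 0 and 1 (inferred from this hunk, not from the register spec) and an illustrative bit position for the instance-0 mask:

	#include <stdint.h>
	#include <stdio.h>

	/* Example bit position for ref_and_mask_sdma0; the real value is
	 * NBIO-version specific and assumed here for illustration. */
	#define REF_AND_MASK_SDMA0 (1u << 10)

	static void flush_hdp(unsigned int ring_me)
	{
		if (ring_me > 1) {
			/* No per-instance done/req bit to poll: fall back to a
			 * direct MMIO HDP flush (amdgpu_asic_flush_hdp() in the
			 * driver). */
			printf("instance %u: MMIO flush\n", ring_me);
		} else {
			/* Instances 0 and 1 occupy adjacent register bits, so
			 * shifting the instance-0 mask by the index is valid
			 * only on this side of the check. */
			unsigned int ref_and_mask = REF_AND_MASK_SDMA0 << ring_me;

			printf("instance %u: POLL_REGMEM on mask 0x%x\n",
			       ring_me, ref_and_mask);
		}
	}

	int main(void)
	{
		for (unsigned int me = 0; me < 4; me++)
			flush_hdp(me);
		return 0;
	}

The shift by ring->me is only meaningful while the per-instance bits are contiguous, which is why the packet-based flush is limited to me <= 1 in the patched code.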