@@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
110
110
return i ;
111
111
}
112
112
113
/*
 * emit_flush_invalidate - emit an MI_FLUSH_DW that invalidates TLBs and
 * performs a post-sync immediate-dword write to a GGTT address.
 * @addr: GGTT address to receive the post-sync store (MI_FLUSH_DW_USE_GTT
 *        is OR'ed in so the hardware treats it as a GGTT address)
 * @val: immediate dword value stored to @addr when the flush completes
 * @dw: command-stream dword buffer being assembled
 * @i: current write index into @dw
 *
 * Return: updated index into @dw after the emitted command dwords.
 *
 * NOTE(review): dword layout (opcode, address-low, address-high/0, value)
 * follows the MI_FLUSH_DW format with an immediate-data post-sync op —
 * confirm field order against the hardware programming reference.
 */
static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
		  MI_FLUSH_IMM_DW;

	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = val;

	return i;
}
@@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
397
398
static void emit_migration_job_gen12 (struct xe_sched_job * job ,
398
399
struct xe_lrc * lrc , u32 seqno )
399
400
{
401
+ u32 saddr = xe_lrc_start_seqno_ggtt_addr (lrc );
400
402
u32 dw [MAX_JOB_SIZE_DW ], i = 0 ;
401
403
402
404
i = emit_copy_timestamp (lrc , dw , i );
403
405
404
- i = emit_store_imm_ggtt (xe_lrc_start_seqno_ggtt_addr (lrc ),
405
- seqno , dw , i );
406
+ i = emit_store_imm_ggtt (saddr , seqno , dw , i );
406
407
407
408
dw [i ++ ] = MI_ARB_ON_OFF | MI_ARB_DISABLE ; /* Enabled again below */
408
409
409
410
i = emit_bb_start (job -> ptrs [0 ].batch_addr , BIT (8 ), dw , i );
410
411
411
- if (!IS_SRIOV_VF (gt_to_xe (job -> q -> gt ))) {
412
- /* XXX: Do we need this? Leaving for now. */
413
- dw [i ++ ] = preparser_disable (true);
414
- i = emit_flush_invalidate (dw , i );
415
- dw [i ++ ] = preparser_disable (false);
416
- }
412
+ dw [i ++ ] = preparser_disable (true);
413
+ i = emit_flush_invalidate (saddr , seqno , dw , i );
414
+ dw [i ++ ] = preparser_disable (false);
417
415
418
416
i = emit_bb_start (job -> ptrs [1 ].batch_addr , BIT (8 ), dw , i );
419
417
0 commit comments