@@ -3985,6 +3985,36 @@ static void hexdump(const void *buf, size_t len)
3985
3985
}
3986
3986
}
3987
3987
3988
+ static int emit_semaphore_signal (struct intel_context * ce , void * slot )
3989
+ {
3990
+ const u32 offset =
3991
+ i915_ggtt_offset (ce -> engine -> status_page .vma ) +
3992
+ offset_in_page (slot );
3993
+ struct i915_request * rq ;
3994
+ u32 * cs ;
3995
+
3996
+ rq = intel_context_create_request (ce );
3997
+ if (IS_ERR (rq ))
3998
+ return PTR_ERR (rq );
3999
+
4000
+ cs = intel_ring_begin (rq , 4 );
4001
+ if (IS_ERR (cs )) {
4002
+ i915_request_add (rq );
4003
+ return PTR_ERR (cs );
4004
+ }
4005
+
4006
+ * cs ++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT ;
4007
+ * cs ++ = offset ;
4008
+ * cs ++ = 0 ;
4009
+ * cs ++ = 1 ;
4010
+
4011
+ intel_ring_advance (rq , cs );
4012
+
4013
+ rq -> sched .attr .priority = I915_PRIORITY_BARRIER ;
4014
+ i915_request_add (rq );
4015
+ return 0 ;
4016
+ }
4017
+
3988
4018
static int live_lrc_layout (void * arg )
3989
4019
{
3990
4020
struct intel_gt * gt = arg ;
@@ -4455,6 +4485,204 @@ static int live_gpr_clear(void *arg)
4455
4485
return err ;
4456
4486
}
4457
4487
4488
/*
 * Build and submit a request on @ce that first spins on a semaphore in the
 * engine's status page (the dword at @slot) and, once the semaphore is
 * released (becomes non-zero), copies the engine's live RING_CTX_TIMESTAMP
 * register into slot[idx].
 *
 * Returns a referenced request (caller must i915_request_put()) or an
 * ERR_PTR on failure; in both cases the request has already been submitted.
 */
static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;
	int err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 10);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err;
	}

	/* Keep arbitration enabled so the wait below can be preempted. */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;

	/* Poll until the semaphore dword at @slot is no longer zero. */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_NEQ_SDD;
	*cs++ = 0;
	*cs++ = offset;
	*cs++ = 0;

	/* Then sample the live CTX_TIMESTAMP register into slot[idx]. */
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_MASK;
	err = 0;
err:
	/*
	 * Deliberate fallthrough on success: the request is always
	 * submitted, and we take an extra reference to hand back (or to
	 * drop immediately if ring_begin failed above).
	 */
	i915_request_get(rq);
	i915_request_add(rq);
	if (err) {
		i915_request_put(rq);
		return ERR_PTR(err);
	}

	return rq;
}
4538
+
4539
/* Per-engine state shared by the live_lrc_timestamp selftest. */
struct lrc_timestamp {
	struct intel_engine_cs *engine;
	/* ce[0] runs the timestamp sampler; ce[1] optionally preempts it. */
	struct intel_context *ce[2];
	/* Value seeded into the context image's CTX_TIMESTAMP before running. */
	u32 poison;
};
4544
+
4545
+ static bool timestamp_advanced (u32 start , u32 end )
4546
+ {
4547
+ return (s32 )(end - start ) > 0 ;
4548
+ }
4549
+
4550
/*
 * Verify that CTX_TIMESTAMP is monotonic across a context save/restore.
 *
 * We poison CTX_TIMESTAMP in ce[0]'s saved register image, run a request
 * that samples the live timestamp into the status page (slot[1]), then
 * check both that the restored timestamp advanced past the poison value
 * and that the value saved back into the context image advanced past the
 * sample. If @preempt, the semaphore is released by a high priority
 * request on ce[1], forcing the sample to happen around a preemption
 * switch; otherwise it is released directly from the CPU.
 *
 * Returns 0 on success or a negative errno.
 */
static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
{
	/* Scratch area inside the status page: 4 dwords, zeroed to start. */
	u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
	struct i915_request *rq;
	u32 timestamp;
	int err = 0;

	/* Seed the to-be-restored timestamp with a known poison value. */
	arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
	rq = create_timestamp(arg->ce[0], slot, 1);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = wait_for_submit(rq->engine, rq, HZ / 2);
	if (err)
		goto err;

	if (preempt) {
		/* Release the semaphore via a preempting second context. */
		arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
		err = emit_semaphore_signal(arg->ce[1], slot);
		if (err)
			goto err;
	} else {
		/* Release the semaphore directly from the CPU. */
		slot[0] = 1;
		wmb();
	}

	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
		err = -ETIME;
		goto err;
	}

	/* and wait for switch to kernel */
	if (igt_flush_test(arg->engine->i915)) {
		err = -EIO;
		goto err;
	}

	/* Ensure the GPU's store of slot[1] is visible before we read it. */
	rmb();

	if (!timestamp_advanced(arg->poison, slot[1])) {
		pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       arg->poison, slot[1]);
		err = -EINVAL;
	}

	timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
	if (!timestamp_advanced(slot[1], timestamp)) {
		pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
		       arg->engine->name, preempt ? "preempt" : "simple",
		       slot[1], timestamp);
		err = -EINVAL;
	}

err:
	/* Poison the scratch slots so stale reads are obvious. */
	memset32(slot, -1, 4);
	i915_request_put(rq);
	return err;
}
4609
+
4610
+ static int live_lrc_timestamp (void * arg )
4611
+ {
4612
+ struct intel_gt * gt = arg ;
4613
+ enum intel_engine_id id ;
4614
+ struct lrc_timestamp data ;
4615
+ const u32 poison [] = {
4616
+ 0 ,
4617
+ S32_MAX ,
4618
+ (u32 )S32_MAX + 1 ,
4619
+ U32_MAX ,
4620
+ };
4621
+
4622
+ /*
4623
+ * We want to verify that the timestamp is saved and restore across
4624
+ * context switches and is monotonic.
4625
+ *
4626
+ * So we do this with a little bit of LRC poisoning to check various
4627
+ * boundary conditions, and see what happens if we preempt the context
4628
+ * with a second request (carrying more poison into the timestamp).
4629
+ */
4630
+
4631
+ for_each_engine (data .engine , gt , id ) {
4632
+ unsigned long heartbeat ;
4633
+ int i , err = 0 ;
4634
+
4635
+ engine_heartbeat_disable (data .engine , & heartbeat );
4636
+
4637
+ for (i = 0 ; i < ARRAY_SIZE (data .ce ); i ++ ) {
4638
+ struct intel_context * tmp ;
4639
+
4640
+ tmp = intel_context_create (data .engine );
4641
+ if (IS_ERR (tmp )) {
4642
+ err = PTR_ERR (tmp );
4643
+ goto err ;
4644
+ }
4645
+
4646
+ err = intel_context_pin (tmp );
4647
+ if (err ) {
4648
+ intel_context_put (tmp );
4649
+ goto err ;
4650
+ }
4651
+
4652
+ data .ce [i ] = tmp ;
4653
+ }
4654
+
4655
+ for (i = 0 ; i < ARRAY_SIZE (poison ); i ++ ) {
4656
+ data .poison = poison [i ];
4657
+
4658
+ err = __lrc_timestamp (& data , false);
4659
+ if (err )
4660
+ break ;
4661
+
4662
+ err = __lrc_timestamp (& data , true);
4663
+ if (err )
4664
+ break ;
4665
+ }
4666
+
4667
+ err :
4668
+ engine_heartbeat_enable (data .engine , heartbeat );
4669
+ for (i = 0 ; i < ARRAY_SIZE (data .ce ); i ++ ) {
4670
+ if (!data .ce [i ])
4671
+ break ;
4672
+
4673
+ intel_context_unpin (data .ce [i ]);
4674
+ intel_context_put (data .ce [i ]);
4675
+ }
4676
+
4677
+ if (igt_flush_test (gt -> i915 ))
4678
+ err = - EIO ;
4679
+ if (err )
4680
+ return err ;
4681
+ }
4682
+
4683
+ return 0 ;
4684
+ }
4685
+
4458
4686
static int __live_pphwsp_runtime (struct intel_engine_cs * engine )
4459
4687
{
4460
4688
struct intel_context * ce ;
@@ -4552,6 +4780,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
4552
4780
SUBTEST (live_lrc_fixed ),
4553
4781
SUBTEST (live_lrc_state ),
4554
4782
SUBTEST (live_gpr_clear ),
4783
+ SUBTEST (live_lrc_timestamp ),
4555
4784
SUBTEST (live_pphwsp_runtime ),
4556
4785
};
4557
4786
0 commit comments