@@ -513,7 +513,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	if (err)
 		return err;
 
-	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
@@ -593,7 +593,9 @@ static int igt_gpu_write(struct i915_gem_context *ctx,
 		if (err)
 			break;
 
+		i915_gem_object_lock(obj, NULL);
 		err = igt_cpu_check(obj, dword, rng);
+		i915_gem_object_unlock(obj);
 		if (err)
 			break;
 	} while (!__igt_timeout(end_time, NULL));
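
Both hunks above follow from one change: igt_cpu_check() now uses the locked i915_gem_object_pin_map() instead of the _unlocked wrapper, so its callers must hold the object's dma-resv lock across the call. A minimal sketch of the resulting contract (the helper name is hypothetical, for illustration only; it is not part of the patch):

/*
 * Hypothetical wrapper illustrating the locking contract introduced
 * above: igt_cpu_check() pins the map with the locked variant, so each
 * call site must bracket it with i915_gem_object_lock()/unlock(), as
 * igt_gpu_write() now does.
 */
static int igt_cpu_check_locked(struct drm_i915_gem_object *obj,
				u32 dword, u32 val)
{
	int err;

	i915_gem_object_lock(obj, NULL); /* NULL: no ww acquire context */
	err = igt_cpu_check(obj, dword, val);
	i915_gem_object_unlock(obj);

	return err;
}
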
@@ -629,6 +631,88 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_create_cleared_cpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	u32 size, i;
+	int err;
+
+	i915_gem_drain_freed_objects(i915);
+
+	size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
+	size = round_up(size, PAGE_SIZE);
+	i = 0;
+
+	do {
+		struct drm_i915_gem_object *obj;
+		unsigned int flags;
+		u32 dword, val;
+		void *vaddr;
+
+		/*
+		 * Alternate between cleared and uncleared allocations, while
+		 * also dirtying the pages each time to check that the pages are
+		 * always cleared if requested, since we should get some overlap
+		 * of the underlying pages, if not all, since we are the only
+		 * user.
+		 */
+
+		flags = I915_BO_ALLOC_CPU_CLEAR;
+		if (i & 1)
+			flags = 0;
+
+		obj = i915_gem_object_create_lmem(i915, size, flags);
+		if (IS_ERR(obj))
+			return PTR_ERR(obj);
+
+		i915_gem_object_lock(obj, NULL);
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto out_put;
+
+		dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
+						   &prng);
+
+		if (flags & I915_BO_ALLOC_CPU_CLEAR) {
+			err = igt_cpu_check(obj, dword, 0);
+			if (err) {
+				pr_err("%s failed with size=%u, flags=%u\n",
+				       __func__, size, flags);
+				goto out_unpin;
+			}
+		}
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto out_unpin;
+		}
+
+		val = prandom_u32_state(&prng);
+
+		memset32(vaddr, val, obj->base.size / sizeof(u32));
+
+		i915_gem_object_flush_map(obj);
+		i915_gem_object_unpin_map(obj);
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj);
+out_put:
+		i915_gem_object_unlock(obj);
+		i915_gem_object_put(obj);
+
+		if (err)
+			break;
+		++i;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s completed (%u) iterations\n", __func__, i);
+
+	return err;
+}
+
 static int igt_lmem_write_gpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -1043,6 +1127,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
+		SUBTEST(igt_lmem_create_cleared_cpu),
 		SUBTEST(igt_lmem_write_cpu),
 		SUBTEST(igt_lmem_write_gpu),
 	};
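
The new subtest slots in ahead of the existing write tests. For orientation (a hedged reconstruction from the usual i915 selftest idiom, not part of this diff), the remainder of intel_memory_region_live_selftests() skips devices without local memory and hands the table to the live-subtest runner:

	/*
	 * Sketch of how the tests[] table above is consumed; reconstructed
	 * from the common i915 selftest pattern, not taken from this patch.
	 */
	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	return i915_live_subtests(tests, i915);

The live selftests are typically exercised by loading i915 with the live_selftests module parameter enabled, or through IGT's i915_selftest wrapper.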