@@ -1096,32 +1096,20 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
 				       unsigned long addr,
 				       bool unfaultable)
 {
-	struct vm_area_struct *area;
-	int err = 0, i;
+	int i;
 
 	pr_info("igt_mmap(%s, %d) @ %lx\n",
 		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
 
-	mmap_read_lock(current->mm);
-	area = vma_lookup(current->mm, addr);
-	mmap_read_unlock(current->mm);
-	if (!area) {
-		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
-		       obj->mm.region->name);
-		err = -EINVAL;
-		goto out_unmap;
-	}
-
 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
 		u32 x;
 
 		if (get_user(x, ux)) {
-			err = -EFAULT;
 			if (!unfaultable) {
 				pr_err("%s: Unable to read from mmap, offset:%zd\n",
 				       obj->mm.region->name, i * sizeof(x));
-				goto out_unmap;
+				return -EFAULT;
 			}
 
 			continue;
@@ -1130,37 +1118,29 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
 		if (unfaultable) {
 			pr_err("%s: Faulted unmappable memory\n",
 			       obj->mm.region->name);
-			err = -EINVAL;
-			goto out_unmap;
+			return -EINVAL;
 		}
 
 		if (x != expand32(POISON_INUSE)) {
 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
 			       obj->mm.region->name,
 			       i * sizeof(x), x, expand32(POISON_INUSE));
-			err = -EINVAL;
-			goto out_unmap;
+			return -EINVAL;
 		}
 
 		x = expand32(POISON_FREE);
 		if (put_user(x, ux)) {
 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
 			       obj->mm.region->name, i * sizeof(x));
-			err = -EFAULT;
-			goto out_unmap;
+			return -EFAULT;
 		}
 	}
 
-	if (unfaultable) {
-		if (err == -EFAULT)
-			err = 0;
-	} else {
-		obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
-		err = wc_check(obj);
-	}
-out_unmap:
-	vm_munmap(addr, obj->base.size);
-	return err;
+	if (unfaultable)
+		return 0;
+
+	obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+	return wc_check(obj);
 }
 
 #define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
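
Pieced together from the two hunks above, the helper loses its vma_lookup() check, its err bookkeeping, and its out_unmap label, and simply returns on each failure. A condensed sketch of the post-patch body, reconstructed from the diff rather than copied from the tree (the pr_info()/pr_err() reporting is elided here for brevity):

static int ___igt_mmap_migrate(struct drm_i915_private *i915,
			       struct drm_i915_gem_object *obj,
			       unsigned long addr,
			       bool unfaultable)
{
	int i;

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			if (!unfaultable)
				return -EFAULT;	/* readable mapping must not fault */
			continue;		/* expected fault, keep scanning */
		}

		if (unfaultable)
			return -EINVAL;		/* fault expected, but the read succeeded */

		if (x != expand32(POISON_INUSE))
			return -EINVAL;		/* clear pattern did not land */

		x = expand32(POISON_FREE);
		if (put_user(x, ux))
			return -EFAULT;		/* write-back through the mmap failed */
	}

	if (unfaultable)
		return 0;

	obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
	return wc_check(obj);
}

Note the helper no longer calls vm_munmap(): the mapping is created and torn down by the caller, which the hunks below take care of.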
@@ -1176,6 +1156,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
 	struct drm_i915_private *i915 = placements[0]->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_request *rq = NULL;
+	struct vm_area_struct *area;
 	unsigned long addr;
 	LIST_HEAD(objects);
 	u64 offset;
@@ -1207,28 +1188,38 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
 		goto out_put;
 	}
 
+	mmap_read_lock(current->mm);
+	area = vma_lookup(current->mm, addr);
+	mmap_read_unlock(current->mm);
+	if (!area) {
+		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+		       obj->mm.region->name);
+		err = -EINVAL;
+		goto out_addr;
+	}
+
 	if (flags & IGT_MMAP_MIGRATE_FILL) {
 		err = igt_fill_mappable(placements[0], &objects);
 		if (err)
-			goto out_put;
+			goto out_addr;
 	}
 
 	err = i915_gem_object_lock(obj, NULL);
 	if (err)
-		goto out_put;
+		goto out_addr;
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err) {
 		i915_gem_object_unlock(obj);
-		goto out_put;
+		goto out_addr;
 	}
 
 	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
 					  obj->mm.pages->sgl, obj->pat_index,
 					  i915_gem_object_is_lmem(obj),
 					  expand32(POISON_INUSE), &rq);
 	i915_gem_object_unpin_pages(obj);
-	if (rq) {
+	if (rq && !err) {
 		err = dma_resv_reserve_fences(obj->base.resv, 1);
 		if (!err)
 			dma_resv_add_fence(obj->base.resv, &rq->fence,
@@ -1237,24 +1228,24 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
 	}
 	i915_gem_object_unlock(obj);
 	if (err)
-		goto out_put;
+		goto out_addr;
 
 	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
 		igt_make_evictable(&objects);
 
 	if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
 		err = i915_gem_object_lock(obj, NULL);
 		if (err)
-			goto out_put;
+			goto out_addr;
 
 		/*
-		 * Ensure we only simulate the gpu failuire when faulting the
+		 * Ensure we only simulate the gpu failure when faulting the
 		 * pages.
 		 */
 		err = i915_gem_object_wait_moving_fence(obj, true);
 		i915_gem_object_unlock(obj);
 		if (err)
-			goto out_put;
+			goto out_addr;
 		i915_ttm_migrate_set_failure_modes(true, false);
 	}
 
@@ -1298,6 +1289,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
 		}
 	}
 
+out_addr:
+	vm_munmap(addr, obj->base.size);
+
 out_put:
 	i915_gem_object_put(obj);
 	igt_close_objects(i915, &objects);
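
With the mapping's lifetime now owned by __igt_mmap_migrate(), the unwind order matters: every error after the mmap succeeds jumps to the new out_addr label, which unmaps before falling through to out_put. A condensed sketch of the resulting flow in the caller, assembled from the hunks above with the intermediate fill/lock/pin/clear steps and error reporting elided:

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		err = -EINVAL;
		goto out_addr;			/* unmap before dropping the object */
	}

	/*
	 * ... fill, lock, pin, migrate-clear and fault checks, each taking
	 * "goto out_addr" where they previously took "goto out_put" ...
	 */

out_addr:
	vm_munmap(addr, obj->base.size);	/* tear down the mapping first */

out_put:
	i915_gem_object_put(obj);		/* then release the object */
	igt_close_objects(i915, &objects);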