@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
 			vops->pt_update_ops[i].num_ops += inc_val;
 }
 
+#define XE_VMA_CREATE_MASK ( \
+	XE_VMA_READ_ONLY | \
+	XE_VMA_DUMPABLE | \
+	XE_VMA_SYSTEM_ALLOCATOR | \
+	DRM_GPUVA_SPARSE | \
+	XE_VMA_MADV_AUTORESET)
+
 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 				  u8 tile_mask)
 {
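The new `XE_VMA_CREATE_MASK` collapses the old per-flag booleans into a single masked copy of the gpuva flag word. Below is a minimal standalone sketch of that pattern; the bit values are illustrative stand-ins, not the driver's real DRM_GPUVA_/XE_VMA_ definitions.

```c
#include <stdio.h>

/* Hypothetical bit values, for illustration only. */
#define DRM_GPUVA_SPARSE	(1u << 1)
#define XE_VMA_READ_ONLY	(1u << 4)
#define XE_VMA_DUMPABLE		(1u << 5)
#define XE_VMA_SYSTEM_ALLOCATOR	(1u << 6)
#define XE_VMA_MADV_AUTORESET	(1u << 7)

/* One mask selects every flag that is meaningful at VMA creation. */
#define XE_VMA_CREATE_MASK ( \
	XE_VMA_READ_ONLY | \
	XE_VMA_DUMPABLE | \
	XE_VMA_SYSTEM_ALLOCATOR | \
	DRM_GPUVA_SPARSE | \
	XE_VMA_MADV_AUTORESET)

int main(void)
{
	/* Flags of an existing vma, including a bit outside the mask. */
	unsigned int gpuva_flags = XE_VMA_READ_ONLY | DRM_GPUVA_SPARSE |
				   (1u << 12); /* e.g. a runtime-only bit */

	/* One masked copy replaces four separate bool translations. */
	unsigned int vma_flags = gpuva_flags & XE_VMA_CREATE_MASK;

	printf("propagated flags: 0x%x\n", vma_flags); /* prints 0x12 */
	return 0;
}
```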
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 	op->base.map.gem.offset = vma->gpuva.gem.offset;
 	op->map.vma = vma;
 	op->map.immediate = true;
-	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
-	op->map.is_null = xe_vma_is_null(vma);
+	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
 }
 
 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
 		kfree(vma);
 }
 
-#define VMA_CREATE_FLAG_READ_ONLY		BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL			BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE		BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR	BIT(3)
-
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    struct xe_bo *bo,
 				    u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	struct xe_vma *vma;
 	struct xe_tile *tile;
 	u8 id;
-	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
-	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
-	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
-	bool is_cpu_addr_mirror =
-		(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+	bool is_null = (flags & DRM_GPUVA_SPARSE);
+	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
 
 	xe_assert(vm->xe, start < end);
 	xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 		if (!vma)
 			return ERR_PTR(-ENOMEM);
 
-		if (is_cpu_addr_mirror)
-			vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
-		if (is_null)
-			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 		if (bo)
 			vma->gpuva.gem.obj = &bo->ttm.base;
 	}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->gpuva.vm = &vm->gpuvm;
 	vma->gpuva.va.addr = start;
 	vma->gpuva.va.range = end - start + 1;
-	if (read_only)
-		vma->gpuva.flags |= XE_VMA_READ_ONLY;
-	if (dumpable)
-		vma->gpuva.flags |= XE_VMA_DUMPABLE;
+	vma->gpuva.flags = flags;
 
 	for_each_tile(tile, vm->xe, id)
 		vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 		if (__op->op == DRM_GPUVA_OP_MAP) {
 			op->map.immediate =
 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-			op->map.read_only =
-				flags & DRM_XE_VM_BIND_FLAG_READONLY;
-			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
-			op->map.is_cpu_addr_mirror = flags &
-				DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
-			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+				op->map.vma_flags |= XE_VMA_READ_ONLY;
+			if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+				op->map.vma_flags |= DRM_GPUVA_SPARSE;
+			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+				op->map.vma_flags |= XE_VMA_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
 			op->map.pat_index = pat_index;
 			op->map.invalidate_on_bind =
 				__xe_vm_needs_clear_scratch_pages(vm, flags);
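At the uAPI boundary the bit positions of the DRM_XE_VM_BIND_FLAG_* values do not match the internal gpuva flag bits, so a plain masked copy is not possible and the hunk above translates flag by flag. A table-driven equivalent is sketched below; all flag values are hypothetical placeholders, not the real uAPI or driver definitions.

```c
#include <stddef.h>

/* Hypothetical uAPI bits. */
#define DRM_XE_VM_BIND_FLAG_READONLY		(1u << 0)
#define DRM_XE_VM_BIND_FLAG_NULL		(1u << 1)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR	(1u << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE		(1u << 3)
#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET	(1u << 4)

/* Hypothetical internal bits (different positions on purpose). */
#define DRM_GPUVA_SPARSE	(1u << 1)
#define XE_VMA_READ_ONLY	(1u << 4)
#define XE_VMA_DUMPABLE		(1u << 5)
#define XE_VMA_SYSTEM_ALLOCATOR	(1u << 6)
#define XE_VMA_MADV_AUTORESET	(1u << 7)

static const struct {
	unsigned int bind_flag;	/* uAPI bit */
	unsigned int vma_flag;	/* internal bit */
} xlat[] = {
	{ DRM_XE_VM_BIND_FLAG_READONLY,		 XE_VMA_READ_ONLY },
	{ DRM_XE_VM_BIND_FLAG_NULL,		 DRM_GPUVA_SPARSE },
	{ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,	 XE_VMA_SYSTEM_ALLOCATOR },
	{ DRM_XE_VM_BIND_FLAG_DUMPABLE,		 XE_VMA_DUMPABLE },
	{ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET, XE_VMA_MADV_AUTORESET },
};

/* Translate uAPI bind flags to vma flags, like the if-chain above. */
static unsigned int bind_to_vma_flags(unsigned int flags)
{
	unsigned int out = 0;
	size_t i;

	for (i = 0; i < sizeof(xlat) / sizeof(xlat[0]); i++)
		if (flags & xlat[i].bind_flag)
			out |= xlat[i].vma_flag;
	return out;
}
```

Either form works; the patch keeps the explicit if-chain.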
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 				.pat_index = op->map.pat_index,
 			};
 
-			flags |= op->map.read_only ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->map.is_null ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->map.dumpable ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= op->map.is_cpu_addr_mirror ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
 
 			vma = new_vma(vm, &op->base.map, &default_attr,
 				      flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
 			op->map.vma = vma;
 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
-			     !op->map.is_cpu_addr_mirror) ||
+			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
 			    op->map.invalidate_on_bind)
 				xe_vma_ops_incr_pt_update_ops(vops,
 							      op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 			op->remap.start = xe_vma_start(old);
 			op->remap.range = xe_vma_size(old);
 
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_READ_ONLY ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				DRM_GPUVA_SPARSE ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_DUMPABLE ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= xe_vma_is_cpu_addr_mirror(old) ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
 			if (op->base.remap.prev) {
 				vma = new_vma(vm, op->base.remap.prev,
 					      &old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
 	 DRM_XE_VM_BIND_FLAG_NULL | \
 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
-	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
 
 #ifdef TEST_VM_OPS_ERROR
 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
 		    XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
 				      !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
+				 op == DRM_XE_VM_BIND_OP_UNMAP) ||
+		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}
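The extra XE_IOCTL_DBG() clause rejects DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET unless the bind is a MAP of a CPU-address-mirror range. Here is a standalone sketch of that predicate, with the flag and op values as hypothetical placeholders:

```c
#include <assert.h>
#include <stdbool.h>

#define DRM_XE_VM_BIND_OP_MAP			0u	  /* hypothetical */
#define DRM_XE_VM_BIND_OP_UNMAP			1u	  /* hypothetical */
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR	(1u << 0) /* hypothetical */
#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET	(1u << 1) /* hypothetical */

static bool autoreset_args_valid(unsigned int op, unsigned int flags)
{
	bool is_cpu_addr_mirror = flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;

	if (!(flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET))
		return true;	/* flag not requested, nothing to check */

	/* Mirrors the check above: reject unless MAP + cpu-addr-mirror. */
	return is_cpu_addr_mirror && op == DRM_XE_VM_BIND_OP_MAP;
}

int main(void)
{
	assert(autoreset_args_valid(DRM_XE_VM_BIND_OP_MAP,
				    DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR |
				    DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET));
	assert(!autoreset_args_valid(DRM_XE_VM_BIND_OP_UNMAP,
				     DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET));
	return 0;
}
```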
@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 	struct xe_vma_ops vops;
 	struct drm_gpuva_ops *ops = NULL;
 	struct drm_gpuva_op *__op;
-	bool is_cpu_addr_mirror = false;
+	unsigned int vma_flags = 0;
 	bool remap_op = false;
 	struct xe_vma_mem_attr tmp_attr;
 	u16 default_pat;
@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				vma = gpuva_to_vma(op->base.unmap.va);
 				XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_REMAP) {
 				vma = gpuva_to_vma(op->base.remap.unmap->va);
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_MAP) {
-				op->map.is_cpu_addr_mirror = true;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 				op->map.pat_index = default_pat;
 			}
 		} else {
@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				xe_assert(vm->xe, !remap_op);
 				xe_assert(vm->xe, xe_vma_has_no_bo(vma));
 				remap_op = true;
-
-				if (xe_vma_is_cpu_addr_mirror(vma))
-					is_cpu_addr_mirror = true;
-				else
-					is_cpu_addr_mirror = false;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				/*
 				 * In case of madvise ops DRM_GPUVA_OP_MAP is
 				 * always after DRM_GPUVA_OP_REMAP, so ensure
-				 * we assign op->map.is_cpu_addr_mirror true
-				 * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+				 * the flags from the vma we're unmapping are
+				 * propagated.
 				 */
-				op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 			}
 		}
 		print_op(vm->xe, __op);
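The REMAP-then-MAP ordering that the rewritten comment relies on can be seen in isolation below: flags captured when the old vma is unmapped are applied to the MAP that recreates the range. Everything here (the types, the mask value) is illustrative, not driver code.

```c
#include <stdio.h>

enum op_kind { OP_REMAP, OP_MAP };

struct fake_op {
	enum op_kind kind;
	unsigned int unmap_flags;	/* flags of the vma being unmapped */
	unsigned int map_flags;		/* flags for the new vma */
};

#define CREATE_MASK 0xffu		/* stand-in for XE_VMA_CREATE_MASK */

int main(void)
{
	/* In a madvise op list the MAP always follows its REMAP. */
	struct fake_op ops[] = {
		{ .kind = OP_REMAP, .unmap_flags = 0x105 }, /* 0x100 is runtime-only */
		{ .kind = OP_MAP },
	};
	unsigned int vma_flags = 0;
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		if (ops[i].kind == OP_REMAP)
			vma_flags = ops[i].unmap_flags;		     /* capture */
		else if (ops[i].kind == OP_MAP)
			ops[i].map_flags |= vma_flags & CREATE_MASK; /* apply */
	}

	printf("new vma flags: 0x%x\n", ops[1].map_flags); /* prints 0x5 */
	return 0;
}
```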