@@ -616,6 +616,12 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
 			vops->pt_update_ops[i].num_ops += inc_val;
 }
 
+#define XE_VMA_CREATE_MASK ( \
+	XE_VMA_READ_ONLY | \
+	XE_VMA_DUMPABLE | \
+	XE_VMA_SYSTEM_ALLOCATOR | \
+	DRM_GPUVA_SPARSE)
+
 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 				  u8 tile_mask)
 {
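Note: the hunk above replaces the per-bool plumbing (the VMA_CREATE_FLAG_* defines removed further down) with a single mask over the gpuva flag namespace itself. A minimal standalone sketch of the pattern; the bit values and the XE_VMA_BUSY flag below are illustrative stand-ins, not the driver's real definitions:

#include <assert.h>
#include <stdio.h>

#define DRM_GPUVA_SPARSE	(1 << 0)	/* stand-in value */
#define XE_VMA_READ_ONLY	(1 << 1)	/* stand-in value */
#define XE_VMA_DUMPABLE		(1 << 2)	/* stand-in value */
#define XE_VMA_SYSTEM_ALLOCATOR	(1 << 3)	/* stand-in value */
#define XE_VMA_BUSY		(1 << 4)	/* hypothetical transient flag */

/* Only these bits may be carried into a newly created VMA. */
#define XE_VMA_CREATE_MASK ( \
	XE_VMA_READ_ONLY | \
	XE_VMA_DUMPABLE | \
	XE_VMA_SYSTEM_ALLOCATOR | \
	DRM_GPUVA_SPARSE)

int main(void)
{
	unsigned int old_flags = XE_VMA_READ_ONLY | XE_VMA_BUSY;

	/* One AND replaces four bool-to-flag translations: creation-safe
	 * bits pass through, transient state (XE_VMA_BUSY) is dropped. */
	unsigned int new_flags = old_flags & XE_VMA_CREATE_MASK;

	assert(new_flags & XE_VMA_READ_ONLY);
	assert(!(new_flags & XE_VMA_BUSY));
	printf("propagated flags: %#x\n", new_flags);
	return 0;
}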
@@ -628,8 +634,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
 	op->base.map.gem.offset = vma->gpuva.gem.offset;
 	op->map.vma = vma;
 	op->map.immediate = true;
-	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
-	op->map.is_null = xe_vma_is_null(vma);
+	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
 }
 
 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +937,6 @@ static void xe_vma_free(struct xe_vma *vma)
 		kfree(vma);
 }
 
-#define VMA_CREATE_FLAG_READ_ONLY		BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL			BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE		BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR	BIT(3)
-
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    struct xe_bo *bo,
 				    u64 bo_offset_or_userptr,
@@ -947,11 +947,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	struct xe_vma *vma;
 	struct xe_tile *tile;
 	u8 id;
-	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
-	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
-	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
-	bool is_cpu_addr_mirror =
-		(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+	bool is_null = (flags & DRM_GPUVA_SPARSE);
+	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
 
 	xe_assert(vm->xe, start < end);
 	xe_assert(vm->xe, end < vm->size);
@@ -972,10 +969,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 		if (!vma)
 			return ERR_PTR(-ENOMEM);
 
-		if (is_cpu_addr_mirror)
-			vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
-		if (is_null)
-			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 		if (bo)
 			vma->gpuva.gem.obj = &bo->ttm.base;
 	}
@@ -986,10 +979,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->gpuva.vm = &vm->gpuvm;
 	vma->gpuva.va.addr = start;
 	vma->gpuva.va.range = end - start + 1;
-	if (read_only)
-		vma->gpuva.flags |= XE_VMA_READ_ONLY;
-	if (dumpable)
-		vma->gpuva.flags |= XE_VMA_DUMPABLE;
+	vma->gpuva.flags = flags;
 
 	for_each_tile(tile, vm->xe, id)
 		vma->tile_mask |= 0x1 << id;
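Note: with the two hunks above, xe_vma_create() now stores the caller's flags word verbatim and derives the few booleans it still needs as locals, instead of re-encoding each VMA_CREATE_FLAG_* bit. A minimal sketch of that shape, with stand-in bit values:

#include <stdbool.h>
#include <stdio.h>

#define DRM_GPUVA_SPARSE	(1 << 0)	/* stand-in value */
#define XE_VMA_SYSTEM_ALLOCATOR	(1 << 3)	/* stand-in value */

static void create(unsigned int flags)
{
	/* Locals are derived on demand; the flags word is stored as-is. */
	bool is_null = flags & DRM_GPUVA_SPARSE;
	bool is_cpu_addr_mirror = flags & XE_VMA_SYSTEM_ALLOCATOR;

	printf("null=%d mirror=%d stored=%#x\n",
	       is_null, is_cpu_addr_mirror, flags);
}

int main(void)
{
	create(DRM_GPUVA_SPARSE);
	return 0;
}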
@@ -2272,12 +2262,14 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 		if (__op->op == DRM_GPUVA_OP_MAP) {
 			op->map.immediate =
 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-			op->map.read_only =
-				flags & DRM_XE_VM_BIND_FLAG_READONLY;
-			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
-			op->map.is_cpu_addr_mirror = flags &
-				DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
-			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+			if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+				op->map.vma_flags |= XE_VMA_READ_ONLY;
+			if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+				op->map.vma_flags |= DRM_GPUVA_SPARSE;
+			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+				op->map.vma_flags |= XE_VMA_DUMPABLE;
 			op->map.pat_index = pat_index;
 			op->map.invalidate_on_bind =
 				__xe_vm_needs_clear_scratch_pages(vm, flags);
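Note: the ioctl path above cannot simply AND with XE_VMA_CREATE_MASK because the DRM_XE_VM_BIND_FLAG_* uAPI bits and the internal XE_VMA_*/DRM_GPUVA_* bits live in different namespaces; each bit has to be re-encoded explicitly. A standalone sketch of why, with illustrative stand-in values:

#include <assert.h>

#define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)	/* uAPI namespace, stand-in */
#define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 1)

#define XE_VMA_READ_ONLY		(1 << 5)	/* internal namespace, stand-in */
#define XE_VMA_DUMPABLE			(1 << 6)

static unsigned int bind_flags_to_vma_flags(unsigned int bind_flags)
{
	unsigned int vma_flags = 0;

	/* A plain mask would keep the wrong bits; translate each one. */
	if (bind_flags & DRM_XE_VM_BIND_FLAG_READONLY)
		vma_flags |= XE_VMA_READ_ONLY;
	if (bind_flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
		vma_flags |= XE_VMA_DUMPABLE;
	return vma_flags;
}

int main(void)
{
	assert(bind_flags_to_vma_flags(DRM_XE_VM_BIND_FLAG_READONLY) ==
	       XE_VMA_READ_ONLY);
	return 0;
}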
@@ -2590,14 +2582,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 				.pat_index = op->map.pat_index,
 			};
 
-			flags |= op->map.read_only ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->map.is_null ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->map.dumpable ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= op->map.is_cpu_addr_mirror ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
 
 			vma = new_vma(vm, &op->base.map, &default_attr,
 				      flags);
@@ -2606,7 +2591,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
 			op->map.vma = vma;
 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
-			     !op->map.is_cpu_addr_mirror) ||
+			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
 			    op->map.invalidate_on_bind)
 				xe_vma_ops_incr_pt_update_ops(vops,
 							      op->tile_mask, 1);
@@ -2637,18 +2622,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 			op->remap.start = xe_vma_start(old);
 			op->remap.range = xe_vma_size(old);
 
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_READ_ONLY ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				DRM_GPUVA_SPARSE ?
-				VMA_CREATE_FLAG_IS_NULL : 0;
-			flags |= op->base.remap.unmap->va->flags &
-				XE_VMA_DUMPABLE ?
-				VMA_CREATE_FLAG_DUMPABLE : 0;
-			flags |= xe_vma_is_cpu_addr_mirror(old) ?
-				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
 			if (op->base.remap.prev) {
 				vma = new_vma(vm, op->base.remap.prev,
 					      &old->attr, flags);
@@ -4212,7 +4186,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 	struct xe_vma_ops vops;
 	struct drm_gpuva_ops *ops = NULL;
 	struct drm_gpuva_op *__op;
-	bool is_cpu_addr_mirror = false;
+	unsigned int vma_flags = 0;
 	bool remap_op = false;
 	struct xe_vma_mem_attr tmp_attr;
 	u16 default_pat;
@@ -4242,15 +4216,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				vma = gpuva_to_vma(op->base.unmap.va);
 				XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_REMAP) {
 				vma = gpuva_to_vma(op->base.remap.unmap->va);
 				default_pat = vma->attr.default_pat_index;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_MAP) {
-				op->map.is_cpu_addr_mirror = true;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 				op->map.pat_index = default_pat;
 			}
 		} else {
@@ -4259,11 +4235,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				xe_assert(vm->xe, !remap_op);
 				xe_assert(vm->xe, xe_vma_has_no_bo(vma));
 				remap_op = true;
-
-				if (xe_vma_is_cpu_addr_mirror(vma))
-					is_cpu_addr_mirror = true;
-				else
-					is_cpu_addr_mirror = false;
+				vma_flags = vma->gpuva.flags;
 			}
 
 			if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4244,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
 				/*
 				 * In case of madvise ops DRM_GPUVA_OP_MAP is
 				 * always after DRM_GPUVA_OP_REMAP, so ensure
-				 * we assign op->map.is_cpu_addr_mirror true
-				 * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+				 * we propagate the flags from the vma we're
+				 * unmapping.
 				 */
-				op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
 			}
 		}
 		print_op(vm->xe, __op);
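Note: the net effect of the last three hunks in xe_vm_alloc_vma() is capture-then-propagate: flags are read off the VMA being unmapped or remapped, and the creation-safe subset is ORed into the MAP op that follows it (for madvise ops, DRM_GPUVA_OP_MAP always comes after DRM_GPUVA_OP_REMAP). A simplified standalone sketch of that flow; the types and bit values are stand-ins, not the driver's real structures:

#include <assert.h>

#define XE_VMA_READ_ONLY	(1 << 1)		/* stand-in value */
#define XE_VMA_CREATE_MASK	(XE_VMA_READ_ONLY)	/* trimmed for brevity */

enum op_kind { OP_REMAP, OP_MAP };

struct fake_op {
	enum op_kind kind;
	unsigned int old_vma_flags;	/* flags of the vma being remapped */
	unsigned int map_vma_flags;	/* flags for the vma being created */
};

int main(void)
{
	/* Madvise-style sequence: REMAP of an existing vma, then MAP. */
	struct fake_op ops[] = {
		{ .kind = OP_REMAP, .old_vma_flags = XE_VMA_READ_ONLY },
		{ .kind = OP_MAP },
	};
	unsigned int vma_flags = 0;

	for (unsigned int i = 0; i < 2; i++) {
		if (ops[i].kind == OP_REMAP)
			/* Capture flags from the vma we're unmapping... */
			vma_flags = ops[i].old_vma_flags;
		else if (ops[i].kind == OP_MAP)
			/* ...and propagate the creation-safe subset. */
			ops[i].map_vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
	}

	assert(ops[1].map_vma_flags & XE_VMA_READ_ONLY);
	return 0;
}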