@@ -964,10 +964,16 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
 	}
 }
 
+#define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
+
 static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
 {
-	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_vm *vm;
 
+	if (vma == XE_INVALID_VMA)
+		return;
+
+	vm = xe_vma_vm(vma);
 	lockdep_assert_held(&vm->lock);
 
 	if (!xe_vma_has_no_bo(vma))
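A note on the sentinel: XE_INVALID_VMA is a poison pointer value, not a real allocation, so every helper it can reach must either return early (as the two locks_assert helpers now do) or substitute a neutral default before dereferencing, as the ternary in xe_pt_commit() below does. A minimal userspace sketch of the guard pattern; the struct and function names are illustrative, only the poison constant matches the patch:

```c
#include <stdio.h>

struct vma { unsigned long flags; };

/* Illustrative stand-in for XE_INVALID_VMA: a poison value that can
 * never alias a real pointer, used to tag ops with no backing VMA. */
#define INVALID_VMA ((struct vma *)0xdeaddeadull)

static unsigned long vma_flags_or_zero(const struct vma *vma)
{
	/* Guard every dereference behind the sentinel check, mirroring
	 * the early returns in the locks_assert helpers and the
	 * "(vma == XE_INVALID_VMA) ? 0 : ..." in xe_pt_commit(). */
	if (vma == INVALID_VMA)
		return 0;
	return vma->flags;
}

int main(void)
{
	struct vma v = { .flags = 0x3 };

	printf("real vma:     %#lx\n", vma_flags_or_zero(&v));
	printf("sentinel vma: %#lx\n", vma_flags_or_zero(INVALID_VMA));
	return 0;
}
```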
@@ -978,8 +984,12 @@ static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
 
 static void xe_pt_commit_locks_assert(struct xe_vma *vma)
 {
-	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_vm *vm;
 
+	if (vma == XE_INVALID_VMA)
+		return;
+
+	vm = xe_vma_vm(vma);
 	xe_pt_commit_prepare_locks_assert(vma);
 
 	if (xe_vma_is_userptr(vma))
@@ -1007,7 +1017,8 @@ static void xe_pt_commit(struct xe_vma *vma,
 			int j_ = j + entries[i].ofs;
 
 			pt_dir->children[j_] = pt_dir->staging[j_];
-			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+			xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
+				      xe_vma_vm(vma)->flags, deferred);
 		}
 	}
 }
@@ -1420,6 +1431,9 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
 	list_for_each_entry(op, &vops->list, link) {
 		struct xe_svm_range *range = op->map_range.range;
 
+		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+			continue;
+
 		xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
 		xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
 
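The pre-commit walk above revalidates only map ranges; an unmap carries nothing to revalidate, so UNMAP_RANGE sub-ops are skipped before the MAP_RANGE assertions fire. A compilable sketch of that filter with invented op types; the `pages_valid` field and the retry semantics on failure are assumptions, not the driver's actual fields:

```c
#include <stdio.h>

/* Invented mirror of the two SVM sub-ops seen in the hunk above. */
enum subop { SUBOP_MAP_RANGE, SUBOP_UNMAP_RANGE };

struct svm_op { enum subop subop; int pages_valid; };

/* Validate only the map ops; unmaps have no pages to check. */
static int pre_commit_check(const struct svm_op *ops, int n)
{
	for (int i = 0; i < n; i++) {
		if (ops[i].subop == SUBOP_UNMAP_RANGE)
			continue;	/* nothing to revalidate */
		if (!ops[i].pages_valid)
			return -1;	/* caller would retry the bind */
	}
	return 0;
}

int main(void)
{
	struct svm_op ops[] = {
		{ SUBOP_MAP_RANGE, 1 },
		{ SUBOP_UNMAP_RANGE, 0 },	/* skipped by the filter */
	};

	printf("pre-commit: %d\n", pre_commit_check(ops, 2));
	return 0;
}
```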
@@ -1617,7 +1631,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
  * xe_pt_stage_unbind() - Build page-table update structures for an unbind
  * operation
  * @tile: The tile we're unbinding for.
+ * @vm: The vm
  * @vma: The vma we're unbinding.
+ * @range: The range we're unbinding.
  * @entries: Caller-provided storage for the update structures.
  *
  * Builds page-table update structures for an unbind operation. The function
@@ -1627,9 +1643,14 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
  *
  * Return: The number of entries used.
  */
-static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
+				       struct xe_vm *vm,
+				       struct xe_vma *vma,
+				       struct xe_svm_range *range,
 				       struct xe_vm_pgtable_update *entries)
 {
+	u64 start = range ? range->base.itree.start : xe_vma_start(vma);
+	u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
 	struct xe_pt_stage_unbind_walk xe_walk = {
 		.base = {
 			.ops = &xe_pt_stage_unbind_ops,
@@ -1638,14 +1659,14 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
 			.staging = true,
 		},
 		.tile = tile,
-		.modified_start = xe_vma_start(vma),
-		.modified_end = xe_vma_end(vma),
+		.modified_start = start,
+		.modified_end = end,
 		.wupd.entries = entries,
 	};
-	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+	struct xe_pt *pt = vm->pt_root[tile->id];
 
-	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
-				xe_vma_end(vma), &xe_walk.base);
+	(void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
+				&xe_walk.base);
 
 	return xe_walk.wupd.num_used_entries;
 }
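xe_pt_stage_unbind() now walks a span named either by a VMA or by an SVM range; the range's interval-tree bounds are inclusive, so the exclusive end needs the `+ 1`. A self-contained sketch of that bounds selection, with simplified types standing in for xe_vma and xe_svm_range:

```c
#include <stdio.h>

struct vma { unsigned long start, end; };	/* half-open: [start, end) */
struct svm_range { unsigned long first, last; };	/* inclusive: [first, last] */

/* Pick the unbind span from whichever descriptor is present,
 * converting the range's inclusive 'last' to an exclusive end. */
static void unbind_span(const struct vma *vma, const struct svm_range *range,
			unsigned long *start, unsigned long *end)
{
	*start = range ? range->first : vma->start;
	*end = range ? range->last + 1 : vma->end;
}

int main(void)
{
	struct vma v = { 0x10000, 0x20000 };
	struct svm_range r = { 0x40000, 0x40fff };
	unsigned long s, e;

	unbind_span(&v, NULL, &s, &e);
	printf("vma unbind:   [%#lx...%#lx)\n", s, e);
	unbind_span(NULL, &r, &s, &e);
	printf("range unbind: [%#lx...%#lx)\n", s, e);
	return 0;
}
```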
@@ -1887,13 +1908,6 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	       "Preparing unbind, with range [%llx...%llx)\n",
 	       xe_vma_start(vma), xe_vma_end(vma) - 1);
 
-	/*
-	 * Wait for invalidation to complete. Can corrupt internal page table
-	 * state if an invalidation is running while preparing an unbind.
-	 */
-	if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
-		mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
-
 	pt_op->vma = vma;
 	pt_op->bind = false;
 	pt_op->rebind = false;
@@ -1902,7 +1916,8 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	if (err)
 		return err;
 
-	pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+	pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
+						vma, NULL, pt_op->entries);
 
 	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
 				pt_op->num_entries, false);
@@ -1917,6 +1932,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	return 0;
 }
 
+static int unbind_range_prepare(struct xe_vm *vm,
+				struct xe_tile *tile,
+				struct xe_vm_pgtable_update_ops *pt_update_ops,
+				struct xe_svm_range *range)
+{
+	u32 current_op = pt_update_ops->current_op;
+	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+
+	if (!(range->tile_present & BIT(tile->id)))
+		return 0;
+
+	vm_dbg(&vm->xe->drm,
+	       "Preparing unbind, with range [%lx...%lx)\n",
+	       range->base.itree.start, range->base.itree.last);
+
+	pt_op->vma = XE_INVALID_VMA;
+	pt_op->bind = false;
+	pt_op->rebind = false;
+
+	pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
+						pt_op->entries);
+
+	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+				pt_op->num_entries, false);
+	xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
+					 range->base.itree.last + 1);
+	++pt_update_ops->current_op;
+	pt_update_ops->needs_svm_lock = true;
+	pt_update_ops->needs_invalidation = true;
+
+	xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
+				    pt_op->num_entries);
+
+	return 0;
+}
+
 static int op_prepare(struct xe_vm *vm,
 		      struct xe_tile *tile,
 		      struct xe_vm_pgtable_update_ops *pt_update_ops,
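unbind_range_prepare() reuses the pt_op machinery built for VMAs: it claims the next slot in the ops array, tags it with the sentinel so the commit path needs no VMA, and records that the SVM lock and a TLB invalidation are required at commit time. A condensed, compilable sketch of that slot bookkeeping; the struct names are simplified and the entry staging and fence-interval calls are elided:

```c
#include <stdio.h>

#define MAX_OPS		4
#define INVALID_VMA	((void *)0xdeaddeadull)

struct pt_op { void *vma; int bind, rebind; };

struct pt_update_ops {
	struct pt_op ops[MAX_OPS];
	unsigned int current_op;
	int needs_svm_lock, needs_invalidation;
};

/* Claim and fill the next op slot for a range unbind; returns early
 * when the range is not present on this tile, as the real code does. */
static int prepare_range_unbind(struct pt_update_ops *pt, int tile_present)
{
	struct pt_op *op = &pt->ops[pt->current_op];

	if (!tile_present)
		return 0;

	op->vma = INVALID_VMA;	/* no backing VMA for this op */
	op->bind = 0;
	op->rebind = 0;
	pt->current_op++;
	pt->needs_svm_lock = 1;
	pt->needs_invalidation = 1;
	return 0;
}

int main(void)
{
	struct pt_update_ops pt = { .current_op = 0 };

	prepare_range_unbind(&pt, 1);
	printf("ops queued: %u, needs invalidation: %d\n",
	       pt.current_op, pt.needs_invalidation);
	return 0;
}
```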
@@ -1984,6 +2035,9 @@ static int op_prepare(struct xe_vm *vm,
 			err = bind_range_prepare(vm, tile, pt_update_ops,
 						 op->map_range.vma,
 						 op->map_range.range);
+		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+			err = unbind_range_prepare(vm, tile, pt_update_ops,
+						   op->unmap_range.range);
 		}
 		break;
 	default:
@@ -2173,6 +2227,8 @@ static void op_commit(struct xe_vm *vm,
 		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
 			op->map_range.range->tile_present |= BIT(tile->id);
 			op->map_range.range->tile_invalidated &= ~BIT(tile->id);
+		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+			op->unmap_range.range->tile_present &= ~BIT(tile->id);
 		}
 		break;
 	}
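Commit closes the loop on the per-tile bookkeeping: a map sets the tile's bit in tile_present, an unmap clears it, so later code can tell whether any tile still maps the range. A small sketch of that bitmask protocol with an invented range_state type:

```c
#include <stdio.h>

/* Invented stand-in for the per-range tile bookkeeping that
 * op_commit() updates: one bit per tile in each mask. */
struct range_state { unsigned int tile_present, tile_invalidated; };

static void commit_map(struct range_state *r, unsigned int tile_id)
{
	r->tile_present |= 1u << tile_id;
	r->tile_invalidated &= ~(1u << tile_id);
}

static void commit_unmap(struct range_state *r, unsigned int tile_id)
{
	r->tile_present &= ~(1u << tile_id);
}

int main(void)
{
	struct range_state r = { 0, 0 };

	commit_map(&r, 0);
	commit_map(&r, 1);
	commit_unmap(&r, 0);
	printf("tile_present: %#x\n", r.tile_present);	/* 0x2: only tile 1 */
	return 0;
}
```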