@@ -3,6 +3,8 @@
  * Copyright © 2022 Intel Corporation
  */
 
+#include <linux/dma-fence-array.h>
+
 #include "xe_pt.h"
 
 #include "regs/xe_gtt_defs.h"
@@ -1627,9 +1629,11 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
 
 static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
 {
+        int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
+
         if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
                 return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
-                                               xe->info.tile_count);
+                                               xe->info.tile_count << shift);
 
         return 0;
 }
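Note: with a media GT present, each tile can now emit two TLB invalidation fences (one for the primary GT and one for the media GT), so the per-tile slot reservation on the BO's reservation object is doubled via the shift: tile_count << 1 instead of tile_count.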
@@ -1816,6 +1820,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
         struct xe_vm_pgtable_update_ops *pt_update_ops =
                 &vops->pt_update_ops[tile->id];
         struct xe_vma_op *op;
+        int shift = tile->media_gt ? 1 : 0;
         int err;
 
         lockdep_assert_held(&vops->vm->lock);
@@ -1824,7 +1829,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
         xe_pt_update_ops_init(pt_update_ops);
 
         err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
-                                      tile_to_xe(tile)->info.tile_count);
+                                      tile_to_xe(tile)->info.tile_count << shift);
         if (err)
                 return err;
 
@@ -1849,13 +1854,20 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
 
 static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
                            struct xe_vm_pgtable_update_ops *pt_update_ops,
-                           struct xe_vma *vma, struct dma_fence *fence)
+                           struct xe_vma *vma, struct dma_fence *fence,
+                           struct dma_fence *fence2)
 {
-        if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+        if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                    pt_update_ops->wait_vm_bookkeep ?
                                    DMA_RESV_USAGE_KERNEL :
                                    DMA_RESV_USAGE_BOOKKEEP);
+                if (fence2)
+                        dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+                                           pt_update_ops->wait_vm_bookkeep ?
+                                           DMA_RESV_USAGE_KERNEL :
+                                           DMA_RESV_USAGE_BOOKKEEP);
+        }
         vma->tile_present |= BIT(tile->id);
         vma->tile_staged &= ~BIT(tile->id);
         if (xe_vma_is_userptr(vma)) {
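bind_op_commit() (and unbind_op_commit() below) gains a second fence parameter: when the media GT produced its own invalidation fence, both fences are installed into the BO's reservation object with the same usage as the primary fence.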
@@ -1875,13 +1887,20 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 
 static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
                              struct xe_vm_pgtable_update_ops *pt_update_ops,
-                             struct xe_vma *vma, struct dma_fence *fence)
+                             struct xe_vma *vma, struct dma_fence *fence,
+                             struct dma_fence *fence2)
 {
-        if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+        if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
                 dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
                                    pt_update_ops->wait_vm_bookkeep ?
                                    DMA_RESV_USAGE_KERNEL :
                                    DMA_RESV_USAGE_BOOKKEEP);
+                if (fence2)
+                        dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+                                           pt_update_ops->wait_vm_bookkeep ?
+                                           DMA_RESV_USAGE_KERNEL :
+                                           DMA_RESV_USAGE_BOOKKEEP);
+        }
         vma->tile_present &= ~BIT(tile->id);
         if (!vma->tile_present) {
                 list_del_init(&vma->combined_links.rebind);
@@ -1898,7 +1917,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
 static void op_commit(struct xe_vm *vm,
                       struct xe_tile *tile,
                       struct xe_vm_pgtable_update_ops *pt_update_ops,
-                      struct xe_vma_op *op, struct dma_fence *fence)
+                      struct xe_vma_op *op, struct dma_fence *fence,
+                      struct dma_fence *fence2)
 {
         xe_vm_assert_held(vm);
 
@@ -1907,26 +1927,28 @@ static void op_commit(struct xe_vm *vm,
                 if (!op->map.immediate && xe_vm_in_fault_mode(vm))
                         break;
 
-                bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+                bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+                               fence2);
                 break;
         case DRM_GPUVA_OP_REMAP:
                 unbind_op_commit(vm, tile, pt_update_ops,
-                                 gpuva_to_vma(op->base.remap.unmap->va), fence);
+                                 gpuva_to_vma(op->base.remap.unmap->va), fence,
+                                 fence2);
 
                 if (op->remap.prev)
                         bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
-                                       fence);
+                                       fence, fence2);
                 if (op->remap.next)
                         bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
-                                       fence);
+                                       fence, fence2);
                 break;
         case DRM_GPUVA_OP_UNMAP:
                 unbind_op_commit(vm, tile, pt_update_ops,
-                                 gpuva_to_vma(op->base.unmap.va), fence);
+                                 gpuva_to_vma(op->base.unmap.va), fence, fence2);
                 break;
         case DRM_GPUVA_OP_PREFETCH:
                 bind_op_commit(vm, tile, pt_update_ops,
-                               gpuva_to_vma(op->base.prefetch.va), fence);
+                               gpuva_to_vma(op->base.prefetch.va), fence, fence2);
                 break;
         default:
                 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
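op_commit() itself does nothing new with the second fence: it simply threads the optional fence2 through to bind_op_commit()/unbind_op_commit() for every GPUVA op type.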
@@ -1963,7 +1985,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
         struct xe_vm_pgtable_update_ops *pt_update_ops =
                 &vops->pt_update_ops[tile->id];
         struct dma_fence *fence;
-        struct invalidation_fence *ifence = NULL;
+        struct invalidation_fence *ifence = NULL, *mfence = NULL;
+        struct dma_fence **fences = NULL;
+        struct dma_fence_array *cf = NULL;
         struct xe_range_fence *rfence;
         struct xe_vma_op *op;
         int err = 0, i;
@@ -1996,6 +2020,23 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
                         err = -ENOMEM;
                         goto kill_vm_tile1;
                 }
+                if (tile->media_gt) {
+                        mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+                        if (!mfence) {
+                                err = -ENOMEM;
+                                goto free_ifence;
+                        }
+                        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+                        if (!fences) {
+                                err = -ENOMEM;
+                                goto free_ifence;
+                        }
+                        cf = dma_fence_array_alloc(2);
+                        if (!cf) {
+                                err = -ENOMEM;
+                                goto free_ifence;
+                        }
+                }
         }
 
         rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
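All allocations for the composite-fence path (mfence, the fences[] array, and the dma_fence_array) happen up front, while failure can still be handled with a plain error return; dma_fence_array_init() later runs in the no-fail section. This is presumably why the split dma_fence_array_alloc()/dma_fence_array_init() pair is used rather than the one-step dma_fence_array_create().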
@@ -2027,19 +2068,50 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
         /* tlb invalidation must be done before signaling rebind */
         if (ifence) {
+                if (mfence)
+                        dma_fence_get(fence);
                 invalidation_fence_init(tile->primary_gt, ifence, fence,
                                         pt_update_ops->start,
                                         pt_update_ops->last, vm->usm.asid);
-                fence = &ifence->base.base;
+                if (mfence) {
+                        invalidation_fence_init(tile->media_gt, mfence, fence,
+                                                pt_update_ops->start,
+                                                pt_update_ops->last, vm->usm.asid);
+                        fences[0] = &ifence->base.base;
+                        fences[1] = &mfence->base.base;
+                        dma_fence_array_init(cf, 2, fences,
+                                             vm->composite_fence_ctx,
+                                             vm->composite_fence_seqno++,
+                                             false);
+                        fence = &cf->base;
+                } else {
+                        fence = &ifence->base.base;
+                }
         }
 
-        dma_resv_add_fence(xe_vm_resv(vm), fence,
-                           pt_update_ops->wait_vm_bookkeep ?
-                           DMA_RESV_USAGE_KERNEL :
-                           DMA_RESV_USAGE_BOOKKEEP);
+        if (!mfence) {
+                dma_resv_add_fence(xe_vm_resv(vm), fence,
+                                   pt_update_ops->wait_vm_bookkeep ?
+                                   DMA_RESV_USAGE_KERNEL :
+                                   DMA_RESV_USAGE_BOOKKEEP);
 
-        list_for_each_entry(op, &vops->list, link)
-                op_commit(vops->vm, tile, pt_update_ops, op, fence);
+                list_for_each_entry(op, &vops->list, link)
+                        op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
+        } else {
+                dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+                                   pt_update_ops->wait_vm_bookkeep ?
+                                   DMA_RESV_USAGE_KERNEL :
+                                   DMA_RESV_USAGE_BOOKKEEP);
+
+                dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+                                   pt_update_ops->wait_vm_bookkeep ?
+                                   DMA_RESV_USAGE_KERNEL :
+                                   DMA_RESV_USAGE_BOOKKEEP);
+
+                list_for_each_entry(op, &vops->list, link)
+                        op_commit(vops->vm, tile, pt_update_ops, op,
+                                  &ifence->base.base, &mfence->base.base);
+        }
 
         if (pt_update_ops->needs_userptr_lock)
                 up_read(&vm->userptr.notifier_lock);
@@ -2049,6 +2121,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 free_rfence:
         kfree(rfence);
 free_ifence:
+        kfree(cf);
+        kfree(fences);
+        kfree(mfence);
         kfree(ifence);
 kill_vm_tile1:
         if (err != -EAGAIN && tile->id)
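For reference, a minimal self-contained sketch of the composite-fence pattern used above, written with the one-step helper dma_fence_array_create() instead of the split alloc/init pair (the function combine_two() and its ctx/seqno parameters are illustrative, not from this patch):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

/*
 * Combine two fences into one fence that signals only when both have
 * signaled. dma_fence_array_create() takes ownership of the fences[]
 * array (it is kfree'd when the array fence is released) and of one
 * reference to each fence, so the array must be heap-allocated.
 */
static struct dma_fence *combine_two(struct dma_fence *a,
                                     struct dma_fence *b,
                                     u64 ctx, unsigned int seqno)
{
        struct dma_fence **fences;
        struct dma_fence_array *cf;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return NULL;

        fences[0] = a;
        fences[1] = b;

        /* signal_on_any == false: wait for both, as in the patch above */
        cf = dma_fence_array_create(2, fences, ctx, seqno, false);
        if (!cf) {
                kfree(fences);
                return NULL;
        }

        return &cf->base;
}

The patch follows the same shape: one invalidation fence per GT, both collected into a dma_fence_array under the VM's composite fence context, with the array fence standing in for the single fence in the non-media path.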