@@ -1917,3 +1917,144 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
 }
+
+/*
+ * debugfs to enable/disable gfx job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+
+	mask = (1 << adev->gfx.num_gfx_rings) - 1;
+	if ((val & mask) == 0)
+		return -EINVAL;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+		ring = &adev->gfx.gfx_ring[i];
+		if (val & (1 << i))
+			ring->sched.ready = true;
+		else
+			ring->sched.ready = false;
+	}
+	/* publish the sched.ready flag update so it takes effect immediately across SMP */
+	smp_rmb();
+	return 0;
+}
+
+static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+		ring = &adev->gfx.gfx_ring[i];
+		if (ring->sched.ready)
+			mask |= 1 << i;
+	}
+
+	*val = mask;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
+			 amdgpu_debugfs_gfx_sched_mask_get,
+			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	if (!(adev->gfx.num_gfx_rings > 1))
+		return;
+	sprintf(name, "amdgpu_gfx_sched_mask");
+	debugfs_create_file(name, 0600, root, adev,
+			    &amdgpu_debugfs_gfx_sched_mask_fops);
+#endif
+}
+
+/*
+ * debugfs to enable/disable compute job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+
+	mask = (1 << adev->gfx.num_compute_rings) - 1;
+	if ((val & mask) == 0)
+		return -EINVAL;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+		ring = &adev->gfx.compute_ring[i];
+		if (val & (1 << i))
+			ring->sched.ready = true;
+		else
+			ring->sched.ready = false;
+	}
+
+	/* publish the sched.ready flag update so it takes effect immediately across SMP */
+	smp_rmb();
+	return 0;
+}
+
+static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+		ring = &adev->gfx.compute_ring[i];
+		if (ring->sched.ready)
+			mask |= 1 << i;
+	}
+
+	*val = mask;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
+			 amdgpu_debugfs_compute_sched_mask_get,
+			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	if (!(adev->gfx.num_compute_rings > 1))
+		return;
+	sprintf(name, "amdgpu_compute_sched_mask");
+	debugfs_create_file(name, 0600, root, adev,
+			    &amdgpu_debugfs_compute_sched_mask_fops);
+#endif
+}
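
For context, a minimal userspace sketch of how the new gfx node could be exercised once the patch is applied. It is not part of the patch: the debugfs mount point and the dri/0 minor index are assumptions that vary per system, and the node requires root since it is created with mode 0600. Debugfs simple attributes parse writes with base-0 kstrtoull, so both "1" and "0x1" should be accepted.

/*
 * Hypothetical usage sketch, not part of the patch: read the current gfx
 * scheduler mask, then restrict job submission to ring 0. The path assumes
 * debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0.
 */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask";
	uint64_t mask;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%" SCNx64, &mask) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("current mask: 0x%" PRIx64 "\n", mask);

	/* Keep only ring 0 enabled; a value with no valid ring bit set
	 * would be rejected with -EINVAL by the set handler above. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "0x1\n");
	fclose(f);
	return 0;
}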
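
The gfx and compute set handlers differ only in which ring array and count they walk, so a shared helper along these lines would be possible; this is an illustrative kernel-style sketch, not code from the patch, and the helper name is hypothetical. Using 1ULL also keeps the shift well-defined if a ring count ever exceeded 31.

/* Hypothetical refactor sketch (not in the patch): one mask-set helper
 * shared by the gfx and compute debugfs handlers. */
static int amdgpu_debugfs_ring_sched_mask_set(struct amdgpu_ring *rings,
					      u32 num_rings, u64 val)
{
	u64 mask = (1ULL << num_rings) - 1;
	u32 i;

	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < num_rings; ++i)
		rings[i].sched.ready = !!(val & (1ULL << i));

	/* publish the sched.ready updates across SMP, as the patch does */
	smp_rmb();
	return 0;
}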