107
107
#include "amdgpu_fdinfo.h"
108
108
#include "amdgpu_mca.h"
109
109
#include "amdgpu_ras.h"
110
+ #include "amdgpu_xcp.h"
110
111
111
/* Maximum number of GPU instances the driver tracks (raised from 16 to 64). */
#define MAX_GPU_INSTANCE		64
112
113
113
114
struct amdgpu_gpu_instance
114
115
{
@@ -212,6 +213,8 @@ extern int amdgpu_noretry;
212
213
extern int amdgpu_force_asic_type ;
213
214
extern int amdgpu_smartshift_bias ;
214
215
extern int amdgpu_use_xgmi_p2p ;
216
+ extern int amdgpu_mtype_local ;
217
+ extern bool enforce_isolation ;
215
218
#ifdef CONFIG_HSA_AMD
216
219
extern int sched_policy ;
217
220
extern bool debug_evictions ;
@@ -242,9 +245,10 @@ extern int amdgpu_num_kcq;
242
245
extern int amdgpu_vcnfw_log ;
243
246
extern int amdgpu_sg_display ;
244
247
248
+ extern int amdgpu_user_partt_mode ;
249
+
245
250
#define AMDGPU_VM_MAX_NUM_CTX 4096
246
251
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
247
- #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
248
252
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
249
253
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
250
254
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
@@ -282,6 +286,7 @@ extern int amdgpu_sg_display;
282
286
#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
283
287
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
284
288
289
+ struct amdgpu_xcp_mgr ;
285
290
struct amdgpu_device ;
286
291
struct amdgpu_irq_src ;
287
292
struct amdgpu_fpriv ;
@@ -463,6 +468,8 @@ struct amdgpu_fpriv {
463
468
struct mutex bo_list_lock ;
464
469
struct idr bo_list_handles ;
465
470
struct amdgpu_ctx_mgr ctx_mgr ;
471
+ /** GPU partition selection */
472
+ uint32_t xcp_id ;
466
473
};
467
474
468
475
int amdgpu_file_to_fpriv (struct file * filp , struct amdgpu_fpriv * * fpriv );
@@ -573,6 +580,8 @@ struct amdgpu_asic_funcs {
573
580
/* query video codecs */
574
581
int (* query_video_codecs )(struct amdgpu_device * adev , bool encode ,
575
582
const struct amdgpu_video_codecs * * codecs );
583
+ /* encode "> 32bits" smn addressing */
584
+ u64 (* encode_ext_smn_addressing )(int ext_id );
576
585
};
577
586
578
587
/*
@@ -607,6 +616,9 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
607
616
typedef uint32_t (* amdgpu_rreg_t )(struct amdgpu_device * , uint32_t );
608
617
typedef void (* amdgpu_wreg_t )(struct amdgpu_device * , uint32_t , uint32_t );
609
618
619
+ typedef uint32_t (* amdgpu_rreg_ext_t )(struct amdgpu_device * , uint64_t );
620
+ typedef void (* amdgpu_wreg_ext_t )(struct amdgpu_device * , uint64_t , uint32_t );
621
+
610
622
typedef uint64_t (* amdgpu_rreg64_t )(struct amdgpu_device * , uint32_t );
611
623
typedef void (* amdgpu_wreg64_t )(struct amdgpu_device * , uint32_t , uint64_t );
612
624
@@ -657,14 +669,25 @@ enum amd_hw_ip_block_type {
657
669
MAX_HWIP
658
670
};
659
671
660
/* Maximum instances of any single hardware IP block (raised from 28 to 44). */
#define HWIP_MAX_INSTANCE	44
661
673
662
674
#define HW_ID_MAX 300
663
675
#define IP_VERSION (mj , mn , rv ) (((mj) << 16) | ((mn) << 8) | (rv))
664
676
#define IP_VERSION_MAJ (ver ) ((ver) >> 16)
665
677
#define IP_VERSION_MIN (ver ) (((ver) >> 8) & 0xFF)
666
678
#define IP_VERSION_REV (ver ) ((ver) & 0xFF)
667
679
680
+ struct amdgpu_ip_map_info {
681
+ /* Map of logical to actual dev instances/mask */
682
+ uint32_t dev_inst [MAX_HWIP ][HWIP_MAX_INSTANCE ];
683
+ int8_t (* logical_to_dev_inst )(struct amdgpu_device * adev ,
684
+ enum amd_hw_ip_block_type block ,
685
+ int8_t inst );
686
+ uint32_t (* logical_to_dev_mask )(struct amdgpu_device * adev ,
687
+ enum amd_hw_ip_block_type block ,
688
+ uint32_t mask );
689
+ };
690
+
668
691
struct amd_powerplay {
669
692
void * pp_handle ;
670
693
const struct amd_pm_funcs * pp_funcs ;
@@ -750,6 +773,7 @@ struct amdgpu_device {
750
773
struct amdgpu_acp acp ;
751
774
#endif
752
775
struct amdgpu_hive_info * hive ;
776
+ struct amdgpu_xcp_mgr * xcp_mgr ;
753
777
/* ASIC */
754
778
enum amd_asic_type asic_type ;
755
779
uint32_t family ;
@@ -797,6 +821,8 @@ struct amdgpu_device {
797
821
amdgpu_wreg_t pcie_wreg ;
798
822
amdgpu_rreg_t pciep_rreg ;
799
823
amdgpu_wreg_t pciep_wreg ;
824
+ amdgpu_rreg_ext_t pcie_rreg_ext ;
825
+ amdgpu_wreg_ext_t pcie_wreg_ext ;
800
826
amdgpu_rreg64_t pcie_rreg64 ;
801
827
amdgpu_wreg64_t pcie_wreg64 ;
802
828
/* protects concurrent UVD register access */
@@ -830,7 +856,7 @@ struct amdgpu_device {
830
856
dma_addr_t dummy_page_addr ;
831
857
struct amdgpu_vm_manager vm_manager ;
832
858
struct amdgpu_vmhub vmhub [AMDGPU_MAX_VMHUBS ];
833
- unsigned num_vmhubs ;
859
+ DECLARE_BITMAP ( vmhubs_mask , AMDGPU_MAX_VMHUBS ) ;
834
860
835
861
/* memory management */
836
862
struct amdgpu_mman mman ;
@@ -962,6 +988,7 @@ struct amdgpu_device {
962
988
963
989
/* soc15 register offset based on ip, instance and segment */
964
990
uint32_t * reg_offset [MAX_HWIP ][HWIP_MAX_INSTANCE ];
991
+ struct amdgpu_ip_map_info ip_map ;
965
992
966
993
/* delayed work_func for deferring clockgating during resume */
967
994
struct delayed_work delayed_init_work ;
@@ -1020,6 +1047,9 @@ struct amdgpu_device {
1020
1047
struct pci_saved_state * pci_state ;
1021
1048
pci_channel_state_t pci_channel_state ;
1022
1049
1050
+ /* Track auto wait count on s_barrier settings */
1051
+ bool barrier_has_auto_waitcnt ;
1052
+
1023
1053
struct amdgpu_reset_control * reset_cntl ;
1024
1054
uint32_t ip_versions [MAX_HWIP ][HWIP_MAX_INSTANCE ];
1025
1055
@@ -1050,6 +1080,8 @@ struct amdgpu_device {
1050
1080
1051
1081
bool job_hang ;
1052
1082
bool dc_enabled ;
1083
+ /* Mask of active clusters */
1084
+ uint32_t aid_mask ;
1053
1085
};
1054
1086
1055
1087
static inline struct amdgpu_device * drm_to_adev (struct drm_device * ddev )
@@ -1081,11 +1113,18 @@ size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
1081
1113
1082
1114
void amdgpu_device_vram_access (struct amdgpu_device * adev , loff_t pos ,
1083
1115
void * buf , size_t size , bool write );
1116
+ uint32_t amdgpu_device_wait_on_rreg (struct amdgpu_device * adev ,
1117
+ uint32_t inst , uint32_t reg_addr , char reg_name [],
1118
+ uint32_t expected_value , uint32_t mask );
1084
1119
uint32_t amdgpu_device_rreg (struct amdgpu_device * adev ,
1085
1120
uint32_t reg , uint32_t acc_flags );
1121
+ u32 amdgpu_device_indirect_rreg_ext (struct amdgpu_device * adev ,
1122
+ u64 reg_addr );
1086
1123
void amdgpu_device_wreg (struct amdgpu_device * adev ,
1087
1124
uint32_t reg , uint32_t v ,
1088
1125
uint32_t acc_flags );
1126
+ void amdgpu_device_indirect_wreg_ext (struct amdgpu_device * adev ,
1127
+ u64 reg_addr , u32 reg_data );
1089
1128
void amdgpu_mm_wreg_mmio_rlc (struct amdgpu_device * adev ,
1090
1129
uint32_t reg , uint32_t v );
1091
1130
void amdgpu_mm_wreg8 (struct amdgpu_device * adev , uint32_t offset , uint8_t value );
@@ -1137,6 +1176,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
1137
1176
#define WREG32_PCIE (reg , v ) adev->pcie_wreg(adev, (reg), (v))
1138
1177
#define RREG32_PCIE_PORT (reg ) adev->pciep_rreg(adev, (reg))
1139
1178
#define WREG32_PCIE_PORT (reg , v ) adev->pciep_wreg(adev, (reg), (v))
1179
+ #define RREG32_PCIE_EXT (reg ) adev->pcie_rreg_ext(adev, (reg))
1180
+ #define WREG32_PCIE_EXT (reg , v ) adev->pcie_wreg_ext(adev, (reg), (v))
1140
1181
#define RREG64_PCIE (reg ) adev->pcie_rreg64(adev, (reg))
1141
1182
#define WREG64_PCIE (reg , v ) adev->pcie_wreg64(adev, (reg), (v))
1142
1183
#define RREG32_SMC (reg ) adev->smc_rreg(adev, (reg))
@@ -1204,7 +1245,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
1204
1245
/*
1205
1246
* ASICs macro.
1206
1247
*/
1207
/*
 * Invoke the ASIC's set_vga_state hook when one is provided; ASICs that do
 * not implement the hook report success (0), so callers need no NULL check.
 */
#define amdgpu_asic_set_vga_state(adev, state) \
	((adev)->asic_funcs->set_vga_state ? \
	 (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
1208
1250
#define amdgpu_asic_reset (adev ) (adev)->asic_funcs->reset((adev))
1209
1251
#define amdgpu_asic_reset_method (adev ) (adev)->asic_funcs->reset_method((adev))
1210
1252
#define amdgpu_asic_get_xclk (adev ) (adev)->asic_funcs->get_xclk((adev))
@@ -1235,6 +1277,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
1235
1277
1236
1278
#define amdgpu_inc_vram_lost (adev ) atomic_inc(&((adev)->vram_lost_counter));
1237
1279
1280
/*
 * for_each_inst() - iterate @i over every set bit (instance index, 0-based)
 * in @inst_mask, lowest bit first.
 *
 * NOTE: destructive — @inst_mask is cleared bit by bit as the loop advances,
 * so pass a scratch copy if the caller still needs the mask afterwards.
 */
#define for_each_inst(i, inst_mask)                                        \
	for (i = ffs(inst_mask) - 1; inst_mask;                            \
	     inst_mask &= ~(1U << i), i = ffs(inst_mask) - 1)
1238
1284
/* Minimum of two values. Beware: X and Y are each evaluated more than once. */
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
1239
1285
1240
1286
/* Common functions */
@@ -1348,6 +1394,12 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
1348
1394
1349
1395
/* amdgpu_acpi.c */
1350
1396
1397
/**
 * struct amdgpu_numa_info - NUMA properties for a device memory region
 * @size: size of the region (presumably bytes — confirm against the ACPI
 *        callers that fill this in)
 * @pxm: proximity domain (looks like the ACPI _PXM value — verify)
 * @nid: NUMA node id associated with @pxm
 */
struct amdgpu_numa_info {
	uint64_t size;
	int pxm;
	int nid;
};
1402
+
1351
1403
/* ATCS Device/Driver State */
1352
1404
#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
1353
1405
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
@@ -1365,15 +1417,32 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
1365
1417
u8 dev_state , bool drv_state );
1366
1418
int amdgpu_acpi_smart_shift_update (struct drm_device * dev , enum amdgpu_ss ss_state );
1367
1419
int amdgpu_acpi_pcie_notify_device_ready (struct amdgpu_device * adev );
1420
+ int amdgpu_acpi_get_tmr_info (struct amdgpu_device * adev , u64 * tmr_offset ,
1421
+ u64 * tmr_size );
1422
+ int amdgpu_acpi_get_mem_info (struct amdgpu_device * adev , int xcc_id ,
1423
+ struct amdgpu_numa_info * numa_info );
1368
1424
1369
1425
void amdgpu_acpi_get_backlight_caps (struct amdgpu_dm_backlight_caps * caps );
1370
1426
bool amdgpu_acpi_should_gpu_reset (struct amdgpu_device * adev );
1371
1427
void amdgpu_acpi_detect (void );
1428
+ void amdgpu_acpi_release (void );
1372
1429
#else
1373
1430
/* Stub for builds without ACPI support: no-op that reports success. */
static inline int amdgpu_acpi_init(struct amdgpu_device *adev)
{
	return 0;
}
1431
+ static inline int amdgpu_acpi_get_tmr_info (struct amdgpu_device * adev ,
1432
+ u64 * tmr_offset , u64 * tmr_size )
1433
+ {
1434
+ return - EINVAL ;
1435
+ }
1436
/* Stub for builds without ACPI support: NUMA info is unavailable, fail. */
static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
					   int xcc_id,
					   struct amdgpu_numa_info *numa_info)
{
	return -EINVAL;
}
1374
1442
/* Stubs for builds without ACPI support: all no-ops. */
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }

/* Without ACPI there is no platform state that demands a GPU reset. */
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
	return false;
}

static inline void amdgpu_acpi_detect(void) { }

static inline void amdgpu_acpi_release(void) { }
1377
1446
/* Stub for builds without ACPI support: power shift control is unsupported. */
static inline bool amdgpu_acpi_is_power_shift_control_supported(void)
{
	return false;
}
1378
1447
static inline int amdgpu_acpi_power_shift_control (struct amdgpu_device * adev ,
1379
1448
u8 dev_state , bool drv_state ) { return 0 ; }
0 commit comments