@@ -101,38 +101,25 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
 #define MCA_BANK_IPID(_ip, _hwid, _type) \
 	[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
 
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
-	return (smu->adev->flags & AMD_IS_APU) &&
-	       smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
-	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
-	case IP_VERSION(13, 0, 6):
-		return smu->smc_fw_version >= 0x557600;
-	case IP_VERSION(13, 0, 14):
-		return smu->smc_fw_version >= 0x05550E00;
-	default:
-		return false;
-	}
-}
-
-static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
-{
-	if (smu->adev->flags & AMD_IS_APU)
-		return smu->smc_fw_version >= 0x04556F00;
+enum smu_v13_0_6_caps {
+	SMU_13_0_6_CAPS_DPM,
+	SMU_13_0_6_CAPS_UNI_METRICS,
+	SMU_13_0_6_CAPS_DPM_POLICY,
+	SMU_13_0_6_CAPS_OTHER_END_METRICS,
+	SMU_13_0_6_CAPS_SET_UCLK_MAX,
+	SMU_13_0_6_CAPS_PCIE_METRICS,
+	SMU_13_0_6_CAPS_HST_LIMIT_METRICS,
+	SMU_13_0_6_CAPS_MCA_DEBUG_MODE,
+	SMU_13_0_6_CAPS_PER_INST_METRICS,
+	SMU_13_0_6_CAPS_CTF_LIMIT,
+	SMU_13_0_6_CAPS_RMA_MSG,
+	SMU_13_0_6_CAPS_ACA_SYND,
+	SMU_13_0_6_CAPS_SDMA_RESET,
+	SMU_13_0_6_CAPS_ALL,
+};
 
-	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
-	case IP_VERSION(13, 0, 6):
-		return smu->smc_fw_version >= 0x557900;
-	case IP_VERSION(13, 0, 14):
-		return smu->smc_fw_version >= 0x05551000;
-	default:
-		return false;
-	}
-}
+#define SMU_CAPS_MASK(x) (ULL(1) << x)
+#define SMU_CAPS(x) SMU_CAPS_MASK(SMU_13_0_6_CAPS_##x)
 
 struct mca_bank_ipid {
 	enum amdgpu_mca_ip ip;
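The removed smu_v13_0_6_is_*() helpers each re-derived firmware support from smc_fw_version at every call site; the enum plus the SMU_CAPS_MASK()/SMU_CAPS() macros replace them with a single 64-bit capability word computed once per device. Below is a minimal, self-contained sketch of that bit-flag pattern; the local ULL() definition and the CAPS_DEMO_* names are stand-ins for illustration only and are not part of the driver.

/* Standalone sketch of the capability-bitmask pattern used above.
 * ULL() is defined locally here as a stand-in for the kernel helper,
 * so treat this as an approximation for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ULL(x) x##ULL	/* local stand-in, not the kernel macro */

enum caps_demo {
	CAPS_DEMO_DPM,
	CAPS_DEMO_UNI_METRICS,
	CAPS_DEMO_SET_UCLK_MAX,
};

#define CAPS_DEMO_MASK(x) (ULL(1) << (x))

int main(void)
{
	/* One 64-bit word records every capability of a device. */
	uint64_t caps = CAPS_DEMO_MASK(CAPS_DEMO_DPM) |
			CAPS_DEMO_MASK(CAPS_DEMO_UNI_METRICS);

	/* A feature check then reduces to a single mask test. */
	bool has_uclk_max = (caps & CAPS_DEMO_MASK(CAPS_DEMO_SET_UCLK_MAX)) != 0;

	printf("uclk max cap: %s\n", has_uclk_max ? "yes" : "no");
	return 0;
}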
@@ -297,6 +284,119 @@ struct smu_v13_0_6_dpm_map {
 	uint32_t *freq_table;
 };
 
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+			SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+			SMU_CAPS(PCIE_METRICS) | SMU_CAPS(CTF_LIMIT) |
+			SMU_CAPS(MCA_DEBUG_MODE) | SMU_CAPS(RMA_MSG) |
+			SMU_CAPS(ACA_SYND);
+	uint32_t fw_ver = smu->smc_fw_version;
+
+	if (fw_ver >= 0x05550E00)
+		caps |= SMU_CAPS(OTHER_END_METRICS);
+	if (fw_ver >= 0x05551000)
+		caps |= SMU_CAPS(HST_LIMIT_METRICS);
+	if (fw_ver >= 0x05550B00)
+		caps |= SMU_CAPS(PER_INST_METRICS);
+	if (fw_ver > 0x05550f00)
+		caps |= SMU_CAPS(SDMA_RESET);
+
+	dpm_context->caps = caps;
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+	uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+			SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+			SMU_CAPS(PCIE_METRICS) | SMU_CAPS(MCA_DEBUG_MODE) |
+			SMU_CAPS(CTF_LIMIT) | SMU_CAPS(RMA_MSG) |
+			SMU_CAPS(ACA_SYND);
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t fw_ver = smu->smc_fw_version;
+	uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+	if (fw_ver < 0x552F00)
+		caps &= ~SMU_CAPS(DPM);
+
+	if (adev->flags & AMD_IS_APU) {
+		caps &= ~SMU_CAPS(PCIE_METRICS);
+		caps &= ~SMU_CAPS(SET_UCLK_MAX);
+		caps &= ~SMU_CAPS(DPM_POLICY);
+		caps &= ~SMU_CAPS(RMA_MSG);
+		caps &= ~SMU_CAPS(ACA_SYND);
+
+		if (fw_ver <= 0x4556900)
+			caps &= ~SMU_CAPS(UNI_METRICS);
+
+		if (fw_ver >= 0x04556F00)
+			caps |= SMU_CAPS(HST_LIMIT_METRICS);
+		if (fw_ver >= 0x04556A00)
+			caps |= SMU_CAPS(PER_INST_METRICS);
+		if (fw_ver < 0x554500)
+			caps &= ~SMU_CAPS(CTF_LIMIT);
+	} else {
+		if (fw_ver >= 0x557600)
+			caps |= SMU_CAPS(OTHER_END_METRICS);
+		if (fw_ver < 0x00556000)
+			caps &= ~SMU_CAPS(DPM_POLICY);
+		if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+			caps &= ~SMU_CAPS(SET_UCLK_MAX);
+		if (fw_ver < 0x556300)
+			caps &= ~SMU_CAPS(PCIE_METRICS);
+		if (fw_ver < 0x554800)
+			caps &= ~SMU_CAPS(MCA_DEBUG_MODE);
+		if (fw_ver >= 0x556F00)
+			caps |= SMU_CAPS(PER_INST_METRICS);
+		if (fw_ver < 0x554500)
+			caps &= ~SMU_CAPS(CTF_LIMIT);
+		if (fw_ver < 0x00555a00)
+			caps &= ~SMU_CAPS(RMA_MSG);
+		if (fw_ver < 0x00555600)
+			caps &= ~SMU_CAPS(ACA_SYND);
+		if (pgm == 0 && fw_ver >= 0x557900)
+			caps |= SMU_CAPS(HST_LIMIT_METRICS);
+	}
+	if (((pgm == 7) && (fw_ver > 0x07550700)) ||
+	    ((pgm == 0) && (fw_ver > 0x00557700)) ||
+	    ((pgm == 4) && (fw_ver > 0x4556e6c)))
+		caps |= SMU_CAPS(SDMA_RESET);
+
+	dpm_context->caps = caps;
+}
+
+static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
+					      enum smu_v13_0_6_caps caps)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+	return (dpm_context->caps & SMU_CAPS_MASK(caps)) == SMU_CAPS_MASK(caps);
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+	case IP_VERSION(13, 0, 14):
+		return smu_v13_0_14_init_caps(smu);
+	default:
+		return smu_v13_0_6_init_caps(smu);
+	}
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+	int r;
+
+	r = smu_v13_0_check_fw_version(smu);
+	/* Initialize caps flags once fw version is fetched */
+	if (!r)
+		smu_v13_0_x_init_caps(smu);
+
+	return r;
+}
+
 static int smu_v13_0_6_init_microcode(struct smu_context *smu)
 {
 	const struct smc_firmware_header_v2_1 *v2_1;
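The hex thresholds compared against fw_ver in the init_caps() helpers pack a program id in the top byte (the same field extracted as pgm via `(fw_ver >> 24) & 0xFF`) and what the driver comments elsewhere in this file call an "85.x.y" PMFW version in the lower bytes (for example, 0x554800 is described as 85.72.0). The sketch below decodes a few of the constants under that assumed layout; it illustrates the versioning convention only and is not part of the patch or a formal PMFW definition.

/* Sketch of the assumed PMFW version layout: program id in byte 3,
 * major.minor.patch in bytes 2..0, inferred from the in-code comments.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_pmfw_version(uint32_t fw_ver)
{
	uint8_t pgm   = (fw_ver >> 24) & 0xFF;	/* program/branch id, e.g. 0, 4, 7 */
	uint8_t major = (fw_ver >> 16) & 0xFF;
	uint8_t minor = (fw_ver >> 8) & 0xFF;
	uint8_t patch = fw_ver & 0xFF;

	printf("0x%08X -> program %u, version %u.%u.%u\n",
	       fw_ver, pgm, major, minor, patch);
}

int main(void)
{
	decode_pmfw_version(0x00554800);	/* 85.72.0: MCA debug mode threshold */
	decode_pmfw_version(0x00556600);	/* 85.102.0: VF UCLK max threshold */
	decode_pmfw_version(0x05550E00);	/* a program 5 build (SMU v13.0.14 threshold) */
	return 0;
}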
@@ -618,7 +718,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
 	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
 	struct PPTable_t *pptable =
 		(struct PPTable_t *)smu_table->driver_pptable;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	int ret, i, retry = 100;
 	uint32_t table_version;
 
@@ -814,8 +914,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
 	smu_v13_0_6_setup_driver_pptable(smu);
 
 	/* DPM policy not supported in older firmwares */
-	if (!(smu->adev->flags & AMD_IS_APU) &&
-	    (smu->smc_fw_version < 0x00556000)) {
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM_POLICY))) {
 		struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
 		smu_dpm->dpm_policies->policy_mask &=
@@ -992,7 +1091,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 	struct smu_table_context *smu_table = &smu->smu_table;
 	MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
 	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	int xcc_id;
@@ -1005,7 +1104,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 	switch (member) {
 	case METRICS_CURR_GFXCLK:
 	case METRICS_AVERAGE_GFXCLK:
-		if (smu->smc_fw_version >= 0x552F00) {
+		if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
 			xcc_id = GET_INST(GC, 0);
 			*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
 		} else {
@@ -1692,7 +1791,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
 static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
 {
 	/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
-	if (smu->smc_fw_version < 0x554800)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(MCA_DEBUG_MODE)))
 		return 0;
 
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1837,9 +1936,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
 		if (max == pstate_table->uclk_pstate.curr.max)
 			return 0;
 		/* For VF, only allowed in FW versions 85.102 or greater */
-		if (amdgpu_sriov_vf(adev) &&
-		    ((smu->smc_fw_version < 0x556600) ||
-		     (adev->flags & AMD_IS_APU)))
+		if (!smu_v13_0_6_caps_supported(smu,
+						SMU_CAPS(SET_UCLK_MAX)))
 			return -EOPNOTSUPP;
 		/* Only max clock limiting is allowed for UCLK */
 		ret = smu_v13_0_set_soft_freq_limited_range(
@@ -2043,7 +2141,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
 
 	ret = smu_cmn_get_enabled_mask(smu, feature_mask);
 
-	if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+	if (ret == -EIO && !smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
 		*feature_mask = 0;
 		ret = 0;
 	}
@@ -2336,18 +2434,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
 
 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 {
-	bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct gpu_metrics_v1_7 *gpu_metrics =
 		(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	int ret = 0, xcc_id, inst, i, j, k, idx;
 	struct amdgpu_device *adev = smu->adev;
 	MetricsTableX_t *metrics_x;
 	MetricsTableA_t *metrics_a;
 	struct amdgpu_xcp *xcp;
 	u16 link_width_level;
 	u32 inst_mask;
+	bool per_inst;
 
 	metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
 	ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2421,7 +2519,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 	 * table for both pf & one vf for smu version 85.99.0 or higher else report only
 	 * for pf from registers
 	 */
-	if (smu->smc_fw_version >= 0x556300) {
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(PCIE_METRICS))) {
 		gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
 		gpu_metrics->pcie_link_speed =
 			pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2450,7 +2548,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 			metrics_x->PCIeNAKSentCountAcc;
 		gpu_metrics->pcie_nak_rcvd_count_acc =
 			metrics_x->PCIeNAKReceivedCountAcc;
-		if (smu_v13_0_6_is_other_end_count_available(smu))
+		if (smu_v13_0_6_caps_supported(smu,
+					       SMU_CAPS(OTHER_END_METRICS)))
 			gpu_metrics->pcie_lc_perf_other_end_recovery =
 				metrics_x->PCIeOtherEndRecoveryAcc;
 
@@ -2475,17 +2574,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 
 	gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
 
-	apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
-	smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
-			      (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
-			       == IP_VERSION(13, 0, 6)) &&
-			      (smu->smc_fw_version >= 0x556F00);
-	smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
-			       (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
-				== IP_VERSION(13, 0, 14)) &&
-			       (smu->smc_fw_version >= 0x05550B00);
-
-	per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+	per_inst = smu_v13_0_6_caps_supported(smu, SMU_CAPS(PER_INST_METRICS));
 
 	for_each_xcp(adev->xcp_mgr, xcp, i) {
 		amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2516,7 +2605,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 			gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
 				SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
 
-			if (smu_v13_0_6_is_blw_host_limit_available(smu))
+			if (smu_v13_0_6_caps_supported(
+				    smu, SMU_CAPS(HST_LIMIT_METRICS)))
 				gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
 					SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
 						     [inst]);
@@ -2624,7 +2714,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
 		return -EINVAL;
 
 	/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
-	if (smu->smc_fw_version < 0x554500)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(CTF_LIMIT)))
 		return 0;
 
 	/* Get SOC Max operating temperature */
@@ -2726,11 +2816,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
 
 static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
 {
-	struct amdgpu_device *adev = smu->adev;
 	int ret;
 
 	/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
-	if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(RMA_MSG)))
 		return 0;
 
 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2750,18 +2839,17 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
 	smu_program = (smu->smc_fw_version >> 24) & 0xff;
 	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
 	case IP_VERSION(13, 0, 6):
-		if (((smu_program == 7) && (smu->smc_fw_version > 0x07550700)) ||
-		    ((smu_program == 0) && (smu->smc_fw_version > 0x00557700)))
+		if ((smu_program == 7 || smu_program == 0) &&
+		    smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA, inst_mask, NULL);
 		else if ((smu_program == 4) &&
-			 (smu->smc_fw_version > 0x4556e6c))
+			 smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA2, inst_mask, NULL);
 		break;
 	case IP_VERSION(13, 0, 14):
-		if ((smu_program == 5) &&
-		    (smu->smc_fw_version > 0x05550f00))
+		if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA2, inst_mask, NULL);
 		break;
@@ -3087,7 +3175,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
 	if (instlo != 0x03b30400)
 		return false;
 
-	if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND))) {
 		errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
 		errcode &= 0xff;
 	} else {
@@ -3373,9 +3461,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
 
 static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
 {
+	struct smu_context *smu = adev->powerplay.pp_handle;
 	int error_code;
 
-	if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND)))
 		error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
 	else
 		error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3413,7 +3502,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
 	.fini_power = smu_v13_0_fini_power,
 	.check_fw_status = smu_v13_0_6_check_fw_status,
 	/* pptable related */
-	.check_fw_version = smu_v13_0_check_fw_version,
+	.check_fw_version = smu_v13_0_6_check_fw_version,
 	.set_driver_table_location = smu_v13_0_set_driver_table_location,
 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,