@@ -159,7 +159,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 	return sysfs_emit(buf, "%llu\n", cnt);
 }
 
-static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
+static DEVICE_ATTR(pcie_replay_count, 0444,
 		amdgpu_device_get_pcie_replay_count, NULL);
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
@@ -183,7 +183,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
 	return sysfs_emit(buf, "%s\n", adev->product_name);
 }
 
-static DEVICE_ATTR(product_name, S_IRUGO,
+static DEVICE_ATTR(product_name, 0444,
 		amdgpu_device_get_product_name, NULL);
 
 /**
@@ -205,7 +205,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
 	return sysfs_emit(buf, "%s\n", adev->product_number);
 }
 
-static DEVICE_ATTR(product_number, S_IRUGO,
+static DEVICE_ATTR(product_number, 0444,
 		amdgpu_device_get_product_number, NULL);
 
 /**
@@ -227,7 +227,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
 	return sysfs_emit(buf, "%s\n", adev->serial);
 }
 
-static DEVICE_ATTR(serial_number, S_IRUGO,
+static DEVICE_ATTR(serial_number, 0444,
 		amdgpu_device_get_serial_number, NULL);
 
 /**
@@ -481,8 +481,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 /*
  * MMIO register read with bytes helper functions
  * @offset:bytes offset from MMIO start
- *
- */
+ */
 
 /**
  * amdgpu_mm_rreg8 - read a memory mapped IO register
@@ -506,8 +505,8 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
  * MMIO register write with bytes helper functions
  * @offset:bytes offset from MMIO start
  * @value: the value want to be written to the register
- *
- */
+ */
+
 /**
  * amdgpu_mm_wreg8 - read a memory mapped IO register
  *
@@ -991,7 +990,7 @@ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
  * @registers: pointer to the register array
  * @array_size: size of the register array
  *
- * Programs an array or registers with and and or masks.
+ * Programs an array or registers with and or masks.
  * This is a helper for setting golden registers.
  */
 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
@@ -1157,7 +1156,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
 	struct pci_bus *root;
 	struct resource *res;
-	unsigned i;
+	unsigned int i;
 	u16 cmd;
 	int r;
 
@@ -1226,9 +1225,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
 {
-	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) {
+	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
 		return false;
-	}
 
 	return true;
 }
@@ -1264,6 +1262,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 	if (adev->asic_type == CHIP_FIJI) {
 		int err;
 		uint32_t fw_ver;
+
 		err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
 		/* force vPost if error occured */
 		if (err)
@@ -1366,6 +1365,7 @@ static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
 						bool state)
 {
 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
+
 	amdgpu_asic_set_vga_state(adev, state);
 	if (state)
 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1388,7 +1388,8 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
 {
 	/* defines number of bits in page table versus page directory,
 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
-	 * page table and the remaining bits are in the page directory */
+	 * page table and the remaining bits are in the page directory
+	 */
 	if (amdgpu_vm_block_size == -1)
 		return;
 
@@ -1620,7 +1621,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	/* 
+	/*
 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
@@ -3265,7 +3266,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
  *
  * Main resume function for hardware IPs. The hardware IPs
  * are split into two resume functions because they are
- * are also used in in recovering from a GPU reset and some additional
+ * also used in recovering from a GPU reset and some additional
  * steps need to be take between them. In this case (S3/S4) they are
  * run sequentially.
  * Returns 0 on success, negative error code on failure.
@@ -3367,8 +3368,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #else
 	default:
 		if (amdgpu_dc > 0)
-			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
-				      "but isn't supported by ASIC, ignoring\n");
+			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
 		return false;
 #endif
 	}
@@ -3616,7 +3616,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
 	/* mutex initialization are all done here so we
-	 * can recall function without having locking issues */
+	 * can recall function without having locking issues
+	 */
 	mutex_init(&adev->firmware.mutex);
 	mutex_init(&adev->pm.mutex);
 	mutex_init(&adev->gfx.gpu_clock_mutex);
@@ -3693,11 +3694,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
 
 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
-	if (adev->rmmio == NULL) {
+	if (!adev->rmmio)
 		return -ENOMEM;
-	}
+
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
-	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
+	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
 
 	/*
 	 * Reset domain needs to be present early, before XGMI hive discovered
@@ -3951,7 +3952,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
 	/* this will fail for cards that aren't VGA class devices, just
-	 * ignore it */
+	 * ignore it
+	 */
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 
@@ -4034,7 +4036,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
 	/* make sure IB test finished before entering exclusive mode
 	 * to avoid preemption on IB test
-	 * */
+	 */
 	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_request_full_gpu(adev, false);
 		amdgpu_virt_fini_data_exchange(adev);
@@ -4771,8 +4773,9 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		if (!ring || !ring->sched.thread)
 			continue;
 
-		/*clear job fence from fence drv to avoid force_completion
-		 *leave NULL and vm flush fence in fence drv */
+		/* Clear job fence from fence drv to avoid force_completion
+		 * leave NULL and vm flush fence in fence drv
+		 */
 		amdgpu_fence_driver_clear_job_fences(ring);
 
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
@@ -4786,7 +4789,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
 	/* If reset handler not implemented, continue; otherwise return */
-	if (r == -ENOSYS)
+	if (r == -EOPNOTSUPP)
 		r = 0;
 	else
 		return r;
@@ -4904,7 +4907,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 	reset_context->reset_device_list = device_list_handle;
 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
 	/* If reset handler not implemented, continue; otherwise return */
-	if (r == -ENOSYS)
+	if (r == -EOPNOTSUPP)
 		r = 0;
 	else
 		return r;
@@ -5393,9 +5396,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
 			amdgpu_mes_self_test(tmp_adev);
 
-		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
+		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
-		}
 
 		if (tmp_adev->asic_reset_res)
 			r = tmp_adev->asic_reset_res;