@@ -66,19 +66,19 @@ static void nbio_v7_11_sdma_doorbell_range(struct amdgpu_device *adev, int insta
 					   bool use_doorbell, int doorbell_index,
 					   int doorbell_size)
 {
-	u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_SDMA0_DOORBELL_RANGE);
+	u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_CSDMA_DOORBELL_RANGE);
 	u32 doorbell_range = RREG32_PCIE_PORT(reg);
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       OFFSET, doorbell_index);
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       SIZE, doorbell_size);
 	} else {
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       SIZE, 0);
 	}
 
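For context on the rename above: `REG_SET_FIELD()` is amdgpu's standard read-modify-write helper, expanding the `<REG>__<FIELD>_MASK`/`__SHIFT` constants from the generated `*_sh_mask.h` register headers, so changing the register argument from `GDC0_BIF_SDMA0_DOORBELL_RANGE` to `GDC0_BIF_CSDMA_DOORBELL_RANGE` switches which mask/shift pair gets used. A minimal standalone sketch of the same mask-and-shift pattern; the field layout below is illustrative, not the real CSDMA register layout:

```c
#include <stdint.h>

/* Illustrative field layout only -- the real OFFSET/SIZE masks come
 * from the generated nbio sh_mask header, not from this sketch. */
#define DOORBELL_RANGE__OFFSET_MASK	0x00000ffcu
#define DOORBELL_RANGE__OFFSET__SHIFT	2u
#define DOORBELL_RANGE__SIZE_MASK	0x001f0000u
#define DOORBELL_RANGE__SIZE__SHIFT	16u

/* The read-modify-write pattern REG_SET_FIELD() expands to:
 * clear the field's bits, then OR in the new value shifted into place. */
static uint32_t set_field(uint32_t reg_val, uint32_t mask, uint32_t shift,
			  uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}
```

Disabling the range then reads as `set_field(range, DOORBELL_RANGE__SIZE_MASK, DOORBELL_RANGE__SIZE__SHIFT, 0)`, mirroring the `SIZE, 0` branch in the hunk.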
@@ -145,27 +145,25 @@ static void nbio_v7_11_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_11_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							  bool enable)
 {
-	/* u32 tmp = 0;
+	u32 tmp = 0;
 
 	if (enable) {
-		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		tmp = REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
-		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		      REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
-		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		      REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);
 
 		WREG32_SOC15(NBIO, 0,
-			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
 			     lower_32_bits(adev->doorbell.base));
 		WREG32_SOC15(NBIO, 0,
-			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
 			     upper_32_bits(adev->doorbell.base));
 	}
 
-	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
-		     tmp);
-	*/
+	WREG32_SOC15(NBIO, 0, regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 
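Two things worth noting in the newly live body: since `tmp` starts at 0, the three OR-ed `REG_SET_FIELD(tmp, ...)` results compose cleanly, and when `enable` is false `tmp` stays 0, so the final CNTL write disables the aperture. The aperture base itself is a 64-bit doorbell address programmed through two 32-bit registers; `lower_32_bits()`/`upper_32_bits()` are the stock kernel helpers for that split. A self-contained sketch of the split, with a stub write standing in for `WREG32_SOC15()` (the names here are placeholders, not the PF1 register offsets above):

```c
#include <stdint.h>
#include <stdio.h>

/* Stub standing in for WREG32_SOC15(); a real driver writes MMIO here. */
static void mmio_write32(uint32_t offset, uint32_t val)
{
	printf("reg 0x%08x <- 0x%08x\n", (unsigned int)offset,
	       (unsigned int)val);
}

/* Split a 64-bit base the same way lower_32_bits()/upper_32_bits() do. */
static void program_aperture_base(uint64_t doorbell_base,
				  uint32_t base_low_reg,
				  uint32_t base_high_reg)
{
	mmio_write32(base_low_reg, (uint32_t)(doorbell_base & 0xffffffffu));
	mmio_write32(base_high_reg, (uint32_t)(doorbell_base >> 32));
}
```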
@@ -216,12 +214,12 @@ static void nbio_v7_11_ih_control(struct amdgpu_device *adev)
 
 static u32 nbio_v7_11_get_hdp_flush_req_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_REQ);
 }
 
 static u32 nbio_v7_11_get_hdp_flush_done_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_DONE);
 }
 
 static u32 nbio_v7_11_get_pcie_index_offset(struct amdgpu_device *adev)
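These two callbacks only return register offsets; in the usual amdgpu pattern the flush itself is driven by ring code, which writes a per-client mask to the FLUSH_REQ register and waits for the same bit to appear in FLUSH_DONE. The masks come from the `nbio_v7_11_hdp_flush_reg` table in the next hunk. A simplified host-side sketch of that request/poll handshake, with a stub register pair so it runs standalone:

```c
#include <stdbool.h>
#include <stdint.h>

/* Stub REQ/DONE pair; this fake "hardware" completes every request
 * immediately, which real hardware obviously does not. */
static uint32_t flush_req, flush_done;

static void write_req(uint32_t mask)
{
	flush_req = mask;
	flush_done |= mask;	/* pretend the flush finished */
}

/* Request an HDP flush for one client (e.g. a CP or SDMA done bit),
 * then poll -- bounded, so a hung engine cannot spin forever. */
static bool hdp_flush(uint32_t ref_and_mask, unsigned int max_polls)
{
	write_req(ref_and_mask);
	while (max_polls--)
		if (flush_done & ref_and_mask)
			return true;
	return false;
}
```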
@@ -236,27 +234,27 @@ static u32 nbio_v7_11_get_pcie_data_offset(struct amdgpu_device *adev)
 
 static u32 nbio_v7_11_get_pcie_port_index_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_INDEX);
 }
 
 static u32 nbio_v7_11_get_pcie_port_data_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_DATA);
 }
 
 const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
-	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
-	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
-	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
-	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
-	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
-	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
-	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
-	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
-	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
-	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
-	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
-	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+	.ref_and_mask_cp0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0_MASK,
+	.ref_and_mask_cp1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1_MASK,
+	.ref_and_mask_cp2 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2_MASK,
+	.ref_and_mask_cp3 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3_MASK,
+	.ref_and_mask_cp4 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4_MASK,
+	.ref_and_mask_cp5 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5_MASK,
+	.ref_and_mask_cp6 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6_MASK,
+	.ref_and_mask_cp7 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7_MASK,
+	.ref_and_mask_cp8 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8_MASK,
+	.ref_and_mask_cp9 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9_MASK,
+	.ref_and_mask_sdma0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+	.ref_and_mask_sdma1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
 static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
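`regBIF_BX_PF1_RSMU_INDEX`/`regBIF_BX_PF1_RSMU_DATA` are a classic index/data pair for indirect register access: the port accessors (the `RREG32_PCIE_PORT()` call in the first hunk, for instance) select a target register by writing its offset to INDEX, then move the value through DATA. A self-contained sketch of the pattern, with a small array standing in for the indirect register space:

```c
#include <stdint.h>

#define PORT_REG_COUNT 256u

/* Fake indirect register space so the sketch runs standalone; real
 * hardware decodes the INDEX write and routes the DATA access. */
static uint32_t port_regs[PORT_REG_COUNT];
static uint32_t rsmu_index;

static void rsmu_index_write(uint32_t offset)
{
	rsmu_index = offset % PORT_REG_COUNT;
}

/* Indirect read: select the register, then read through DATA. */
static uint32_t port_read32(uint32_t offset)
{
	rsmu_index_write(offset);
	return port_regs[rsmu_index];
}

/* Indirect write: select the register, then write through DATA. */
static void port_write32(uint32_t offset, uint32_t val)
{
	rsmu_index_write(offset);
	port_regs[rsmu_index] = val;
}
```

Because an INDEX write and the following DATA access must not interleave with another user's pair, the real driver serializes the two MMIO operations under a lock; the sketch above ignores concurrency.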