@@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
 static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
 {
+	int r = 0;
 	u32 reg;
 
 	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
-	if (reg != event)
+	if (reg == IDH_FAIL)
+		r = -EINVAL;
+	else if (reg != event)
 		return -ENOENT;
 
 	xgpu_nv_mailbox_send_ack(adev);
 
-	return 0;
+	return r;
 }
 
 static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
@@ -178,6 +181,9 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
 		if (data1 != 0)
 			event = IDH_RAS_POISON_READY;
 		break;
+	case IDH_REQ_RAS_ERROR_COUNT:
+		event = IDH_RAS_ERROR_COUNT_READY;
+		break;
 	default:
 		break;
 	}
@@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
 	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
 }
 
+static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
+{
+	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
+}
+
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
 	.req_full_gpu = xgpu_nv_request_full_gpu_access,
 	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
 	.trans_msg = xgpu_nv_mailbox_trans_msg,
 	.ras_poison_handler = xgpu_nv_ras_poison_handler,
 	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
+	.req_ras_err_count = xgpu_nv_req_ras_err_count,
 };
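
Note: the new .req_ras_err_count entry is only the vf-side hook; the diff does not show the caller. As a rough sketch of how the generic virtualization layer could dispatch to it through adev->virt.ops (the wrapper name amdgpu_virt_req_ras_err_count and the -EOPNOTSUPP fallback are assumptions for illustration, not part of this patch):

/* Hypothetical caller sketch, assuming the normal amdgpu build context. */
static int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	/* Only dispatch when the per-ASIC virt backend provides the hook. */
	if (virt->ops && virt->ops->req_ras_err_count)
		return virt->ops->req_ras_err_count(adev);

	return -EOPNOTSUPP;
}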