@@ -3014,8 +3014,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 		    ~(1ULL << (rel_vf_id % 64));
-		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-		    ~(1ULL << (rel_vf_id % 64));
+		p_vf->vf_mbx.b_pending_msg = false;
 	}
 
 	return rc;
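
Note: the b_pending_msg flag written above is not defined anywhere in this diff; it is presumably a new boolean member of struct qed_iov_vf_mbx (declared in qed_sriov.h), replacing the PF-wide pending_events bitmap. A minimal sketch of how that structure might look with the new member; fields other than those referenced in these hunks (req_virt, pending_req, first_tlv, b_pending_msg) are assumptions:

	struct qed_iov_vf_mbx {
		union vfpf_tlvs *req_virt;
		dma_addr_t req_phys;
		union pfvf_tlvs *reply_virt;
		dma_addr_t reply_phys;

		/* Address in VF where a pending message is located */
		dma_addr_t pending_req;

		/* Set when the VF posts a request (see qed_sriov_vfpf_msg below);
		 * cleared once the PF processes it, or during FLR cleanup as in
		 * the hunk above.
		 */
		bool b_pending_msg;

		u8 *offset;

		/* Saved VF request header */
		struct vfpf_first_tlv first_tlv;
	};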
@@ -3128,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	mbx = &p_vf->vf_mbx;
 
 	/* qed_iov_process_mbx_request */
-	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+	if (!mbx->b_pending_msg) {
+		DP_NOTICE(p_hwfn,
+			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
+			  p_vf->abs_vf_id);
+		return;
+	}
+	mbx->b_pending_msg = false;
 
 	mbx->first_tlv = mbx->req_virt->first_tlv;
 
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "VF[%02x]: Processing mailbox message [type %04x]\n",
+		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
 	/* check if tlv type is known */
 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
 	    !p_vf->b_malicious) {
@@ -3219,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 {
-	u64 add_bit = 1ULL << (vfid % 64);
+	int i;
 
-	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
 
-static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
-						    u64 *events)
-{
-	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+	qed_for_each_vf(p_hwfn, i) {
+		struct qed_vf_info *p_vf;
 
-	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
-	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+		if (p_vf->vf_mbx.b_pending_msg)
+			events[i / 64] |= 1ULL << (i % 64);
+	}
 }
 
 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
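
For reference, the rewritten helper packs one bit per relative VF id into an array of u64 words; the matching test on the consumer side is events[i / 64] & (1ULL << (i % 64)). A small self-contained sketch of that packing scheme follows; QED_VF_ARRAY_LENGTH is presumably 3, matching the three 0x%llx words printed in the last hunk, and the helper names below are illustrative only, not driver code:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define QED_VF_ARRAY_LENGTH 3	/* 3 * 64 = 192 possible VF ids */

	/* Set the bit for relative VF id 'vfid', as the new
	 * qed_iov_pf_get_pending_events() does for each pending mailbox.
	 */
	static void mark_vf_pending(uint64_t *events, unsigned int vfid)
	{
		events[vfid / 64] |= 1ULL << (vfid % 64);
	}

	/* Test whether VF 'vfid' is marked pending in the bitmap. */
	static bool vf_is_pending(const uint64_t *events, unsigned int vfid)
	{
		return events[vfid / 64] & (1ULL << (vfid % 64));
	}

	int main(void)
	{
		uint64_t events[QED_VF_ARRAY_LENGTH] = { 0 };

		mark_vf_pending(events, 5);	/* events[0], bit 5 */
		mark_vf_pending(events, 70);	/* events[1], bit 6 */

		printf("VF 5:  %d\n", vf_is_pending(events, 5));
		printf("VF 70: %d\n", vf_is_pending(events, 70));
		printf("VF 71: %d\n", vf_is_pending(events, 71));
		return 0;
	}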
@@ -3266,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
 	/* Mark the event and schedule the workqueue */
-	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+	p_vf->vf_mbx.b_pending_msg = true;
 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
 
 	return 0;
@@ -4030,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
 		return;
 	}
 
-	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+	qed_iov_pf_get_pending_events(hwfn, events);
 
 	DP_VERBOSE(hwfn, QED_MSG_IOV,
 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
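
The remainder of qed_handle_vf_msg() is outside this diff; presumably it still walks the bitmap returned above and dispatches each pending VF, roughly along these lines (a sketch of the unchanged consumer side, not text from this commit):

	/* For every active VF whose bit is set, process its mailbox request;
	 * the per-VF b_pending_msg flag is cleared inside
	 * qed_iov_process_mbx_req(), as shown in the second hunk.
	 */
	qed_for_each_vf(hwfn, i) {
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);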