@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                       (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                       (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                       BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
-                      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+                      FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
                 wr32(hw, reg_idx, reg);
         }
 
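Aside (not part of the patch): FIELD_PREP() from <linux/bitfield.h> derives the shift from the mask at build time, so the ITR index lands in the same register bits as the old open-coded shift. Below is a minimal user-space sketch of the same semantics; the FIELD_PREP_DEMO helper and the mask layout are illustrative assumptions, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift the value into
 * position based on the mask's lowest set bit, then clamp it to the mask.
 * The real macro additionally type-checks the mask at compile time.
 */
#define FIELD_PREP_DEMO(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (uint32_t)(mask))

int main(void)
{
        /* Hypothetical 2-bit ITR index field at bits 12:11, standing in for
         * I40E_QINT_RQCTL_ITR_INDX_MASK.
         */
        const uint32_t itr_mask = 0x3u << 11;
        uint32_t reg = FIELD_PREP_DEMO(itr_mask, 2);

        printf("reg = 0x%08x\n", reg);  /* prints 0x00001000 */
        return 0;
}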
@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 
         /* only set the required fields */
         tx_ctx.base = info->dma_ring_addr / 128;
+
+        /* ring_len has to be multiple of 8 */
+        if (!IS_ALIGNED(info->ring_len, 8) ||
+            info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+                ret = -EINVAL;
+                goto error_context;
+        }
         tx_ctx.qlen = info->ring_len;
         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
         tx_ctx.rdylist_act = 0;
@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 
         /* only set the required fields */
         rx_ctx.base = info->dma_ring_addr / 128;
+
+        /* ring_len has to be multiple of 32 */
+        if (!IS_ALIGNED(info->ring_len, 32) ||
+            info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+                ret = -EINVAL;
+                goto error_param;
+        }
         rx_ctx.qlen = info->ring_len;
 
         if (info->splithdr_enabled) {
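Aside (not part of the patch): both new checks follow the same pattern; the VF-supplied descriptor count must be a multiple of the ring granularity (8 for Tx, 32 for Rx) and must not exceed the device maximum. A standalone sketch of that validation follows; DEMO_MAX_DESCRIPTORS and the demo_* helpers are assumptions standing in for I40E_MAX_NUM_DESCRIPTORS_XL710 and IS_ALIGNED().

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative cap; the driver compares against I40E_MAX_NUM_DESCRIPTORS_XL710. */
#define DEMO_MAX_DESCRIPTORS 8160

/* Same idea as IS_ALIGNED(): true when len is a multiple of align,
 * where align is a power of two (8 and 32 both are).
 */
static bool demo_is_aligned(uint32_t len, uint32_t align)
{
        return (len & (align - 1)) == 0;
}

/* Mirrors the shape of the new driver checks: reject ring lengths that are
 * not a multiple of the ring granularity or that exceed the device maximum.
 */
static bool demo_ring_len_valid(uint32_t ring_len, uint32_t align)
{
        return demo_is_aligned(ring_len, align) &&
               ring_len <= DEMO_MAX_DESCRIPTORS;
}

int main(void)
{
        assert(demo_ring_len_valid(512, 8));     /* Tx: multiple of 8, in range */
        assert(!demo_ring_len_valid(513, 8));    /* Tx: not a multiple of 8 */
        assert(!demo_ring_len_valid(4100, 32));  /* Rx: not a multiple of 32 */
        assert(!demo_ring_len_valid(16384, 32)); /* over the demo maximum */
        return 0;
}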
@@ -1450,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
          * functions that may still be running at this point.
          */
         clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+        clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
 
         /* In the case of a VFLR, the HW has already reset the VF and we
          * just need to clean up, so don't hit the VFRTRIG register.
@@ -2116,7 +2131,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
         size_t len = 0;
         int ret;
 
-        if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+        i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+            test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
                 aq_ret = -EINVAL;
                 goto err;
         }
@@ -2219,6 +2237,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                                 vf->default_lan_addr.addr);
         }
         set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+        set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
 
 err:
         /* send the response back to the VF */
@@ -2381,7 +2400,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
                 }
 
                 if (vf->adq_enabled) {
-                        if (idx >= ARRAY_SIZE(vf->ch)) {
+                        if (idx >= vf->num_tc) {
                                 aq_ret = -ENODEV;
                                 goto error_param;
                         }
@@ -2402,7 +2421,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
          * to its appropriate VSIs based on TC mapping
          */
         if (vf->adq_enabled) {
-                if (idx >= ARRAY_SIZE(vf->ch)) {
+                if (idx >= vf->num_tc) {
                         aq_ret = -ENODEV;
                         goto error_param;
                 }
@@ -2452,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
         u16 vsi_queue_id, queue_id;
 
         for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
-                if (vf->adq_enabled) {
-                        vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+                u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+                if (vf->adq_enabled && idx < vf->num_tc) {
+                        vsi_id = vf->ch[idx].vsi_id;
                         queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
                 } else {
                         queue_id = vsi_queue_id;
@@ -2841,24 +2862,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
                                       (u8 *)&stats, sizeof(stats));
 }
 
-/**
- * i40e_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
-{
-        /* If the VF MAC address has been set administratively (via the
-         * ndo_set_vf_mac command), then deny permission to the VF to
-         * add/delete unicast MAC addresses, unless the VF is trusted
-         */
-        if (vf->pf_set_mac && !vf->trusted)
-                return false;
-
-        return true;
-}
-
 #define I40E_MAX_MACVLAN_PER_HW 3072
 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
         (num_ports))
@@ -2897,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
         struct i40e_pf *pf = vf->pf;
         struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
         struct i40e_hw *hw = &pf->hw;
-        int mac2add_cnt = 0;
-        int i;
+        int i, mac_add_max, mac_add_cnt = 0;
+        bool vf_trusted;
+
+        vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
         for (i = 0; i < al->num_elements; i++) {
                 struct i40e_mac_filter *f;
@@ -2918,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                  * The VF may request to set the MAC address filter already
                  * assigned to it so do not return an error in that case.
                  */
-                if (!i40e_can_vf_change_mac(vf) &&
-                    !is_multicast_ether_addr(addr) &&
-                    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+                if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+                    vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
                         dev_err(&pf->pdev->dev,
                                 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
                         return -EPERM;
@@ -2929,29 +2933,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
                 /*count filters that really will be added*/
                 f = i40e_find_mac(vsi, addr);
                 if (!f)
-                        ++mac2add_cnt;
+                        ++mac_add_cnt;
         }
 
         /* If this VF is not privileged, then we can't add more than a limited
-         * number of addresses. Check to make sure that the additions do not
-         * push us over the limit.
-         */
-        if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
-                if ((i40e_count_filters(vsi) + mac2add_cnt) >
-                    I40E_VC_MAX_MAC_ADDR_PER_VF) {
-                        dev_err(&pf->pdev->dev,
-                                "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
-                        return -EPERM;
-                }
-        /* If this VF is trusted, it can use more resources than untrusted.
+         * number of addresses.
+         *
+         * If this VF is trusted, it can use more resources than untrusted.
          * However to ensure that every trusted VF has appropriate number of
          * resources, divide whole pool of resources per port and then across
          * all VFs.
          */
-        } else {
-                if ((i40e_count_filters(vsi) + mac2add_cnt) >
-                    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
-                                                       hw->num_ports)) {
+        if (!vf_trusted)
+                mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+        else
+                mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+        /* VF can replace all its filters in one step, in this case mac_add_max
+         * will be added as active and another mac_add_max will be in
+         * a to-be-removed state. Account for that.
+         */
+        if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+            (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+                if (!vf_trusted) {
+                        dev_err(&pf->pdev->dev,
+                                "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+                        return -EPERM;
+                } else {
                         dev_err(&pf->pdev->dev,
                                 "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
                         return -EPERM;
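Aside (not part of the patch): the rewritten limit check sizes one budget (per-VF cap for untrusted VFs, a per-port share for trusted ones) and applies it twice, once against the active filters and once against all filters including those queued for removal, so a VF that replaces its whole filter set in one request still fits. A standalone sketch of that accounting follows; the demo_* types and counters are assumptions standing in for the driver's VSI filter hash and the i40e_count_active_filters()/i40e_count_all_filters() helpers.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical per-VSI filter bookkeeping; the driver walks a hash of
 * struct i40e_mac_filter entries instead.
 */
struct demo_vsi {
        int active_filters;     /* filters currently programmed */
        int pending_removal;    /* filters marked for removal, not yet freed */
};

static int demo_count_active_filters(const struct demo_vsi *vsi)
{
        return vsi->active_filters;
}

static int demo_count_all_filters(const struct demo_vsi *vsi)
{
        return vsi->active_filters + vsi->pending_removal;
}

/* Mirrors the shape of the new permission check: the additions must fit the
 * active-filter budget, and the total (including to-be-removed entries) may
 * use at most twice the budget, allowing a full replace in one step.
 */
static bool demo_mac_add_allowed(const struct demo_vsi *vsi,
                                 int mac_add_cnt, int mac_add_max)
{
        return (demo_count_active_filters(vsi) + mac_add_cnt) <= mac_add_max &&
               (demo_count_all_filters(vsi) + mac_add_cnt) <= 2 * mac_add_max;
}

int main(void)
{
        /* Full replace: 16 old filters pending removal, 16 new ones requested. */
        struct demo_vsi vsi = { .active_filters = 0, .pending_removal = 16 };

        assert(demo_mac_add_allowed(&vsi, 16, 16));     /* allowed */
        assert(!demo_mac_add_allowed(&vsi, 17, 16));    /* over the budget */
        return 0;
}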
@@ -3587,7 +3595,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
 
         /* action_meta is TC number here to which the filter is applied */
         if (!tc_filter->action_meta ||
-            tc_filter->action_meta > vf->num_tc) {
+            tc_filter->action_meta >= vf->num_tc) {
                 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
                          vf->vf_id, tc_filter->action_meta);
                 goto err;
@@ -3884,6 +3892,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
                                        aq_ret);
 }
 
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
 /**
  * i40e_vc_add_cloud_filter
  * @vf: pointer to the VF info
@@ -3923,6 +3933,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
                 goto err_out;
         }
 
+        if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+                dev_warn(&pf->pdev->dev,
+                         "VF %d: Max number of filters reached, can't apply cloud filter\n",
+                         vf->vf_id);
+                aq_ret = -ENOSPC;
+                goto err_out;
+        }
+
         cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
         if (!cfilter) {
                 aq_ret = -ENOMEM;