@@ -587,12 +587,21 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	txbd1->tx_bd_hsize_lflags = lflags;
 	if (skb_is_gso(skb)) {
+		bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
 		u32 hdr_len;
 
-		if (skb->encapsulation)
-			hdr_len = skb_inner_tcp_all_headers(skb);
-		else
+		if (skb->encapsulation) {
+			if (udp_gso)
+				hdr_len = skb_inner_transport_offset(skb) +
+					  sizeof(struct udphdr);
+			else
+				hdr_len = skb_inner_tcp_all_headers(skb);
+		} else if (udp_gso) {
+			hdr_len = skb_transport_offset(skb) +
+				  sizeof(struct udphdr);
+		} else {
 			hdr_len = skb_tcp_all_headers(skb);
+		}
 
 		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
 					TX_BD_FLAGS_T_IPID |
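
Annotation (not part of the patch): the hunk above changes how the LSO header length is computed when the skb carries UDP segmentation (SKB_GSO_UDP_L4) — the headers end after the UDP header instead of spanning all TCP headers. A minimal standalone sketch of that arithmetic, with made-up offsets rather than real skb accessors:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the skb offset helpers; values are made up. */
struct fake_skb {
	bool encapsulation;
	unsigned int transport_offset;       /* start of outer L4 header */
	unsigned int inner_transport_offset; /* start of inner L4 header */
	unsigned int tcp_hdr_len;            /* TCP header incl. options */
};

#define FAKE_UDP_HDR_LEN 8	/* sizeof(struct udphdr) */

/* Mirrors the new hdr_len selection: UDP GSO ends at the UDP header,
 * TCP GSO covers every header up to and including the TCP header.
 */
static unsigned int gso_hdr_len(const struct fake_skb *skb, bool udp_gso)
{
	if (skb->encapsulation) {
		if (udp_gso)
			return skb->inner_transport_offset + FAKE_UDP_HDR_LEN;
		return skb->inner_transport_offset + skb->tcp_hdr_len;
	}
	if (udp_gso)
		return skb->transport_offset + FAKE_UDP_HDR_LEN;
	return skb->transport_offset + skb->tcp_hdr_len;
}

int main(void)
{
	struct fake_skb skb = {
		.encapsulation = false,
		.transport_offset = 34,	/* 14 (Ethernet) + 20 (IPv4) */
		.tcp_hdr_len = 20,
	};

	printf("TCP GSO hdr_len: %u\n", gso_hdr_len(&skb, false)); /* 54 */
	printf("UDP GSO hdr_len: %u\n", gso_hdr_len(&skb, true));  /* 42 */
	return 0;
}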
@@ -666,8 +675,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 tx_done:
 
 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
-		if (netdev_xmit_more() && !tx_buf->is_push)
+		if (netdev_xmit_more() && !tx_buf->is_push) {
+			txbd0->tx_bd_len_flags_type &=
+				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
 			bnxt_txr_db_kick(bp, txr, prod);
+		}
 
 		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
 				   bp->tx_wake_thresh);
@@ -781,7 +793,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 	int i;
 
 	bnxt_for_each_napi_tx(i, bnapi, txr) {
-		if (txr->tx_hw_cons != txr->tx_cons)
+		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
 			__bnxt_tx_int(bp, txr, budget);
 	}
 	bnapi->events &= ~BNXT_TX_CMP_EVENT;
@@ -2782,14 +2794,18 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		 */
 		dma_rmb();
 		cmp_type = TX_CMP_TYPE(txcmp);
-		if (cmp_type == CMP_TYPE_TX_L2_CMP) {
+		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
 			u32 opaque = txcmp->tx_cmp_opaque;
 			struct bnxt_tx_ring_info *txr;
 			u16 tx_freed;
 
 			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
 			event |= BNXT_TX_CMP_EVENT;
-			txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
+			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+			else
+				txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
 			tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
 				   bp->tx_ring_mask;
 			/* return full budget so NAPI will complete. */
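
Annotation (not part of the patch): with coalesced TX completions the hardware consumer index comes straight from the completion record (TX_CMP_SQ_CONS_IDX) instead of being derived from the opaque value, and the number of freed descriptors is still computed modulo the ring size. A standalone sketch of why masking with the ring mask handles 16-bit wrap-around (the ring size here is an assumed power of two, not a driver value):

#include <stdint.h>
#include <stdio.h>

/* Assumed ring size for illustration; real rings are sized elsewhere. */
#define TX_RING_SIZE 256u
#define TX_RING_MASK (TX_RING_SIZE - 1)

/* Number of descriptors completed between the driver's consumer index
 * and the hardware consumer index, correct even after the 16-bit
 * counters wrap past zero.
 */
static unsigned int tx_freed(uint16_t hw_cons, uint16_t sw_cons)
{
	return (uint16_t)(hw_cons - sw_cons) & TX_RING_MASK;
}

int main(void)
{
	/* No wrap: hardware is 5 ahead of software. */
	printf("%u\n", tx_freed(105, 100));	/* 5 */
	/* Wrap: software at 65534, hardware wrapped around to 3. */
	printf("%u\n", tx_freed(3, 65534));	/* 5 */
	return 0;
}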
@@ -5143,6 +5159,8 @@ int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
 	return hwrm_req_send(bp, req);
 }
 
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
+
 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 {
 	struct hwrm_tunnel_dst_port_free_input *req;
@@ -5172,6 +5190,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 		bp->nge_port = 0;
 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
 		break;
+	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
+		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
+		bp->vxlan_gpe_port = 0;
+		bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
+		break;
 	default:
 		break;
 	}
@@ -5180,6 +5203,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 	if (rc)
 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
 			   rc);
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, true);
 	return rc;
 }
 
@@ -5215,9 +5240,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
 		bp->nge_port = port;
 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
 		break;
+	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
+		bp->vxlan_gpe_port = port;
+		bp->vxlan_gpe_fw_dst_port_id =
+			le16_to_cpu(resp->tunnel_dst_port_id);
+		break;
 	default:
 		break;
 	}
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, true);
 
 err_out:
 	hwrm_req_drop(bp, req);
@@ -5410,6 +5442,30 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
 	return rc;
 }
 
+#define BNXT_DFLT_TUNL_TPA_BMAP				\
+	(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE |	\
+	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 |	\
+	 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
+					   struct hwrm_vnic_tpa_cfg_input *req)
+{
+	u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
+		return;
+
+	if (bp->vxlan_port)
+		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+	if (bp->vxlan_gpe_port)
+		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+	if (bp->nge_port)
+		tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+	req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
+}
+
 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
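
Annotation (not part of the patch): bnxt_hwrm_vnic_update_tunl_tpa() builds the tunnel-TPA enable bitmap from whatever UDP tunnel ports are currently configured, on top of a default of GRE/IPv4/IPv6. A standalone sketch of the same composition with placeholder bit values (the real constants come from the firmware interface header, not from this sketch):

#include <stdint.h>
#include <stdio.h>

/* Placeholder bits; the driver uses VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_* */
#define TPA_GRE        (1u << 0)
#define TPA_IPV4       (1u << 1)
#define TPA_IPV6       (1u << 2)
#define TPA_VXLAN      (1u << 3)
#define TPA_VXLAN_GPE  (1u << 4)
#define TPA_GENEVE     (1u << 5)

#define DFLT_TUNL_TPA_BMAP (TPA_GRE | TPA_IPV4 | TPA_IPV6)

/* Enable aggregation only for tunnel types whose UDP port is configured. */
static uint32_t build_tunl_tpa_bmap(uint16_t vxlan_port,
				    uint16_t vxlan_gpe_port,
				    uint16_t geneve_port)
{
	uint32_t bmap = DFLT_TUNL_TPA_BMAP;

	if (vxlan_port)
		bmap |= TPA_VXLAN;
	if (vxlan_gpe_port)
		bmap |= TPA_VXLAN_GPE;
	if (geneve_port)
		bmap |= TPA_GENEVE;
	return bmap;
}

int main(void)
{
	/* VXLAN and GENEVE configured, VXLAN-GPE not. */
	printf("0x%x\n", (unsigned)build_tunl_tpa_bmap(4789, 0, 6081));
	return 0;
}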
@@ -5466,6 +5522,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 		req->max_aggs = cpu_to_le16(max_aggs);
 
 		req->min_agg_len = cpu_to_le32(512);
+		bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
 	}
 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
 
@@ -5960,6 +6017,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 			else
 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
 		}
+		if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+			bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
 	}
 	hwrm_req_drop(bp, req);
 	return rc;
@@ -6065,6 +6124,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
 		req->queue_id = cpu_to_le16(ring->queue_id);
+		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+			req->cmpl_coal_cnt =
+				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
 		break;
 	}
 	case HWRM_RING_ALLOC_RX:
@@ -6489,6 +6551,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}
 }
 
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+			     bool shared);
 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 			   bool shared);
 
@@ -6532,8 +6596,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
 			rx >>= 1;
 		if (cp < (rx + tx)) {
-			rx = cp / 2;
-			tx = rx;
+			rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
+			if (rc)
+				return rc;
 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
 				rx <<= 1;
 			hw_resc->resv_rx_rings = rx;
@@ -7522,6 +7587,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
 		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
 		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+		ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
 		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
 		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
 					  (init_mask & (1 << init_idx++)) != 0);
@@ -7659,6 +7725,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->qpc_pg_size_qpc_lvl,
 				      &req->qpc_page_dir);
+
+		if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+			req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
 		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
@@ -7991,6 +8060,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	u32 num_mr, num_ah;
 	u32 extra_srqs = 0;
 	u32 extra_qps = 0;
+	u32 fast_qpmd_qps;
 	u8 pg_lvl = 1;
 	int i, rc;
 
@@ -8007,14 +8077,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
 	l2_qps = ctxm->qp_l2_entries;
 	qp1_qps = ctxm->qp_qp1_entries;
+	fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
 	max_qps = ctxm->max_entries;
 	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
 	srqs = ctxm->srq_l2_entries;
 	max_srqs = ctxm->max_entries;
+	ena = 0;
 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
 		pg_lvl = 2;
 		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+		/* allocate extra qps if fw supports RoCE fast qp destroy feature */
+		extra_qps += fast_qpmd_qps;
 		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+		if (fast_qpmd_qps)
+			ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
 	}
 
 	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
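
Annotation (not part of the patch): the RoCE backing-store sizing now folds the "fast QP modify/destroy" entries into the extra QP count and sets the corresponding enable bit only when the firmware reported a non-zero count. A standalone sketch of that arithmetic (the 65536 cap and the enable flag mirror the hunk; the names and values below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD bit. */
#define ENABLE_QP_FAST_QPMD (1u << 0)

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Extra QP contexts to back when RoCE is enabled: capped headroom above
 * the L2 and QP1 queues, plus the fast-QPMD entries the firmware asked for.
 */
static uint32_t extra_qps_needed(uint32_t max_qps, uint32_t l2_qps,
				 uint32_t qp1_qps, uint32_t fast_qpmd_qps,
				 uint32_t *ena)
{
	uint32_t extra = min_u32(65536, max_qps - l2_qps - qp1_qps);

	extra += fast_qpmd_qps;
	if (fast_qpmd_qps)
		*ena |= ENABLE_QP_FAST_QPMD;
	return extra;
}

int main(void)
{
	uint32_t ena = 0;
	uint32_t extra = extra_qps_needed(131072, 512, 2, 1024, &ena);

	printf("extra_qps=%u ena=0x%x\n", (unsigned)extra, (unsigned)ena);
	return 0;	/* prints extra_qps=66560 ena=0x1 */
}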
@@ -8044,7 +8120,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	ena = 0;
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
 		goto skip_rdma;
 
@@ -8061,7 +8136,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
 	if (rc)
 		return rc;
-	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
 
 	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
 	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
@@ -8273,10 +8348,14 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
 		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
+		bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
 
 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
+		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
 
 	bp->tx_push_thresh = 0;
 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -11977,9 +12056,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
 	struct udphdr *uh = udp_hdr(skb);
 	__be16 udp_port = uh->dest;
 
-	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
+	    udp_port != bp->vxlan_gpe_port)
 		return false;
-	if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+	if (skb->inner_protocol == htons(ETH_P_TEB)) {
 		struct ethhdr *eh = inner_eth_hdr(skb);
 
 		switch (eh->h_proto) {
@@ -11990,6 +12070,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
 						  skb_inner_network_offset(skb),
 						  NULL);
 		}
+	} else if (skb->inner_protocol == htons(ETH_P_IP)) {
+		return true;
+	} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
+		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+					 NULL);
 	}
 	return false;
 }
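
Annotation (not part of the patch): after these two hunks, bnxt_udp_tunl_check() accepts a tunneled GSO packet when its outer destination port matches a configured VXLAN, GENEVE, or VXLAN-GPE port, and then dispatches on the inner protocol: ETH_P_TEB means an inner Ethernet frame, while bare ETH_P_IP/ETH_P_IPV6 (as VXLAN-GPE can carry) are accepted directly. A simplified standalone sketch of that decision, without the inner-Ethernet and IPv6 extension-header walks the driver also performs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wire protocol numbers (illustrative subset of if_ether.h values). */
#define P_TEB   0x6558	/* ETH_P_TEB: inner Ethernet frame */
#define P_IP    0x0800	/* ETH_P_IP */
#define P_IPV6  0x86DD	/* ETH_P_IPV6 */

struct tunl_ports {
	uint16_t vxlan, geneve, vxlan_gpe;	/* 0 = not configured */
};

/* Roughly mirrors the updated check: a known destination port first,
 * then an inner protocol the hardware can parse.
 */
static bool udp_tunl_offload_ok(const struct tunl_ports *p,
				uint16_t dport, uint16_t inner_proto)
{
	if (dport != p->vxlan && dport != p->geneve && dport != p->vxlan_gpe)
		return false;
	switch (inner_proto) {
	case P_TEB:	/* driver then inspects the inner Ethernet type */
	case P_IP:
	case P_IPV6:	/* driver additionally validates extension headers */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	struct tunl_ports p = { .vxlan = 4789, .geneve = 6081, .vxlan_gpe = 4790 };

	printf("%d\n", udp_tunl_offload_ok(&p, 4790, P_IP));	/* 1 */
	printf("%d\n", udp_tunl_offload_ok(&p, 1234, P_TEB));	/* 0 */
	return 0;
}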
@@ -12721,14 +12806,14 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (tcs)
 		tx_sets = tcs;
 
-	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		rx_rings <<= 1;
-
 	_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
 
 	if (max_rx < rx_rings)
 		return -ENOMEM;
 
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx_rings <<= 1;
+
 	tx_rings_needed = tx * tx_sets + tx_xdp;
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;
@@ -13648,9 +13733,11 @@ static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int tabl
 	unsigned int cmd;
 
 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
-		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
+		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
 	else
-		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
 
 	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
 }
@@ -13663,8 +13750,10 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
 
 	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
-	else
+	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+	else
+		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
 
 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
 }
@@ -13678,6 +13767,16 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 	},
+}, bnxt_udp_tunnels_p7 = {
+	.set_port	= bnxt_udp_tunnel_set_port,
+	.unset_port	= bnxt_udp_tunnel_unset_port,
+	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+	},
 };
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -13885,9 +13984,12 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		*max_rx >>= 1;
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
-		if (*max_cp < (*max_rx + *max_tx)) {
-			*max_rx = *max_cp / 2;
-			*max_tx = *max_rx;
+		int rc;
+
+		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+		if (rc) {
+			*max_rx = 0;
+			*max_tx = 0;
 		}
 		/* On P5 chips, max_cp output param should be available NQs */
 		*max_cp = max_irq;
@@ -14260,6 +14362,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
 			   NETIF_F_RXCSUM | NETIF_F_GRO;
+	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+		dev->hw_features |= NETIF_F_GSO_UDP_L4;
 
 	if (BNXT_SUPPORTS_TPA(bp))
 		dev->hw_features |= NETIF_F_LRO;
@@ -14270,7 +14374,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
 				NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
 				NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
-	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+	if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+		dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+	if (bp->flags & BNXT_FLAG_CHIP_P7)
+		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
+	else
+		dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
 
 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
 				    NETIF_F_GSO_GRE_CSUM;