Skip to content

Commit 9bab51b

Browse files
committed
Merge branch 'bnxt_en-update-for-net-next'
Michael Chan says: ==================== bnxt_en: Update for net-next The first 4 patches in the series fix issues in the net-next tree introduced in the last 4 weeks. The first 3 patches fix ring accounting and indexing logic. The 4th patch fixes a TX timeout when the TX ring is very small. The next 7 patches add new features on the P7 chips, including TX coalesced completions, VXLAN GPE and UDP GSO stateless offload, a new rx_filter_miss counter, and more QP backing store memory for RoCE. The last 2 patches are PTP improvements. ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 79ac113 + 056bce6 commit 9bab51b

File tree

6 files changed

+153
-47
lines changed

6 files changed

+153
-47
lines changed

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 132 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -587,12 +587,21 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
587587

588588
txbd1->tx_bd_hsize_lflags = lflags;
589589
if (skb_is_gso(skb)) {
590+
bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
590591
u32 hdr_len;
591592

592-
if (skb->encapsulation)
593-
hdr_len = skb_inner_tcp_all_headers(skb);
594-
else
593+
if (skb->encapsulation) {
594+
if (udp_gso)
595+
hdr_len = skb_inner_transport_offset(skb) +
596+
sizeof(struct udphdr);
597+
else
598+
hdr_len = skb_inner_tcp_all_headers(skb);
599+
} else if (udp_gso) {
600+
hdr_len = skb_transport_offset(skb) +
601+
sizeof(struct udphdr);
602+
} else {
595603
hdr_len = skb_tcp_all_headers(skb);
604+
}
596605

597606
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
598607
TX_BD_FLAGS_T_IPID |
@@ -666,8 +675,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
666675
tx_done:
667676

668677
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
669-
if (netdev_xmit_more() && !tx_buf->is_push)
678+
if (netdev_xmit_more() && !tx_buf->is_push) {
679+
txbd0->tx_bd_len_flags_type &=
680+
cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
670681
bnxt_txr_db_kick(bp, txr, prod);
682+
}
671683

672684
netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
673685
bp->tx_wake_thresh);
@@ -781,7 +793,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
781793
int i;
782794

783795
bnxt_for_each_napi_tx(i, bnapi, txr) {
784-
if (txr->tx_hw_cons != txr->tx_cons)
796+
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
785797
__bnxt_tx_int(bp, txr, budget);
786798
}
787799
bnapi->events &= ~BNXT_TX_CMP_EVENT;
@@ -2782,14 +2794,18 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
27822794
*/
27832795
dma_rmb();
27842796
cmp_type = TX_CMP_TYPE(txcmp);
2785-
if (cmp_type == CMP_TYPE_TX_L2_CMP) {
2797+
if (cmp_type == CMP_TYPE_TX_L2_CMP ||
2798+
cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
27862799
u32 opaque = txcmp->tx_cmp_opaque;
27872800
struct bnxt_tx_ring_info *txr;
27882801
u16 tx_freed;
27892802

27902803
txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
27912804
event |= BNXT_TX_CMP_EVENT;
2792-
txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
2805+
if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
2806+
txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
2807+
else
2808+
txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
27932809
tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
27942810
bp->tx_ring_mask;
27952811
/* return full budget so NAPI will complete. */
@@ -5143,6 +5159,8 @@ int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
51435159
return hwrm_req_send(bp, req);
51445160
}
51455161

5162+
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
5163+
51465164
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
51475165
{
51485166
struct hwrm_tunnel_dst_port_free_input *req;
@@ -5172,6 +5190,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
51725190
bp->nge_port = 0;
51735191
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
51745192
break;
5193+
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
5194+
req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
5195+
bp->vxlan_gpe_port = 0;
5196+
bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
5197+
break;
51755198
default:
51765199
break;
51775200
}
@@ -5180,6 +5203,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
51805203
if (rc)
51815204
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
51825205
rc);
5206+
if (bp->flags & BNXT_FLAG_TPA)
5207+
bnxt_set_tpa(bp, true);
51835208
return rc;
51845209
}
51855210

@@ -5215,9 +5240,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
52155240
bp->nge_port = port;
52165241
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
52175242
break;
5243+
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
5244+
bp->vxlan_gpe_port = port;
5245+
bp->vxlan_gpe_fw_dst_port_id =
5246+
le16_to_cpu(resp->tunnel_dst_port_id);
5247+
break;
52185248
default:
52195249
break;
52205250
}
5251+
if (bp->flags & BNXT_FLAG_TPA)
5252+
bnxt_set_tpa(bp, true);
52215253

52225254
err_out:
52235255
hwrm_req_drop(bp, req);
@@ -5410,6 +5442,30 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
54105442
return rc;
54115443
}
54125444

5445+
#define BNXT_DFLT_TUNL_TPA_BMAP \
5446+
(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
5447+
VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
5448+
VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
5449+
5450+
static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
5451+
struct hwrm_vnic_tpa_cfg_input *req)
5452+
{
5453+
u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
5454+
5455+
if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
5456+
return;
5457+
5458+
if (bp->vxlan_port)
5459+
tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
5460+
if (bp->vxlan_gpe_port)
5461+
tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
5462+
if (bp->nge_port)
5463+
tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
5464+
5465+
req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
5466+
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
5467+
}
5468+
54135469
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
54145470
{
54155471
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5466,6 +5522,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
54665522
req->max_aggs = cpu_to_le16(max_aggs);
54675523

54685524
req->min_agg_len = cpu_to_le32(512);
5525+
bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
54695526
}
54705527
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
54715528

@@ -5960,6 +6017,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
59606017
else
59616018
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
59626019
}
6020+
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
6021+
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
59636022
}
59646023
hwrm_req_drop(bp, req);
59656024
return rc;
@@ -6065,6 +6124,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
60656124
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
60666125
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
60676126
req->queue_id = cpu_to_le16(ring->queue_id);
6127+
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
6128+
req->cmpl_coal_cnt =
6129+
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
60686130
break;
60696131
}
60706132
case HWRM_RING_ALLOC_RX:
@@ -6489,6 +6551,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
64896551
}
64906552
}
64916553

6554+
static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6555+
bool shared);
64926556
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
64936557
bool shared);
64946558

@@ -6532,8 +6596,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
65326596
if (bp->flags & BNXT_FLAG_AGG_RINGS)
65336597
rx >>= 1;
65346598
if (cp < (rx + tx)) {
6535-
rx = cp / 2;
6536-
tx = rx;
6599+
rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
6600+
if (rc)
6601+
return rc;
65376602
if (bp->flags & BNXT_FLAG_AGG_RINGS)
65386603
rx <<= 1;
65396604
hw_resc->resv_rx_rings = rx;
@@ -7522,6 +7587,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
75227587
ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
75237588
ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
75247589
ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7590+
ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
75257591
ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
75267592
bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
75277593
(init_mask & (1 << init_idx++)) != 0);
@@ -7659,6 +7725,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
76597725
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
76607726
&req->qpc_pg_size_qpc_lvl,
76617727
&req->qpc_page_dir);
7728+
7729+
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
7730+
req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
76627731
}
76637732
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
76647733
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
@@ -7991,6 +8060,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
79918060
u32 num_mr, num_ah;
79928061
u32 extra_srqs = 0;
79938062
u32 extra_qps = 0;
8063+
u32 fast_qpmd_qps;
79948064
u8 pg_lvl = 1;
79958065
int i, rc;
79968066

@@ -8007,14 +8077,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
80078077
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
80088078
l2_qps = ctxm->qp_l2_entries;
80098079
qp1_qps = ctxm->qp_qp1_entries;
8080+
fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
80108081
max_qps = ctxm->max_entries;
80118082
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
80128083
srqs = ctxm->srq_l2_entries;
80138084
max_srqs = ctxm->max_entries;
8085+
ena = 0;
80148086
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
80158087
pg_lvl = 2;
80168088
extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
8089+
/* allocate extra qps if fw supports RoCE fast qp destroy feature */
8090+
extra_qps += fast_qpmd_qps;
80178091
extra_srqs = min_t(u32, 8192, max_srqs - srqs);
8092+
if (fast_qpmd_qps)
8093+
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
80188094
}
80198095

80208096
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
@@ -8044,7 +8120,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
80448120
if (rc)
80458121
return rc;
80468122

8047-
ena = 0;
80488123
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
80498124
goto skip_rdma;
80508125

@@ -8061,7 +8136,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
80618136
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
80628137
if (rc)
80638138
return rc;
8064-
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
8139+
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
80658140

80668141
ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
80678142
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
@@ -8273,10 +8348,14 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
82738348
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
82748349
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
82758350
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
8351+
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
8352+
bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
82768353

82778354
flags_ext2 = le32_to_cpu(resp->flags_ext2);
82788355
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
82798356
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
8357+
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
8358+
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
82808359

82818360
bp->tx_push_thresh = 0;
82828361
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -11977,9 +12056,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
1197712056
struct udphdr *uh = udp_hdr(skb);
1197812057
__be16 udp_port = uh->dest;
1197912058

11980-
if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
12059+
if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
12060+
udp_port != bp->vxlan_gpe_port)
1198112061
return false;
11982-
if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
12062+
if (skb->inner_protocol == htons(ETH_P_TEB)) {
1198312063
struct ethhdr *eh = inner_eth_hdr(skb);
1198412064

1198512065
switch (eh->h_proto) {
@@ -11990,6 +12070,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
1199012070
skb_inner_network_offset(skb),
1199112071
NULL);
1199212072
}
12073+
} else if (skb->inner_protocol == htons(ETH_P_IP)) {
12074+
return true;
12075+
} else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
12076+
return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
12077+
NULL);
1199312078
}
1199412079
return false;
1199512080
}
@@ -12721,14 +12806,14 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
1272112806
if (tcs)
1272212807
tx_sets = tcs;
1272312808

12724-
if (bp->flags & BNXT_FLAG_AGG_RINGS)
12725-
rx_rings <<= 1;
12726-
1272712809
_bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
1272812810

1272912811
if (max_rx < rx_rings)
1273012812
return -ENOMEM;
1273112813

12814+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
12815+
rx_rings <<= 1;
12816+
1273212817
tx_rings_needed = tx * tx_sets + tx_xdp;
1273312818
if (max_tx < tx_rings_needed)
1273412819
return -ENOMEM;
@@ -13648,9 +13733,11 @@ static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int tabl
1364813733
unsigned int cmd;
1364913734

1365013735
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13651-
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13736+
cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
13737+
else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
13738+
cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
1365213739
else
13653-
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13740+
cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
1365413741

1365513742
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
1365613743
}
@@ -13663,8 +13750,10 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
1366313750

1366413751
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
1366513752
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13666-
else
13753+
else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
1366713754
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13755+
else
13756+
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
1366813757

1366913758
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
1367013759
}
@@ -13678,6 +13767,16 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
1367813767
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1367913768
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
1368013769
},
13770+
}, bnxt_udp_tunnels_p7 = {
13771+
.set_port = bnxt_udp_tunnel_set_port,
13772+
.unset_port = bnxt_udp_tunnel_unset_port,
13773+
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13774+
UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13775+
.tables = {
13776+
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
13777+
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13778+
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
13779+
},
1368113780
};
1368213781

1368313782
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -13885,9 +13984,12 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
1388513984
if (bp->flags & BNXT_FLAG_AGG_RINGS)
1388613985
*max_rx >>= 1;
1388713986
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
13888-
if (*max_cp < (*max_rx + *max_tx)) {
13889-
*max_rx = *max_cp / 2;
13890-
*max_tx = *max_rx;
13987+
int rc;
13988+
13989+
rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13990+
if (rc) {
13991+
*max_rx = 0;
13992+
*max_tx = 0;
1389113993
}
1389213994
/* On P5 chips, max_cp output param should be available NQs */
1389313995
*max_cp = max_irq;
@@ -14260,6 +14362,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1426014362
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
1426114363
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
1426214364
NETIF_F_RXCSUM | NETIF_F_GRO;
14365+
if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
14366+
dev->hw_features |= NETIF_F_GSO_UDP_L4;
1426314367

1426414368
if (BNXT_SUPPORTS_TPA(bp))
1426514369
dev->hw_features |= NETIF_F_LRO;
@@ -14270,7 +14374,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1427014374
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
1427114375
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
1427214376
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
14273-
dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
14377+
if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
14378+
dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
14379+
if (bp->flags & BNXT_FLAG_CHIP_P7)
14380+
dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
14381+
else
14382+
dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
1427414383

1427514384
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
1427614385
NETIF_F_GSO_GRE_CSUM;

0 commit comments

Comments
 (0)