Skip to content

Commit 05b953a

Browse files
committed
Merge tag 'mlx5-updates-2023-02-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says: ==================== mlx5-updates-2023-02-15 1) From Gal, Tariq and Parav, Few cleanups for mlx5 driver. 2) From Vlad: Allow offloading of ct 'new' match based on [1] [1] https://lore.kernel.org/netdev/[email protected]/ * tag 'mlx5-updates-2023-02-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux: net/mlx5e: RX, Remove doubtful unlikely call net/mlx5e: Fix outdated TLS comment net/mlx5e: Remove unused function mlx5e_sq_xmit_simple net/mlx5e: Allow offloading of ct 'new' match net/mlx5e: Implement CT entry update net/mlx5: Simplify eq list traversal net/mlx5e: Remove redundant page argument in mlx5e_xdp_handle() net/mlx5e: Remove redundant page argument in mlx5e_xmit_xdp_buff() net/mlx5e: Switch to using napi_build_skb() ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 981f404 + 993fd9b commit 05b953a

File tree

9 files changed

+145
-49
lines changed

9 files changed

+145
-49
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c

Lines changed: 126 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
3636
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
3737
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
38+
#define MLX5_CT_STATE_NEW_BIT BIT(7)
3839

3940
#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
4041
#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
@@ -721,12 +722,14 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
721722
DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
722723
DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
723724
struct flow_action_entry *meta;
725+
enum ip_conntrack_info ctinfo;
724726
u16 ct_state = 0;
725727
int err;
726728

727729
meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
728730
if (!meta)
729731
return -EOPNOTSUPP;
732+
ctinfo = meta->ct_metadata.cookie & NFCT_INFOMASK;
730733

731734
err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
732735
&attr->ct_attr.ct_labels_id);
@@ -742,7 +745,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
742745
ct_state |= MLX5_CT_STATE_NAT_BIT;
743746
}
744747

745-
ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
748+
ct_state |= MLX5_CT_STATE_TRK_BIT;
749+
ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT;
746750
ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
747751
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
748752
ct_state,
@@ -871,6 +875,68 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
871875
return err;
872876
}
873877

878+
static int
879+
mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
880+
struct flow_rule *flow_rule,
881+
struct mlx5_ct_entry *entry,
882+
bool nat, u8 zone_restore_id)
883+
{
884+
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
885+
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
886+
struct mlx5e_mod_hdr_handle *mh;
887+
struct mlx5_ct_fs_rule *rule;
888+
struct mlx5_flow_spec *spec;
889+
int err;
890+
891+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
892+
if (!spec)
893+
return -ENOMEM;
894+
895+
old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
896+
if (!old_attr) {
897+
err = -ENOMEM;
898+
goto err_attr;
899+
}
900+
*old_attr = *attr;
901+
902+
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
903+
nat, mlx5_tc_ct_entry_has_nat(entry));
904+
if (err) {
905+
ct_dbg("Failed to create ct entry mod hdr");
906+
goto err_mod_hdr;
907+
}
908+
909+
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
910+
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
911+
912+
rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
913+
if (IS_ERR(rule)) {
914+
err = PTR_ERR(rule);
915+
ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
916+
goto err_rule;
917+
}
918+
919+
ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
920+
zone_rule->rule = rule;
921+
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
922+
zone_rule->mh = mh;
923+
924+
kfree(old_attr);
925+
kvfree(spec);
926+
ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
927+
928+
return 0;
929+
930+
err_rule:
931+
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
932+
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
933+
err_mod_hdr:
934+
kfree(old_attr);
935+
err_attr:
936+
kvfree(spec);
937+
return err;
938+
}
939+
874940
static bool
875941
mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
876942
{
@@ -1065,6 +1131,52 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
10651131
return err;
10661132
}
10671133

1134+
static int
1135+
mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
1136+
struct flow_rule *flow_rule,
1137+
struct mlx5_ct_entry *entry,
1138+
u8 zone_restore_id)
1139+
{
1140+
int err;
1141+
1142+
err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
1143+
zone_restore_id);
1144+
if (err)
1145+
return err;
1146+
1147+
err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
1148+
zone_restore_id);
1149+
if (err)
1150+
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
1151+
return err;
1152+
}
1153+
1154+
static int
1155+
mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
1156+
struct mlx5_ct_entry *entry, unsigned long cookie)
1157+
{
1158+
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1159+
int err;
1160+
1161+
err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
1162+
if (!err)
1163+
return 0;
1164+
1165+
/* If failed to update the entry, then look it up again under ht_lock
1166+
* protection and properly delete it.
1167+
*/
1168+
spin_lock_bh(&ct_priv->ht_lock);
1169+
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1170+
if (entry) {
1171+
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
1172+
spin_unlock_bh(&ct_priv->ht_lock);
1173+
mlx5_tc_ct_entry_put(entry);
1174+
} else {
1175+
spin_unlock_bh(&ct_priv->ht_lock);
1176+
}
1177+
return err;
1178+
}
1179+
10681180
static int
10691181
mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
10701182
struct flow_cls_offload *flow)
@@ -1073,23 +1185,27 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
10731185
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
10741186
struct flow_action_entry *meta_action;
10751187
unsigned long cookie = flow->cookie;
1076-
enum ip_conntrack_info ctinfo;
10771188
struct mlx5_ct_entry *entry;
10781189
int err;
10791190

10801191
meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
10811192
if (!meta_action)
10821193
return -EOPNOTSUPP;
1083-
ctinfo = meta_action->ct_metadata.cookie & NFCT_INFOMASK;
1084-
if (ctinfo == IP_CT_NEW)
1085-
return -EOPNOTSUPP;
10861194

10871195
spin_lock_bh(&ct_priv->ht_lock);
10881196
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
10891197
if (entry && refcount_inc_not_zero(&entry->refcnt)) {
1198+
if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
1199+
spin_unlock_bh(&ct_priv->ht_lock);
1200+
mlx5_tc_ct_entry_put(entry);
1201+
return -EEXIST;
1202+
}
1203+
entry->restore_cookie = meta_action->ct_metadata.cookie;
10901204
spin_unlock_bh(&ct_priv->ht_lock);
1205+
1206+
err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
10911207
mlx5_tc_ct_entry_put(entry);
1092-
return -EEXIST;
1208+
return err;
10931209
}
10941210
spin_unlock_bh(&ct_priv->ht_lock);
10951211

@@ -1327,7 +1443,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
13271443
struct mlx5_ct_attr *ct_attr,
13281444
struct netlink_ext_ack *extack)
13291445
{
1330-
bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
1446+
bool trk, est, untrk, unnew, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
13311447
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
13321448
struct flow_dissector_key_ct *mask, *key;
13331449
u32 ctstate = 0, ctstate_mask = 0;
@@ -1373,15 +1489,18 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
13731489
rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
13741490
inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
13751491
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
1492+
unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
13761493
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
13771494
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
13781495
unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
13791496
uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
13801497

13811498
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
1499+
ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0;
13821500
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
13831501
ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
13841502
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
1503+
ctstate_mask |= (unnew || new) ? MLX5_CT_STATE_NEW_BIT : 0;
13851504
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
13861505
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
13871506
ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
@@ -1399,12 +1518,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
13991518
return -EOPNOTSUPP;
14001519
}
14011520

1402-
if (new) {
1403-
NL_SET_ERR_MSG_MOD(extack,
1404-
"matching on ct_state +new isn't supported");
1405-
return -EOPNOTSUPP;
1406-
}
1407-
14081521
if (mask->ct_zone)
14091522
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
14101523
key->ct_zone, MLX5_CT_ZONE_MASK);

drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -320,7 +320,6 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
320320
}
321321
}
322322

323-
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
324323
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
325324

326325
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,9 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
5757

5858
static inline bool
5959
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
60-
struct page *page, struct xdp_buff *xdp)
60+
struct xdp_buff *xdp)
6161
{
62+
struct page *page = virt_to_page(xdp->data);
6263
struct skb_shared_info *sinfo = NULL;
6364
struct mlx5e_xmit_data xdptxd;
6465
struct mlx5e_xdp_info xdpi;
@@ -185,7 +186,7 @@ const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
185186
};
186187

187188
/* returns true if packet was consumed by xdp */
188-
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
189+
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
189190
struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
190191
{
191192
struct xdp_buff *xdp = &mxbuf->xdp;
@@ -197,7 +198,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
197198
case XDP_PASS:
198199
return false;
199200
case XDP_TX:
200-
if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
201+
if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
201202
goto xdp_abort;
202203
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
203204
return true;
@@ -209,7 +210,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
209210
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
210211
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
211212
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
212-
mlx5e_page_dma_unmap(rq, page);
213+
mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
213214
rq->stats->xdp_redirect++;
214215
return true;
215216
default:

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ struct mlx5e_xdp_buff {
5252

5353
struct mlx5e_xsk_param;
5454
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
55-
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
55+
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
5656
struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
5757
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
5858
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -289,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
289289
*/
290290

291291
prog = rcu_dereference(rq->xdp_prog);
292-
if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
292+
if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
293293
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
294294
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
295295
return NULL; /* page/packet was consumed by XDP */
@@ -323,7 +323,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
323323
net_prefetch(mxbuf->xdp.data);
324324

325325
prog = rcu_dereference(rq->xdp_prog);
326-
if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
326+
if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
327327
return NULL; /* page/packet was consumed by XDP */
328328

329329
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse

drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
124124
mlx5e_udp_gso_handle_tx_skb(skb);
125125

126126
#ifdef CONFIG_MLX5_EN_TLS
127-
/* May send SKBs and WQEs. */
127+
/* May send WQEs. */
128128
if (mlx5e_ktls_skb_offloaded(skb))
129129
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
130130
&state->tls)))

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1556,7 +1556,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
15561556
u32 frag_size, u16 headroom,
15571557
u32 cqe_bcnt, u32 metasize)
15581558
{
1559-
struct sk_buff *skb = build_skb(va, frag_size);
1559+
struct sk_buff *skb = napi_build_skb(va, frag_size);
15601560

15611561
if (unlikely(!skb)) {
15621562
rq->stats->buff_alloc_err++;
@@ -1610,7 +1610,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
16101610

16111611
net_prefetchw(va); /* xdp_frame data area */
16121612
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
1613-
if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
1613+
if (mlx5e_xdp_handle(rq, prog, &mxbuf))
16141614
return NULL; /* page/packet was consumed by XDP */
16151615

16161616
rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
@@ -1698,10 +1698,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
16981698
wi++;
16991699
}
17001700

1701-
au = head_wi->au;
1702-
17031701
prog = rcu_dereference(rq->xdp_prog);
1704-
if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
1702+
if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
17051703
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
17061704
int i;
17071705

@@ -1718,9 +1716,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
17181716
if (unlikely(!skb))
17191717
return NULL;
17201718

1721-
page_ref_inc(au->page);
1719+
page_ref_inc(head_wi->au->page);
17221720

1723-
if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
1721+
if (xdp_buff_has_frags(&mxbuf.xdp)) {
17241722
int i;
17251723

17261724
/* sinfo->nr_frags is reset by build_skb, calculate again. */
@@ -2013,7 +2011,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
20132011

20142012
net_prefetchw(va); /* xdp_frame data area */
20152013
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
2016-
if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
2014+
if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
20172015
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
20182016
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
20192017
return NULL; /* page/packet was consumed by XDP */

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -720,21 +720,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
720720
return NETDEV_TX_OK;
721721
}
722722

723-
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
724-
{
725-
struct mlx5e_tx_wqe_attr wqe_attr;
726-
struct mlx5e_tx_attr attr;
727-
struct mlx5e_tx_wqe *wqe;
728-
u16 pi;
729-
730-
mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
731-
mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
732-
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
733-
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
734-
mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
735-
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
736-
}
737-
738723
static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
739724
u32 *dma_fifo_cc)
740725
{

0 commit comments

Comments
 (0)