
Commit abd5ac1

Merge tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-11-09

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: TC, Fix slab-out-of-bounds in parse_tc_actions
  net/mlx5e: E-Switch, Fix comparing termination table instance
  net/mlx5e: TC, Fix wrong rejection of packet-per-second policing
  net/mlx5e: Fix tc acts array not to be dependent on enum order
  net/mlx5e: Fix usage of DMA sync API
  net/mlx5e: Add missing sanity checks for max TX WQE size
  net/mlx5: fw_reset: Don't try to load device in case PCI isn't working
  net/mlx5: E-switch, Set to legacy mode if failed to change switchdev mode
  net/mlx5: Allow async trigger completion execution on single CPU systems
  net/mlx5: Bridge, verify LAG state when adding bond to bridge
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

2 parents: b3bbeba + 7f1a6d4; commit abd5ac1

File tree

13 files changed: +149 −115 lines

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

Lines changed: 8 additions & 3 deletions
@@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		while (down_trylock(&cmd->sem))
+	for (i = 0; i < cmd->max_reg_cmds; i++) {
+		while (down_trylock(&cmd->sem)) {
 			mlx5_cmd_trigger_completions(dev);
+			cond_resched();
+		}
+	}
 
-	while (down_trylock(&cmd->pages_sem))
+	while (down_trylock(&cmd->pages_sem)) {
 		mlx5_cmd_trigger_completions(dev);
+		cond_resched();
+	}
 
 	/* Unlock cmdif */
 	up(&cmd->pages_sem);
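The two cond_resched() calls are the heart of the "Allow async trigger completion execution on single CPU systems" fix: the flush loop spins on down_trylock() waiting for completion work that, on a single CPU, can only run once the spinning thread yields. A minimal userspace analogy of that drain-and-yield pattern, using POSIX semaphores (the scenario and names are illustrative, not taken from the driver):

#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sem;

/* Models the completion work that releases the semaphore. */
static void *completion_work(void *arg)
{
	(void)arg;
	sem_post(&sem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&sem, 0, 0);
	pthread_create(&t, NULL, completion_work, NULL);

	/* Drain loop: spinning on sem_trywait() without yielding can starve
	 * completion_work when there is only one CPU; sched_yield() plays
	 * the role cond_resched() plays in the hunk above. */
	while (sem_trywait(&sem) != 0)
		sched_yield();

	pthread_join(t, NULL);
	puts("drained");
	return 0;
}

Build with: gcc -pthread example.c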

drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c

Lines changed: 31 additions & 0 deletions
@@ -164,13 +164,44 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	return err;
 }
 
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *upper = info->upper_dev;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+		return 0;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		struct mlx5_core_dev *mdev;
+		struct mlx5e_priv *priv;
+
+		if (!mlx5e_eswitch_rep(lower))
+			continue;
+
+		priv = netdev_priv(lower);
+		mdev = priv->mdev;
+		if (!mlx5_lag_is_active(mdev))
+			return -EAGAIN;
+		if (!mlx5_lag_is_shared_fdb(mdev))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 						unsigned long event, void *ptr)
 {
 	int err = 0;
 
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
+		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
 		break;
 
 	case NETDEV_CHANGEUPPER:
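The PRECHANGEUPPER case now vetoes enslaving a LAG device to a bridge unless every mlx5 representor under it has an active LAG with a shared FDB, returning -EAGAIN (LAG not formed yet) or -EOPNOTSUPP (no shared FDB). A stand-alone model of that verdict logic, with invented struct fields standing in for the netdev and mlx5 queries:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct lower_dev {
	bool is_rep;		/* stands in for mlx5e_eswitch_rep() */
	bool lag_active;	/* stands in for mlx5_lag_is_active() */
	bool shared_fdb;	/* stands in for mlx5_lag_is_shared_fdb() */
};

static int validate_lag_lowers(const struct lower_dev *lowers, int n)
{
	for (int i = 0; i < n; i++) {
		if (!lowers[i].is_rep)
			continue;		/* only eswitch representors matter */
		if (!lowers[i].lag_active)
			return -EAGAIN;		/* LAG not formed yet, retry */
		if (!lowers[i].shared_fdb)
			return -EOPNOTSUPP;	/* offload needs a shared FDB */
	}
	return 0;
}

int main(void)
{
	const struct lower_dev lowers[] = {
		{ .is_rep = true, .lag_active = true, .shared_fdb = true },
		{ .is_rep = false },	/* non-rep lowers are skipped */
	};

	printf("verdict: %d\n", validate_lag_lowers(lowers, 2));
	return 0;
}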

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c

Lines changed: 32 additions & 60 deletions
@@ -6,70 +6,42 @@
 #include "en/tc_priv.h"
 #include "mlx5_core.h"
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	&mlx5e_tc_act_trap,
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_redirect_ingress,
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan_mangle,
-	&mlx5e_tc_act_tun_encap,
-	&mlx5e_tc_act_tun_decap,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	NULL, /* FLOW_ACTION_MARK, */
-	&mlx5e_tc_act_ptype,
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	&mlx5e_tc_act_sample,
-	&mlx5e_tc_act_police,
-	&mlx5e_tc_act_ct,
-	NULL, /* FLOW_ACTION_CT_METADATA, */
-	&mlx5e_tc_act_mpls_push,
-	&mlx5e_tc_act_mpls_pop,
-	NULL, /* FLOW_ACTION_MPLS_MANGLE, */
-	NULL, /* FLOW_ACTION_GATE, */
-	NULL, /* FLOW_ACTION_PPPOE_PUSH, */
-	NULL, /* FLOW_ACTION_JUMP, */
-	NULL, /* FLOW_ACTION_PIPE, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+	[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+	[FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+	[FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+	[FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+	[FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+	[FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+	[FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+	[FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
 };
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	NULL, /* FLOW_ACTION_TRAP, */
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred_nic,
-	NULL, /* FLOW_ACTION_MIRRED, */
-	NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	NULL, /* FLOW_ACTION_VLAN_PUSH, */
-	NULL, /* FLOW_ACTION_VLAN_POP, */
-	NULL, /* FLOW_ACTION_VLAN_MANGLE, */
-	NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
-	NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	&mlx5e_tc_act_mark,
-	NULL, /* FLOW_ACTION_PTYPE, */
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	NULL, /* FLOW_ACTION_SAMPLE, */
-	NULL, /* FLOW_ACTION_POLICE, */
-	&mlx5e_tc_act_ct,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
 };
 
 /**
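The rewrite replaces positional initialization, which silently breaks whenever enum flow_action_id gains or reorders values, with C99 designated initializers that bind each handler to its enum constant and leave unsupported actions NULL. A minimal stand-alone illustration of the idiom, with a toy enum and handler table:

#include <stdio.h>

enum flow_action { ACT_ACCEPT, ACT_DROP, ACT_TRAP, NUM_ACTIONS };

/* Each slot is tied to its enum constant, so inserting a new action
 * before ACT_TRAP can no longer shift the whole mapping. */
static const char *handlers[NUM_ACTIONS] = {
	[ACT_ACCEPT] = "accept",
	[ACT_DROP]   = "drop",
	/* ACT_TRAP intentionally omitted: it stays NULL (unsupported). */
};

int main(void)
{
	for (int i = 0; i < NUM_ACTIONS; i++)
		printf("%d -> %s\n", i, handlers[i] ? handlers[i] : "(unsupported)");
	return 0;
}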

drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h

Lines changed: 23 additions & 1 deletion
@@ -11,6 +11,27 @@
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+					    MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+					 MLX5E_MAX_TX_INLINE_DS + \
+					 MLX5E_MAX_TX_IPSEC_DS + \
+					 MAX_SKB_FRAGS + 1, \
+					 MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
 	/* A WQE must not cross the page boundary, hence two conditions:
 	 * 1. Its size must not exceed the page size.
 	 * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
 		  "wqe_size %u is greater than max SQ WQEBBs %u",
 		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
 	return MLX5E_STOP_ROOM(wqe_size);
 }
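To see roughly what MLX5E_MAX_TX_WQEBBS works out to, the arithmetic can be replayed with illustrative constants (all of the sizes below are assumptions made for this sketch, not values taken from the commit): a 16-byte data segment (DS), 4 DS per 64-byte basic block (WQEBB), a 4-byte inline-segment header, a 2-byte inline_hdr.start, and MAX_SKB_FRAGS = 17:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int wqe_ds = 16;          /* bytes per data segment (assumed) */
	const int wqebb_num_ds = 4;     /* 64-byte WQEBB / 16-byte DS (assumed) */
	const int inl_seg_hdr = 4;      /* inline segment header (assumed) */
	const int inl_hdr_start_sz = 2; /* inline_hdr.start (assumed) */
	const int vlan_hlen = 4;
	const int max_skb_frags = 17;   /* common kernel config (assumed) */
	const int empty_ds_count = 2;   /* ctrl + eth segments (assumed) */

	/* ESP trailer (255 + 1 + 1) plus a 16-byte ICV, per the comment above */
	int ipsec_ds = DIV_ROUND_UP(inl_seg_hdr + 255 + 1 + 1 + 16, wqe_ds);
	int inline_ds = DIV_ROUND_UP(366 - inl_hdr_start_sz + vlan_hlen, wqe_ds);
	int max_wqebbs = DIV_ROUND_UP(empty_ds_count + inline_ds + ipsec_ds +
				      max_skb_frags + 1, wqebb_num_ds);

	/* With these assumptions: ipsec_ds=18 inline_ds=23 max_wqebbs=16 */
	printf("ipsec_ds=%d inline_ds=%d max_wqebbs=%d\n",
	       ipsec_ds, inline_ds, max_wqebbs);
	return 0;
}

The en_main.c hunk further down then refuses to attach the netdev when the firmware's max SQ WQEBBs capability is smaller than this worst case.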

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

Lines changed: 2 additions & 2 deletions
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
 
 	if (unlikely(xdp_frame_has_frags(xdpf))) {
 		sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 			      skb_frag_off(frag);
 			len = skb_frag_size(frag);
 			dma_sync_single_for_device(sq->pdev, addr, len,
-						   DMA_TO_DEVICE);
+						   DMA_BIDIRECTIONAL);
 		}
 	}

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 7 additions & 0 deletions
@@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	mlx5e_fs_set_state_destroy(priv->fs,
 				   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 
+	/* Validate the max_wqe_size_sq capability. */
+	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+			       mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+		return -EIO;
+	}
+
 	/* max number of channels may have changed */
 	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
 	if (priv->channels.params.num_channels > max_nch) {

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 14 additions & 13 deletions
@@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni
 
 	addr = page_pool_get_dma_addr(au->page);
 	/* Non-XSK always uses PAGE_SIZE. */
-	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
 	return true;
 }
 
@@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u
 		return -ENOMEM;
 
 	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
-				  rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
 		page_pool_recycle_direct(rq->page_pool, au->page);
 		au->page = NULL;
@@ -427,22 +426,24 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 {
 	dma_addr_t addr = page_pool_get_dma_addr(au->page);
 
-	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+				rq->buff.map_dir);
 	page_ref_inc(au->page);
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 			au->page, frag_offset, len, truesize);
 }
 
 static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
 		      struct page *page, dma_addr_t addr,
 		      int offset_from, int dma_offset, u32 headlen)
 {
 	const void *from = page_address(page) + offset_from;
 	/* Aligning len to sizeof(long) optimizes memcpy performance */
 	unsigned int len = ALIGN(headlen, sizeof(long));
 
-	dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+				rq->buff.map_dir);
 	skb_copy_to_linear_data(skb, from, len);
 }
 
@@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-				      frag_size, DMA_FROM_DEVICE);
+				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);
 
 	prog = rcu_dereference(rq->xdp_prog);
@@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
+				      rq->buff.frame0_sz, rq->buff.map_dir);
 	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);
 
@@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 		addr = page_pool_get_dma_addr(au->page);
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
-					frag_consumed_bytes, DMA_FROM_DEVICE);
+					frag_consumed_bytes, rq->buff.map_dir);
 
 		if (!xdp_buff_has_frags(&xdp)) {
 			/* Init on the first fragment to avoid cold cache access
@@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
 	/* copy header */
 	addr = page_pool_get_dma_addr(head_au->page);
-	mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+	mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
 			      head_offset, head_offset, headlen);
 	/* skb linear part was allocated with headlen and aligned to long */
 	skb->tail += headlen;
@@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
-				      frag_size, DMA_FROM_DEVICE);
+				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);
 
 	prog = rcu_dereference(rq->xdp_prog);
@@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
 	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
 		/* build SKB around header */
-		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
 		prefetchw(hdr);
 		prefetch(data);
 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
@@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	}
 
 	prefetchw(skb->data);
-	mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
+	mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
 			      head_offset + rx_headroom,
 			      rx_headroom, head_size);
 	/* skb linear part was allocated with headlen and aligned to long */
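This file and the xdp.c hunk apply the same DMA API rule: a sync must use the direction the buffer was originally mapped with, and with XDP enabled mlx5e maps RX pages bidirectionally because the device may later transmit from them. Storing the mapping direction next to the buffer and reusing it at every sync, as rq->buff.map_dir does here, keeps the two from drifting apart. A tiny stand-alone sketch of that bookkeeping pattern (invented types; the real sync calls exist only in the kernel):

#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

struct rx_buff {
	unsigned long dma_addr;
	enum dma_dir map_dir;	/* recorded once, at map time */
};

static void sync_for_cpu(const struct rx_buff *b)
{
	/* Reuse the stored direction instead of hard-coding
	 * DMA_FROM_DEVICE, mirroring the hunks above. */
	printf("sync_for_cpu addr=0x%lx dir=%d\n", b->dma_addr, (int)b->map_dir);
}

int main(void)
{
	/* With an XDP program attached, RX pages are mapped bidirectionally. */
	struct rx_buff b = { .dma_addr = 0x1000, .map_dir = DMA_BIDIRECTIONAL };

	sync_for_cpu(&b);
	return 0;
}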

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

Lines changed: 6 additions & 8 deletions
@@ -3633,10 +3633,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
 	attr2->action = 0;
 	attr2->flags = 0;
 	attr2->parse_attr = parse_attr;
-	attr2->esw_attr->out_count = 0;
-	attr2->esw_attr->split_count = 0;
 	attr2->dest_chain = 0;
 	attr2->dest_ft = NULL;
+
+	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+		attr2->esw_attr->out_count = 0;
+		attr2->esw_attr->split_count = 0;
+	}
+
 	return attr2;
 }
 
@@ -4758,12 +4762,6 @@ int mlx5e_policer_validate(const struct flow_action *action,
 		return -EOPNOTSUPP;
 	}
 
-	if (act->police.rate_pkt_ps) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "QoS offload not support packets per second");
-		return -EOPNOTSUPP;
-	}
-
 	return 0;
 }
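The first hunk is the slab-out-of-bounds fix from the cover letter: the cloned attr is allocated with a namespace-dependent tail, so its esw-only fields exist only when the clone was made for the FDB namespace. The second hunk drops the packet-per-second rejection from mlx5e_policer_validate(), matching the "Fix wrong rejection of packet-per-second policing" entry. A stand-alone model of the out-of-bounds pattern and its guard (deliberately simplified, with invented types):

#include <stdio.h>
#include <stdlib.h>

enum ns_type { NS_NIC, NS_FDB };

struct esw_attr { int out_count; int split_count; };
struct flow_attr { enum ns_type ns; };	/* esw_attr tail exists only for NS_FDB */

static size_t attr_size(enum ns_type ns)
{
	return sizeof(struct flow_attr) +
	       (ns == NS_FDB ? sizeof(struct esw_attr) : 0);
}

static struct esw_attr *esw_tail(struct flow_attr *a)
{
	return (struct esw_attr *)(a + 1);
}

int main(void)
{
	struct flow_attr *a = calloc(1, attr_size(NS_NIC));

	a->ns = NS_NIC;

	/* The guard added by the fix: without it, the writes below would
	 * land past the NS_NIC-sized allocation. */
	if (a->ns == NS_FDB) {
		esw_tail(a)->out_count = 0;
		esw_tail(a)->split_count = 0;
	}

	puts("no out-of-bounds write");
	free(a);
	return 0;
}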
