Skip to content

Commit 5b4c56a

Browse files
Shahar Shitrit authored and kuba-moo committed
net/mlx5: E-Switch, Fix peer miss rules to use peer eswitch
In the original design, it is assumed local and peer eswitches have the
same number of vfs.

However, in new firmware, local and peer eswitches can have different
number of vfs configured by mlxconfig.

In such configuration, it is incorrect to derive the number of vfs from
the local device's eswitch.

Fix this by updating the peer miss rules add and delete functions to use
the peer device's eswitch and vf count instead of the local device's
information, ensuring correct behavior regardless of vf configuration
differences.

Fixes: ac004b8 ("net/mlx5e: E-Switch, Add peer miss rules")
Signed-off-by: Shahar Shitrit <[email protected]>
Reviewed-by: Mark Bloch <[email protected]>
Signed-off-by: Tariq Toukan <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 3afa3ae commit 5b4c56a

File tree

1 file changed

+54
-54
lines changed

1 file changed

+54
-54
lines changed

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

Lines changed: 54 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
11821182
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
11831183
struct mlx5_core_dev *peer_dev)
11841184
{
1185+
struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
11851186
struct mlx5_flow_destination dest = {};
11861187
struct mlx5_flow_act flow_act = {0};
11871188
struct mlx5_flow_handle **flows;
1188-
/* total vports is the same for both e-switches */
1189-
int nvports = esw->total_vports;
11901189
struct mlx5_flow_handle *flow;
1190+
struct mlx5_vport *peer_vport;
11911191
struct mlx5_flow_spec *spec;
1192-
struct mlx5_vport *vport;
11931192
int err, pfindex;
11941193
unsigned long i;
11951194
void *misc;
11961195

1197-
if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
1196+
if (!MLX5_VPORT_MANAGER(peer_dev) &&
1197+
!mlx5_core_is_ecpf_esw_manager(peer_dev))
11981198
return 0;
11991199

12001200
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
12031203

12041204
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
12051205

1206-
flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
1206+
flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
12071207
if (!flows) {
12081208
err = -ENOMEM;
12091209
goto alloc_flows_err;
@@ -1213,59 +1213,60 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
12131213
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
12141214
misc_parameters);
12151215

1216-
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1217-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1218-
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1219-
spec, MLX5_VPORT_PF);
1216+
if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
1217+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
1218+
esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
1219+
MLX5_VPORT_PF);
12201220

12211221
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
12221222
spec, &flow_act, &dest, 1);
12231223
if (IS_ERR(flow)) {
12241224
err = PTR_ERR(flow);
12251225
goto add_pf_flow_err;
12261226
}
1227-
flows[vport->index] = flow;
1227+
flows[peer_vport->index] = flow;
12281228
}
12291229

1230-
if (mlx5_ecpf_vport_exists(esw->dev)) {
1231-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1230+
if (mlx5_ecpf_vport_exists(peer_dev)) {
1231+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
12321232
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
12331233
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
12341234
spec, &flow_act, &dest, 1);
12351235
if (IS_ERR(flow)) {
12361236
err = PTR_ERR(flow);
12371237
goto add_ecpf_flow_err;
12381238
}
1239-
flows[vport->index] = flow;
1239+
flows[peer_vport->index] = flow;
12401240
}
12411241

1242-
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1242+
mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
1243+
mlx5_core_max_vfs(peer_dev)) {
12431244
esw_set_peer_miss_rule_source_port(esw,
1244-
peer_dev->priv.eswitch,
1245-
spec, vport->vport);
1245+
peer_esw,
1246+
spec, peer_vport->vport);
12461247

12471248
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
12481249
spec, &flow_act, &dest, 1);
12491250
if (IS_ERR(flow)) {
12501251
err = PTR_ERR(flow);
12511252
goto add_vf_flow_err;
12521253
}
1253-
flows[vport->index] = flow;
1254+
flows[peer_vport->index] = flow;
12541255
}
12551256

1256-
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1257-
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1258-
if (i >= mlx5_core_max_ec_vfs(peer_dev))
1259-
break;
1260-
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
1261-
spec, vport->vport);
1257+
if (mlx5_core_ec_sriov_enabled(peer_dev)) {
1258+
mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
1259+
mlx5_core_max_ec_vfs(peer_dev)) {
1260+
esw_set_peer_miss_rule_source_port(esw, peer_esw,
1261+
spec,
1262+
peer_vport->vport);
12621263
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
12631264
spec, &flow_act, &dest, 1);
12641265
if (IS_ERR(flow)) {
12651266
err = PTR_ERR(flow);
12661267
goto add_ec_vf_flow_err;
12671268
}
1268-
flows[vport->index] = flow;
1269+
flows[peer_vport->index] = flow;
12691270
}
12701271
}
12711272

@@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
12821283
return 0;
12831284

12841285
add_ec_vf_flow_err:
1285-
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1286-
if (!flows[vport->index])
1286+
mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
1287+
mlx5_core_max_ec_vfs(peer_dev)) {
1288+
if (!flows[peer_vport->index])
12871289
continue;
1288-
mlx5_del_flow_rules(flows[vport->index]);
1290+
mlx5_del_flow_rules(flows[peer_vport->index]);
12891291
}
12901292
add_vf_flow_err:
1291-
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
1292-
if (!flows[vport->index])
1293+
mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
1294+
mlx5_core_max_vfs(peer_dev)) {
1295+
if (!flows[peer_vport->index])
12931296
continue;
1294-
mlx5_del_flow_rules(flows[vport->index]);
1297+
mlx5_del_flow_rules(flows[peer_vport->index]);
12951298
}
1296-
if (mlx5_ecpf_vport_exists(esw->dev)) {
1297-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1298-
mlx5_del_flow_rules(flows[vport->index]);
1299+
if (mlx5_ecpf_vport_exists(peer_dev)) {
1300+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
1301+
mlx5_del_flow_rules(flows[peer_vport->index]);
12991302
}
13001303
add_ecpf_flow_err:
1301-
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1302-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1303-
mlx5_del_flow_rules(flows[vport->index]);
1304+
if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
1305+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
1306+
mlx5_del_flow_rules(flows[peer_vport->index]);
13041307
}
13051308
add_pf_flow_err:
13061309
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1313,37 +1316,34 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
13131316
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
13141317
struct mlx5_core_dev *peer_dev)
13151318
{
1319+
struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
13161320
u16 peer_index = mlx5_get_dev_index(peer_dev);
13171321
struct mlx5_flow_handle **flows;
1318-
struct mlx5_vport *vport;
1322+
struct mlx5_vport *peer_vport;
13191323
unsigned long i;
13201324

13211325
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
13221326
if (!flows)
13231327
return;
13241328

1325-
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1326-
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
1327-
/* The flow for a particular vport could be NULL if the other ECPF
1328-
* has fewer or no VFs enabled
1329-
*/
1330-
if (!flows[vport->index])
1331-
continue;
1332-
mlx5_del_flow_rules(flows[vport->index]);
1333-
}
1329+
if (mlx5_core_ec_sriov_enabled(peer_dev)) {
1330+
mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
1331+
mlx5_core_max_ec_vfs(peer_dev))
1332+
mlx5_del_flow_rules(flows[peer_vport->index]);
13341333
}
13351334

1336-
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
1337-
mlx5_del_flow_rules(flows[vport->index]);
1335+
mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
1336+
mlx5_core_max_vfs(peer_dev))
1337+
mlx5_del_flow_rules(flows[peer_vport->index]);
13381338

1339-
if (mlx5_ecpf_vport_exists(esw->dev)) {
1340-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1341-
mlx5_del_flow_rules(flows[vport->index]);
1339+
if (mlx5_ecpf_vport_exists(peer_dev)) {
1340+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
1341+
mlx5_del_flow_rules(flows[peer_vport->index]);
13421342
}
13431343

1344-
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1345-
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1346-
mlx5_del_flow_rules(flows[vport->index]);
1344+
if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
1345+
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
1346+
mlx5_del_flow_rules(flows[peer_vport->index]);
13471347
}
13481348

13491349
kvfree(flows);

0 commit comments

Comments (0)