/* Connection-tracking state bits encoded into the ct_state register mapping. */
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
/* Set for conntrack entries still in "new" state (ctinfo == IP_CT_NEW),
 * enabling offload of ct_state +new matches.
 */
#define MLX5_CT_STATE_NEW_BIT BIT(7)

/* Bit width and mask of the register used to carry conntrack labels. */
#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
@@ -721,12 +722,14 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
721
722
DECLARE_MOD_HDR_ACTS_ACTIONS (actions_arr , MLX5_CT_MIN_MOD_ACTS );
722
723
DECLARE_MOD_HDR_ACTS (mod_acts , actions_arr );
723
724
struct flow_action_entry * meta ;
725
+ enum ip_conntrack_info ctinfo ;
724
726
u16 ct_state = 0 ;
725
727
int err ;
726
728
727
729
meta = mlx5_tc_ct_get_ct_metadata_action (flow_rule );
728
730
if (!meta )
729
731
return - EOPNOTSUPP ;
732
+ ctinfo = meta -> ct_metadata .cookie & NFCT_INFOMASK ;
730
733
731
734
err = mlx5_get_label_mapping (ct_priv , meta -> ct_metadata .labels ,
732
735
& attr -> ct_attr .ct_labels_id );
@@ -742,7 +745,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
742
745
ct_state |= MLX5_CT_STATE_NAT_BIT ;
743
746
}
744
747
745
- ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT ;
748
+ ct_state |= MLX5_CT_STATE_TRK_BIT ;
749
+ ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT ;
746
750
ct_state |= meta -> ct_metadata .orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT ;
747
751
err = mlx5_tc_ct_entry_set_registers (ct_priv , & mod_acts ,
748
752
ct_state ,
@@ -871,6 +875,68 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
871
875
return err ;
872
876
}
873
877
878
+ static int
879
+ mlx5_tc_ct_entry_replace_rule (struct mlx5_tc_ct_priv * ct_priv ,
880
+ struct flow_rule * flow_rule ,
881
+ struct mlx5_ct_entry * entry ,
882
+ bool nat , u8 zone_restore_id )
883
+ {
884
+ struct mlx5_ct_zone_rule * zone_rule = & entry -> zone_rules [nat ];
885
+ struct mlx5_flow_attr * attr = zone_rule -> attr , * old_attr ;
886
+ struct mlx5e_mod_hdr_handle * mh ;
887
+ struct mlx5_ct_fs_rule * rule ;
888
+ struct mlx5_flow_spec * spec ;
889
+ int err ;
890
+
891
+ spec = kvzalloc (sizeof (* spec ), GFP_KERNEL );
892
+ if (!spec )
893
+ return - ENOMEM ;
894
+
895
+ old_attr = mlx5_alloc_flow_attr (ct_priv -> ns_type );
896
+ if (!old_attr ) {
897
+ err = - ENOMEM ;
898
+ goto err_attr ;
899
+ }
900
+ * old_attr = * attr ;
901
+
902
+ err = mlx5_tc_ct_entry_create_mod_hdr (ct_priv , attr , flow_rule , & mh , zone_restore_id ,
903
+ nat , mlx5_tc_ct_entry_has_nat (entry ));
904
+ if (err ) {
905
+ ct_dbg ("Failed to create ct entry mod hdr" );
906
+ goto err_mod_hdr ;
907
+ }
908
+
909
+ mlx5_tc_ct_set_tuple_match (ct_priv , spec , flow_rule );
910
+ mlx5e_tc_match_to_reg_match (spec , ZONE_TO_REG , entry -> tuple .zone , MLX5_CT_ZONE_MASK );
911
+
912
+ rule = ct_priv -> fs_ops -> ct_rule_add (ct_priv -> fs , spec , attr , flow_rule );
913
+ if (IS_ERR (rule )) {
914
+ err = PTR_ERR (rule );
915
+ ct_dbg ("Failed to add replacement ct entry rule, nat: %d" , nat );
916
+ goto err_rule ;
917
+ }
918
+
919
+ ct_priv -> fs_ops -> ct_rule_del (ct_priv -> fs , zone_rule -> rule );
920
+ zone_rule -> rule = rule ;
921
+ mlx5_tc_ct_entry_destroy_mod_hdr (ct_priv , old_attr , zone_rule -> mh );
922
+ zone_rule -> mh = mh ;
923
+
924
+ kfree (old_attr );
925
+ kvfree (spec );
926
+ ct_dbg ("Replaced ct entry rule in zone %d" , entry -> tuple .zone );
927
+
928
+ return 0 ;
929
+
930
+ err_rule :
931
+ mlx5_tc_ct_entry_destroy_mod_hdr (ct_priv , zone_rule -> attr , mh );
932
+ mlx5_put_label_mapping (ct_priv , attr -> ct_attr .ct_labels_id );
933
+ err_mod_hdr :
934
+ kfree (old_attr );
935
+ err_attr :
936
+ kvfree (spec );
937
+ return err ;
938
+ }
939
+
874
940
static bool
875
941
mlx5_tc_ct_entry_valid (struct mlx5_ct_entry * entry )
876
942
{
@@ -1065,6 +1131,52 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
1065
1131
return err ;
1066
1132
}
1067
1133
1134
/* Replace both the non-NAT and the NAT offloaded rules of a ct entry with
 * rules built from the new flow_rule. If the NAT replacement fails after
 * the non-NAT one succeeded, the already-replaced non-NAT rule is deleted
 * so the entry is not left half-updated.
 */
static int
mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
			       struct flow_rule *flow_rule,
			       struct mlx5_ct_entry *entry,
			       u8 zone_restore_id)
{
	int err;

	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
					    zone_restore_id);
	if (err)
		return err;

	err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
					    zone_restore_id);
	if (err)
		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
	return err;
}
1153
+
1154
/* Handle replacement of an already-offloaded ct entry: swap in rules built
 * from the new flow_rule. If the replacement fails, the now-stale entry is
 * re-looked-up under ht_lock and removed from the hashtable, since its
 * offloaded state no longer matches software conntrack.
 *
 * NOTE(review): the entry pointer is deliberately re-fetched by cookie under
 * the lock rather than reused, as it may have been removed concurrently.
 */
static int
mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
				      struct mlx5_ct_entry *entry, unsigned long cookie)
{
	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
	int err;

	err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
	if (!err)
		return 0;

	/* If failed to update the entry, then look it up again under ht_lock
	 * protection and properly delete it.
	 */
	spin_lock_bh(&ct_priv->ht_lock);
	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
	if (entry) {
		rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
		spin_unlock_bh(&ct_priv->ht_lock);
		/* Drop the hashtable's reference; frees the entry at zero. */
		mlx5_tc_ct_entry_put(entry);
	} else {
		spin_unlock_bh(&ct_priv->ht_lock);
	}
	return err;
}
1179
+
1068
1180
static int
1069
1181
mlx5_tc_ct_block_flow_offload_add (struct mlx5_ct_ft * ft ,
1070
1182
struct flow_cls_offload * flow )
@@ -1073,23 +1185,27 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
1073
1185
struct mlx5_tc_ct_priv * ct_priv = ft -> ct_priv ;
1074
1186
struct flow_action_entry * meta_action ;
1075
1187
unsigned long cookie = flow -> cookie ;
1076
- enum ip_conntrack_info ctinfo ;
1077
1188
struct mlx5_ct_entry * entry ;
1078
1189
int err ;
1079
1190
1080
1191
meta_action = mlx5_tc_ct_get_ct_metadata_action (flow_rule );
1081
1192
if (!meta_action )
1082
1193
return - EOPNOTSUPP ;
1083
- ctinfo = meta_action -> ct_metadata .cookie & NFCT_INFOMASK ;
1084
- if (ctinfo == IP_CT_NEW )
1085
- return - EOPNOTSUPP ;
1086
1194
1087
1195
spin_lock_bh (& ct_priv -> ht_lock );
1088
1196
entry = rhashtable_lookup_fast (& ft -> ct_entries_ht , & cookie , cts_ht_params );
1089
1197
if (entry && refcount_inc_not_zero (& entry -> refcnt )) {
1198
+ if (entry -> restore_cookie == meta_action -> ct_metadata .cookie ) {
1199
+ spin_unlock_bh (& ct_priv -> ht_lock );
1200
+ mlx5_tc_ct_entry_put (entry );
1201
+ return - EEXIST ;
1202
+ }
1203
+ entry -> restore_cookie = meta_action -> ct_metadata .cookie ;
1090
1204
spin_unlock_bh (& ct_priv -> ht_lock );
1205
+
1206
+ err = mlx5_tc_ct_block_flow_offload_replace (ft , flow_rule , entry , cookie );
1091
1207
mlx5_tc_ct_entry_put (entry );
1092
- return - EEXIST ;
1208
+ return err ;
1093
1209
}
1094
1210
spin_unlock_bh (& ct_priv -> ht_lock );
1095
1211
@@ -1327,7 +1443,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
1327
1443
struct mlx5_ct_attr * ct_attr ,
1328
1444
struct netlink_ext_ack * extack )
1329
1445
{
1330
- bool trk , est , untrk , unest , new , rpl , unrpl , rel , unrel , inv , uninv ;
1446
+ bool trk , est , untrk , unnew , unest , new , rpl , unrpl , rel , unrel , inv , uninv ;
1331
1447
struct flow_rule * rule = flow_cls_offload_flow_rule (f );
1332
1448
struct flow_dissector_key_ct * mask , * key ;
1333
1449
u32 ctstate = 0 , ctstate_mask = 0 ;
@@ -1373,15 +1489,18 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
1373
1489
rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED ;
1374
1490
inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID ;
1375
1491
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED ;
1492
+ unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW ;
1376
1493
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED ;
1377
1494
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY ;
1378
1495
unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED ;
1379
1496
uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID ;
1380
1497
1381
1498
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0 ;
1499
+ ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0 ;
1382
1500
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0 ;
1383
1501
ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0 ;
1384
1502
ctstate_mask |= (untrk || trk ) ? MLX5_CT_STATE_TRK_BIT : 0 ;
1503
+ ctstate_mask |= (unnew || new ) ? MLX5_CT_STATE_NEW_BIT : 0 ;
1385
1504
ctstate_mask |= (unest || est ) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0 ;
1386
1505
ctstate_mask |= (unrpl || rpl ) ? MLX5_CT_STATE_REPLY_BIT : 0 ;
1387
1506
ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0 ;
@@ -1399,12 +1518,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
1399
1518
return - EOPNOTSUPP ;
1400
1519
}
1401
1520
1402
- if (new ) {
1403
- NL_SET_ERR_MSG_MOD (extack ,
1404
- "matching on ct_state +new isn't supported" );
1405
- return - EOPNOTSUPP ;
1406
- }
1407
-
1408
1521
if (mask -> ct_zone )
1409
1522
mlx5e_tc_match_to_reg_match (spec , ZONE_TO_REG ,
1410
1523
key -> ct_zone , MLX5_CT_ZONE_MASK );
0 commit comments