@@ -1011,7 +1011,6 @@ static int bnxt_set_channels(struct net_device *dev,
	return rc;
}

-#ifdef CONFIG_RFS_ACCEL
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
@@ -1152,7 +1151,195 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)

	return rc;
}
-#endif
+
+#define IPV4_ALL_MASK		((__force __be32)~0)
+#define L4_PORT_ALL_MASK	((__force __be16)~0)
+
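+/* Classify a user-supplied IPv6 address mask as all-ones (exact
+ * match) or all-zeroes (field ignored); partial masks are rejected
+ * by the rule-add path below.
+ */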
+static bool ipv6_mask_is_full(__be32 mask[4])
+{
+	return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+}
+
+static bool ipv6_mask_is_zero(__be32 mask[4])
+{
+	return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
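+/* Convert an ethtool flow spec into an ntuple filter and program it
+ * into the hardware.  Each field must be matched exactly or not at
+ * all, and rules cannot be directed to a VF.
+ */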
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+				    struct ethtool_rx_flow_spec *fs)
+{
+	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+	struct bnxt_ntuple_filter *new_fltr, *fltr;
+	struct bnxt_l2_filter *l2_fltr;
+	u32 flow_type = fs->flow_type;
+	struct flow_keys *fkeys;
+	u32 idx;
+	int rc;
+
+	if (!bp->vnic_info)
+		return -EAGAIN;
+
+	if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+		return -EOPNOTSUPP;
+
+	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+	if (!new_fltr)
+		return -ENOMEM;
+
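+	/* Take a reference on the default L2 (MAC) filter of VNIC 0;
+	 * it is dropped again in the error path below.
+	 */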
+	l2_fltr = bp->vnic_info[0].l2_filters[0];
+	atomic_inc(&l2_fltr->refcnt);
+	new_fltr->l2_fltr = l2_fltr;
+	fkeys = &new_fltr->fkeys;
+
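+	/* Build the flow keys from the spec.  A field with an all-ones
+	 * mask is matched exactly, a zero mask leaves the field out of
+	 * the match, and any partial mask fails the request.
+	 */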
+	rc = -EOPNOTSUPP;
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW: {
+		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+		fkeys->basic.ip_proto = IPPROTO_TCP;
+		if (flow_type == UDP_V4_FLOW)
+			fkeys->basic.ip_proto = IPPROTO_UDP;
+		fkeys->basic.n_proto = htons(ETH_P_IP);
+
+		if (ip_mask->ip4src == IPV4_ALL_MASK) {
+			fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+		} else if (ip_mask->ip4src) {
+			goto ntuple_err;
+		}
+		if (ip_mask->ip4dst == IPV4_ALL_MASK) {
+			fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+		} else if (ip_mask->ip4dst) {
+			goto ntuple_err;
+		}
+
+		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+			fkeys->ports.src = ip_spec->psrc;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+		} else if (ip_mask->psrc) {
+			goto ntuple_err;
+		}
+		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+			fkeys->ports.dst = ip_spec->pdst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+		} else if (ip_mask->pdst) {
+			goto ntuple_err;
+		}
+		break;
+	}
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW: {
+		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+		fkeys->basic.ip_proto = IPPROTO_TCP;
+		if (flow_type == UDP_V6_FLOW)
+			fkeys->basic.ip_proto = IPPROTO_UDP;
+		fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+		if (ipv6_mask_is_full(ip_mask->ip6src)) {
+			fkeys->addrs.v6addrs.src =
+				*(struct in6_addr *)&ip_spec->ip6src;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+		} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
+			goto ntuple_err;
+		}
+		if (ipv6_mask_is_full(ip_mask->ip6dst)) {
+			fkeys->addrs.v6addrs.dst =
+				*(struct in6_addr *)&ip_spec->ip6dst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+		} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
+			goto ntuple_err;
+		}
+
+		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+			fkeys->ports.src = ip_spec->psrc;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+		} else if (ip_mask->psrc) {
+			goto ntuple_err;
+		}
+		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+			fkeys->ports.dst = ip_spec->pdst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+		} else if (ip_mask->pdst) {
+			goto ntuple_err;
+		}
+		break;
+	}
+	default:
+		rc = -EOPNOTSUPP;
+		goto ntuple_err;
+	}
+	if (!new_fltr->ntuple_flags)
+		goto ntuple_err;
+
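+	/* Reject duplicates: hash the new tuple and look up an existing
+	 * filter with the same keys under RCU.
+	 */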
+	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+	rcu_read_lock();
+	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+	if (fltr) {
+		rcu_read_unlock();
+		rc = -EEXIST;
+		goto ntuple_err;
+	}
+	rcu_read_unlock();
+
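+	/* The rule never ages out.  Insert it into the software table
+	 * first, then program the NIC; unwind the insertion if the
+	 * firmware call fails.
+	 */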
+	new_fltr->base.rxq = ring;
+	new_fltr->base.flags = BNXT_ACT_NO_AGING;
+	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+	if (!rc) {
+		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+		if (rc) {
+			bnxt_del_ntp_filter(bp, new_fltr);
+			return rc;
+		}
+		fs->location = new_fltr->base.sw_id;
+		return 0;
+	}
+
+ntuple_err:
+	atomic_dec(&l2_fltr->refcnt);
+	kfree(new_fltr);
+	return rc;
+}
+
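+/* Handler for ETHTOOL_SRXCLSRLINS: validate the ring/VF cookie and
+ * flow type, then hand off to bnxt_add_ntuple_cls_rule().  Only
+ * automatic rule placement (RX_CLS_LOC_ANY) is accepted.
+ */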
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fs = &cmd->fs;
+	u32 ring, flow_type;
+	int rc;
+	u8 vf;
+
+	if (!netif_running(bp->dev))
+		return -EAGAIN;
+	if (!(bp->flags & BNXT_FLAG_RFS))
+		return -EPERM;
+	if (fs->location != RX_CLS_LOC_ANY)
+		return -EINVAL;
+
+	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+	if (BNXT_VF(bp) && vf)
+		return -EINVAL;
+	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+		return -EINVAL;
+	if (!vf && ring >= bp->rx_nr_rings)
+		return -EINVAL;
+
+	flow_type = fs->flow_type;
+	if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+		return -EINVAL;
+	flow_type &= ~FLOW_EXT;
+	if (flow_type == ETHER_FLOW)
+		rc = -EOPNOTSUPP;
+	else
+		rc = bnxt_add_ntuple_cls_rule(bp, fs);
+	return rc;
+}

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
@@ -1302,14 +1489,13 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
	int rc = 0;

	switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
-		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+		cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
@@ -1319,7 +1505,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
-#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
@@ -1343,6 +1528,10 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
		rc = bnxt_srxfh(bp, cmd);
		break;

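+	/* Add a user-defined ntuple classification rule */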
+	case ETHTOOL_SRXCLSRLINS:
+		rc = bnxt_srxclsrlins(bp, cmd);
+		break;
+
	default:
		rc = -EOPNOTSUPP;
		break;