@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/idr.h>
 #include <net/devlink.h>
 #include <trace/events/mlxsw.h>
 
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 *p_id)
 {
-        u16 id;
+        int id;
 
-        id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
-        if (id < tcam->max_regions) {
-                __set_bit(id, tcam->used_regions);
-                *p_id = id;
-                return 0;
-        }
-        return -ENOBUFS;
+        id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+                           GFP_KERNEL);
+        if (id < 0)
+                return id;
+
+        *p_id = id;
+
+        return 0;
 }
 
 static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                             u16 id)
 {
-        __clear_bit(id, tcam->used_regions);
+        ida_free(&tcam->used_regions, id);
 }
 
 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
 {
-        u16 id;
+        int id;
 
-        id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
-        if (id < tcam->max_groups) {
-                __set_bit(id, tcam->used_groups);
-                *p_id = id;
-                return 0;
-        }
-        return -ENOBUFS;
+        id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+                           GFP_KERNEL);
+        if (id < 0)
+                return id;
+
+        *p_id = id;
+
+        return 0;
 }
 
 static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
 {
-        __clear_bit(id, tcam->used_groups);
+        ida_free(&tcam->used_groups, id);
 }
 
 struct mlxsw_sp_acl_tcam_pattern {
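Note on the conversion above: ida_alloc_max() returns the smallest free ID in the inclusive range [0, max] or a negative errno, which is why the bound becomes max_regions - 1 and the local variable changes from u16 to int. A minimal standalone sketch of the pattern, using hypothetical foo_* names that are not part of this patch:

        #include <linux/idr.h>

        struct foo {
                struct ida ids;         /* replaces the open-coded bitmap allocator */
                u16 max_ids;
        };

        static int foo_id_get(struct foo *foo, u16 *p_id)
        {
                /* Negative errno on failure, otherwise the smallest free ID;
                 * the result must be held in a signed int before narrowing.
                 */
                int id = ida_alloc_max(&foo->ids, foo->max_ids - 1, GFP_KERNEL);

                if (id < 0)
                        return id;
                *p_id = id;
                return 0;
        }

        static void foo_id_put(struct foo *foo, u16 id)
        {
                ida_free(&foo->ids, id);
        }

Unlike the non-atomic __set_bit()/__clear_bit() pair, the IDA serializes its own updates internally, so concurrent get/put callers need no extra lock.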
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
                                                   rehash.dw.work);
         int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
 
+        mutex_lock(&vregion->lock);
         mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+        mutex_unlock(&vregion->lock);
         if (credits < 0)
                 /* Rehash gone out of credits so it was interrupted.
                  * Schedule the work as soon as possible to continue.
@@ -725,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
         mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+        /* The entry markers are relative to the current chunk and therefore
+         * need to be reset together with the chunk marker.
+         */
+        ctx->current_vchunk = NULL;
+        ctx->start_ventry = NULL;
+        ctx->stop_ventry = NULL;
+}
+
 static void
 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
 {
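The new helper addresses call sites that cleared only current_vchunk while leaving start_ventry and stop_ventry pointing into the previous chunk's entry list (the three conversions below). A hypothetical sketch of the cursor fields involved, mirroring the names used by the patch:

        struct rehash_cursor {
                struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* chunk in flight */
                struct mlxsw_sp_acl_tcam_ventry *start_ventry;   /* resume point */
                struct mlxsw_sp_acl_tcam_ventry *stop_ventry;    /* rollback stop */
        };

Because the two ventry markers index into current_vchunk's list, clearing the chunk marker alone can make a later pass resume from a stale, possibly freed, entry.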
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
          * the current chunk pointer to make sure all chunks
          * are properly migrated.
          */
-        vregion->rehash.ctx.current_vchunk = NULL;
+        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
         struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
 
         if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+                struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
                 mutex_lock(&tcam->lock);
                 list_del(&vregion->tlist);
                 mutex_unlock(&tcam->lock);
-                cancel_delayed_work_sync(&vregion->rehash.dw);
+                if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+                    ctx->hints_priv)
+                        ops->region_rehash_hints_put(ctx->hints_priv);
         }
         mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
         if (vregion->region2)
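Why the conditional hints_put: a rehash pass that runs out of credits keeps its hints buffer referenced from ctx->hints_priv and reschedules itself. If cancel_delayed_work_sync() returns true here, a pending pass was cancelled before it could finish and release that buffer, so the destroy path releases it instead; the extra ctx->hints_priv test covers work cancelled before any hints were computed.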
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_ventry *ventry,
                                       bool *activity)
 {
-        return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
-                                                    ventry->entry, activity);
+        struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+        int err;
+
+        mutex_lock(&vregion->lock);
+        err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+                                                   activity);
+        mutex_unlock(&vregion->lock);
+        return err;
 }
 
 static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
 {
         struct mlxsw_sp_acl_tcam_chunk *new_chunk;
 
+        WARN_ON(vchunk->chunk2);
+
         new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
         if (IS_ERR(new_chunk))
                 return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
 {
         mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
         vchunk->chunk2 = NULL;
-        ctx->current_vchunk = NULL;
+        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 }
 
 static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                 return 0;
         }
 
+        if (list_empty(&vchunk->ventry_list))
+                goto out;
+
         /* If the migration got interrupted, we have the ventry to start from
          * stored in context.
          */
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                 ventry = list_first_entry(&vchunk->ventry_list,
                                           typeof(*ventry), list);
 
+        WARN_ON(ventry->vchunk != vchunk);
+
         list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
                 /* During rollback, once we reach the ventry that failed
                  * to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                 }
         }
 
+out:
         mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
         return 0;
 }
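The empty-list guard, the WARN_ON, and the new out: label protect the same invariant: list_first_entry() never returns NULL; on an empty list it returns the list head reinterpreted as an entry, so it may only be called after a list_empty() check. A minimal sketch of the idiom with hypothetical types:

        #include <linux/list.h>

        struct item {
                int val;
                struct list_head list;
        };

        static struct item *first_item_or_null(struct list_head *head)
        {
                /* On an empty list, list_first_entry() would hand back the
                 * head itself cast to an entry -- garbage, not NULL.
                 */
                if (list_empty(head))
                        return NULL;
                return list_first_entry(head, struct item, list);
        }

The kernel also provides list_first_entry_or_null() for exactly this pattern; the patch instead branches to out: so that migrate_end() still runs for the empty chunk.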
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
         int err;
 
+        if (list_empty(&vregion->vchunk_list))
+                return 0;
+
         /* If the migration got interrupted, we have the vchunk
          * we are working on stored in context.
          */
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
         int err, err2;
 
         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
-        mutex_lock(&vregion->lock);
         err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                    ctx, credits);
         if (err) {
+                if (ctx->this_is_rollback)
+                        return err;
                 /* In case migration was not successful, we need to swap
                  * so the original region pointer is assigned again
                  * to vregion->region.
                  */
                 swap(vregion->region, vregion->region2);
-                ctx->current_vchunk = NULL;
+                mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
                 ctx->this_is_rollback = true;
                 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                             ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
                         /* Let the rollback to be continued later on. */
                 }
         }
-        mutex_unlock(&vregion->lock);
         trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
         return err;
 }
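Two coupled changes in this function: vregion->lock is no longer taken in the migrate path because the rehash work item now holds it across the whole pass (see the mutex_lock/mutex_unlock hunk near the top of this diff), and a failure observed while ctx->this_is_rollback is set returns immediately rather than swapping the region pointers again, which would have turned a failed rollback into a second, bogus rollback of the rollback.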
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
 
         ctx->hints_priv = hints_priv;
         ctx->this_is_rollback = false;
+        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 
         return 0;
 
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
         err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
                                                 ctx, credits);
         if (err) {
-                dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+                dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+                return;
         }
 
         if (*credits >= 0)
@@ -1549,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
         if (max_tcam_regions < max_regions)
                 max_regions = max_tcam_regions;
 
-        tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
-        if (!tcam->used_regions) {
-                err = -ENOMEM;
-                goto err_alloc_used_regions;
-        }
+        ida_init(&tcam->used_regions);
         tcam->max_regions = max_regions;
 
         max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
-        tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
-        if (!tcam->used_groups) {
-                err = -ENOMEM;
-                goto err_alloc_used_groups;
-        }
+        ida_init(&tcam->used_groups);
         tcam->max_groups = max_groups;
         tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                    ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
         return 0;
 
 err_tcam_init:
-        bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
-        bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+        ida_destroy(&tcam->used_groups);
+        ida_destroy(&tcam->used_regions);
         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
 err_rehash_params_register:
         mutex_destroy(&tcam->lock);
@@ -1591,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
         ops->fini(mlxsw_sp, tcam->priv);
-        bitmap_free(tcam->used_groups);
-        bitmap_free(tcam->used_regions);
+        ida_destroy(&tcam->used_groups);
+        ida_destroy(&tcam->used_regions);
         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
         mutex_destroy(&tcam->lock);
 }
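On the init/fini conversion: ida_init() just initializes the structure and cannot fail, which is why the bitmap_zalloc() calls and their two error labels disappear, while ida_destroy() releases the IDA's internal storage on teardown. A minimal lifecycle sketch with a hypothetical bar structure:

        #include <linux/idr.h>

        struct bar {
                struct ida ids;
        };

        static void bar_init(struct bar *bar)
        {
                ida_init(&bar->ids);    /* no allocation here, cannot fail */
        }

        static void bar_fini(struct bar *bar)
        {
                ida_destroy(&bar->ids); /* frees all internal storage */
        }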