Skip to content

Commit 31d1356

Browse files
mosheshemesh2 authored and kuba-moo committed
net/mlx5: fs, add mlx5_fs_pool API
Refactor fc_pool API to create generic fs_pool API, as HW steering has more flow steering elements which can take advantage of the same pool of bulks API. Change fs_counters code to use the fs_pool API. Note, removed __counted_by from struct mlx5_fc_bulk as bulk_len is now inner struct member. It will be added back once __counted_by can support inner struct members. Signed-off-by: Moshe Shemesh <[email protected]> Reviewed-by: Yevgeny Kliteynik <[email protected]> Reviewed-by: Mark Bloch <[email protected]> Signed-off-by: Tariq Toukan <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 95f68e0 commit 31d1356

File tree

4 files changed

+327
-209
lines changed

4 files changed

+327
-209
lines changed

drivers/net/ethernet/mellanox/mlx5/core/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
1717
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
1818
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
1919
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
20-
fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
20+
fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
2121

2222
#
2323
# Netdev basic

drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c

Lines changed: 78 additions & 208 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <linux/mlx5/fs.h>
3535
#include "mlx5_core.h"
3636
#include "fs_core.h"
37+
#include "fs_pool.h"
3738
#include "fs_cmd.h"
3839

3940
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -65,17 +66,6 @@ struct mlx5_fc {
6566
u64 lastbytes;
6667
};
6768

68-
struct mlx5_fc_pool {
69-
struct mlx5_core_dev *dev;
70-
struct mutex pool_lock; /* protects pool lists */
71-
struct list_head fully_used;
72-
struct list_head partially_used;
73-
struct list_head unused;
74-
int available_fcs;
75-
int used_fcs;
76-
int threshold;
77-
};
78-
7969
struct mlx5_fc_stats {
8070
struct xarray counters;
8171

@@ -86,13 +76,13 @@ struct mlx5_fc_stats {
8676
int bulk_query_len;
8777
bool bulk_query_alloc_failed;
8878
unsigned long next_bulk_query_alloc;
89-
struct mlx5_fc_pool fc_pool;
79+
struct mlx5_fs_pool fc_pool;
9080
};
9181

92-
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
93-
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
94-
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
95-
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
82+
static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
83+
static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
84+
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
85+
static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
9686

9787
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
9888
{
@@ -447,11 +437,9 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
447437
/* Flow counter bluks */
448438

449439
struct mlx5_fc_bulk {
450-
struct list_head pool_list;
440+
struct mlx5_fs_bulk fs_bulk;
451441
u32 base_id;
452-
int bulk_len;
453-
unsigned long *bitmask;
454-
struct mlx5_fc fcs[] __counted_by(bulk_len);
442+
struct mlx5_fc fcs[];
455443
};
456444

457445
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
@@ -461,88 +449,108 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
461449
counter->id = id;
462450
}
463451

464-
static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
465-
{
466-
return bitmap_weight(bulk->bitmask, bulk->bulk_len);
467-
}
468-
469-
static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
452+
static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
470453
{
471454
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
472-
struct mlx5_fc_bulk *bulk;
473-
int err = -ENOMEM;
455+
struct mlx5_fc_bulk *fc_bulk;
474456
int bulk_len;
475457
u32 base_id;
476458
int i;
477459

478460
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
479461
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
480462

481-
bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
482-
if (!bulk)
483-
goto err_alloc_bulk;
463+
fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
464+
if (!fc_bulk)
465+
return NULL;
484466

485-
bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
486-
GFP_KERNEL);
487-
if (!bulk->bitmask)
488-
goto err_alloc_bitmask;
467+
if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
468+
goto fc_bulk_free;
489469

490-
err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
491-
if (err)
492-
goto err_mlx5_cmd_bulk_alloc;
493-
494-
bulk->base_id = base_id;
495-
bulk->bulk_len = bulk_len;
496-
for (i = 0; i < bulk_len; i++) {
497-
mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
498-
set_bit(i, bulk->bitmask);
499-
}
470+
if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
471+
goto fs_bulk_cleanup;
472+
fc_bulk->base_id = base_id;
473+
for (i = 0; i < bulk_len; i++)
474+
mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
500475

501-
return bulk;
476+
return &fc_bulk->fs_bulk;
502477

503-
err_mlx5_cmd_bulk_alloc:
504-
kvfree(bulk->bitmask);
505-
err_alloc_bitmask:
506-
kvfree(bulk);
507-
err_alloc_bulk:
508-
return ERR_PTR(err);
478+
fs_bulk_cleanup:
479+
mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
480+
fc_bulk_free:
481+
kvfree(fc_bulk);
482+
return NULL;
509483
}
510484

511485
static int
512-
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
486+
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
513487
{
514-
if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
488+
struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
489+
struct mlx5_fc_bulk,
490+
fs_bulk);
491+
492+
if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
515493
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
516494
return -EBUSY;
517495
}
518496

519-
mlx5_cmd_fc_free(dev, bulk->base_id);
520-
kvfree(bulk->bitmask);
521-
kvfree(bulk);
497+
mlx5_cmd_fc_free(dev, fc_bulk->base_id);
498+
mlx5_fs_bulk_cleanup(fs_bulk);
499+
kvfree(fc_bulk);
522500

523501
return 0;
524502
}
525503

526-
static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
504+
static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
527505
{
528-
int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
506+
fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
507+
fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
508+
}
529509

530-
if (free_fc_index >= bulk->bulk_len)
531-
return ERR_PTR(-ENOSPC);
510+
/* Flow counters pool API */
532511

533-
clear_bit(free_fc_index, bulk->bitmask);
534-
return &bulk->fcs[free_fc_index];
512+
static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
513+
.bulk_destroy = mlx5_fc_bulk_destroy,
514+
.bulk_create = mlx5_fc_bulk_create,
515+
.update_threshold = mlx5_fc_pool_update_threshold,
516+
};
517+
518+
static void
519+
mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
520+
{
521+
mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops);
535522
}
536523

537-
static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
524+
static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
538525
{
539-
int fc_index = fc->id - bulk->base_id;
526+
mlx5_fs_pool_cleanup(fc_pool);
527+
}
540528

541-
if (test_bit(fc_index, bulk->bitmask))
542-
return -EINVAL;
529+
static struct mlx5_fc *
530+
mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
531+
{
532+
struct mlx5_fs_pool_index pool_index = {};
533+
struct mlx5_fc_bulk *fc_bulk;
534+
int err;
543535

544-
set_bit(fc_index, bulk->bitmask);
545-
return 0;
536+
err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
537+
if (err)
538+
return ERR_PTR(err);
539+
fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
540+
return &fc_bulk->fcs[pool_index.index];
541+
}
542+
543+
static void
544+
mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
545+
{
546+
struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
547+
struct mlx5_fs_pool_index pool_index = {};
548+
struct mlx5_core_dev *dev = fc_pool->dev;
549+
550+
pool_index.fs_bulk = fs_bulk;
551+
pool_index.index = fc->id - fc->bulk->base_id;
552+
if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
553+
mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
546554
}
547555

548556
/**
@@ -573,7 +581,7 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
573581
counter->type = MLX5_FC_TYPE_LOCAL;
574582
counter->id = counter_id;
575583
fc_bulk->base_id = counter_id - offset;
576-
fc_bulk->bulk_len = bulk_size;
584+
fc_bulk->fs_bulk.bulk_len = bulk_size;
577585
counter->bulk = fc_bulk;
578586
return counter;
579587
}
@@ -588,141 +596,3 @@ void mlx5_fc_local_destroy(struct mlx5_fc *counter)
588596
kfree(counter);
589597
}
590598
EXPORT_SYMBOL(mlx5_fc_local_destroy);
591-
592-
/* Flow counters pool API */
593-
594-
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
595-
{
596-
fc_pool->dev = dev;
597-
mutex_init(&fc_pool->pool_lock);
598-
INIT_LIST_HEAD(&fc_pool->fully_used);
599-
INIT_LIST_HEAD(&fc_pool->partially_used);
600-
INIT_LIST_HEAD(&fc_pool->unused);
601-
fc_pool->available_fcs = 0;
602-
fc_pool->used_fcs = 0;
603-
fc_pool->threshold = 0;
604-
}
605-
606-
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
607-
{
608-
struct mlx5_core_dev *dev = fc_pool->dev;
609-
struct mlx5_fc_bulk *bulk;
610-
struct mlx5_fc_bulk *tmp;
611-
612-
list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
613-
mlx5_fc_bulk_destroy(dev, bulk);
614-
list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
615-
mlx5_fc_bulk_destroy(dev, bulk);
616-
list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
617-
mlx5_fc_bulk_destroy(dev, bulk);
618-
}
619-
620-
static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
621-
{
622-
fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
623-
fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
624-
}
625-
626-
static struct mlx5_fc_bulk *
627-
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
628-
{
629-
struct mlx5_core_dev *dev = fc_pool->dev;
630-
struct mlx5_fc_bulk *new_bulk;
631-
632-
new_bulk = mlx5_fc_bulk_create(dev);
633-
if (!IS_ERR(new_bulk))
634-
fc_pool->available_fcs += new_bulk->bulk_len;
635-
mlx5_fc_pool_update_threshold(fc_pool);
636-
return new_bulk;
637-
}
638-
639-
static void
640-
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
641-
{
642-
struct mlx5_core_dev *dev = fc_pool->dev;
643-
644-
fc_pool->available_fcs -= bulk->bulk_len;
645-
mlx5_fc_bulk_destroy(dev, bulk);
646-
mlx5_fc_pool_update_threshold(fc_pool);
647-
}
648-
649-
static struct mlx5_fc *
650-
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
651-
struct list_head *next_list,
652-
bool move_non_full_bulk)
653-
{
654-
struct mlx5_fc_bulk *bulk;
655-
struct mlx5_fc *fc;
656-
657-
if (list_empty(src_list))
658-
return ERR_PTR(-ENODATA);
659-
660-
bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
661-
fc = mlx5_fc_bulk_acquire_fc(bulk);
662-
if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
663-
list_move(&bulk->pool_list, next_list);
664-
return fc;
665-
}
666-
667-
static struct mlx5_fc *
668-
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
669-
{
670-
struct mlx5_fc_bulk *new_bulk;
671-
struct mlx5_fc *fc;
672-
673-
mutex_lock(&fc_pool->pool_lock);
674-
675-
fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
676-
&fc_pool->fully_used, false);
677-
if (IS_ERR(fc))
678-
fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
679-
&fc_pool->partially_used,
680-
true);
681-
if (IS_ERR(fc)) {
682-
new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
683-
if (IS_ERR(new_bulk)) {
684-
fc = ERR_CAST(new_bulk);
685-
goto out;
686-
}
687-
fc = mlx5_fc_bulk_acquire_fc(new_bulk);
688-
list_add(&new_bulk->pool_list, &fc_pool->partially_used);
689-
}
690-
fc_pool->available_fcs--;
691-
fc_pool->used_fcs++;
692-
693-
out:
694-
mutex_unlock(&fc_pool->pool_lock);
695-
return fc;
696-
}
697-
698-
static void
699-
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
700-
{
701-
struct mlx5_core_dev *dev = fc_pool->dev;
702-
struct mlx5_fc_bulk *bulk = fc->bulk;
703-
int bulk_free_fcs_amount;
704-
705-
mutex_lock(&fc_pool->pool_lock);
706-
707-
if (mlx5_fc_bulk_release_fc(bulk, fc)) {
708-
mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
709-
goto unlock;
710-
}
711-
712-
fc_pool->available_fcs++;
713-
fc_pool->used_fcs--;
714-
715-
bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
716-
if (bulk_free_fcs_amount == 1)
717-
list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
718-
if (bulk_free_fcs_amount == bulk->bulk_len) {
719-
list_del(&bulk->pool_list);
720-
if (fc_pool->available_fcs > fc_pool->threshold)
721-
mlx5_fc_pool_free_bulk(fc_pool, bulk);
722-
else
723-
list_add(&bulk->pool_list, &fc_pool->unused);
724-
}
725-
726-
unlock:
727-
mutex_unlock(&fc_pool->pool_lock);
728-
}

0 commit comments

Comments
 (0)