@@ -437,12 +437,49 @@ static int do_get_hw_stats(struct ib_device *ibdev,
437
437
/*
 * is_rdma_bytes_counter() - report whether an optional-counter type counts
 * bytes (as opposed to packets).
 *
 * Used when reading back hardware flow counters: mlx5_fc_query() returns
 * both a packet and a byte total, and the counter type decides which of
 * the two is exposed in the stats array.
 *
 * Return: true for the RDMA TX/RX BYTES counters (global and per-QP),
 * false for every other type.
 */
static bool is_rdma_bytes_counter(u32 type)
{
	/* Grouped switch instead of a chained ||: one line per type and a
	 * single obvious place to extend when new BYTES counters appear.
	 */
	switch (type) {
	case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES:
	case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES:
	case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP:
	case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP:
		return true;
	default:
		return false;
	}
}
445
447
448
+ static int do_per_qp_get_op_stat (struct rdma_counter * counter )
449
+ {
450
+ struct mlx5_ib_dev * dev = to_mdev (counter -> device );
451
+ const struct mlx5_ib_counters * cnts = get_counters (dev , counter -> port );
452
+ struct mlx5_rdma_counter * mcounter = to_mcounter (counter );
453
+ int i , ret , index , num_hw_counters ;
454
+ u64 packets = 0 , bytes = 0 ;
455
+
456
+ for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP ;
457
+ i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP ; i ++ ) {
458
+ if (!mcounter -> fc [i ])
459
+ continue ;
460
+
461
+ ret = mlx5_fc_query (dev -> mdev , mcounter -> fc [i ],
462
+ & packets , & bytes );
463
+ if (ret )
464
+ return ret ;
465
+
466
+ num_hw_counters = cnts -> num_q_counters +
467
+ cnts -> num_cong_counters +
468
+ cnts -> num_ext_ppcnt_counters ;
469
+
470
+ index = i - MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP +
471
+ num_hw_counters ;
472
+
473
+ if (is_rdma_bytes_counter (i ))
474
+ counter -> stats -> value [index ] = bytes ;
475
+ else
476
+ counter -> stats -> value [index ] = packets ;
477
+
478
+ clear_bit (index , counter -> stats -> is_disabled );
479
+ }
480
+ return 0 ;
481
+ }
482
+
446
483
static int do_get_op_stat (struct ib_device * ibdev ,
447
484
struct rdma_hw_stats * stats ,
448
485
u32 port_num , int index )
@@ -542,19 +579,30 @@ static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
542
579
{
543
580
struct mlx5_ib_dev * dev = to_mdev (counter -> device );
544
581
const struct mlx5_ib_counters * cnts = get_counters (dev , counter -> port );
582
+ int ret ;
583
+
584
+ ret = mlx5_ib_query_q_counters (dev -> mdev , cnts , counter -> stats ,
585
+ counter -> id );
586
+ if (ret )
587
+ return ret ;
588
+
589
+ if (!counter -> mode .bind_opcnt )
590
+ return 0 ;
545
591
546
- return mlx5_ib_query_q_counters (dev -> mdev , cnts ,
547
- counter -> stats , counter -> id );
592
+ return do_per_qp_get_op_stat (counter );
548
593
}
549
594
550
595
static int mlx5_ib_counter_dealloc (struct rdma_counter * counter )
551
596
{
597
+ struct mlx5_rdma_counter * mcounter = to_mcounter (counter );
552
598
struct mlx5_ib_dev * dev = to_mdev (counter -> device );
553
599
u32 in [MLX5_ST_SZ_DW (dealloc_q_counter_in )] = {};
554
600
555
601
if (!counter -> id )
556
602
return 0 ;
557
603
604
+ WARN_ON (!xa_empty (& mcounter -> qpn_opfc_xa ));
605
+ mlx5r_fs_destroy_fcs (dev , counter );
558
606
MLX5_SET (dealloc_q_counter_in , in , opcode ,
559
607
MLX5_CMD_OP_DEALLOC_Q_COUNTER );
560
608
MLX5_SET (dealloc_q_counter_in , in , counter_set_id , counter -> id );
@@ -585,8 +633,14 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
585
633
if (err )
586
634
goto fail_set_counter ;
587
635
636
+ err = mlx5r_fs_bind_op_fc (qp , counter , port );
637
+ if (err )
638
+ goto fail_bind_op_fc ;
639
+
588
640
return 0 ;
589
641
642
+ fail_bind_op_fc :
643
+ mlx5_ib_qp_set_counter (qp , NULL );
590
644
fail_set_counter :
591
645
mlx5_ib_counter_dealloc (counter );
592
646
counter -> id = 0 ;
@@ -596,7 +650,20 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
596
650
597
651
/*
 * mlx5_ib_counter_unbind_qp() - detach @qp from its rdma counter.
 *
 * Removes the per-QP optional flow-counter steering first, then clears the
 * QP's counter-set assignment.  If clearing the assignment fails, the
 * optional-counter steering is re-attached so the QP stays consistent with
 * its still-bound counter.
 *
 * Return: 0 on success, or the error from mlx5_ib_qp_set_counter().
 */
static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
	struct rdma_counter *counter = qp->counter;
	int err;

	mlx5r_fs_unbind_op_fc(qp, counter);

	err = mlx5_ib_qp_set_counter(qp, NULL);
	if (err)
		/* Roll back the unbind of the op flow counters.
		 * NOTE(review): the rebind's return value is ignored here,
		 * matching the original flow — confirm this is intentional.
		 */
		mlx5r_fs_bind_op_fc(qp, counter, port);

	return err;
}
601
668
602
669
static void mlx5_ib_fill_counters (struct mlx5_ib_dev * dev ,
@@ -789,9 +856,8 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
789
856
* was already created, if both conditions are met return true and the counter
790
857
* else return false.
791
858
*/
792
- static bool mlx5r_is_opfc_shared_and_in_use (struct mlx5_ib_op_fc * opfcs ,
793
- u32 type ,
794
- struct mlx5_ib_op_fc * * opfc )
859
+ bool mlx5r_is_opfc_shared_and_in_use (struct mlx5_ib_op_fc * opfcs , u32 type ,
860
+ struct mlx5_ib_op_fc * * opfc )
795
861
{
796
862
u32 shared_fc_type ;
797
863
@@ -808,6 +874,18 @@ static bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs,
808
874
case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES :
809
875
shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS ;
810
876
break ;
877
+ case MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP :
878
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP ;
879
+ break ;
880
+ case MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP :
881
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP ;
882
+ break ;
883
+ case MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP :
884
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP ;
885
+ break ;
886
+ case MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP :
887
+ shared_fc_type = MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP ;
888
+ break ;
811
889
default :
812
890
return false;
813
891
}
@@ -1104,7 +1182,12 @@ static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
1104
1182
return 0 ;
1105
1183
}
1106
1184
1107
- static void mlx5_ib_counter_init (struct rdma_counter * counter ) {}
1185
+ static void mlx5_ib_counter_init (struct rdma_counter * counter )
1186
+ {
1187
+ struct mlx5_rdma_counter * mcounter = to_mcounter (counter );
1188
+
1189
+ xa_init (& mcounter -> qpn_opfc_xa );
1190
+ }
1108
1191
1109
1192
static const struct ib_device_ops hw_stats_ops = {
1110
1193
.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats ,
0 commit comments