@@ -3421,15 +3421,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
 	}
 }
 
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct bnxt_tpa_idx_map *map;
 	int i;
 
-	if (!rxr->rx_tpa)
-		goto skip_rx_tpa_free;
-
 	for (i = 0; i < bp->max_tpa; i++) {
 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
 		u8 *data = tpa_info->data;
@@ -3440,6 +3436,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		tpa_info->data = NULL;
 		page_pool_free_va(rxr->head_pool, data, false);
 	}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_tpa_idx_map *map;
+
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
+
+	bnxt_free_one_tpa_info_data(bp, rxr);
 
 skip_rx_tpa_free:
 	if (!rxr->rx_buf_ring)
@@ -3467,7 +3474,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 		return;
 
 	for (i = 0; i < bp->rx_nr_rings; i++)
-		bnxt_free_one_rx_ring_skbs(bp, i);
+		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)
@@ -3608,29 +3615,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 	return 0;
 }
 
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	int i;
+
+	kfree(rxr->rx_tpa_idx_map);
+	rxr->rx_tpa_idx_map = NULL;
+	if (rxr->rx_tpa) {
+		for (i = 0; i < bp->max_tpa; i++) {
+			kfree(rxr->rx_tpa[i].agg_arr);
+			rxr->rx_tpa[i].agg_arr = NULL;
+		}
+	}
+	kfree(rxr->rx_tpa);
+	rxr->rx_tpa = NULL;
+}
+
 static void bnxt_free_tpa_info(struct bnxt *bp)
 {
-	int i, j;
+	int i;
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 
-		kfree(rxr->rx_tpa_idx_map);
-		rxr->rx_tpa_idx_map = NULL;
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				kfree(rxr->rx_tpa[j].agg_arr);
-				rxr->rx_tpa[j].agg_arr = NULL;
-			}
-		}
-		kfree(rxr->rx_tpa);
-		rxr->rx_tpa = NULL;
+		bnxt_free_one_tpa_info(bp, rxr);
+	}
+}
+
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	struct rx_agg_cmp *agg;
+	int i;
+
+	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+			      GFP_KERNEL);
+	if (!rxr->rx_tpa)
+		return -ENOMEM;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+		return 0;
+	for (i = 0; i < bp->max_tpa; i++) {
+		agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+		if (!agg)
+			return -ENOMEM;
+		rxr->rx_tpa[i].agg_arr = agg;
 	}
+	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+				      GFP_KERNEL);
+	if (!rxr->rx_tpa_idx_map)
+		return -ENOMEM;
+
+	return 0;
 }
 
 static int bnxt_alloc_tpa_info(struct bnxt *bp)
 {
-	int i, j;
+	int i, rc;
 
 	bp->max_tpa = MAX_TPA;
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3641,25 +3683,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
 
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct rx_agg_cmp *agg;
-
-		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
-				      GFP_KERNEL);
-		if (!rxr->rx_tpa)
-			return -ENOMEM;
 
-		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
-			continue;
-		for (j = 0; j < bp->max_tpa; j++) {
-			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
-			if (!agg)
-				return -ENOMEM;
-			rxr->rx_tpa[j].agg_arr = agg;
-		}
-		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
-					      GFP_KERNEL);
-		if (!rxr->rx_tpa_idx_map)
-			return -ENOMEM;
+		rc = bnxt_alloc_one_tpa_info(bp, rxr);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -3683,7 +3710,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
 		page_pool_destroy(rxr->page_pool);
-		if (rxr->page_pool != rxr->head_pool)
+		if (bnxt_separate_head_pool())
 			page_pool_destroy(rxr->head_pool);
 		rxr->page_pool = rxr->head_pool = NULL;
 
@@ -3737,6 +3764,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	return PTR_ERR(pool);
 }
 
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	u16 mem_size;
+
+	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+	mem_size = rxr->rx_agg_bmap_size / 8;
+	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+	if (!rxr->rx_agg_bmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int bnxt_alloc_rx_rings(struct bnxt *bp)
 {
 	int numa_node = dev_to_node(&bp->pdev->dev);
@@ -3781,19 +3821,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 
 		ring->grp_idx = i;
 		if (agg_rings) {
-			u16 mem_size;
-
 			ring = &rxr->rx_agg_ring_struct;
 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 			if (rc)
 				return rc;
 
 			ring->grp_idx = i;
-			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
-			mem_size = rxr->rx_agg_bmap_size / 8;
-			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
-			if (!rxr->rx_agg_bmap)
-				return -ENOMEM;
+			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+			if (rc)
+				return rc;
 		}
 	}
 	if (bp->flags & BNXT_FLAG_TPA)
@@ -4268,10 +4304,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
 	rxr->rx_agg_prod = prod;
 }
 
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr)
+{
+	dma_addr_t mapping;
+	u8 *data;
+	int i;
+
+	for (i = 0; i < bp->max_tpa; i++) {
+		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+					    GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		rxr->rx_tpa[i].data = data;
+		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+		rxr->rx_tpa[i].mapping = mapping;
+	}
+
+	return 0;
+}
+
 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 {
 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	int i;
+	int rc;
 
 	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
 
@@ -4281,19 +4338,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 		bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
 
 	if (rxr->rx_tpa) {
-		dma_addr_t mapping;
-		u8 *data;
-
-		for (i = 0; i < bp->max_tpa; i++) {
-			data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
-						    GFP_KERNEL);
-			if (!data)
-				return -ENOMEM;
-
-			rxr->rx_tpa[i].data = data;
-			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
-			rxr->rx_tpa[i].mapping = mapping;
-		}
+		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+		if (rc)
+			return rc;
 	}
 	return 0;
 }
@@ -13663,7 +13710,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 			bnxt_reset_task(bp, true);
 			break;
 		}
-		bnxt_free_one_rx_ring_skbs(bp, i);
+		bnxt_free_one_rx_ring_skbs(bp, rxr);
 		rxr->rx_prod = 0;
 		rxr->rx_agg_prod = 0;
 		rxr->rx_sw_agg_prod = 0;
@@ -15293,19 +15340,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
 	.get_base_stats = bnxt_get_base_stats,
 };
 
-static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
-	u16 mem_size;
-
-	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
-	mem_size = rxr->rx_agg_bmap_size / 8;
-	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
-	if (!rxr->rx_agg_bmap)
-		return -ENOMEM;
-
-	return 0;
-}
-
 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt_rx_ring_info *rxr, *clone;
@@ -15354,25 +15388,37 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
 		goto err_free_rx_agg_ring;
 	}
 
+	if (bp->flags & BNXT_FLAG_TPA) {
+		rc = bnxt_alloc_one_tpa_info(bp, clone);
+		if (rc)
+			goto err_free_tpa_info;
+	}
+
 	bnxt_init_one_rx_ring_rxbd(bp, clone);
 	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
 
 	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_alloc_one_tpa_info_data(bp, clone);
 
 	return 0;
 
+err_free_tpa_info:
+	bnxt_free_one_tpa_info(bp, clone);
 err_free_rx_agg_ring:
 	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
 err_free_rx_ring:
 	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
 err_rxq_info_unreg:
 	xdp_rxq_info_unreg(&clone->xdp_rxq);
 err_page_pool_destroy:
-	clone->page_pool->p.napi = NULL;
 	page_pool_destroy(clone->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_destroy(clone->head_pool);
 	clone->page_pool = NULL;
+	clone->head_pool = NULL;
 	return rc;
 }
 
@@ -15382,13 +15428,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_ring_struct *ring;
 
-	bnxt_free_one_rx_ring(bp, rxr);
-	bnxt_free_one_rx_agg_ring(bp, rxr);
+	bnxt_free_one_rx_ring_skbs(bp, rxr);
 
 	xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
 	page_pool_destroy(rxr->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_destroy(rxr->head_pool);
 	rxr->page_pool = NULL;
+	rxr->head_pool = NULL;
 
 	ring = &rxr->rx_ring_struct;
 	bnxt_free_ring(bp, &ring->ring_mem);
@@ -15470,7 +15518,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 	rxr->rx_agg_prod = clone->rx_agg_prod;
 	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
 	rxr->rx_next_cons = clone->rx_next_cons;
+	rxr->rx_tpa = clone->rx_tpa;
+	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
 	rxr->page_pool = clone->page_pool;
+	rxr->head_pool = clone->head_pool;
 	rxr->xdp_rxq = clone->xdp_rxq;
 
 	bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15529,6 +15580,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	rxr->rx_next_cons = 0;
 	page_pool_disable_direct_recycling(rxr->page_pool);
+	if (bnxt_separate_head_pool())
+		page_pool_disable_direct_recycling(rxr->head_pool);
 
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);