@@ -4834,7 +4834,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4834
4834
INIT_HLIST_HEAD (& bp -> ntp_fltr_hash_tbl [i ]);
4835
4835
4836
4836
bp -> ntp_fltr_count = 0 ;
4837
- bp -> ntp_fltr_bmap = bitmap_zalloc (BNXT_NTP_FLTR_MAX_FLTR , GFP_KERNEL );
4837
+ bp -> ntp_fltr_bmap = bitmap_zalloc (BNXT_MAX_FLTR , GFP_KERNEL );
4838
4838
4839
4839
if (!bp -> ntp_fltr_bmap )
4840
4840
rc = - ENOMEM ;
@@ -5396,6 +5396,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
5396
5396
ether_addr_copy (fltr -> l2_key .dst_mac_addr , key -> dst_mac_addr );
5397
5397
fltr -> l2_key .vlan = key -> vlan ;
5398
5398
fltr -> base .type = BNXT_FLTR_TYPE_L2 ;
5399
+ if (fltr -> base .flags ) {
5400
+ int bit_id ;
5401
+
5402
+ bit_id = bitmap_find_free_region (bp -> ntp_fltr_bmap ,
5403
+ BNXT_MAX_FLTR , 0 );
5404
+ if (bit_id < 0 )
5405
+ return - ENOMEM ;
5406
+ fltr -> base .sw_id = (u16 )bit_id ;
5407
+ }
5399
5408
head = & bp -> l2_fltr_hash_tbl [idx ];
5400
5409
hlist_add_head_rcu (& fltr -> base .hash , head );
5401
5410
atomic_set (& fltr -> refcnt , 1 );
@@ -5429,6 +5438,96 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
5429
5438
return fltr ;
5430
5439
}
5431
5440
5441
+ static u16 bnxt_vf_target_id (struct bnxt_pf_info * pf , u16 vf_idx )
5442
+ {
5443
+ #ifdef CONFIG_BNXT_SRIOV
5444
+ struct bnxt_vf_info * vf = & pf -> vf [vf_idx ];
5445
+
5446
+ return vf -> fw_fid ;
5447
+ #else
5448
+ return INVALID_HW_RING_ID ;
5449
+ #endif
5450
+ }
5451
+
5452
/* Free an L2 filter in firmware.
 *
 * For a filter that acts on a VF (BNXT_ACT_FUNC_DST), the request is
 * targeted at that VF's firmware function ID; otherwise target_id is
 * left at 0xffff (no explicit function target).
 *
 * Returns 0 on success, -EINVAL for an invalid VF index or missing
 * SR-IOV support, or the error from the HWRM request.
 */
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
{
	struct hwrm_cfa_l2_filter_free_input *req;
	u16 target_id = 0xffff;
	int rc;

	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
		struct bnxt_pf_info *pf = &bp->pf;

		/* Reject stale or out-of-range VF indices before talking
		 * to firmware.
		 */
		if (fltr->base.vf_idx >= pf->active_vfs)
			return -EINVAL;

		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
		/* INVALID_HW_RING_ID means SR-IOV is compiled out. */
		if (target_id == INVALID_HW_RING_ID)
			return -EINVAL;
	}

	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;

	req->target_id = cpu_to_le16(target_id);
	req->l2_filter_id = fltr->base.filter_id;
	return hwrm_req_send(bp, req);
}
5477
+
5478
+ int bnxt_hwrm_l2_filter_alloc (struct bnxt * bp , struct bnxt_l2_filter * fltr )
5479
+ {
5480
+ struct hwrm_cfa_l2_filter_alloc_output * resp ;
5481
+ struct hwrm_cfa_l2_filter_alloc_input * req ;
5482
+ u16 target_id = 0xffff ;
5483
+ int rc ;
5484
+
5485
+ if (fltr -> base .flags & BNXT_ACT_FUNC_DST ) {
5486
+ struct bnxt_pf_info * pf = & bp -> pf ;
5487
+
5488
+ if (fltr -> base .vf_idx >= pf -> active_vfs )
5489
+ return - EINVAL ;
5490
+
5491
+ target_id = bnxt_vf_target_id (pf , fltr -> base .vf_idx );
5492
+ }
5493
+ rc = hwrm_req_init (bp , req , HWRM_CFA_L2_FILTER_ALLOC );
5494
+ if (rc )
5495
+ return rc ;
5496
+
5497
+ req -> target_id = cpu_to_le16 (target_id );
5498
+ req -> flags = cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX );
5499
+
5500
+ if (!BNXT_CHIP_TYPE_NITRO_A0 (bp ))
5501
+ req -> flags |=
5502
+ cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST );
5503
+ req -> dst_id = cpu_to_le16 (fltr -> base .fw_vnic_id );
5504
+ req -> enables =
5505
+ cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5506
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5507
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK );
5508
+ ether_addr_copy (req -> l2_addr , fltr -> l2_key .dst_mac_addr );
5509
+ eth_broadcast_addr (req -> l2_addr_mask );
5510
+
5511
+ if (fltr -> l2_key .vlan ) {
5512
+ req -> enables |=
5513
+ cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
5514
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
5515
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS );
5516
+ req -> num_vlans = 1 ;
5517
+ req -> l2_ivlan = cpu_to_le16 (fltr -> l2_key .vlan );
5518
+ req -> l2_ivlan_mask = cpu_to_le16 (0xfff );
5519
+ }
5520
+
5521
+ resp = hwrm_req_hold (bp , req );
5522
+ rc = hwrm_req_send (bp , req );
5523
+ if (!rc ) {
5524
+ fltr -> base .filter_id = resp -> l2_filter_id ;
5525
+ set_bit (BNXT_FLTR_VALID , & fltr -> base .state );
5526
+ }
5527
+ hwrm_req_drop (bp , req );
5528
+ return rc ;
5529
+ }
5530
+
5432
5531
#ifdef CONFIG_RFS_ACCEL
5433
5532
static int bnxt_hwrm_cfa_ntuple_filter_free (struct bnxt * bp ,
5434
5533
struct bnxt_ntuple_filter * fltr )
@@ -5538,8 +5637,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5538
5637
static int bnxt_hwrm_set_vnic_filter (struct bnxt * bp , u16 vnic_id , u16 idx ,
5539
5638
const u8 * mac_addr )
5540
5639
{
5541
- struct hwrm_cfa_l2_filter_alloc_output * resp ;
5542
- struct hwrm_cfa_l2_filter_alloc_input * req ;
5543
5640
struct bnxt_l2_filter * fltr ;
5544
5641
struct bnxt_l2_key key ;
5545
5642
int rc ;
@@ -5550,66 +5647,33 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5550
5647
if (IS_ERR (fltr ))
5551
5648
return PTR_ERR (fltr );
5552
5649
5553
- rc = hwrm_req_init (bp , req , HWRM_CFA_L2_FILTER_ALLOC );
5650
+ fltr -> base .fw_vnic_id = bp -> vnic_info [vnic_id ].fw_vnic_id ;
5651
+ rc = bnxt_hwrm_l2_filter_alloc (bp , fltr );
5554
5652
if (rc )
5555
- return rc ;
5556
-
5557
- req -> flags = cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX );
5558
- if (!BNXT_CHIP_TYPE_NITRO_A0 (bp ))
5559
- req -> flags |=
5560
- cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST );
5561
- req -> dst_id = cpu_to_le16 (bp -> vnic_info [vnic_id ].fw_vnic_id );
5562
- req -> enables =
5563
- cpu_to_le32 (CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5564
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5565
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK );
5566
- memcpy (req -> l2_addr , mac_addr , ETH_ALEN );
5567
- req -> l2_addr_mask [0 ] = 0xff ;
5568
- req -> l2_addr_mask [1 ] = 0xff ;
5569
- req -> l2_addr_mask [2 ] = 0xff ;
5570
- req -> l2_addr_mask [3 ] = 0xff ;
5571
- req -> l2_addr_mask [4 ] = 0xff ;
5572
- req -> l2_addr_mask [5 ] = 0xff ;
5573
-
5574
- resp = hwrm_req_hold (bp , req );
5575
- rc = hwrm_req_send (bp , req );
5576
- if (rc ) {
5577
5653
bnxt_del_l2_filter (bp , fltr );
5578
- } else {
5579
- fltr -> base .filter_id = resp -> l2_filter_id ;
5580
- set_bit (BNXT_FLTR_VALID , & fltr -> base .state );
5654
+ else
5581
5655
bp -> vnic_info [vnic_id ].l2_filters [idx ] = fltr ;
5582
- }
5583
- hwrm_req_drop (bp , req );
5584
5656
return rc ;
5585
5657
}
5586
5658
5587
5659
static int bnxt_hwrm_clear_vnic_filter (struct bnxt * bp )
5588
5660
{
5589
- struct hwrm_cfa_l2_filter_free_input * req ;
5590
5661
u16 i , j , num_of_vnics = 1 ; /* only vnic 0 supported */
5591
- int rc ;
5662
+ int rc = 0 ;
5592
5663
5593
5664
/* Any associated ntuple filters will also be cleared by firmware. */
5594
- rc = hwrm_req_init (bp , req , HWRM_CFA_L2_FILTER_FREE );
5595
- if (rc )
5596
- return rc ;
5597
- hwrm_req_hold (bp , req );
5598
5665
for (i = 0 ; i < num_of_vnics ; i ++ ) {
5599
5666
struct bnxt_vnic_info * vnic = & bp -> vnic_info [i ];
5600
5667
5601
5668
for (j = 0 ; j < vnic -> uc_filter_count ; j ++ ) {
5602
- struct bnxt_l2_filter * fltr ;
5603
-
5604
- fltr = vnic -> l2_filters [j ];
5605
- req -> l2_filter_id = fltr -> base .filter_id ;
5669
+ struct bnxt_l2_filter * fltr = vnic -> l2_filters [j ];
5606
5670
5607
- rc = hwrm_req_send (bp , req );
5671
+ bnxt_hwrm_l2_filter_free (bp , fltr );
5608
5672
bnxt_del_l2_filter (bp , fltr );
5609
5673
}
5610
5674
vnic -> uc_filter_count = 0 ;
5611
5675
}
5612
- hwrm_req_drop ( bp , req );
5676
+
5613
5677
return rc ;
5614
5678
}
5615
5679
@@ -11898,7 +11962,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
11898
11962
{
11899
11963
struct net_device * dev = bp -> dev ;
11900
11964
struct bnxt_vnic_info * vnic = & bp -> vnic_info [0 ];
11901
- struct hwrm_cfa_l2_filter_free_input * req ;
11902
11965
struct netdev_hw_addr * ha ;
11903
11966
int i , off = 0 , rc ;
11904
11967
bool uc_update ;
@@ -11910,19 +11973,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
11910
11973
if (!uc_update )
11911
11974
goto skip_uc ;
11912
11975
11913
- rc = hwrm_req_init (bp , req , HWRM_CFA_L2_FILTER_FREE );
11914
- if (rc )
11915
- return rc ;
11916
- hwrm_req_hold (bp , req );
11917
11976
for (i = 1 ; i < vnic -> uc_filter_count ; i ++ ) {
11918
11977
struct bnxt_l2_filter * fltr = vnic -> l2_filters [i ];
11919
11978
11920
- req -> l2_filter_id = fltr -> base .filter_id ;
11921
-
11922
- rc = hwrm_req_send (bp , req );
11979
+ bnxt_hwrm_l2_filter_free (bp , fltr );
11923
11980
bnxt_del_l2_filter (bp , fltr );
11924
11981
}
11925
- hwrm_req_drop (bp , req );
11926
11982
11927
11983
vnic -> uc_filter_count = 1 ;
11928
11984
@@ -13823,8 +13879,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
13823
13879
rcu_read_unlock ();
13824
13880
13825
13881
spin_lock_bh (& bp -> ntp_fltr_lock );
13826
- bit_id = bitmap_find_free_region (bp -> ntp_fltr_bmap ,
13827
- BNXT_NTP_FLTR_MAX_FLTR , 0 );
13882
+ bit_id = bitmap_find_free_region (bp -> ntp_fltr_bmap , BNXT_MAX_FLTR , 0 );
13828
13883
if (bit_id < 0 ) {
13829
13884
spin_unlock_bh (& bp -> ntp_fltr_lock );
13830
13885
rc = - ENOMEM ;
0 commit comments