@@ -8353,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
 	}
 }
 
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
-				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_build_del_list(struct list_head *list,
+				 bool is_del_list,
+				 struct list_head *tmp_del_list)
 {
-	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
 	struct hclge_mac_node *mac_cfg, *tmp;
-	struct hclge_dev *hdev = vport->back;
-	struct list_head tmp_del_list, *list;
-	int ret;
-
-	if (mac_type == HCLGE_MAC_ADDR_UC) {
-		list = &vport->uc_mac_list;
-		unsync = hclge_rm_uc_addr_common;
-	} else {
-		list = &vport->mc_mac_list;
-		unsync = hclge_rm_mc_addr_common;
-	}
-
-	INIT_LIST_HEAD(&tmp_del_list);
-
-	if (!is_del_list)
-		set_bit(vport->vport_id, hdev->vport_config_block);
-
-	spin_lock_bh(&vport->mac_list_lock);
 
 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
 		switch (mac_cfg->state) {
 		case HCLGE_MAC_TO_DEL:
 		case HCLGE_MAC_ACTIVE:
 			list_del(&mac_cfg->node);
-			list_add_tail(&mac_cfg->node, &tmp_del_list);
+			list_add_tail(&mac_cfg->node, tmp_del_list);
 			break;
 		case HCLGE_MAC_TO_ADD:
 			if (is_del_list) {
@@ -8392,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 			break;
 		}
 	}
+}
 
-	spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+				  int (*unsync)(struct hclge_vport *vport,
+						const unsigned char *addr),
+				  bool is_del_list,
+				  struct list_head *tmp_del_list)
+{
+	struct hclge_mac_node *mac_cfg, *tmp;
+	int ret;
 
-	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
 		ret = unsync(vport, mac_cfg->mac_addr);
 		if (!ret || ret == -ENOENT) {
 			/* clear all mac addr from hardware, but remain these
@@ -8413,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 			mac_cfg->state = HCLGE_MAC_TO_DEL;
 		}
 	}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		list = &vport->uc_mac_list;
+		unsync = hclge_rm_uc_addr_common;
+	} else {
+		list = &vport->mc_mac_list;
+		unsync = hclge_rm_mc_addr_common;
+	}
+
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	if (!is_del_list)
+		set_bit(vport->vport_id, hdev->vport_config_block);
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
 
 	spin_lock_bh(&vport->mac_list_lock);
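For context, the split above isolates a common pattern: under the list lock, entries are only moved onto a private deletion list (hclge_build_del_list); the firmware-facing unsync calls then run after the lock is dropped (hclge_unsync_del_list). The following is a minimal userspace sketch of that collect-under-lock / process-outside-lock idea, not driver code: the mac_node type, the pthread mutex, and the unsync() stub are illustrative stand-ins for the driver's mac_list_lock, struct hclge_mac_node, and hclge_rm_uc_addr_common()/hclge_rm_mc_addr_common().

/* Sketch only: collect entries under a lock, process them unlocked. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac_node {
	struct mac_node *next;
	char addr[18];		/* "xx:xx:xx:xx:xx:xx" + NUL */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mac_node *mac_list;	/* protected by list_lock */

/* Stand-in for the driver's per-address unsync callback. */
static int unsync(const char *addr)
{
	printf("unsync %s\n", addr);
	return 0;
}

static void rm_all_mac(void)
{
	struct mac_node *del_list, *node;

	/* Step 1: splice everything onto a private list while locked. */
	pthread_mutex_lock(&list_lock);
	del_list = mac_list;
	mac_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Step 2: do the slow per-entry work without holding the lock. */
	while (del_list) {
		node = del_list;
		del_list = node->next;
		unsync(node->addr);
		free(node);
	}
}

int main(void)
{
	static const char *addrs[] = { "00:11:22:33:44:55", "66:77:88:99:aa:bb" };
	for (unsigned int i = 0; i < 2; i++) {
		struct mac_node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		strcpy(n->addr, addrs[i]);
		n->next = mac_list;
		mac_list = n;
	}
	rm_all_mac();
	return 0;
}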