@@ -1287,30 +1287,25 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
 }
 
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
-				  struct sk_buff **out_skb)
+				  struct net_device *netdev,
+				  struct sk_buff *skb)
 {
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
-	struct sk_buff *skb = *out_skb;
 	unsigned int bd_num;
 
 	bd_num = hns3_tx_bd_num(skb, bd_size);
 	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
-		struct sk_buff *new_skb;
-
 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
 		    !hns3_skb_need_linearized(skb, bd_size, bd_num))
 			goto out;
 
-		/* manual split the send packet */
-		new_skb = skb_copy(skb, GFP_ATOMIC);
-		if (!new_skb)
+		if (__skb_linearize(skb))
 			return -ENOMEM;
-		dev_kfree_skb_any(skb);
-		*out_skb = new_skb;
 
-		bd_num = hns3_tx_bd_count(new_skb->len);
-		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
-		    (!skb_is_gso(new_skb) &&
+		bd_num = hns3_tx_bd_count(skb->len);
+		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+		    (!skb_is_gso(skb) &&
 		     bd_num > HNS3_MAX_NON_TSO_BD_NUM))
 			return -ENOMEM;
 
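For context on the hunk above: __skb_linearize() pulls an skb's paged fragments into its linear data area in place and returns -ENOMEM on allocation failure, so the caller keeps working with the same skb and no longer needs the **out_skb double pointer that the skb_copy() approach required. A minimal, hypothetical sketch of the usage pattern (my_bd_count() and MY_MAX_BD are illustrative stand-ins, not hns3 symbols):

	/* Sketch only: generic __skb_linearize() usage when a packet would
	 * span too many hardware descriptors; not the hns3 implementation.
	 */
	if (my_bd_count(skb) > MY_MAX_BD) {
		if (__skb_linearize(skb))
			return -ENOMEM;	/* no memory for the linear copy */

		/* a linearized skb may still exceed the limit, so re-check */
		if (my_bd_count(skb) > MY_MAX_BD)
			return -ENOMEM;
	}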
@@ -1320,10 +1315,23 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 	}
 
 out:
-	if (unlikely(ring_space(ring) < bd_num))
-		return -EBUSY;
+	if (likely(ring_space(ring) >= bd_num))
+		return bd_num;
 
-	return bd_num;
+	netif_stop_subqueue(netdev, ring->queue_index);
+	smp_mb(); /* Memory barrier before checking ring_space */
+
+	/* Start queue in case hns3_clean_tx_ring has just made room
+	 * available and has not seen the queue stopped state performed
+	 * by netif_stop_subqueue above.
+	 */
+	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
+	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+		netif_start_subqueue(netdev, ring->queue_index);
+		return bd_num;
+	}
+
+	return -EBUSY;
 }
 
 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
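The new tail of hns3_nic_maybe_stop_tx() is the standard producer-side flow-control pattern: check for space, stop the subqueue, issue a full barrier, then re-check, so a completion that frees descriptors between the first check and the stop cannot leave the queue stopped while room is available. A stripped-down sketch of that pattern, assuming a hypothetical my_ring_space() helper, a "needed" descriptor count, and qid for the ring's queue index (none of these are hns3 symbols):

	/* Producer (xmit) side, sketch only. */
	if (likely(my_ring_space(ring) >= needed))
		return needed;				/* fast path: enough room */

	netif_stop_subqueue(netdev, qid);		/* mark the queue stopped */
	smp_mb();					/* stop must be visible before the re-read */

	if (my_ring_space(ring) >= needed) {
		/* the clean path raced with us and freed descriptors */
		netif_start_subqueue(netdev, qid);
		return needed;
	}

	return -EBUSY;	/* genuinely full: caller returns NETDEV_TX_BUSY */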
@@ -1400,13 +1408,13 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	ret = hns3_nic_maybe_stop_tx(ring, &skb);
+	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
 	if (unlikely(ret <= 0)) {
 		if (ret == -EBUSY) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_busy++;
 			u64_stats_update_end(&ring->syncp);
-			goto out_net_tx_busy;
+			return NETDEV_TX_BUSY;
 		} else if (ret == -ENOMEM) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.sw_err_cnt++;
@@ -1457,12 +1465,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
-
-out_net_tx_busy:
-	netif_stop_subqueue(netdev, ring->queue_index);
-	smp_mb(); /* Commit all data before submit */
-
-	return NETDEV_TX_BUSY;
 }
 
 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
@@ -2519,7 +2521,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
-	if (unlikely(pkts && netif_carrier_ok(netdev) &&
+	if (unlikely(netif_carrier_ok(netdev) &&
 		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
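This hunk is the consumer half of the same handshake: after reclaiming descriptors, hns3_clean_tx_ring() checks for room and, as the visible comment suggests, the code that follows this context pairs a barrier with a queue wake. Dropping the "pkts &&" condition lets that wake check run even on a completion round that reclaimed no packets. A hypothetical sketch of the wake side (my_ring_space(), wake_thresh and my_resetting() are stand-ins, not hns3 symbols):

	/* Completion (tx clean) side, sketch only; pairs with the stop
	 * performed in the xmit path.
	 */
	if (netif_carrier_ok(netdev) && my_ring_space(ring) > wake_thresh) {
		/* make the freed descriptors visible before testing the
		 * stopped bit set by the xmit path
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) && !my_resetting(priv))
			netif_tx_wake_queue(dev_queue);
	}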