@@ -2703,12 +2703,11 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
 	return err;
 }
 
-static int virtnet_tx_resize(struct virtnet_info *vi,
-			     struct send_queue *sq, u32 ring_num)
+static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
 {
 	bool running = netif_running(vi->dev);
 	struct netdev_queue *txq;
-	int err, qindex;
+	int qindex;
 
 	qindex = sq - vi->sq;
 
@@ -2729,10 +2728,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
 	netif_stop_subqueue(vi->dev, qindex);
 
 	__netif_tx_unlock_bh(txq);
+}
 
-	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
-	if (err)
-		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
+{
+	bool running = netif_running(vi->dev);
+	struct netdev_queue *txq;
+	int qindex;
+
+	qindex = sq - vi->sq;
+
+	txq = netdev_get_tx_queue(vi->dev, qindex);
 
 	__netif_tx_lock_bh(txq);
 	sq->reset = false;
@@ -2741,6 +2747,23 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
 
 	if (running)
 		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+			     u32 ring_num)
+{
+	int qindex, err;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+	if (err)
+		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+	virtnet_tx_resume(vi, sq);
+
 	return err;
 }
 
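
The patch is a pure refactor: virtnet_tx_resize() keeps its behaviour but now delegates the quiesce and unquiesce steps to the new virtnet_tx_pause() and virtnet_tx_resume() helpers. A minimal sketch of how another per-queue operation could reuse that pair in the same way, assuming the caller below and do_per_queue_work() are hypothetical placeholders (only virtnet_tx_pause(), virtnet_tx_resume(), and the virtio_net types come from the patch):

/*
 * Hypothetical caller sketch -- not part of this patch. Any operation
 * that needs the TX queue quiesced can wrap its work the same way the
 * reworked virtnet_tx_resize() does: pause, operate on the queue, resume.
 */
static int virtnet_sq_quiesced_op(struct virtnet_info *vi,
				  struct send_queue *sq)
{
	int err;

	virtnet_tx_pause(vi, sq);	/* set sq->reset, stop the subqueue */
	err = do_per_queue_work(sq);	/* placeholder, not a real kernel API */
	virtnet_tx_resume(vi, sq);	/* clear sq->reset, restart the queue */

	return err;
}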