@@ -1104,7 +1104,7 @@ static bool tx_may_stop(struct virtnet_info *vi,
 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
 	 * early means 16 slots are typically wasted.
 	 */
-	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
+	if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
 
 		netif_tx_stop_queue(txq);
@@ -1136,7 +1136,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
 		free_old_xmit(sq, txq, false);
-		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+		if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 			netif_start_subqueue(dev, qnum);
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.wake);
@@ -3021,7 +3021,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
 		free_old_xmit(sq, txq, !!budget);
 	} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
-	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 		if (netif_tx_queue_stopped(txq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.wake);
@@ -3218,7 +3218,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	else
 		free_old_xmit(sq, txq, !!budget);
 
-	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
 		if (netif_tx_queue_stopped(txq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			u64_stats_inc(&sq->stats.wake);
0 commit comments