@@ -1821,6 +1821,49 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1821
1821
return NULL ;
1822
1822
}
1823
1823
1824
/* Append one mergeable-rx buffer (@buf, living inside @page, @len bytes,
 * @truesize bytes of true memory) as a page fragment onto the skb chain
 * being assembled for @head_skb.
 *
 * @head_skb: the head of the packet being built.
 * @curr_skb: the skb currently receiving fragments; either @head_skb itself
 *            or an skb already linked off its frag_list.
 *
 * Returns the skb that should receive the next fragment (may differ from
 * @curr_skb if a new continuation skb was started), or NULL if allocating a
 * continuation skb failed.  On NULL the page reference for @page has NOT
 * been consumed — presumably the caller's error path drops it; verify at
 * the call site.
 */
static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
					       struct sk_buff *curr_skb,
					       struct page *page, void *buf,
					       int len, int truesize)
{
	int num_skb_frags;
	int offset;

	num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
	if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
		/* Current skb's frag slots are full: start a zero-length
		 * continuation skb to hold further fragments.
		 */
		struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

		if (unlikely(!nskb))
			return NULL;

		/* First continuation hangs off head_skb's frag_list; later
		 * ones are chained through ->next of the previous one.
		 */
		if (curr_skb == head_skb)
			skb_shinfo(curr_skb)->frag_list = nskb;
		else
			curr_skb->next = nskb;
		curr_skb = nskb;
		/* head_skb->truesize accounts for the whole chain. */
		head_skb->truesize += nskb->truesize;
		num_skb_frags = 0;
	}

	/* Fragments landing on a continuation skb must still be reflected
	 * in the head skb's length/truesize totals.  (When curr_skb ==
	 * head_skb, skb_add_rx_frag()/skb_coalesce_rx_frag() below update
	 * these on head_skb directly.)
	 */
	if (curr_skb != head_skb) {
		head_skb->data_len += len;
		head_skb->len += len;
		head_skb->truesize += truesize;
	}

	offset = buf - page_address(page);
	if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
		/* New data is contiguous with the last frag on the same
		 * page: grow that frag instead of consuming a new slot, and
		 * drop the extra page reference we would have stored.
		 */
		put_page(page);
		skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
				     len, truesize);
	} else {
		skb_add_rx_frag(curr_skb, num_skb_frags, page,
				offset, len, truesize);
	}

	return curr_skb;
}
1866
+
1824
1867
static struct sk_buff * receive_mergeable (struct net_device * dev ,
1825
1868
struct virtnet_info * vi ,
1826
1869
struct receive_queue * rq ,
@@ -1870,8 +1913,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1870
1913
if (unlikely (!curr_skb ))
1871
1914
goto err_skb ;
1872
1915
while (-- num_buf ) {
1873
- int num_skb_frags ;
1874
-
1875
1916
buf = virtnet_rq_get_buf (rq , & len , & ctx );
1876
1917
if (unlikely (!buf )) {
1877
1918
pr_debug ("%s: rx error: %d buffers out of %d missing\n" ,
@@ -1896,34 +1937,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1896
1937
goto err_skb ;
1897
1938
}
1898
1939
1899
- num_skb_frags = skb_shinfo (curr_skb )-> nr_frags ;
1900
- if (unlikely (num_skb_frags == MAX_SKB_FRAGS )) {
1901
- struct sk_buff * nskb = alloc_skb (0 , GFP_ATOMIC );
1902
-
1903
- if (unlikely (!nskb ))
1904
- goto err_skb ;
1905
- if (curr_skb == head_skb )
1906
- skb_shinfo (curr_skb )-> frag_list = nskb ;
1907
- else
1908
- curr_skb -> next = nskb ;
1909
- curr_skb = nskb ;
1910
- head_skb -> truesize += nskb -> truesize ;
1911
- num_skb_frags = 0 ;
1912
- }
1913
- if (curr_skb != head_skb ) {
1914
- head_skb -> data_len += len ;
1915
- head_skb -> len += len ;
1916
- head_skb -> truesize += truesize ;
1917
- }
1918
- offset = buf - page_address (page );
1919
- if (skb_can_coalesce (curr_skb , num_skb_frags , page , offset )) {
1920
- put_page (page );
1921
- skb_coalesce_rx_frag (curr_skb , num_skb_frags - 1 ,
1922
- len , truesize );
1923
- } else {
1924
- skb_add_rx_frag (curr_skb , num_skb_frags , page ,
1925
- offset , len , truesize );
1926
- }
1940
+ curr_skb = virtnet_skb_append_frag (head_skb , curr_skb , page ,
1941
+ buf , len , truesize );
1942
+ if (!curr_skb )
1943
+ goto err_skb ;
1927
1944
}
1928
1945
1929
1946
ewma_pkt_len_add (& rq -> mrg_avg_pkt_len , head_skb -> len );
0 commit comments