@@ -356,6 +356,9 @@ struct receive_queue {
356
356
struct xdp_rxq_info xsk_rxq_info ;
357
357
358
358
struct xdp_buff * * xsk_buffs ;
359
+
360
+ /* Do the DMA mapping ourselves (premapped mode) */
361
+ bool do_dma ;
359
362
};
360
363
361
364
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
885
888
void * buf ;
886
889
887
890
buf = virtqueue_get_buf_ctx (rq -> vq , len , ctx );
888
- if (buf )
891
+ if (buf && rq -> do_dma )
889
892
virtnet_rq_unmap (rq , buf , * len );
890
893
891
894
return buf ;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
898
901
u32 offset ;
899
902
void * head ;
900
903
904
+ if (!rq -> do_dma ) {
905
+ sg_init_one (rq -> sg , buf , len );
906
+ return ;
907
+ }
908
+
901
909
head = page_address (rq -> alloc_frag .page );
902
910
903
911
offset = buf - head ;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
923
931
924
932
head = page_address (alloc_frag -> page );
925
933
926
- dma = head ;
934
+ if (rq -> do_dma ) {
935
+ dma = head ;
936
+
937
+ /* new pages */
938
+ if (!alloc_frag -> offset ) {
939
+ if (rq -> last_dma ) {
940
+ /* A new page has just been allocated, so the previous DMA
941
+ * mapping will not be used again; it can be unmapped
942
+ * once its reference count drops to 0.
943
+ */
944
+ virtnet_rq_unmap (rq , rq -> last_dma , 0 );
945
+ rq -> last_dma = NULL ;
946
+ }
927
947
928
- /* new pages */
929
- if (!alloc_frag -> offset ) {
930
- if (rq -> last_dma ) {
931
- /* Now, the new page is allocated, the last dma
932
- * will not be used. So the dma can be unmapped
933
- * if the ref is 0.
934
- */
935
- virtnet_rq_unmap (rq , rq -> last_dma , 0 );
936
- rq -> last_dma = NULL ;
937
- }
948
+ dma -> len = alloc_frag -> size - sizeof (* dma );
938
949
939
- dma -> len = alloc_frag -> size - sizeof (* dma );
950
+ addr = virtqueue_dma_map_single_attrs (rq -> vq , dma + 1 ,
951
+ dma -> len , DMA_FROM_DEVICE , 0 );
952
+ if (virtqueue_dma_mapping_error (rq -> vq , addr ))
953
+ return NULL ;
940
954
941
- addr = virtqueue_dma_map_single_attrs (rq -> vq , dma + 1 ,
942
- dma -> len , DMA_FROM_DEVICE , 0 );
943
- if (virtqueue_dma_mapping_error (rq -> vq , addr ))
944
- return NULL ;
955
+ dma -> addr = addr ;
956
+ dma -> need_sync = virtqueue_dma_need_sync (rq -> vq , addr );
945
957
946
- dma -> addr = addr ;
947
- dma -> need_sync = virtqueue_dma_need_sync (rq -> vq , addr );
958
+ /* Take a reference on dma so the whole mapping cannot be
959
+ * released during error handling; the reference is dropped
960
+ * once the pages are no longer in use.
961
+ */
962
+ get_page (alloc_frag -> page );
963
+ dma -> ref = 1 ;
964
+ alloc_frag -> offset = sizeof (* dma );
948
965
949
- /* Add a reference to dma to prevent the entire dma from
950
- * being released during error handling. This reference
951
- * will be freed after the pages are no longer used.
952
- */
953
- get_page (alloc_frag -> page );
954
- dma -> ref = 1 ;
955
- alloc_frag -> offset = sizeof (* dma );
966
+ rq -> last_dma = dma ;
967
+ }
956
968
957
- rq -> last_dma = dma ;
969
+ ++ dma -> ref ;
958
970
}
959
971
960
- ++ dma -> ref ;
961
-
962
972
buf = head + alloc_frag -> offset ;
963
973
964
974
get_page (alloc_frag -> page );
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
967
977
return buf ;
968
978
}
969
979
970
- static void virtnet_rq_set_premapped (struct virtnet_info * vi )
971
- {
972
- int i ;
973
-
974
- /* disable for big mode */
975
- if (!vi -> mergeable_rx_bufs && vi -> big_packets )
976
- return ;
977
-
978
- for (i = 0 ; i < vi -> max_queue_pairs ; i ++ )
979
- /* error should never happen */
980
- BUG_ON (virtqueue_set_dma_premapped (vi -> rq [i ].vq ));
981
- }
982
-
983
980
static void virtnet_rq_unmap_free_buf (struct virtqueue * vq , void * buf )
984
981
{
985
982
struct virtnet_info * vi = vq -> vdev -> priv ;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
993
990
return ;
994
991
}
995
992
996
- if (! vi -> big_packets || vi -> mergeable_rx_bufs )
993
+ if (rq -> do_dma )
997
994
virtnet_rq_unmap (rq , buf , 0 );
998
995
999
996
virtnet_rq_free_buf (vi , rq , buf );
@@ -2430,7 +2427,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2430
2427
2431
2428
err = virtqueue_add_inbuf_ctx (rq -> vq , rq -> sg , 1 , buf , ctx , gfp );
2432
2429
if (err < 0 ) {
2433
- virtnet_rq_unmap (rq , buf , 0 );
2430
+ if (rq -> do_dma )
2431
+ virtnet_rq_unmap (rq , buf , 0 );
2434
2432
put_page (virt_to_head_page (buf ));
2435
2433
}
2436
2434
@@ -2544,7 +2542,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
2544
2542
ctx = mergeable_len_to_ctx (len + room , headroom );
2545
2543
err = virtqueue_add_inbuf_ctx (rq -> vq , rq -> sg , 1 , buf , ctx , gfp );
2546
2544
if (err < 0 ) {
2547
- virtnet_rq_unmap (rq , buf , 0 );
2545
+ if (rq -> do_dma )
2546
+ virtnet_rq_unmap (rq , buf , 0 );
2548
2547
put_page (virt_to_head_page (buf ));
2549
2548
}
2550
2549
@@ -2701,7 +2700,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
2701
2700
}
2702
2701
} else {
2703
2702
while (packets < budget &&
2704
- (buf = virtqueue_get_buf (rq -> vq , & len )) != NULL ) {
2703
+ (buf = virtnet_rq_get_buf (rq , & len , NULL )) != NULL ) {
2705
2704
receive_buf (vi , rq , buf , len , NULL , xdp_xmit , stats );
2706
2705
packets ++ ;
2707
2706
}
@@ -5892,7 +5891,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
5892
5891
int i ;
5893
5892
for (i = 0 ; i < vi -> max_queue_pairs ; i ++ )
5894
5893
if (vi -> rq [i ].alloc_frag .page ) {
5895
- if (vi -> rq [i ].last_dma )
5894
+ if (vi -> rq [i ].do_dma && vi -> rq [ i ]. last_dma )
5896
5895
virtnet_rq_unmap (& vi -> rq [i ], vi -> rq [i ].last_dma , 0 );
5897
5896
put_page (vi -> rq [i ].alloc_frag .page );
5898
5897
}
@@ -6090,8 +6089,6 @@ static int init_vqs(struct virtnet_info *vi)
6090
6089
if (ret )
6091
6090
goto err_free ;
6092
6091
6093
- virtnet_rq_set_premapped (vi );
6094
-
6095
6092
cpus_read_lock ();
6096
6093
virtnet_set_affinity (vi );
6097
6094
cpus_read_unlock ();
0 commit comments