@@ -356,6 +356,9 @@ struct receive_queue {
 	struct xdp_rxq_info xsk_rxq_info;
 
 	struct xdp_buff **xsk_buffs;
+
+	/* Do dma by self */
+	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	void *buf;
 
 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-	if (buf)
+	if (buf && rq->do_dma)
 		virtnet_rq_unmap(rq, buf, *len);
 
 	return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	u32 offset;
 	void *head;
 
+	if (!rq->do_dma) {
+		sg_init_one(rq->sg, buf, len);
+		return;
+	}
+
 	head = page_address(rq->alloc_frag.page);
 
 	offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
 	head = page_address(alloc_frag->page);
 
-	dma = head;
+	if (rq->do_dma) {
+		dma = head;
+
+		/* new pages */
+		if (!alloc_frag->offset) {
+			if (rq->last_dma) {
+				/* Now, the new page is allocated, the last dma
+				 * will not be used. So the dma can be unmapped
+				 * if the ref is 0.
+				 */
+				virtnet_rq_unmap(rq, rq->last_dma, 0);
+				rq->last_dma = NULL;
+			}
 
-	/* new pages */
-	if (!alloc_frag->offset) {
-		if (rq->last_dma) {
-			/* Now, the new page is allocated, the last dma
-			 * will not be used. So the dma can be unmapped
-			 * if the ref is 0.
-			 */
-			virtnet_rq_unmap(rq, rq->last_dma, 0);
-			rq->last_dma = NULL;
-		}
+			dma->len = alloc_frag->size - sizeof(*dma);
 
-		dma->len = alloc_frag->size - sizeof(*dma);
+			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+							      dma->len, DMA_FROM_DEVICE, 0);
+			if (virtqueue_dma_mapping_error(rq->vq, addr))
+				return NULL;
 
-		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-						      dma->len, DMA_FROM_DEVICE, 0);
-		if (virtqueue_dma_mapping_error(rq->vq, addr))
-			return NULL;
+			dma->addr = addr;
+			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-		dma->addr = addr;
-		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+			/* Add a reference to dma to prevent the entire dma from
+			 * being released during error handling. This reference
+			 * will be freed after the pages are no longer used.
+			 */
+			get_page(alloc_frag->page);
+			dma->ref = 1;
+			alloc_frag->offset = sizeof(*dma);
 
-		/* Add a reference to dma to prevent the entire dma from
-		 * being released during error handling. This reference
-		 * will be freed after the pages are no longer used.
-		 */
-		get_page(alloc_frag->page);
-		dma->ref = 1;
-		alloc_frag->offset = sizeof(*dma);
+			rq->last_dma = dma;
+		}
 
-		rq->last_dma = dma;
+		++dma->ref;
 	}
 
-	++dma->ref;
-
 	buf = head + alloc_frag->offset;
 
 	get_page(alloc_frag->page);
@@ -975,9 +985,12 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	if (!vi->mergeable_rx_bufs && vi->big_packets)
 		return;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		/* error should never happen */
-		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+			continue;
+
+		vi->rq[i].do_dma = true;
+	}
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -2430,7 +2443,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		virtnet_rq_unmap(rq, buf, 0);
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2544,7 +2558,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		virtnet_rq_unmap(rq, buf, 0);
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -5892,7 +5907,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
 		if (vi->rq[i].alloc_frag.page) {
-			if (vi->rq[i].last_dma)
+			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
 		}
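
The hunks above all serve one change: virtnet_rq_set_premapped() no longer BUG()s when virtqueue_set_dma_premapped() refuses a queue; each receive queue instead records in do_dma whether the driver maps receive buffers itself, and the map/unmap paths consult that flag. Below is a minimal userspace sketch of just that per-queue fallback, assuming made-up stand-ins (rq_model, set_dma_premapped_stub) rather than the real virtio API; only the control flow mirrors the patch.

/*
 * Sketch of the per-queue premapped-DMA opt-in from the diff above.
 * Everything here is a stand-in; only the decision logic is the point.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq_model {
	bool do_dma;			/* mirrors receive_queue::do_dma */
};

/* Stand-in for virtqueue_set_dma_premapped(): 0 on success, nonzero on failure. */
static int set_dma_premapped_stub(int qidx)
{
	return (qidx == 2) ? -1 : 0;	/* pretend queue 2 cannot switch modes */
}

/* Mirrors the reworked virtnet_rq_set_premapped(): no BUG_ON(), just skip. */
static void set_premapped(struct rq_model *rq, int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		if (set_dma_premapped_stub(i))
			continue;	/* fall back: virtio core keeps doing the DMA */

		rq[i].do_dma = true;	/* driver maps/unmaps buffers for this queue */
	}
}

int main(void)
{
	struct rq_model rq[4] = { { false } };

	set_premapped(rq, 4);

	for (int i = 0; i < 4; i++)
		printf("queue %d: %s\n", i,
		       rq[i].do_dma ? "driver-managed DMA" : "core-managed DMA");
	return 0;
}

Every later check in the diff (virtnet_rq_get_buf(), virtnet_rq_init_one_sg(), the add_recvbuf_*() error paths, free_receive_page_frags()) is the read side of this flag: unmapping and the other premapped-specific handling happen only when do_dma is set.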
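
When do_dma is set, virtnet_rq_alloc() in the fourth hunk places a small DMA descriptor at the head of every fresh page fragment, maps the remainder of the page once, and then bumps a reference count on that descriptor for each buffer carved out of the page, so the mapping can be torn down only after the last buffer is gone. The program below is a rough userspace model of that layout and refcounting only, with invented types (dma_meta, frag) and a faked mapping address in place of virtqueue_dma_map_single_attrs().

/*
 * Userspace model of the "descriptor at the head of the page" layout used by
 * virtnet_rq_alloc() when rq->do_dma is set.  Types and values are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_meta {			/* stand-in for the driver's per-page DMA record */
	uint64_t addr;
	uint32_t len;
	int ref;
};

struct frag {				/* stand-in for the page_frag allocator state */
	void *page;
	size_t offset;			/* next free byte; 0 means "fresh page" */
	size_t size;
};

static void *frag_alloc(struct frag *f, size_t size)
{
	struct dma_meta *dma = f->page;

	if (!f->offset) {		/* new page: "map" it once, metadata goes first */
		dma->len = (uint32_t)(f->size - sizeof(*dma));
		dma->addr = 0x1000;	/* pretend the mapping succeeded */
		dma->ref = 1;		/* initial ref guards against early release */
		f->offset = sizeof(*dma);
	}

	dma->ref++;			/* one additional reference per carved buffer */
	void *buf = (char *)f->page + f->offset;
	f->offset += size;
	return buf;
}

int main(void)
{
	struct frag f = { .page = malloc(4096), .offset = 0, .size = 4096 };

	frag_alloc(&f, 1536);
	frag_alloc(&f, 1536);
	printf("references on the page mapping: %d\n",
	       ((struct dma_meta *)f.page)->ref);	/* prints 3 */

	free(f.page);
	return 0;
}

In the driver the matching decrement lives in virtnet_rq_unmap(), which the other hunks now call only when do_dma is set; once the count drops to zero the page's single mapping is released.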