
Commit dc4547f

fengidri authored and kuba-moo committed
Revert "virtio_net: rx remove premapped failover code"
This reverts commit defd28a.

Recover the code to disable premapped mode.

Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Tested-by: Takero Funaki <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
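
For context, below is a condensed sketch of the failover pattern this revert restores, drawn from the diff that follows (identifiers such as virtnet_rq_set_premapped, rq->do_dma and virtqueue_set_dma_premapped come from the diff; all other logic is elided). Each receive queue asks the virtio core for premapped DMA mode; if the core refuses, do_dma stays false and the driver-side map/unmap calls are skipped, so that queue falls back to the core's own DMA handling.

/* Sketch only: the failover pattern restored by this revert.
 * The full allocation/unmap logic is omitted; see the diff below.
 */
static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
        int i;

        /* as in the diff: this mode does not use premapped rx buffers */
        if (!vi->mergeable_rx_bufs && vi->big_packets)
                return;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                /* If the core refuses premapped mode for this queue, skip it:
                 * do_dma stays false and the queue keeps using the core's
                 * built-in DMA mapping.
                 */
                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
                        continue;

                vi->rq[i].do_dma = true;   /* driver maps rx buffers itself */
        }
}

/* Driver-side map/unmap is then gated on rq->do_dma, for example: */
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
        if (!rq->do_dma) {
                /* fallback: hand the plain buffer to the core, which maps it */
                sg_init_one(rq->sg, buf, len);
                return;
        }

        /* premapped path omitted here; the diff below restores it */
}

The same rq->do_dma check guards virtnet_rq_get_buf(), virtnet_rq_alloc(), the error paths of add_recvbuf_small() and add_recvbuf_mergeable(), and free_receive_page_frags(), as shown in the hunks below.
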
1 parent fef2843 commit dc4547f

File tree: 1 file changed (+50, -35 lines)


drivers/net/virtio_net.c

Lines changed: 50 additions & 35 deletions
@@ -356,6 +356,9 @@ struct receive_queue {
         struct xdp_rxq_info xsk_rxq_info;
 
         struct xdp_buff **xsk_buffs;
+
+        /* Do dma by self */
+        bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
         void *buf;
 
         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-        if (buf)
+        if (buf && rq->do_dma)
                 virtnet_rq_unmap(rq, buf, *len);
 
         return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
         u32 offset;
         void *head;
 
+        if (!rq->do_dma) {
+                sg_init_one(rq->sg, buf, len);
+                return;
+        }
+
         head = page_address(rq->alloc_frag.page);
 
         offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
         head = page_address(alloc_frag->page);
 
-        dma = head;
+        if (rq->do_dma) {
+                dma = head;
+
+                /* new pages */
+                if (!alloc_frag->offset) {
+                        if (rq->last_dma) {
+                                /* Now, the new page is allocated, the last dma
+                                 * will not be used. So the dma can be unmapped
+                                 * if the ref is 0.
+                                 */
+                                virtnet_rq_unmap(rq, rq->last_dma, 0);
+                                rq->last_dma = NULL;
+                        }
 
-        /* new pages */
-        if (!alloc_frag->offset) {
-                if (rq->last_dma) {
-                        /* Now, the new page is allocated, the last dma
-                         * will not be used. So the dma can be unmapped
-                         * if the ref is 0.
-                         */
-                        virtnet_rq_unmap(rq, rq->last_dma, 0);
-                        rq->last_dma = NULL;
-                }
+                        dma->len = alloc_frag->size - sizeof(*dma);
 
-                dma->len = alloc_frag->size - sizeof(*dma);
+                        addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+                                                              dma->len, DMA_FROM_DEVICE, 0);
+                        if (virtqueue_dma_mapping_error(rq->vq, addr))
+                                return NULL;
 
-                addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-                                                      dma->len, DMA_FROM_DEVICE, 0);
-                if (virtqueue_dma_mapping_error(rq->vq, addr))
-                        return NULL;
+                        dma->addr = addr;
+                        dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-                dma->addr = addr;
-                dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+                        /* Add a reference to dma to prevent the entire dma from
+                         * being released during error handling. This reference
+                         * will be freed after the pages are no longer used.
+                         */
+                        get_page(alloc_frag->page);
+                        dma->ref = 1;
+                        alloc_frag->offset = sizeof(*dma);
 
-                /* Add a reference to dma to prevent the entire dma from
-                 * being released during error handling. This reference
-                 * will be freed after the pages are no longer used.
-                 */
-                get_page(alloc_frag->page);
-                dma->ref = 1;
-                alloc_frag->offset = sizeof(*dma);
+                        rq->last_dma = dma;
+                }
 
-                rq->last_dma = dma;
+                ++dma->ref;
         }
 
-        ++dma->ref;
-
         buf = head + alloc_frag->offset;
 
         get_page(alloc_frag->page);
@@ -975,9 +985,12 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
         if (!vi->mergeable_rx_bufs && vi->big_packets)
                 return;
 
-        for (i = 0; i < vi->max_queue_pairs; i++)
-                /* error should never happen */
-                BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
+        for (i = 0; i < vi->max_queue_pairs; i++) {
+                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+                        continue;
+
+                vi->rq[i].do_dma = true;
+        }
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -2430,7 +2443,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                virtnet_rq_unmap(rq, buf, 0);
+                if (rq->do_dma)
+                        virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -2544,7 +2558,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
         ctx = mergeable_len_to_ctx(len + room, headroom);
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                virtnet_rq_unmap(rq, buf, 0);
+                if (rq->do_dma)
+                        virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -5892,7 +5907,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
         int i;
         for (i = 0; i < vi->max_queue_pairs; i++)
                 if (vi->rq[i].alloc_frag.page) {
-                        if (vi->rq[i].last_dma)
+                        if (vi->rq[i].do_dma && vi->rq[i].last_dma)
                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
                         put_page(vi->rq[i].alloc_frag.page);
                 }
