Commit defd28a

fengidri authored and kuba-moo committed
virtio_net: rx remove premapped failover code
Now, the premapped mode can be enabled unconditionally, so we can
remove the failover code for the merge and small modes.

Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Jason Wang <[email protected]>
Reviewed-by: Larysa Zaremba <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
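For context, the "failover" being removed is the per-call-site guard on rq->do_dma: as long as enabling premapped mode could fail at runtime, every unmap had to stay conditional on the per-queue flag. A condensed before/after sketch of that call-site pattern, using identifiers from the diff below (not a verbatim excerpt):

        /* Before: premapped mode might not be enabled on this queue,
         * so every unmap is guarded by the per-queue do_dma flag.
         */
        if (buf && rq->do_dma)
                virtnet_rq_unmap(rq, buf, *len);

        /* After: premapped mode is always on, so the guard (and the
         * do_dma flag itself) can be deleted.
         */
        if (buf)
                virtnet_rq_unmap(rq, buf, *len);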
1 parent a377ae5 commit defd28a

1 file changed

drivers/net/virtio_net.c

Lines changed: 35 additions & 50 deletions
@@ -348,9 +348,6 @@ struct receive_queue {
 
         /* Record the last dma info to free after new pages is allocated. */
         struct virtnet_rq_dma *last_dma;
-
-        /* Do dma by self */
-        bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -850,7 +847,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
         void *buf;
 
         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-        if (buf && rq->do_dma)
+        if (buf)
                 virtnet_rq_unmap(rq, buf, *len);
 
         return buf;
@@ -863,11 +860,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
         u32 offset;
         void *head;
 
-        if (!rq->do_dma) {
-                sg_init_one(rq->sg, buf, len);
-                return;
-        }
-
         head = page_address(rq->alloc_frag.page);
 
         offset = buf - head;
@@ -893,44 +885,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
         head = page_address(alloc_frag->page);
 
-        if (rq->do_dma) {
-                dma = head;
-
-                /* new pages */
-                if (!alloc_frag->offset) {
-                        if (rq->last_dma) {
-                                /* Now, the new page is allocated, the last dma
-                                 * will not be used. So the dma can be unmapped
-                                 * if the ref is 0.
-                                 */
-                                virtnet_rq_unmap(rq, rq->last_dma, 0);
-                                rq->last_dma = NULL;
-                        }
+        dma = head;
 
-                        dma->len = alloc_frag->size - sizeof(*dma);
+        /* new pages */
+        if (!alloc_frag->offset) {
+                if (rq->last_dma) {
+                        /* Now, the new page is allocated, the last dma
+                         * will not be used. So the dma can be unmapped
+                         * if the ref is 0.
+                         */
+                        virtnet_rq_unmap(rq, rq->last_dma, 0);
+                        rq->last_dma = NULL;
+                }
 
-                        addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-                                                              dma->len, DMA_FROM_DEVICE, 0);
-                        if (virtqueue_dma_mapping_error(rq->vq, addr))
-                                return NULL;
+                dma->len = alloc_frag->size - sizeof(*dma);
 
-                        dma->addr = addr;
-                        dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+                addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+                                                      dma->len, DMA_FROM_DEVICE, 0);
+                if (virtqueue_dma_mapping_error(rq->vq, addr))
+                        return NULL;
 
-                        /* Add a reference to dma to prevent the entire dma from
-                         * being released during error handling. This reference
-                         * will be freed after the pages are no longer used.
-                         */
-                        get_page(alloc_frag->page);
-                        dma->ref = 1;
-                        alloc_frag->offset = sizeof(*dma);
+                dma->addr = addr;
+                dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-                        rq->last_dma = dma;
-                }
+                /* Add a reference to dma to prevent the entire dma from
+                 * being released during error handling. This reference
+                 * will be freed after the pages are no longer used.
+                 */
+                get_page(alloc_frag->page);
+                dma->ref = 1;
+                alloc_frag->offset = sizeof(*dma);
 
-                ++dma->ref;
+                rq->last_dma = dma;
         }
 
+        ++dma->ref;
+
         buf = head + alloc_frag->offset;
 
         get_page(alloc_frag->page);
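The refcounting that virtnet_rq_alloc() keeps after this cleanup can be summarized in a small userspace model. This is an illustrative sketch, not driver code, and all names in it are hypothetical: struct virtnet_rq_dma sits at the head of each page fragment, the region after it (dma + 1) is the DMA-mapped area, dma->ref holds one reference per buffer carved out of the page plus one for the page itself, and the mapping is torn down only when the count reaches zero (the rq->last_dma path, once a fresh page replaces the old one).

        #include <assert.h>
        #include <stdio.h>

        /* Hypothetical stand-in for struct virtnet_rq_dma. */
        struct dma_model {
                int ref;     /* one ref per outstanding buffer, +1 for the page */
                int mapped;  /* 1 while the page is DMA-mapped */
        };

        /* Mirrors virtnet_rq_unmap(): drop a ref, unmap at zero. */
        static void model_unmap(struct dma_model *dma)
        {
                assert(dma->ref > 0);
                if (--dma->ref == 0)
                        dma->mapped = 0;
        }

        /* Mirrors the mapping half of virtnet_rq_alloc(). */
        static void model_alloc(struct dma_model *dma)
        {
                if (dma->ref == 0) {   /* fresh page: map once ...        */
                        dma->mapped = 1;
                        dma->ref = 1;  /* ... and take the page's own ref */
                }
                ++dma->ref;            /* one more ref for this buffer    */
        }

        int main(void)
        {
                struct dma_model dma = { 0, 0 };

                model_alloc(&dma);     /* first buffer maps the page      */
                model_alloc(&dma);     /* second buffer reuses mapping    */
                model_unmap(&dma);     /* buffers complete ...            */
                model_unmap(&dma);
                assert(dma.mapped);    /* page ref keeps mapping alive    */
                model_unmap(&dma);     /* last_dma retired on a new page  */
                assert(!dma.mapped);
                puts("refcount model ok");
                return 0;
        }

The asserts encode the invariant the comments in the hunk above describe: buffers completing never unmap the page out from under each other, and the final page-level reference is only dropped once a new page takes over.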
@@ -947,12 +937,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
         if (!vi->mergeable_rx_bufs && vi->big_packets)
                 return;
 
-        for (i = 0; i < vi->max_queue_pairs; i++) {
-                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-                        continue;
-
-                vi->rq[i].do_dma = true;
-        }
+        for (i = 0; i < vi->max_queue_pairs; i++)
+                /* error should never happen */
+                BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
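The "error should never happen" comment leans on call ordering. A plausible reading (an assumption from the surrounding series, not stated in this diff) is that virtqueue_set_dma_premapped() can only fail while a queue still holds buffers, and virtnet_rq_set_premapped() runs during device setup before any RX buffer is posted, so the BUG_ON() asserts an invariant rather than handling a reachable error. A condensed, hypothetical sketch of that ordering (virtnet_setup_sketch is not a real function):

        /* Hypothetical setup ordering (the real probe path is more
         * involved): premapped mode is enabled while the RX rings are
         * still empty, before any buffer is posted to them.
         */
        static void virtnet_setup_sketch(struct virtnet_info *vi)
        {
                virtnet_rq_set_premapped(vi);  /* rings empty: cannot fail */

                /* ... RX buffers are allocated and posted only after this ... */
        }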
@@ -2030,8 +2017,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                if (rq->do_dma)
-                        virtnet_rq_unmap(rq, buf, 0);
+                virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -2145,8 +2131,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
         ctx = mergeable_len_to_ctx(len + room, headroom);
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                if (rq->do_dma)
-                        virtnet_rq_unmap(rq, buf, 0);
+                virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -5229,7 +5214,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
         int i;
         for (i = 0; i < vi->max_queue_pairs; i++)
                 if (vi->rq[i].alloc_frag.page) {
-                        if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+                        if (vi->rq[i].last_dma)
                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
                         put_page(vi->rq[i].alloc_frag.page);
                 }
