
Commit f4edb4d

Merge branch 'virtio_net-rx-enable-premapped-mode-by-default'
Xuan Zhuo says:

====================
virtio_net: rx enable premapped mode by default

For the virtio drivers we can enable premapped mode regardless of the value of
use_dma_api, because the virtio core provides its own DMA APIs. So the driver
can enable premapped mode unconditionally.

This patch set makes the big mode of virtio-net support premapped mode, and
enables premapped mode for rx by default.

We do not use the page pool to manage these pages, based on the following
points:

1. virtio-net uses the DMA APIs wrapped by the virtio core. Therefore, we can
   only prevent the page pool from performing DMA operations, and let the
   driver perform DMA operations on the allocated pages.
2. But when the page pool releases a page, we have no chance to execute the
   dma unmap.
3. A solution to #2 is to execute a dma unmap every time before putting the
   page back into the page pool. (This is actually wasteful; we do not want to
   unmap that frequently.)
4. But there is another problem: we still need to use page.dma_addr to save
   the dma address. Using page.dma_addr while using the page pool is unsafe
   behavior.
5. And we need space to chain the pages submitted at once to the virtio core.

More:
https://lore.kernel.org/all/CACGkMEu=Aok9z2imB_c5qVuujSh=vjj1kx12fy9N7hqyi+M5Ow@mail.gmail.com/

Why we do not use the page space to store the dma address:
http://lore.kernel.org/all/CACGkMEuyeJ9mMgYnnB42=hw6umNuo=agn7VBqBqYPd7GN=+39Q@mail.gmail.com
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 6e62702 + 9719f03 commit f4edb4d

2 files changed: +38, -59 lines

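For orientation, the flow the cover letter describes can be condensed into the sketch below. It is not part of the commit: it compresses what virtnet_rq_alloc() and virtnet_rq_unmap() do after this series, assumes the driver-internal struct receive_queue and the virtnet_rq_dma layout from virtio_net.c, and drops the sync and error-path details. The point is that the DMA metadata lives at the head of the page frag itself (point 5 above) and all mapping goes through the virtqueue_dma_*() wrappers (point 1), so no page pool and no page.dma_addr are involved.

/* Condensed sketch of the premapped RX page handling after this series.
 * Not the literal driver code: struct receive_queue is the driver-internal
 * type from virtio_net.c, and refill/sync/error paths are simplified.
 */
struct virtnet_rq_dma {
        dma_addr_t addr;
        u32 ref;
        u16 len;
        u16 need_sync;
};

static void *rq_alloc_premapped(struct receive_queue *rq, u32 size, gfp_t gfp)
{
        struct page_frag *alloc_frag = &rq->alloc_frag;
        struct virtnet_rq_dma *dma;
        dma_addr_t addr;
        void *head, *buf;

        if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
                return NULL;

        /* The dma metadata lives at the head of the page itself, so no page
         * pool (and no page.dma_addr) is needed to track the mapping.
         */
        head = page_address(alloc_frag->page);
        dma = head;

        if (!alloc_frag->offset) {              /* first buffer from this page */
                dma->len = alloc_frag->size - sizeof(*dma);

                /* Map through the virtio wrappers, not dma_map_single(), so
                 * this works whether or not the device uses the DMA API.
                 */
                addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, dma->len,
                                                      DMA_FROM_DEVICE, 0);
                if (virtqueue_dma_mapping_error(rq->vq, addr))
                        return NULL;

                dma->addr = addr;
                dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
                dma->ref = 1;                   /* dropped once the page is idle */
                get_page(alloc_frag->page);
                alloc_frag->offset = sizeof(*dma);
        }

        ++dma->ref;                             /* one ref per buffer handed out */
        buf = head + alloc_frag->offset;
        get_page(alloc_frag->page);
        alloc_frag->offset += size;

        return buf;
}

static void rq_unmap_premapped(struct receive_queue *rq, void *buf)
{
        struct page *page = virt_to_head_page(buf);
        struct virtnet_rq_dma *dma = page_address(page);

        if (--dma->ref == 0) {                  /* last buffer of the page */
                virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
                                                 DMA_FROM_DEVICE, 0);
                put_page(page);
        }
}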

drivers/net/virtio_net.c

Lines changed: 37 additions & 53 deletions
@@ -348,9 +348,6 @@ struct receive_queue {
 
         /* Record the last dma info to free after new pages is allocated. */
         struct virtnet_rq_dma *last_dma;
-
-        /* Do dma by self */
-        bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -746,7 +743,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
         shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-        /* copy small packet so we can reuse these pages */
         if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
                 skb = virtnet_build_skb(buf, truesize, p - buf, len);
                 if (unlikely(!skb))
@@ -850,7 +846,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
         void *buf;
 
         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-        if (buf && rq->do_dma)
+        if (buf)
                 virtnet_rq_unmap(rq, buf, *len);
 
         return buf;
@@ -863,11 +859,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
         u32 offset;
         void *head;
 
-        if (!rq->do_dma) {
-                sg_init_one(rq->sg, buf, len);
-                return;
-        }
-
         head = page_address(rq->alloc_frag.page);
 
         offset = buf - head;
@@ -893,44 +884,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
         head = page_address(alloc_frag->page);
 
-        if (rq->do_dma) {
-                dma = head;
-
-                /* new pages */
-                if (!alloc_frag->offset) {
-                        if (rq->last_dma) {
-                                /* Now, the new page is allocated, the last dma
-                                 * will not be used. So the dma can be unmapped
-                                 * if the ref is 0.
-                                 */
-                                virtnet_rq_unmap(rq, rq->last_dma, 0);
-                                rq->last_dma = NULL;
-                        }
+        dma = head;
 
-                        dma->len = alloc_frag->size - sizeof(*dma);
+        /* new pages */
+        if (!alloc_frag->offset) {
+                if (rq->last_dma) {
+                        /* Now, the new page is allocated, the last dma
+                         * will not be used. So the dma can be unmapped
+                         * if the ref is 0.
+                         */
+                        virtnet_rq_unmap(rq, rq->last_dma, 0);
+                        rq->last_dma = NULL;
+                }
 
-                        addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-                                                              dma->len, DMA_FROM_DEVICE, 0);
-                        if (virtqueue_dma_mapping_error(rq->vq, addr))
-                                return NULL;
+                dma->len = alloc_frag->size - sizeof(*dma);
 
-                        dma->addr = addr;
-                        dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+                addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+                                                      dma->len, DMA_FROM_DEVICE, 0);
+                if (virtqueue_dma_mapping_error(rq->vq, addr))
+                        return NULL;
 
-                        /* Add a reference to dma to prevent the entire dma from
-                         * being released during error handling. This reference
-                         * will be freed after the pages are no longer used.
-                         */
-                        get_page(alloc_frag->page);
-                        dma->ref = 1;
-                        alloc_frag->offset = sizeof(*dma);
+                dma->addr = addr;
+                dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-                        rq->last_dma = dma;
-                }
+                /* Add a reference to dma to prevent the entire dma from
+                 * being released during error handling. This reference
+                 * will be freed after the pages are no longer used.
+                 */
+                get_page(alloc_frag->page);
+                dma->ref = 1;
+                alloc_frag->offset = sizeof(*dma);
 
-                ++dma->ref;
+                rq->last_dma = dma;
         }
 
+        ++dma->ref;
+
         buf = head + alloc_frag->offset;
 
         get_page(alloc_frag->page);
@@ -947,12 +936,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
         if (!vi->mergeable_rx_bufs && vi->big_packets)
                 return;
 
-        for (i = 0; i < vi->max_queue_pairs; i++) {
-                if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-                        continue;
-
-                vi->rq[i].do_dma = true;
-        }
+        for (i = 0; i < vi->max_queue_pairs; i++)
+                /* error should never happen */
+                BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -963,7 +949,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 
         rq = &vi->rq[i];
 
-        if (rq->do_dma)
+        if (!vi->big_packets || vi->mergeable_rx_bufs)
                 virtnet_rq_unmap(rq, buf, 0);
 
         virtnet_rq_free_buf(vi, rq, buf);
@@ -2030,8 +2016,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                if (rq->do_dma)
-                        virtnet_rq_unmap(rq, buf, 0);
+                virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -2145,8 +2130,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
         ctx = mergeable_len_to_ctx(len + room, headroom);
         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
         if (err < 0) {
-                if (rq->do_dma)
-                        virtnet_rq_unmap(rq, buf, 0);
+                virtnet_rq_unmap(rq, buf, 0);
                 put_page(virt_to_head_page(buf));
         }
 
@@ -2277,7 +2261,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
                 }
         } else {
                 while (packets < budget &&
-                       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+                       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                         receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
                         packets++;
                 }
@@ -5229,7 +5213,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
         int i;
         for (i = 0; i < vi->max_queue_pairs; i++)
                 if (vi->rq[i].alloc_frag.page) {
-                        if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+                        if (vi->rq[i].last_dma)
                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
                         put_page(vi->rq[i].alloc_frag.page);
                 }

drivers/virtio/virtio_ring.c

Lines changed: 1 addition & 6 deletions
@@ -2782,7 +2782,7 @@ EXPORT_SYMBOL_GPL(virtqueue_resize);
  *
  * Returns zero or a negative error.
  * 0: success.
- * -EINVAL: vring does not use the dma api, so we can not enable premapped mode.
+ * -EINVAL: too late to enable premapped mode, the vq already contains buffers.
  */
 int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 {
@@ -2798,11 +2798,6 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
                 return -EINVAL;
         }
 
-        if (!vq->use_dma_api) {
-                END_USE(vq);
-                return -EINVAL;
-        }
-
         vq->premapped = true;
         vq->do_unmap = false;
 
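
With the use_dma_api check gone, the only way virtqueue_set_dma_premapped() can fail is the case the updated comment describes: the vq already contains buffers. That is why the virtio_net side above simply BUG_ON()s the return value. A driver that prefers a fallback instead could use something like the hypothetical helper below (not from this commit); the whole contract is to call it after finding the vqs and before queueing the first buffer.

/* Hypothetical probe-time helper (not in this commit): enable premapped mode
 * on every RX virtqueue while they are still empty. After this change,
 * -EINVAL only means the vq already holds buffers, i.e. we called too late.
 */
static int virtnet_enable_rx_premapped(struct virtnet_info *vi)
{
        int i, err;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                err = virtqueue_set_dma_premapped(vi->rq[i].vq);
                if (err)
                        return err;     /* ordering bug: buffers were added first */
        }

        return 0;
}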
