
Commit db07044

isilence authored and axboe committed
io_uring/zcrx: dma-map area for the device
Setup DMA mappings for the area into which we intend to receive data
later on. We know the device we want to attach to even before we get a
page pool and can pre-map in advance. All net_iov are synchronised for
device when allocated, see page_pool_mp_return_in_cache().

Reviewed-by: Jens Axboe <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: David Wei <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 34a3e60 commit db07044
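
The patch applies the classic streaming-DMA pattern once per page at area registration time, rather than per packet in the hot path. For context, a minimal sketch of that map-with-unwind pattern follows; it is illustrative only, and the helper name and parameters are invented for the example, not taken from the patch.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Illustrative helper (not in the patch): map @nr pages for RX DMA.
 * On a mapping failure, unwind everything mapped so far, mirroring
 * the error path of io_zcrx_map_area() in the diff below.
 */
static int sketch_map_rx_pages(struct device *dev, struct page **pages,
			       dma_addr_t *addrs, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		addrs[i] = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					      DMA_FROM_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, addrs[i])) {
			while (i--)
				dma_unmap_page_attrs(dev, addrs[i], PAGE_SIZE,
						     DMA_FROM_DEVICE,
						     DMA_ATTR_SKIP_CPU_SYNC);
			return -ENOMEM;
		}
	}
	return 0;
}

Because the mappings are created with DMA_ATTR_SKIP_CPU_SYNC, each buffer still has to be synced for the device before it is handed over; that is what the io_zcrx_sync_for_device() calls added on the refill paths take care of.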

File tree: io_uring/zcrx.c, io_uring/zcrx.h

2 files changed: 82 additions, 1 deletion

io_uring/zcrx.c

Lines changed: 81 additions & 1 deletion
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/dma-map-ops.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
 #include <linux/io_uring.h>
@@ -21,6 +22,73 @@
 #include "zcrx.h"
 #include "rsrc.h"
 
+#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
+				 struct io_zcrx_area *area, int nr_mapped)
+{
+	int i;
+
+	for (i = 0; i < nr_mapped; i++) {
+		struct net_iov *niov = &area->nia.niovs[i];
+		dma_addr_t dma;
+
+		dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
+		dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
+				     DMA_FROM_DEVICE, IO_DMA_ATTR);
+		net_mp_niov_set_dma_addr(niov, 0);
+	}
+}
+
+static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
+{
+	if (area->is_mapped)
+		__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
+}
+
+static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
+{
+	int i;
+
+	for (i = 0; i < area->nia.num_niovs; i++) {
+		struct net_iov *niov = &area->nia.niovs[i];
+		dma_addr_t dma;
+
+		dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE,
+					 DMA_FROM_DEVICE, IO_DMA_ATTR);
+		if (dma_mapping_error(ifq->dev, dma))
+			break;
+		if (net_mp_niov_set_dma_addr(niov, dma)) {
+			dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE,
+					     DMA_FROM_DEVICE, IO_DMA_ATTR);
+			break;
+		}
+	}
+
+	if (i != area->nia.num_niovs) {
+		__io_zcrx_unmap_area(ifq, area, i);
+		return -EINVAL;
+	}
+
+	area->is_mapped = true;
+	return 0;
+}
+
+static void io_zcrx_sync_for_device(const struct page_pool *pool,
+				    struct net_iov *niov)
+{
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+	dma_addr_t dma_addr;
+
+	if (!dma_dev_need_sync(pool->p.dev))
+		return;
+
+	dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
+	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+				     PAGE_SIZE, pool->p.dma_dir);
+#endif
+}
+
 #define IO_RQ_MAX_ENTRIES		32768
 
 __maybe_unused
@@ -83,6 +151,8 @@ static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
 
 static void io_zcrx_free_area(struct io_zcrx_area *area)
 {
+	io_zcrx_unmap_area(area->ifq, area);
+
 	kvfree(area->freelist);
 	kvfree(area->nia.niovs);
 	kvfree(area->user_refs);
@@ -272,6 +342,10 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		return -EOPNOTSUPP;
 	get_device(ifq->dev);
 
+	ret = io_zcrx_map_area(ifq, ifq->area);
+	if (ret)
+		goto err;
+
 	reg.offsets.rqes = sizeof(struct io_uring);
 	reg.offsets.head = offsetof(struct io_uring, head);
 	reg.offsets.tail = offsetof(struct io_uring, tail);
@@ -422,6 +496,7 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 			continue;
 		}
 
+		io_zcrx_sync_for_device(pp, niov);
 		net_mp_netmem_place_in_cache(pp, netmem);
 	} while (--entries);
 
@@ -439,6 +514,7 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 		netmem_ref netmem = net_iov_to_netmem(niov);
 
 		net_mp_niov_set_page_pool(pp, niov);
+		io_zcrx_sync_for_device(pp, niov);
 		net_mp_netmem_place_in_cache(pp, netmem);
 	}
 	spin_unlock_bh(&area->freelist_lock);
@@ -482,10 +558,14 @@ static int io_pp_zc_init(struct page_pool *pp)
 
 	if (WARN_ON_ONCE(!ifq))
 		return -EINVAL;
-	if (pp->dma_map)
+	if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
+		return -EINVAL;
+	if (WARN_ON_ONCE(!pp->dma_map))
 		return -EOPNOTSUPP;
 	if (pp->p.order != 0)
 		return -EOPNOTSUPP;
+	if (pp->p.dma_dir != DMA_FROM_DEVICE)
+		return -EOPNOTSUPP;
 
 	percpu_ref_get(&ifq->ctx->refs);
 	return 0;
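
The reworked checks in io_pp_zc_init() spell out the contract with the driver's page pool: DMA mapping must be handled by the pool, pages must be order-0, the direction must be RX, and the pool's device must be the one the area was pre-mapped for. As a rough illustration (not from this patch; the device pointer and pool size are placeholders), driver-side pool parameters that would satisfy those checks look like:

#include <linux/dma-direction.h>
#include <net/page_pool/types.h>

/* Illustrative only: 'nic_dev' is a placeholder for the device the
 * zcrx area was registered against (ifq->dev).
 */
static struct page_pool *sketch_create_rx_pool(struct device *nic_dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool owns DMA => pp->dma_map set */
		.order		= 0,			/* order-0 pages only */
		.pool_size	= 1024,			/* placeholder ring size */
		.dev		= nic_dev,		/* must match ifq->dev */
		.dma_dir	= DMA_FROM_DEVICE,	/* RX-only */
	};

	return page_pool_create(&pp_params);
}

A pool created this way carries p.dev, p.order, and p.dma_dir values that pass the WARN_ON_ONCE() checks above.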

io_uring/zcrx.h

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@ struct io_zcrx_area {
 	struct io_zcrx_ifq	*ifq;
 	atomic_t		*user_refs;
 
+	bool			is_mapped;
 	u16			area_id;
 	struct page		**pages;
 