Commit 4321de4

alobakin authored and Christoph Hellwig committed
page_pool: check for DMA sync shortcut earlier
We can save a couple more function calls in the Page Pool code if we
check for dma_need_sync() earlier, just when we test pp->p.dma_sync.
Move both these checks into an inline wrapper and call the PP wrapper
over the generic DMA sync function only when both are true.

You can't cache the result of dma_need_sync() in &page_pool, as it may
change anytime if an SWIOTLB buffer is allocated or mapped.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
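To see why hoisting the check pays off, here is a minimal, compilable
userspace model of the pattern (everything below is illustrative, not
the kernel API; toy_pool, toy_dev and need_sync merely stand in for
page_pool, pool->p.dev and dma_dev_need_sync()): the gate is an inline
wrapper made of two cheap flag reads, so no out-of-line call happens
unless both are true.

	/* Toy model of the DMA sync shortcut; all names are made up. */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_dev  { bool need_sync; };	/* stands in for dma_dev_need_sync() */
	struct toy_pool { struct toy_dev *dev; bool dma_sync; };

	/* The "expensive" out-of-line path, i.e. the actual DMA sync. */
	static void __toy_sync_for_device(struct toy_pool *pool)
	{
		(void)pool;
		printf("syncing for device\n");
	}

	/* Inline wrapper: both checks are plain loads; the sync routine
	 * is only called when both are true. */
	static inline void toy_sync_for_device(struct toy_pool *pool)
	{
		if (pool->dma_sync && pool->dev->need_sync)
			__toy_sync_for_device(pool);
	}

	int main(void)
	{
		struct toy_dev d = { .need_sync = false };
		struct toy_pool p = { .dev = &d, .dma_sync = true };

		toy_sync_for_device(&p);	/* skipped: device needs no sync */
		d.need_sync = true;		/* e.g. a bounce buffer appeared */
		toy_sync_for_device(&p);	/* now performs the sync */
		return 0;
	}

Note how d.need_sync can flip at runtime; that is exactly why the
commit message says the dma_need_sync() result must not be cached in
&page_pool.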
1 parent 403f11a commit 4321de4

1 file changed: +19 −14 lines changed


net/core/page_pool.c

@@ -398,16 +398,26 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
 	return page;
 }
 
-static void page_pool_dma_sync_for_device(struct page_pool *pool,
-					  struct page *page,
-					  unsigned int dma_sync_size)
+static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
+					    struct page *page,
+					    u32 dma_sync_size)
 {
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
 	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
 
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
-					 pool->p.offset, dma_sync_size,
-					 pool->p.dma_dir);
+	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+				     dma_sync_size, pool->p.dma_dir);
+#endif
+}
+
+static __always_inline void
+page_pool_dma_sync_for_device(const struct page_pool *pool,
+			      struct page *page,
+			      u32 dma_sync_size)
+{
+	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
+		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 }
 
 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
@@ -429,8 +439,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (page_pool_set_dma_addr(page, dma))
 		goto unmap_failed;
 
-	if (pool->dma_sync)
-		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+	page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 
 	return true;
 
@@ -699,9 +708,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	if (likely(__page_pool_page_can_be_recycled(page))) {
 		/* Read barrier done in page_ref_count / READ_ONCE */
 
-		if (pool->dma_sync)
-			page_pool_dma_sync_for_device(pool, page,
-						      dma_sync_size);
+		page_pool_dma_sync_for_device(pool, page, dma_sync_size);
 
 		if (allow_direct && in_softirq() &&
 		    page_pool_recycle_in_cache(page, pool))
@@ -812,9 +819,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 		return NULL;
 
 	if (__page_pool_page_can_be_recycled(page)) {
-		if (pool->dma_sync)
-			page_pool_dma_sync_for_device(pool, page, -1);
-
+		page_pool_dma_sync_for_device(pool, page, -1);
 		return page;
 	}
 