
Commit 403f11a

alobakin authored and Christoph Hellwig committed
page_pool: don't use driver-set flags field directly
page_pool::p is driver-defined params, copied directly from the structure passed to page_pool_create(). The structure isn't meant to be modified by the Page Pool core code, and this even might look confusing[0][1].

In order to be able to alter some flags, let's define our own, internal fields the same way as the already existing one (::has_init_callback). They are defined as bits in the driver-set params; leave them as bits here as well, so as not to waste a byte per flag. Almost 30 bits are still free for future extensions.

We could have defined only new flags here, or only the ones we may need to alter, but checking some flags in one place and others in another doesn't sound convenient or intuitive. ::flags passed by the driver can now go to the "slow" PP params.

Suggested-by: Jakub Kicinski <[email protected]>
Link[0]: https://lore.kernel.org/netdev/[email protected]
Suggested-by: Alexander Duyck <[email protected]>
Link[1]: https://lore.kernel.org/netdev/CAKgT0UfZCGnWgOH96E4GV3ZP6LLbROHM7SHE8NKwq+exX+Gk_Q@mail.gmail.com
Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 1f20a57 commit 403f11a
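For readers following the change from the driver side, here is a minimal, hypothetical sketch of how the flags still flow into the pool after this patch: the driver keeps filling page_pool_params::flags (which now lives in the "slow" param group), and page_pool_init() translates those bits into the new internal pool->dma_map / pool->dma_sync fields that the hot paths test. The device pointer, ring size and other values below are illustrative only.

	/* Hypothetical Rx setup; "dev" and the sizes are placeholders.
	 * The driver-facing API is unchanged by this commit; only the
	 * core's bookkeeping moved from pool->p.flags to internal bits.
	 */
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* struct device of the NIC */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
	};
	struct page_pool *pool;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* From here on, the core tests pool->dma_map / pool->dma_sync,
	 * both set once in page_pool_init() from pool->slow.flags,
	 * instead of re-reading the driver-provided flags on each call.
	 */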

File tree

2 files changed: +32 -22 lines


include/net/page_pool/types.h

Lines changed: 10 additions & 3 deletions
@@ -45,7 +45,6 @@ struct pp_alloc_cache {
 
 /**
  * struct page_pool_params - page pool parameters
- * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order:	2^order pages on allocation
  * @pool_size:	size of the ptr_ring
  * @nid:	NUMA node id to allocate from pages from
@@ -55,10 +54,11 @@ struct pp_alloc_cache {
  * @dma_dir:	DMA mapping direction
  * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
  * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ * @netdev:	corresponding &net_device for Netlink introspection
+ * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
  */
 struct page_pool_params {
 	struct_group_tagged(page_pool_params_fast, fast,
-		unsigned int	flags;
 		unsigned int	order;
 		unsigned int	pool_size;
 		int		nid;
@@ -70,6 +70,7 @@ struct page_pool_params {
 	);
 	struct_group_tagged(page_pool_params_slow, slow,
 		struct net_device *netdev;
+		unsigned int	flags;
 /* private: used by test code only */
 		void (*init_callback)(struct page *page, void *arg);
 		void *init_arg;
@@ -131,7 +132,13 @@ struct page_pool {
 
 	int cpuid;
 	u32 pages_state_hold_cnt;
-	bool has_init_callback;
+
+	bool has_init_callback:1;	/* slow::init_callback is set */
+	bool dma_map:1;			/* Perform DMA mapping */
+	bool dma_sync:1;		/* Perform DMA sync */
+#ifdef CONFIG_PAGE_POOL_STATS
+	bool system:1;			/* This is a global percpu pool */
+#endif
 
 	/* The following block must stay within one cacheline. On 32-bit
 	 * systems, sizeof(long) == sizeof(int), so that the block size is

net/core/page_pool.c

Lines changed: 22 additions & 19 deletions
@@ -194,7 +194,7 @@ static int page_pool_init(struct page_pool *pool,
 	pool->cpuid = cpuid;
 
 	/* Validate only known flags were used */
-	if (pool->p.flags & ~(PP_FLAG_ALL))
+	if (pool->slow.flags & ~PP_FLAG_ALL)
 		return -EINVAL;
 
 	if (pool->p.pool_size)
@@ -208,22 +208,26 @@ static int page_pool_init(struct page_pool *pool,
 	 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
 	 * which is the XDP_TX use-case.
 	 */
-	if (pool->p.flags & PP_FLAG_DMA_MAP) {
+	if (pool->slow.flags & PP_FLAG_DMA_MAP) {
 		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 			return -EINVAL;
+
+		pool->dma_map = true;
 	}
 
-	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
+	if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) {
 		/* In order to request DMA-sync-for-device the page
 		 * needs to be mapped
 		 */
-		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+		if (!(pool->slow.flags & PP_FLAG_DMA_MAP))
 			return -EINVAL;
 
 		if (!pool->p.max_len)
 			return -EINVAL;
 
+		pool->dma_sync = true;
+
 		/* pool->p.offset has to be set according to the address
 		 * offset used by the DMA engine to start copying rx data
 		 */
@@ -232,7 +236,7 @@ static int page_pool_init(struct page_pool *pool,
 	pool->has_init_callback = !!pool->slow.init_callback;
 
 #ifdef CONFIG_PAGE_POOL_STATS
-	if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
+	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
 		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 		if (!pool->recycle_stats)
 			return -ENOMEM;
@@ -242,12 +246,13 @@ static int page_pool_init(struct page_pool *pool,
 		 * (also percpu) page pool instance.
 		 */
 		pool->recycle_stats = &pp_system_recycle_stats;
+		pool->system = true;
 	}
 #endif
 
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
 #ifdef CONFIG_PAGE_POOL_STATS
-		if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+		if (!pool->system)
 			free_percpu(pool->recycle_stats);
 #endif
 		return -ENOMEM;
@@ -258,7 +263,7 @@ static int page_pool_init(struct page_pool *pool,
 	/* Driver calling page_pool_create() also call page_pool_destroy() */
 	refcount_set(&pool->user_cnt, 1);
 
-	if (pool->p.flags & PP_FLAG_DMA_MAP)
+	if (pool->dma_map)
 		get_device(pool->p.dev);
 
 	return 0;
@@ -268,11 +273,11 @@ static void page_pool_uninit(struct page_pool *pool)
 {
 	ptr_ring_cleanup(&pool->ring, NULL);
 
-	if (pool->p.flags & PP_FLAG_DMA_MAP)
+	if (pool->dma_map)
 		put_device(pool->p.dev);
 
 #ifdef CONFIG_PAGE_POOL_STATS
-	if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+	if (!pool->system)
 		free_percpu(pool->recycle_stats);
 #endif
 }
@@ -424,7 +429,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (page_pool_set_dma_addr(page, dma))
 		goto unmap_failed;
 
-	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+	if (pool->dma_sync)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 
 	return true;
@@ -470,8 +475,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 	if (unlikely(!page))
 		return NULL;
 
-	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
-	    unlikely(!page_pool_dma_map(pool, page))) {
+	if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page))) {
 		put_page(page);
 		return NULL;
 	}
@@ -491,8 +495,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 						 gfp_t gfp)
 {
 	const int bulk = PP_ALLOC_CACHE_REFILL;
-	unsigned int pp_flags = pool->p.flags;
 	unsigned int pp_order = pool->p.order;
+	bool dma_map = pool->dma_map;
 	struct page *page;
 	int i, nr_pages;
 
@@ -517,8 +521,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	 */
 	for (i = 0; i < nr_pages; i++) {
 		page = pool->alloc.cache[i];
-		if ((pp_flags & PP_FLAG_DMA_MAP) &&
-		    unlikely(!page_pool_dma_map(pool, page))) {
+		if (dma_map && unlikely(!page_pool_dma_map(pool, page))) {
 			put_page(page);
 			continue;
 		}
@@ -590,7 +593,7 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
 {
 	dma_addr_t dma;
 
-	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+	if (!pool->dma_map)
 		/* Always account for inflight pages, even if we didn't
 		 * map them
 		 */
@@ -673,7 +676,7 @@ static bool __page_pool_page_can_be_recycled(const struct page *page)
 }
 
 /* If the page refcnt == 1, this will try to recycle the page.
- * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
+ * If pool->dma_sync is set, we'll try to sync the DMA area for
  * the configured size min(dma_sync_size, pool->max_len).
  * If the page refcnt != 1, then the page will be returned to memory
  * subsystem.
@@ -696,7 +699,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	if (likely(__page_pool_page_can_be_recycled(page))) {
 		/* Read barrier done in page_ref_count / READ_ONCE */
 
-		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		if (pool->dma_sync)
 			page_pool_dma_sync_for_device(pool, page,
 						      dma_sync_size);
 
@@ -809,7 +812,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 		return NULL;
 
 	if (__page_pool_page_can_be_recycled(page)) {
-		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		if (pool->dma_sync)
 			page_pool_dma_sync_for_device(pool, page, -1);
 
 		return page;
