@@ -468,11 +468,60 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
        }
 }
 
+static int page_pool_register_dma_index(struct page_pool *pool,
+                                        netmem_ref netmem, gfp_t gfp)
+{
+        int err = 0;
+        u32 id;
+
+        if (unlikely(!PP_DMA_INDEX_BITS))
+                goto out;
+
+        if (in_softirq())
+                err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
+                               PP_DMA_INDEX_LIMIT, gfp);
+        else
+                err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
+                                  PP_DMA_INDEX_LIMIT, gfp);
+        if (err) {
+                WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+                goto out;
+        }
+
+        netmem_set_dma_index(netmem, id);
+out:
+        return err;
+}
+
+static int page_pool_release_dma_index(struct page_pool *pool,
+                                       netmem_ref netmem)
+{
+        struct page *old, *page = netmem_to_page(netmem);
+        unsigned long id;
+
+        if (unlikely(!PP_DMA_INDEX_BITS))
+                return 0;
+
+        id = netmem_get_dma_index(netmem);
+        if (!id)
+                return -1;
+
+        if (in_softirq())
+                old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
+        else
+                old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
+        if (old != page)
+                return -1;
+
+        netmem_set_dma_index(netmem, 0);
+
+        return 0;
+}
+
 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
 {
         dma_addr_t dma;
         int err;
-        u32 id;
 
         /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
          * since dma_addr_t can be either 32 or 64 bits and does not always fit
@@ -491,18 +540,10 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
                 goto unmap_failed;
         }
 
-        if (in_softirq())
-                err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
-                               PP_DMA_INDEX_LIMIT, gfp);
-        else
-                err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
-                                  PP_DMA_INDEX_LIMIT, gfp);
-        if (err) {
-                WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+        err = page_pool_register_dma_index(pool, netmem, gfp);
+        if (err)
                 goto unset_failed;
-        }
 
-        netmem_set_dma_index(netmem, id);
         page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
 
         return true;
@@ -680,8 +721,6 @@ void page_pool_clear_pp_info(netmem_ref netmem)
 static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
                                                            netmem_ref netmem)
 {
-        struct page *old, *page = netmem_to_page(netmem);
-        unsigned long id;
         dma_addr_t dma;
 
         if (!pool->dma_map)
@@ -690,15 +729,7 @@ static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
                  */
                 return;
 
-        id = netmem_get_dma_index(netmem);
-        if (!id)
-                return;
-
-        if (in_softirq())
-                old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
-        else
-                old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
-        if (old != page)
+        if (page_pool_release_dma_index(pool, netmem))
                 return;
 
         dma = page_pool_get_dma_addr_netmem(netmem);
@@ -708,7 +739,6 @@ static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                              DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
         page_pool_set_dma_addr_netmem(netmem, 0);
-        netmem_set_dma_index(netmem, 0);
 }
 
 /* Disconnects a page (from a page_pool). API users can have a need
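
For readers following this outside the kernel tree, below is a minimal userspace sketch (not part of this commit) of the bookkeeping pattern the two new helpers factor out: registration claims a nonzero slot in a table and records the slot id in the object itself, and release succeeds only if a compare-and-exchange still finds that same object in the slot, which is how the patch uses xa_cmpxchg() to catch a stale or doubled release. The names below (fake_page, dma_table, TABLE_SIZE) are invented for illustration; the real code stores pages in the pool->dma_mapped xarray, bounded by PP_DMA_INDEX_LIMIT, and stashes the id via netmem_set_dma_index().

/* Illustrative model only -- names are invented, not kernel API. */
#include <stdatomic.h>
#include <stdio.h>

#define TABLE_SIZE 64                   /* stands in for PP_DMA_INDEX_LIMIT */

struct fake_page {
        unsigned long dma_index;        /* 0 means "not tracked", as in the patch */
};

/* Slot table; the kernel uses an xarray (pool->dma_mapped) instead. */
static _Atomic(struct fake_page *) dma_table[TABLE_SIZE];

/* Rough analogue of page_pool_register_dma_index(): claim a free
 * nonzero slot and remember it in the page itself. */
static int register_dma_index(struct fake_page *page)
{
        for (unsigned long id = 1; id < TABLE_SIZE; id++) {
                struct fake_page *expected = NULL;

                if (atomic_compare_exchange_strong(&dma_table[id], &expected, page)) {
                        page->dma_index = id;
                        return 0;
                }
        }
        return -1;      /* table full, like xa_alloc() failing with -ENOMEM */
}

/* Rough analogue of page_pool_release_dma_index(): the cmpxchg clears
 * the slot only if it still holds this page, so a second release (or a
 * release of the wrong page) is detected and refused. */
static int release_dma_index(struct fake_page *page)
{
        struct fake_page *expected = page;
        unsigned long id = page->dma_index;

        if (!id)
                return -1;
        if (!atomic_compare_exchange_strong(&dma_table[id], &expected, NULL))
                return -1;
        page->dma_index = 0;
        return 0;
}

int main(void)
{
        struct fake_page page = { 0 };

        if (register_dma_index(&page))
                return 1;
        printf("registered at index %lu\n", page.dma_index);
        printf("first release:  %d\n", release_dma_index(&page));      /* 0 */
        printf("second release: %d\n", release_dma_index(&page));      /* -1 */
        return 0;
}

Reserving id 0 as the "not tracked" sentinel mirrors the kernel helpers, which treat a zero index as "nothing to release" and clear the index back to 0 once the slot is freed.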