 #include <linux/if.h>
 #include <net/ip.h>
 #include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>

 #include "bnge.h"
 #include "bnge_hwrm_lib.h"
 #include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
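+/* Map a TX ring index to its offset within a traffic class, and to the
+ * traffic class itself.
+ */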
+#define BNGE_RING_TO_TC_OFF(bd, tx)	\
+	((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx)		\
+	((tx) / (bd)->tx_nr_rings_per_tc)
+
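+/* A separate head pool is needed when the payload pool hands out pages
+ * the CPU cannot read (unreadable netmem) or when the system page size
+ * is larger than the HW RX page size.
+ */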
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+	return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+	int i;
+
+	for (i = 0; i < bd->rx_nr_rings; i++) {
+		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+		struct bnge_ring_struct *ring;
+
+		page_pool_destroy(rxr->page_pool);
+		page_pool_destroy(rxr->head_pool);
+		rxr->page_pool = rxr->head_pool = NULL;
+
+		kfree(rxr->rx_agg_bmap);
+		rxr->rx_agg_bmap = NULL;
+
+		ring = &rxr->rx_ring_struct;
+		bnge_free_ring(bd, &ring->ring_mem);
+
+		ring = &rxr->rx_agg_ring_struct;
+		bnge_free_ring(bd, &ring->ring_mem);
+	}
+}
+
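+/* Create the payload page pool for an RX ring and, when a separate head
+ * pool is needed, a smaller readable pool for packet headers; otherwise
+ * the head pool shares the payload pool via an extra reference.
+ */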
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+				   struct bnge_rx_ring_info *rxr,
+				   int numa_node)
+{
+	const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+	const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+	struct page_pool_params pp = { 0 };
+	struct bnge_dev *bd = bn->bd;
+	struct page_pool *pool;
+
+	pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+	pp.nid = numa_node;
+	pp.netdev = bn->netdev;
+	pp.dev = bd->dev;
+	pp.dma_dir = bn->rx_dir;
+	pp.max_len = PAGE_SIZE;
+	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+		   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+	pp.queue_idx = rxr->bnapi->index;
+
+	pool = page_pool_create(&pp);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+	rxr->page_pool = pool;
+
+	rxr->need_head_pool = page_pool_is_unreadable(pool);
+	if (bnge_separate_head_pool(rxr)) {
+		pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+		pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+		pool = page_pool_create(&pp);
+		if (IS_ERR(pool))
+			goto err_destroy_pp;
+	} else {
+		page_pool_get(pool);
+	}
+	rxr->head_pool = pool;
+	return 0;
+
+err_destroy_pp:
+	page_pool_destroy(rxr->page_pool);
+	rxr->page_pool = NULL;
+	return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+	page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+	page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
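+/* Allocate the bitmap used to track which aggregation ring slots
+ * currently hold a buffer.
+ */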
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+				  struct bnge_rx_ring_info *rxr)
+{
+	u16 mem_size;
+
+	rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+	mem_size = rxr->rx_agg_bmap_size / 8;
+	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+	if (!rxr->rx_agg_bmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
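+/* Allocate page pools and ring memory for all RX rings (and their agg
+ * rings when aggregation is required), placing each page pool on the
+ * NUMA node of the CPU expected to service the ring.
+ */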
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+	int i, rc = 0, agg_rings = 0, cpu;
+	struct bnge_dev *bd = bn->bd;
+
+	if (bnge_is_agg_reqd(bd))
+		agg_rings = 1;
+
+	for (i = 0; i < bd->rx_nr_rings; i++) {
+		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+		struct bnge_ring_struct *ring;
+		int cpu_node;
+
+		ring = &rxr->rx_ring_struct;
+
+		cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+		cpu_node = cpu_to_node(cpu);
+		netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+			   i, cpu_node);
+		rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+		if (rc)
+			goto err_free_rx_rings;
+		bnge_enable_rx_page_pool(rxr);
+
+		rc = bnge_alloc_ring(bd, &ring->ring_mem);
+		if (rc)
+			goto err_free_rx_rings;
+
+		ring->grp_idx = i;
+		if (agg_rings) {
+			ring = &rxr->rx_agg_ring_struct;
+			rc = bnge_alloc_ring(bd, &ring->ring_mem);
+			if (rc)
+				goto err_free_rx_rings;
+
+			ring->grp_idx = i;
+			rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+			if (rc)
+				goto err_free_rx_rings;
+		}
+	}
+	return rc;
+
+err_free_rx_rings:
+	bnge_free_rx_rings(bn);
+	return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+	int i;
+
+	for (i = 0; i < bd->tx_nr_rings; i++) {
+		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+		struct bnge_ring_struct *ring;
+
+		ring = &txr->tx_ring_struct;
+
+		bnge_free_ring(bd, &ring->ring_mem);
+	}
+}
+
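+/* Allocate ring memory for all TX rings and assign each ring the HW
+ * queue id of the traffic class it belongs to.
+ */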
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+	int i, j, rc;
+
+	for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+		struct bnge_ring_struct *ring;
+		u8 qidx;
+
+		ring = &txr->tx_ring_struct;
+
+		rc = bnge_alloc_ring(bd, &ring->ring_mem);
+		if (rc)
+			goto err_free_tx_rings;
+
+		ring->grp_idx = txr->bnapi->index;
+		qidx = bd->tc_to_qidx[j];
+		ring->queue_id = bd->q_info[qidx].queue_id;
+		if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+			j++;
+	}
+	return 0;
+
+err_free_tx_rings:
+	bnge_free_tx_rings(bn);
+	return rc;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+	bnge_free_tx_rings(bn);
+	bnge_free_rx_rings(bn);
+	kfree(bn->tx_ring_map);
+	bn->tx_ring_map = NULL;
+	kfree(bn->tx_ring);
+	bn->tx_ring = NULL;
+	kfree(bn->rx_ring);
+	bn->rx_ring = NULL;
+	kfree(bn->bnapi);
+	bn->bnapi = NULL;
+}
+
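+/* The bnapi pointer array and the bnge_napi structs it points to are
+ * carved out of a single allocation: the pointer array first, followed
+ * by the cache-line-aligned structs themselves.
+ */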
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+	int i, j, size, arr_size;
+	int rc = -ENOMEM;
+	void *bnapi;
+
+	arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+				  bd->nq_nr_rings);
+	size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+	bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+	if (!bnapi)
+		return rc;
+
+	bn->bnapi = bnapi;
+	bnapi += arr_size;
+	for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+		struct bnge_nq_ring_info *nqr;
+
+		bn->bnapi[i] = bnapi;
+		bn->bnapi[i]->index = i;
+		bn->bnapi[i]->bn = bn;
+		nqr = &bn->bnapi[i]->nq_ring;
+		nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+	}
+
+	bn->rx_ring = kcalloc(bd->rx_nr_rings,
+			      sizeof(struct bnge_rx_ring_info),
+			      GFP_KERNEL);
+	if (!bn->rx_ring)
+		goto err_free_core;
+
+	for (i = 0; i < bd->rx_nr_rings; i++) {
+		struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+		rxr->rx_ring_struct.ring_mem.flags =
+			BNGE_RMEM_RING_PTE_FLAG;
+		rxr->rx_agg_ring_struct.ring_mem.flags =
+			BNGE_RMEM_RING_PTE_FLAG;
+		rxr->bnapi = bn->bnapi[i];
+		bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+	}
+
+	bn->tx_ring = kcalloc(bd->tx_nr_rings,
+			      sizeof(struct bnge_tx_ring_info),
+			      GFP_KERNEL);
+	if (!bn->tx_ring)
+		goto err_free_core;
+
+	bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+				  GFP_KERNEL);
+	if (!bn->tx_ring_map)
+		goto err_free_core;
+
+	if (bd->flags & BNGE_EN_SHARED_CHNL)
+		j = 0;
+	else
+		j = bd->rx_nr_rings;
+
+	for (i = 0; i < bd->tx_nr_rings; i++) {
+		struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+		struct bnge_napi *bnapi2;
+		int k;
+
+		txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+		bn->tx_ring_map[i] = i;
+		k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+		bnapi2 = bn->bnapi[k];
+		txr->txq_index = i;
+		txr->tx_napi_idx =
+			BNGE_RING_TO_TC(bd, txr->txq_index);
+		bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+		txr->bnapi = bnapi2;
+	}
+
+	bnge_init_ring_struct(bn);
+
+	rc = bnge_alloc_rx_rings(bn);
+	if (rc)
+		goto err_free_core;
+
+	rc = bnge_alloc_tx_rings(bn);
+	if (rc)
+		goto err_free_core;
+	return 0;
+
+err_free_core:
+	bnge_free_core(bn);
+	return rc;
+}
+
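+/* Reserve ring resources, allocate the core data structures, and mark
+ * the device open.
+ */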
+static int bnge_open_core(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	netif_carrier_off(bn->netdev);
+
+	rc = bnge_reserve_rings(bd);
+	if (rc) {
+		netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+		return rc;
+	}
+
+	rc = bnge_alloc_core(bn);
+	if (rc) {
+		netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+		return rc;
+	}
+
+	set_bit(BNGE_STATE_OPEN, &bd->state);
+	return 0;
+}
+
 static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -28,11 +350,30 @@ static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)

 static int bnge_open(struct net_device *dev)
 {
-	return 0;
+	struct bnge_net *bn = netdev_priv(dev);
+	int rc;
+
+	rc = bnge_open_core(bn);
+	if (rc)
+		netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+	return rc;
+}
+
+static void bnge_close_core(struct bnge_net *bn)
+{
+	struct bnge_dev *bd = bn->bd;
+
+	clear_bit(BNGE_STATE_OPEN, &bd->state);
+	bnge_free_core(bn);
 }

 static int bnge_close(struct net_device *dev)
 {
+	struct bnge_net *bn = netdev_priv(dev);
+
+	bnge_close_core(bn);
+
 	return 0;
 }

@@ -238,6 +579,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)

 	bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
 	bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+	bn->rx_dir = DMA_FROM_DEVICE;

 	bnge_set_tpa_flags(bd);
 	bnge_set_ring_params(bd);