Skip to content

Commit 2fe6e77

Browse files
bmarreddy authored and kuba-moo committed
bng_en: Allocate packet buffers
Populate packet buffers into the RX and AGG rings while these rings are being initialized. Signed-off-by: Bhargava Marreddy <[email protected]> Reviewed-by: Vikas Gupta <[email protected]> Reviewed-by: Rajashekar Hudumula <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent d85b5a2 commit 2fe6e77

File tree

1 file changed

+287
-1
lines changed

1 file changed

+287
-1
lines changed

drivers/net/ethernet/broadcom/bnge/bnge_netdev.c

Lines changed: 287 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,76 @@ static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
259259
return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
260260
}
261261

262+
static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
263+
struct bnge_rx_ring_info *rxr)
264+
{
265+
int i, max_idx;
266+
267+
if (!rxr->rx_buf_ring)
268+
return;
269+
270+
max_idx = bn->rx_nr_pages * RX_DESC_CNT;
271+
272+
for (i = 0; i < max_idx; i++) {
273+
struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
274+
void *data = rx_buf->data;
275+
276+
if (!data)
277+
continue;
278+
279+
rx_buf->data = NULL;
280+
page_pool_free_va(rxr->head_pool, data, true);
281+
}
282+
}
283+
284+
static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
285+
struct bnge_rx_ring_info *rxr)
286+
{
287+
int i, max_idx;
288+
289+
if (!rxr->rx_agg_buf_ring)
290+
return;
291+
292+
max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
293+
294+
for (i = 0; i < max_idx; i++) {
295+
struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
296+
netmem_ref netmem = rx_agg_buf->netmem;
297+
298+
if (!netmem)
299+
continue;
300+
301+
rx_agg_buf->netmem = 0;
302+
__clear_bit(i, rxr->rx_agg_bmap);
303+
304+
page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
305+
}
306+
}
307+
308+
/* Free the buffers of one RX/aggregation ring pair. */
static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
					    struct bnge_rx_ring_info *rxr)
{
	bnge_free_one_rx_ring_bufs(bn, rxr);
	bnge_free_one_agg_ring_bufs(bn, rxr);
}
314+
315+
static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
316+
{
317+
struct bnge_dev *bd = bn->bd;
318+
int i;
319+
320+
if (!bn->rx_ring)
321+
return;
322+
323+
for (i = 0; i < bd->rx_nr_rings; i++)
324+
bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
325+
}
326+
327+
/* Top-level buffer teardown; currently only RX ring pairs carry
 * driver-allocated buffers.
 */
static void bnge_free_all_rings_bufs(struct bnge_net *bn)
{
	bnge_free_rx_ring_pair_bufs(bn);
}
331+
262332
static void bnge_free_rx_rings(struct bnge_net *bn)
263333
{
264334
struct bnge_dev *bd = bn->bd;
@@ -739,6 +809,194 @@ static void bnge_init_nq_tree(struct bnge_net *bn)
739809
}
740810
}
741811

812+
static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
813+
dma_addr_t *mapping,
814+
struct bnge_rx_ring_info *rxr,
815+
unsigned int *offset,
816+
gfp_t gfp)
817+
{
818+
netmem_ref netmem;
819+
820+
if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
821+
netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
822+
BNGE_RX_PAGE_SIZE, gfp);
823+
} else {
824+
netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
825+
*offset = 0;
826+
}
827+
if (!netmem)
828+
return 0;
829+
830+
*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
831+
return netmem;
832+
}
833+
834+
static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
835+
struct bnge_rx_ring_info *rxr,
836+
gfp_t gfp)
837+
{
838+
unsigned int offset;
839+
struct page *page;
840+
841+
page = page_pool_alloc_frag(rxr->head_pool, &offset,
842+
bn->rx_buf_size, gfp);
843+
if (!page)
844+
return NULL;
845+
846+
*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
847+
return page_address(page) + offset;
848+
}
849+
850+
static int bnge_alloc_rx_data(struct bnge_net *bn,
851+
struct bnge_rx_ring_info *rxr,
852+
u16 prod, gfp_t gfp)
853+
{
854+
struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
855+
struct rx_bd *rxbd;
856+
dma_addr_t mapping;
857+
u8 *data;
858+
859+
rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
860+
data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
861+
if (!data)
862+
return -ENOMEM;
863+
864+
rx_buf->data = data;
865+
rx_buf->data_ptr = data + bn->rx_offset;
866+
rx_buf->mapping = mapping;
867+
868+
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
869+
870+
return 0;
871+
}
872+
873+
static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
874+
struct bnge_rx_ring_info *rxr,
875+
int ring_nr)
876+
{
877+
u32 prod = rxr->rx_prod;
878+
int i, rc = 0;
879+
880+
for (i = 0; i < bn->rx_ring_size; i++) {
881+
rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
882+
if (rc)
883+
break;
884+
prod = NEXT_RX(prod);
885+
}
886+
887+
/* Abort if not a single buffer can be allocated */
888+
if (rc && !i) {
889+
netdev_err(bn->netdev,
890+
"RX ring %d: allocated %d/%d buffers, abort\n",
891+
ring_nr, i, bn->rx_ring_size);
892+
return rc;
893+
}
894+
895+
rxr->rx_prod = prod;
896+
897+
if (i < bn->rx_ring_size)
898+
netdev_warn(bn->netdev,
899+
"RX ring %d: allocated %d/%d buffers, continuing\n",
900+
ring_nr, i, bn->rx_ring_size);
901+
return 0;
902+
}
903+
904+
/* Find a free aggregation-buffer slot at or after @idx, wrapping to
 * the start of the bitmap when the tail is fully occupied.
 */
static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
	u16 bmap_len = rxr->rx_agg_bmap_size;
	u16 slot;

	slot = find_next_zero_bit(rxr->rx_agg_bmap, bmap_len, idx);
	if (slot >= bmap_len)
		slot = find_first_zero_bit(rxr->rx_agg_bmap, bmap_len);
	return slot;
}
913+
914+
static int bnge_alloc_rx_netmem(struct bnge_net *bn,
915+
struct bnge_rx_ring_info *rxr,
916+
u16 prod, gfp_t gfp)
917+
{
918+
struct bnge_sw_rx_agg_bd *rx_agg_buf;
919+
u16 sw_prod = rxr->rx_sw_agg_prod;
920+
unsigned int offset = 0;
921+
struct rx_bd *rxbd;
922+
dma_addr_t mapping;
923+
netmem_ref netmem;
924+
925+
rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
926+
netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
927+
if (!netmem)
928+
return -ENOMEM;
929+
930+
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
931+
sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
932+
933+
__set_bit(sw_prod, rxr->rx_agg_bmap);
934+
rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
935+
rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
936+
937+
rx_agg_buf->netmem = netmem;
938+
rx_agg_buf->offset = offset;
939+
rx_agg_buf->mapping = mapping;
940+
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
941+
rxbd->rx_bd_opaque = sw_prod;
942+
return 0;
943+
}
944+
945+
static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
946+
struct bnge_rx_ring_info *rxr,
947+
int ring_nr)
948+
{
949+
u32 prod = rxr->rx_agg_prod;
950+
int i, rc = 0;
951+
952+
for (i = 0; i < bn->rx_agg_ring_size; i++) {
953+
rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
954+
if (rc)
955+
break;
956+
prod = NEXT_RX_AGG(prod);
957+
}
958+
959+
if (rc && i < MAX_SKB_FRAGS) {
960+
netdev_err(bn->netdev,
961+
"Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
962+
ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
963+
goto err_free_one_agg_ring_bufs;
964+
}
965+
966+
rxr->rx_agg_prod = prod;
967+
968+
if (i < bn->rx_agg_ring_size)
969+
netdev_warn(bn->netdev,
970+
"Agg ring %d: allocated %d/%d buffers, continuing\n",
971+
ring_nr, i, bn->rx_agg_ring_size);
972+
return 0;
973+
974+
err_free_one_agg_ring_bufs:
975+
bnge_free_one_agg_ring_bufs(bn, rxr);
976+
return -ENOMEM;
977+
}
978+
979+
static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
980+
{
981+
struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
982+
int rc;
983+
984+
rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
985+
if (rc)
986+
return rc;
987+
988+
if (bnge_is_agg_reqd(bn->bd)) {
989+
rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
990+
if (rc)
991+
goto err_free_one_rx_ring_bufs;
992+
}
993+
return 0;
994+
995+
err_free_one_rx_ring_bufs:
996+
bnge_free_one_rx_ring_bufs(bn, rxr);
997+
return rc;
998+
}
999+
7421000
static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
7431001
{
7441002
struct rx_bd **rx_desc_ring;
@@ -803,6 +1061,22 @@ static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
8031061
bnge_init_one_agg_ring_rxbd(bn, rxr);
8041062
}
8051063

1064+
static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
1065+
{
1066+
int i, rc;
1067+
1068+
for (i = 0; i < bn->bd->rx_nr_rings; i++) {
1069+
rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
1070+
if (rc)
1071+
goto err_free_rx_ring_pair_bufs;
1072+
}
1073+
return 0;
1074+
1075+
err_free_rx_ring_pair_bufs:
1076+
bnge_free_rx_ring_pair_bufs(bn);
1077+
return rc;
1078+
}
1079+
8061080
static void bnge_init_rx_rings(struct bnge_net *bn)
8071081
{
8081082
int i;
@@ -1030,13 +1304,24 @@ static int bnge_init_nic(struct bnge_net *bn)
10301304
int rc;
10311305

10321306
bnge_init_nq_tree(bn);
1307+
10331308
bnge_init_rx_rings(bn);
1309+
rc = bnge_alloc_rx_ring_pair_bufs(bn);
1310+
if (rc)
1311+
return rc;
1312+
10341313
bnge_init_tx_rings(bn);
1314+
10351315
rc = bnge_init_ring_grps(bn);
10361316
if (rc)
1037-
return rc;
1317+
goto err_free_rx_ring_pair_bufs;
1318+
10381319
bnge_init_vnics(bn);
10391320
return rc;
1321+
1322+
err_free_rx_ring_pair_bufs:
1323+
bnge_free_rx_ring_pair_bufs(bn);
1324+
return rc;
10401325
}
10411326

10421327
static int bnge_open_core(struct bnge_net *bn)
@@ -1105,6 +1390,7 @@ static void bnge_close_core(struct bnge_net *bn)
11051390
struct bnge_dev *bd = bn->bd;
11061391

11071392
clear_bit(BNGE_STATE_OPEN, &bd->state);
1393+
bnge_free_all_rings_bufs(bn);
11081394
bnge_free_irq(bn);
11091395
bnge_del_napi(bn);
11101396

0 commit comments

Comments
 (0)