@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+		dma_unmap_page(dev, addr, priv->rx_buf_size,
 			       DMA_BIDIRECTIONAL);
 
 		free_pages((unsigned long)sg_vaddr, 0);
@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
 			       DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 				      (page_address(page) - page_address(head_page));
 
 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
-					sg_length, DPAA2_ETH_RX_BUF_SIZE);
+					sg_length, priv->rx_buf_size);
 		}
 
 		if (dpaa2_sg_is_final(sge))
@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
 			       DMA_BIDIRECTIONAL);
 		free_pages((unsigned long)vaddr, 0);
 	}
@@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		break;
 	case XDP_REDIRECT:
 		dma_unmap_page(priv->net_dev->dev.parent, addr,
-			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
 		ch->buf_count--;
 		xdp.data_hard_start = vaddr;
 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
@@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	trace_dpaa2_rx_fd(priv->net_dev, fd);
 
 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
 				DMA_BIDIRECTIONAL);
 
 	fas = dpaa2_get_fas(vaddr, false);
@@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
-		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+		dma_unmap_page(dev, addr, priv->rx_buf_size,
 			       DMA_BIDIRECTIONAL);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
-		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+		dma_unmap_page(dev, addr, priv->rx_buf_size,
 			       DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
 		free_pages((unsigned long)vaddr, 0);
@@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 		if (!page)
 			goto err_alloc;
 
-		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
 				    DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
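
Note: add_bufs() is the one place where Rx buffers get DMA-mapped, and the DMA API requires that dma_unmap_page() be called with the exact size used at map time. That pairing is why every unmap and sync site above is converted in the same patch. As a rough illustration of the invariant, here is a small userspace C model; the bookkeeping struct and function names are hypothetical, not driver or kernel code:

#include <assert.h>
#include <stddef.h>

/* Hypothetical model of the DMA API contract that an unmap must use
 * the same size as the original map.
 */
struct mapping { void *addr; size_t size; };
static struct mapping live_map;	/* one outstanding mapping suffices here */

static void *model_map_page(void *page, size_t size)
{
	live_map.addr = page;	/* stand-in for the returned DMA address */
	live_map.size = size;
	return page;
}

static void model_unmap_page(void *addr, size_t size)
{
	assert(addr == live_map.addr);
	assert(size == live_map.size);	/* size mismatch = DMA API violation */
}

int main(void)
{
	static char page[4096];
	size_t rx_buf_size = 3584;	/* assumed rounded-down buffer size */

	void *addr = model_map_page(page, rx_buf_size);
	model_unmap_page(addr, rx_buf_size);	/* must match the map size */
	return 0;
}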
@@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 		/* tracing point */
 		trace_dpaa2_eth_buf_seed(priv->net_dev,
 					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
-					 addr, DPAA2_ETH_RX_BUF_SIZE,
+					 addr, priv->rx_buf_size,
 					 bpid);
 	}
 
@@ -1720,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
 	int mfl, linear_mfl;
 
 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
-	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
 
 	if (mfl > linear_mfl) {
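
A side effect of the rounding: when priv->rx_buf_size ends up smaller than DPAA2_ETH_RX_BUF_SIZE, the largest MTU for which an XDP frame still fits linearly in a single buffer shrinks too. A runnable sketch of the arithmetic follows; the concrete byte values for the headroom and annotation areas are illustrative assumptions, not the driver's actual constants:

#include <stdio.h>

/* Illustrative constants; the real values come from the driver and
 * kernel headers (DPAA2_ETH_RX_HWA_SIZE, XDP_PACKET_HEADROOM, ...).
 */
#define RX_HWA_SIZE		64	/* assumed hardware annotation area */
#define XDP_PKT_HEADROOM	256	/* assumed XDP headroom */
#define RX_HEAD_ROOM		256	/* assumed driver Rx headroom */
#define L2_OVERHEAD		18	/* Ethernet header plus VLAN tag */

static int xdp_mtu_valid(int rx_buf_size, int mtu)
{
	int mfl = mtu + L2_OVERHEAD;	/* models DPAA2_ETH_L2_MAX_FRM(mtu) */
	int linear_mfl = rx_buf_size - RX_HWA_SIZE -
			 RX_HEAD_ROOM - XDP_PKT_HEADROOM;

	return mfl <= linear_mfl;
}

int main(void)
{
	/* The same MTU can pass with the raw size but fail after rounding. */
	printf("mtu 3000, buf 3776: %d\n", xdp_mtu_valid(3776, 3000));	/* 1 */
	printf("mtu 3000, buf 3584: %d\n", xdp_mtu_valid(3584, 3000));	/* 0 */
	return 0;
}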
@@ -2462,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	else
 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 
+	/* We need to ensure that the buffer size seen by WRIOP is a multiple
+	 * of 64 or 256 bytes depending on the WRIOP version.
+	 */
+	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
 	/* tx buffer */
 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
 	buf_layout.pass_timestamp = true;
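
This hunk is the heart of the change: the buffer size is rounded down once, so the value WRIOP sees satisfies the 64- or 256-byte multiple requirement. For a power-of-two alignment, ALIGN_DOWN amounts to clearing the low bits. A standalone sketch of the arithmetic, assuming a raw DPAA2_ETH_RX_BUF_SIZE of 3776 bytes (PAGE_SIZE minus skb tailroom on a typical config; the real value depends on the kernel configuration):

#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN_DOWN(); for a
 * power-of-two alignment it simply masks off the low bits.
 */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned int rx_buf_size = 3776;	/* assumed raw buffer size */

	printf("align 64:  %u\n", ALIGN_DOWN(rx_buf_size, 64));	/* 3776 */
	printf("align 256: %u\n", ALIGN_DOWN(rx_buf_size, 256));	/* 3584 */
	return 0;
}

The rounded value then feeds every consumer that must agree on the size: the dma_map_page() call in add_bufs() above and the buffer_size reported to firmware via dpni_set_pools() below.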
@@ -3126,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
 	pools_params.num_dpbp = 1;
 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
 	pools_params.pools[0].backup_pool = 0;
-	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+	pools_params.pools[0].buffer_size = priv->rx_buf_size;
 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
 	if (err) {
 		dev_err(dev, "dpni_set_pools() failed\n");