 	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
 #define hbg_queue_is_empty(head, tail, ring) \
 	(hbg_queue_used_num((head), (tail), (ring)) == 0)
+#define hbg_queue_is_full(head, tail, ring) \
+	(hbg_queue_left_num((head), (tail), (ring)) == 0)
 #define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+#define hbg_queue_move_next(p, ring) ({ \
+	typeof(ring) _ring = (ring); \
+	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
 
 #define HBG_TX_STOP_THRS	2
 #define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)
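
Editorial aside on the ring arithmetic above (not part of the patch): the head and tail indices wrap modulo the ring length, and hbg_queue_left_num() subtracts one extra slot so that a full ring (left == 0) can never be confused with an empty one (used == 0). A minimal standalone sketch of the same arithmetic, assuming plain unsigned indices and an example ring length of 4:

#include <stdio.h>

/* Same math as hbg_queue_used_num()/hbg_queue_left_num(), on plain integers. */
static unsigned int used_num(unsigned int head, unsigned int tail, unsigned int len)
{
	return (tail + len - head) % len;
}

static unsigned int left_num(unsigned int head, unsigned int tail, unsigned int len)
{
	/* one slot is deliberately kept unused so "full" and "empty" stay distinct */
	return len - used_num(head, tail, len) - 1;
}

int main(void)
{
	unsigned int len = 4;

	/* head = 0, tail = 3: three slots in use, none left -> the ring is full */
	printf("used=%u left=%u\n", used_num(0, 3, len), left_num(0, 3, len));
	return 0;
}
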
@@ -124,6 +129,20 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
 	buffer->skb = NULL;
 }
 
+static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
+{
+	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
+	struct hbg_priv *priv = buffer->priv;
+
+	buffer->skb = netdev_alloc_skb(priv->netdev, len);
+	if (unlikely(!buffer->skb))
+		return -ENOMEM;
+
+	buffer->skb_len = len;
+	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
+	return 0;
+}
+
 static void hbg_buffer_free(struct hbg_buffer *buffer)
 {
 	hbg_dma_unmap(buffer);
@@ -176,6 +195,92 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
 	return packet_done;
 }
 
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+	struct hbg_ring *ring = &priv->rx_ring;
+	struct hbg_buffer *buffer;
+	int ret;
+
+	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+		return 0;
+
+	buffer = &ring->queue[ring->ntu];
+	ret = hbg_buffer_alloc_skb(buffer);
+	if (unlikely(ret))
+		return ret;
+
+	ret = hbg_dma_map(buffer);
+	if (unlikely(ret)) {
+		hbg_buffer_free_skb(buffer);
+		return ret;
+	}
+
+	hbg_hw_fill_buffer(priv, buffer->skb_dma);
+	hbg_queue_move_next(ntu, ring);
+	return 0;
+}
+
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+				  struct hbg_buffer *buffer)
+{
+	struct hbg_rx_desc *rx_desc;
+
+	/* make sure HW write desc complete */
+	dma_rmb();
+
+	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
+				buffer->skb_len, DMA_FROM_DEVICE);
+
+	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+	struct hbg_priv *priv = ring->priv;
+	struct hbg_rx_desc *rx_desc;
+	struct hbg_buffer *buffer;
+	u32 packet_done = 0;
+	u32 pkt_len;
+
+	while (packet_done < budget) {
+		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
+			break;
+
+		buffer = &ring->queue[ring->ntc];
+		if (unlikely(!buffer->skb))
+			goto next_buffer;
+
+		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+			break;
+		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+
+		hbg_dma_unmap(buffer);
+
+		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+		skb_put(buffer->skb, pkt_len);
+		buffer->skb->protocol = eth_type_trans(buffer->skb,
+						       priv->netdev);
+
+		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
+		napi_gro_receive(napi, buffer->skb);
+		buffer->skb = NULL;
+
+next_buffer:
+		hbg_rx_fill_one_buffer(priv);
+		hbg_queue_move_next(ntc, ring);
+		packet_done++;
+	}
+
+	if (likely(packet_done < budget &&
+		   napi_complete_done(napi, packet_done)))
+		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+	return packet_done;
+}
+
 static void hbg_ring_uninit(struct hbg_ring *ring)
 {
 	struct hbg_buffer *buffer;
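
Editorial note on hbg_napi_rx_poll() above: it follows the standard NAPI contract, i.e. it handles at most `budget` packets per poll, and only when less than the full budget was consumed does it call napi_complete_done() and re-arm the RX interrupt. A minimal sketch of that shape, where my_hw_has_packet(), my_hw_receive_one() and my_hw_irq_enable() are hypothetical stand-ins for the hbg-specific helpers (not real driver APIs):

#include <linux/netdevice.h>

/* Hypothetical device hooks, declared here for illustration only. */
bool my_hw_has_packet(struct napi_struct *napi);
void my_hw_receive_one(struct napi_struct *napi);	/* unmap, build skb, napi_gro_receive() */
void my_hw_irq_enable(struct napi_struct *napi);

static int my_napi_rx_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		if (!my_hw_has_packet(napi))	/* nothing more pending from the NIC */
			break;
		my_hw_receive_one(napi);
		done++;
	}

	/* Re-arm the interrupt only when the budget was not exhausted and
	 * napi_complete_done() confirms that polling mode was actually left.
	 */
	if (done < budget && napi_complete_done(napi, done))
		my_hw_irq_enable(napi);

	return done;
}
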
@@ -232,7 +337,11 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
 	ring->ntu = 0;
 	ring->len = len;
 
-	netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+	if (dir == HBG_DIR_TX)
+		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+	else
+		netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+
 	napi_enable(&ring->napi);
 	return 0;
 }
@@ -252,19 +361,49 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
 	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
 }
 
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+	int ret;
+	u32 i;
+
+	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < priv->rx_ring.len - 1; i++) {
+		ret = hbg_rx_fill_one_buffer(priv);
+		if (ret) {
+			hbg_ring_uninit(&priv->rx_ring);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 int hbg_txrx_init(struct hbg_priv *priv)
 {
 	int ret;
 
 	ret = hbg_tx_ring_init(priv);
-	if (ret)
+	if (ret) {
 		dev_err(&priv->pdev->dev,
 			"failed to init tx ring, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hbg_rx_ring_init(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev,
+			"failed to init rx ring, ret = %d\n", ret);
+		hbg_ring_uninit(&priv->tx_ring);
+	}
 
 	return ret;
 }
 
 void hbg_txrx_uninit(struct hbg_priv *priv)
 {
 	hbg_ring_uninit(&priv->tx_ring);
+	hbg_ring_uninit(&priv->rx_ring);
 }
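
A closing editorial note on the error handling in hbg_txrx_init() above: when RX ring setup fails, the already-initialized TX ring is torn down before returning, i.e. stages are unwound in reverse order of initialization. A generic sketch of that pattern, with purely illustrative names (setup_tx/setup_rx/teardown_tx are not hbg functions):

/* Generic unwind-on-failure shape; all names here are illustrative only. */
struct my_dev;

int setup_tx(struct my_dev *dev);
int setup_rx(struct my_dev *dev);
void teardown_tx(struct my_dev *dev);

static int init_both_rings(struct my_dev *dev)
{
	int ret;

	ret = setup_tx(dev);
	if (ret)
		return ret;		/* nothing initialized yet, nothing to undo */

	ret = setup_rx(dev);
	if (ret)
		teardown_tx(dev);	/* unwind the stage that already succeeded */

	return ret;
}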