@@ -367,10 +367,11 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
367367static void xgbe_isr_bh_work (struct work_struct * work )
368368{
369369 struct xgbe_prv_data * pdata = from_work (pdata , work , dev_bh_work );
370+ unsigned int mac_isr , mac_tssr , mac_mdioisr ;
370371 struct xgbe_hw_if * hw_if = & pdata -> hw_if ;
371- struct xgbe_channel * channel ;
372+ bool per_ch_irq , ti , ri , rbu , fbe ;
372373 unsigned int dma_isr , dma_ch_isr ;
373- unsigned int mac_isr , mac_tssr , mac_mdioisr ;
374+ struct xgbe_channel * channel ;
374375 unsigned int i ;
375376
376377 /* The DMA interrupt status register also reports MAC and MTL
@@ -384,43 +385,73 @@ static void xgbe_isr_bh_work(struct work_struct *work)
384385 netif_dbg (pdata , intr , pdata -> netdev , "DMA_ISR=%#010x\n" , dma_isr );
385386
386387 for (i = 0 ; i < pdata -> channel_count ; i ++ ) {
388+ bool schedule_napi = false;
389+ struct napi_struct * napi ;
390+
387391 if (!(dma_isr & (1 << i )))
388392 continue ;
389393
390394 channel = pdata -> channel [i ];
391395
392396 dma_ch_isr = XGMAC_DMA_IOREAD (channel , DMA_CH_SR );
397+
398+ /* Precompute flags once */
399+ ti = !!XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , TI );
400+ ri = !!XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , RI );
401+ rbu = !!XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , RBU );
402+ fbe = !!XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , FBE );
403+
393404 netif_dbg (pdata , intr , pdata -> netdev , "DMA_CH%u_ISR=%#010x\n" ,
394405 i , dma_ch_isr );
395406
396- /* The TI or RI interrupt bits may still be set even if using
397- * per channel DMA interrupts. Check to be sure those are not
398- * enabled before using the private data napi structure.
407+ per_ch_irq = pdata -> per_channel_irq ;
408+
409+ /*
410+ * Decide which NAPI to use and whether to schedule:
411+ * - When not using per-channel IRQs: schedule on global NAPI
412+ * if TI or RI are set.
413+ * - RBU should also trigger NAPI (either per-channel or global)
414+ * to allow refill.
399415 */
400- if (!pdata -> per_channel_irq &&
401- (XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , TI ) ||
402- XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , RI ))) {
403- if (napi_schedule_prep (& pdata -> napi )) {
404- /* Disable Tx and Rx interrupts */
405- xgbe_disable_rx_tx_ints (pdata );
416+ if (!per_ch_irq && (ti || ri ))
417+ schedule_napi = true;
406418
407- /* Turn on polling */
408- __napi_schedule (& pdata -> napi );
419+ if (rbu ) {
420+ schedule_napi = true;
421+ pdata -> ext_stats .rx_buffer_unavailable ++ ;
422+ }
423+
424+ napi = per_ch_irq ? & channel -> napi : & pdata -> napi ;
425+
426+ if (schedule_napi && napi_schedule_prep (napi )) {
427+ /* Disable interrupts appropriately before polling */
428+ if (per_ch_irq ) {
429+ if (pdata -> channel_irq_mode )
430+ xgbe_disable_rx_tx_int (pdata , channel );
431+ else
432+ disable_irq_nosync (channel -> dma_irq );
433+ } else {
434+ xgbe_disable_rx_tx_ints (pdata );
409435 }
436+
437+ /* Turn on polling */
438+ __napi_schedule (napi );
410439 } else {
411- /* Don't clear Rx/Tx status if doing per channel DMA
412- * interrupts, these will be cleared by the ISR for
413- * per channel DMA interrupts.
440+ /*
441+ * Don't clear Rx/Tx status if doing per-channel DMA
442+ * interrupts; those bits will be serviced/cleared by
443+ * the per-channel ISR/NAPI. In non-per-channel mode
444+ * when we're not scheduling NAPI here, ensure we don't
445+ * accidentally clear TI/RI in HW: zero them in the
446+ * local copy so that the eventual write-back does not
447+ * clear TI/RI.
414448 */
415449 XGMAC_SET_BITS (dma_ch_isr , DMA_CH_SR , TI , 0 );
416450 XGMAC_SET_BITS (dma_ch_isr , DMA_CH_SR , RI , 0 );
417451 }
418452
419- if (XGMAC_GET_BITS (dma_ch_isr , DMA_CH_SR , RBU ))
420- pdata -> ext_stats .rx_buffer_unavailable ++ ;
421-
422453 /* Restart the device on a Fatal Bus Error */
423- if (XGMAC_GET_BITS ( dma_ch_isr , DMA_CH_SR , FBE ) )
454+ if (fbe )
424455 schedule_work (& pdata -> restart_work );
425456
426457 /* Clear interrupt signals */