@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
 }
 
 
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
 {
-	struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+	struct ace_private *ap = from_work(ap, work, ace_bh_work);
 	struct net_device *dev = ap->ndev;
 	int cur_size;
 
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
 #endif
 		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
 	}
-	ap->tasklet_pending = 0;
+	ap->bh_work_pending = 0;
 }
 
 
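The handler conversion above is mechanical: from_work() is the workqueue counterpart of from_tasklet(), expanding to container_of() to recover the structure that embeds the work_struct. A minimal sketch of that pattern, assuming a hypothetical struct my_dev rather than the driver's ace_private:

#include <linux/workqueue.h>

/* Hypothetical container; in the driver this role is played by
 * struct ace_private with its ace_bh_work and bh_work_pending
 * members. */
struct my_dev {
	int refill_pending;
	struct work_struct refill_work;
};

static void refill_work_fn(struct work_struct *work)
{
	/* from_work(var, work_ptr, member) recovers the embedding
	 * structure, exactly as ace_bh_work() does above. */
	struct my_dev *dev = from_work(dev, work, refill_work);

	/* ... refill the rx rings ... */
	dev->refill_pending = 0;
}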
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
  *
  * Loading rings is safe without holding the spin lock since this is
  * done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
  */
 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
 {
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 	 */
 	if (netif_running(dev)) {
 		int cur_size;
-		int run_tasklet = 0;
+		int run_bh_work = 0;
 
 		cur_size = atomic_read(&ap->cur_rx_bufs);
 		if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 				ace_load_std_rx_ring(dev,
 						     RX_RING_SIZE - cur_size);
 			} else
-				run_tasklet = 1;
+				run_bh_work = 1;
 		}
 
 		if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 					ace_load_mini_rx_ring(dev,
 							      RX_MINI_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
 
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 					ace_load_jumbo_rx_ring(dev,
 							       RX_JUMBO_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
-		if (run_tasklet && !ap->tasklet_pending) {
-			ap->tasklet_pending = 1;
-			tasklet_schedule(&ap->ace_tasklet);
+		if (run_bh_work && !ap->bh_work_pending) {
+			ap->bh_work_pending = 1;
+			queue_work(system_bh_wq, &ap->ace_bh_work);
 		}
 	}
 
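The interrupt-path change keeps the driver's one-pending-instance guard and only swaps the scheduling call: queue_work() on system_bh_wq executes the work item in softirq context, matching the old tasklet_schedule() semantics. Continuing the hypothetical struct my_dev sketch from above:

/* Hard-irq path: queue at most one instance of the BH work,
 * mirroring the run_bh_work/bh_work_pending logic in
 * ace_interrupt(). */
static void schedule_refill(struct my_dev *dev)
{
	if (!dev->refill_pending) {
		dev->refill_pending = 1;
		queue_work(system_bh_wq, &dev->refill_work);
	}
}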
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
 	/*
 	 * Setup the bottom half rx ring refill handler
 	 */
-	tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+	INIT_WORK(&ap->ace_bh_work, ace_bh_work);
 
 	return 0;
 }
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 
-	tasklet_kill(&ap->ace_tasklet);
+	cancel_work_sync(&ap->ace_bh_work);
 
 	/*
 	 * Make sure one CPU is not processing packets while
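Finally, the lifecycle pairing: INIT_WORK() takes over from tasklet_setup() at open time, and cancel_work_sync() from tasklet_kill() at close time, blocking until any queued or already-running instance has finished. Completing the same hypothetical sketch:

/* Open/close counterparts of tasklet_setup()/tasklet_kill(). */
static void my_open(struct my_dev *dev)
{
	dev->refill_pending = 0;
	INIT_WORK(&dev->refill_work, refill_work_fn);
}

static void my_close(struct my_dev *dev)
{
	/* May sleep while waiting for the work item to complete, so
	 * call from process context, as ace_close() does. */
	cancel_work_sync(&dev->refill_work);
}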