@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 			stats->tx_bytes += skb->len;
 		}
 
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+		dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
 				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
 
 		/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 static int arc_emac_rx(struct net_device *ndev, int budget)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int work_done;
 
 	for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			continue;
 		}
 
-		addr = dma_map_single(&ndev->dev, (void *)skb->data,
+		addr = dma_map_single(dev, (void *)skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			if (net_ratelimit())
 				netdev_err(ndev, "cannot map dma buffer\n");
 			dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 		}
 
 		/* unmap previosly mapped skb */
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+		dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
 				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
 
 		pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct phy_device *phy_dev = ndev->phydev;
+	struct device *dev = ndev->dev.parent;
 	int i;
 
 	phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
 		if (unlikely(!rx_buff->skb))
 			return -ENOMEM;
 
-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+		addr = dma_map_single(dev, (void *)rx_buff->skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			netdev_err(ndev, "cannot dma map\n");
 			dev_kfree_skb(rx_buff->skb);
 			return -ENOMEM;
@@ -548,14 +551,15 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 static void arc_free_tx_queue(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < TX_BD_NUM; i++) {
 		struct arc_emac_bd *txbd = &priv->txbd[i];
 		struct buffer_state *tx_buff = &priv->tx_buff[i];
 
 		if (tx_buff->skb) {
-			dma_unmap_single(&ndev->dev,
+			dma_unmap_single(dev,
 					 dma_unmap_addr(tx_buff, addr),
 					 dma_unmap_len(tx_buff, len),
 					 DMA_TO_DEVICE);
@@ -579,14 +583,15 @@ static void arc_free_tx_queue(struct net_device *ndev)
 static void arc_free_rx_queue(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < RX_BD_NUM; i++) {
 		struct arc_emac_bd *rxbd = &priv->rxbd[i];
 		struct buffer_state *rx_buff = &priv->rx_buff[i];
 
 		if (rx_buff->skb) {
-			dma_unmap_single(&ndev->dev,
+			dma_unmap_single(dev,
 					 dma_unmap_addr(rx_buff, addr),
 					 dma_unmap_len(rx_buff, len),
 					 DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	unsigned int len, *txbd_curr = &priv->txbd_curr;
 	struct net_device_stats *stats = &ndev->stats;
 	__le32 *info = &priv->txbd[*txbd_curr].info;
+	struct device *dev = ndev->dev.parent;
 	dma_addr_t addr;
 
 	if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 
-	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
-			      DMA_TO_DEVICE);
+	addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
 
-	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+	if (unlikely(dma_mapping_error(dev, addr))) {
 		stats->tx_dropped++;
 		stats->tx_errors++;
 		dev_kfree_skb_any(skb);
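
For context, a minimal sketch of the setup this change relies on, assuming the usual platform-driver pattern (the probe function, struct, and field names below are illustrative, not part of this commit): the net_device's embedded ndev->dev is a purely logical device with no DMA configuration, while its parent, recorded via SET_NETDEV_DEV() at probe time, is the platform device that carries the dma_ops, DMA mask, and any devicetree dma-ranges. DMA API calls therefore have to target the parent, which is what the hunks above switch to.

/* Illustrative sketch only; example_probe and example_priv are
 * hypothetical names, not taken from the arc_emac driver.
 */
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

struct example_priv {
	void __iomem *regs;		/* hypothetical private state */
};

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct example_priv));
	if (!ndev)
		return -ENOMEM;

	/* Records &pdev->dev as ndev->dev.parent; this is the device the
	 * DMA core actually knows about ...
	 */
	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* ... so mappings must use ndev->dev.parent, never &ndev->dev:
	 *
	 *	dma_map_single(ndev->dev.parent, buf, len, DMA_TO_DEVICE);
	 */
	return 0;	/* netdev_ops setup and register_netdev() omitted */
}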