@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 			stats->tx_bytes += skb->len;
 		}
 
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+		dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
 				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
 
 		/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 static int arc_emac_rx(struct net_device *ndev, int budget)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int work_done;
 
 	for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			continue;
 		}
 
-		addr = dma_map_single(&ndev->dev, (void *)skb->data,
+		addr = dma_map_single(dev, (void *)skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			if (net_ratelimit())
 				netdev_err(ndev, "cannot map dma buffer\n");
 			dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 		}
 
 		/* unmap previosly mapped skb */
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+		dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
 				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
 
 		pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct phy_device *phy_dev = ndev->phydev;
+	struct device *dev = ndev->dev.parent;
 	int i;
 
 	phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
 		if (unlikely(!rx_buff->skb))
 			return -ENOMEM;
 
-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+		addr = dma_map_single(dev, (void *)rx_buff->skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, addr)) {
+		if (dma_mapping_error(dev, addr)) {
 			netdev_err(ndev, "cannot dma map\n");
 			dev_kfree_skb(rx_buff->skb);
 			return -ENOMEM;
@@ -548,14 +551,15 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 static void arc_free_tx_queue(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < TX_BD_NUM; i++) {
 		struct arc_emac_bd *txbd = &priv->txbd[i];
 		struct buffer_state *tx_buff = &priv->tx_buff[i];
 
 		if (tx_buff->skb) {
-			dma_unmap_single(&ndev->dev,
+			dma_unmap_single(dev,
 					 dma_unmap_addr(tx_buff, addr),
 					 dma_unmap_len(tx_buff, len),
 					 DMA_TO_DEVICE);
@@ -579,14 +583,15 @@ static void arc_free_tx_queue(struct net_device *ndev)
 static void arc_free_rx_queue(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
 	unsigned int i;
 
 	for (i = 0; i < RX_BD_NUM; i++) {
 		struct arc_emac_bd *rxbd = &priv->rxbd[i];
 		struct buffer_state *rx_buff = &priv->rx_buff[i];
 
 		if (rx_buff->skb) {
-			dma_unmap_single(&ndev->dev,
+			dma_unmap_single(dev,
 					 dma_unmap_addr(rx_buff, addr),
 					 dma_unmap_len(rx_buff, len),
 					 DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	unsigned int len, *txbd_curr = &priv->txbd_curr;
 	struct net_device_stats *stats = &ndev->stats;
 	__le32 *info = &priv->txbd[*txbd_curr].info;
+	struct device *dev = ndev->dev.parent;
 	dma_addr_t addr;
 
 	if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 
-	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
-			      DMA_TO_DEVICE);
+	addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
 
-	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+	if (unlikely(dma_mapping_error(dev, addr))) {
		stats->tx_dropped++;
 		stats->tx_errors++;
 		dev_kfree_skb_any(skb);
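
Note: every hunk above applies the same substitution. The DMA API calls move from the net_device's embedded class device (&ndev->dev), which carries no DMA configuration, to the bus device the driver was probed on (ndev->dev.parent, associated via SET_NETDEV_DEV() in the probe path). A minimal sketch of the mapping side of this pattern follows; the helper name example_map_rx() and its signature are illustrative only, not part of the driver.

/*
 * Illustrative sketch, not part of the patch: map an RX buffer against
 * the DMA-capable parent device. Assumes the driver has associated the
 * net_device with its platform device via SET_NETDEV_DEV() at probe
 * time, so ndev->dev.parent is the device that owns the dma_mask and
 * DMA ops.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

static int example_map_rx(struct net_device *ndev, void *buf, size_t len,
			  dma_addr_t *addr)
{
	struct device *dev = ndev->dev.parent;	/* bus device, not &ndev->dev */

	*addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	return 0;
}

The unmap side must use the same struct device that performed the mapping, which is why the free/clean paths in the hunks above switch to the cached dev pointer as well.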