@@ -1033,21 +1033,22 @@ static void init_ring(struct net_device *dev)

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
+		dma_addr_t addr;
+
		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		np->rx_ring[i].frag.addr = cpu_to_le32(
-			dma_map_single(&np->pci_dev->dev, skb->data,
-				np->rx_buf_sz, DMA_FROM_DEVICE));
-		if (dma_mapping_error(&np->pci_dev->dev,
-			np->rx_ring[i].frag.addr)) {
+		addr = dma_map_single(&np->pci_dev->dev, skb->data,
+				      np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
+		np->rx_ring[i].frag.addr = cpu_to_le32(addr);
		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
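
All three hunks apply the same pattern: keep the raw dma_addr_t returned by dma_map_single() in a local variable, check dma_mapping_error() on that untouched handle, and only write the cpu_to_le32()-converted address into the descriptor once the mapping is known to be good. dma_mapping_error() expects the value dma_map_single() returned; checking the already-converted __le32 descriptor field, as the removed lines did, is type-unsafe and may not catch a failed mapping, and the old ordering also wrote the not-yet-validated address into the ring before the check. A minimal sketch of the pattern outside this driver is below; map_rx_buffer(), struct my_rx_desc and its fields are illustrative names, not anything defined in this driver.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch only: a hypothetical 32-bit Rx descriptor, not this driver's layout. */
struct my_rx_desc {
	__le32 addr;
	__le32 length;
};

static int map_rx_buffer(struct device *dev, struct sk_buff *skb,
			 struct my_rx_desc *desc, unsigned int buf_sz)
{
	dma_addr_t addr;

	/* Map into a raw dma_addr_t first ... */
	addr = dma_map_single(dev, skb->data, buf_sz, DMA_FROM_DEVICE);

	/* ... check the unconverted handle ... */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... and only then publish the address to the hardware descriptor. */
	desc->addr = cpu_to_le32(addr);
	desc->length = cpu_to_le32(buf_sz);
	return 0;
}

A caller in the style of init_ring() above can then free the skb and stop filling the ring when the helper reports a failure, which is what the error branch in this hunk does.
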
@@ -1088,20 +1089,22 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
+	dma_addr_t addr;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

+	addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(&np->pci_dev->dev, addr))
+		goto drop_frame;
+
	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
-	txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
-			skb->data, skb->len, DMA_TO_DEVICE));
-	if (dma_mapping_error(&np->pci_dev->dev,
-			txdesc->frag.addr))
-		goto drop_frame;
+	txdesc->frag.addr = cpu_to_le32(addr);
	txdesc->frag.length = cpu_to_le32(skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
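
The Tx hunk additionally moves the mapping ahead of every descriptor write, so a failed mapping takes the existing drop_frame exit before any hardware-visible field is touched. The body of drop_frame is outside this diff; a conventional drop path in an ndo_start_xmit handler looks roughly like the sketch below, where my_start_xmit and struct my_priv are made-up names and the label body shows the usual convention rather than quoting this driver.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch only: minimal private state standing in for the driver's netdev_private. */
struct my_priv {
	struct pci_dev *pci_dev;
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *np = netdev_priv(dev);
	dma_addr_t addr;

	/* Map before touching the ring, mirroring the reordering in the hunk above. */
	addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, addr))
		goto drop_frame;

	/* ... fill the Tx descriptor with cpu_to_le32(addr) and kick the NIC ... */
	return NETDEV_TX_OK;

drop_frame:
	/* Consume the skb and report success so the stack does not requeue it. */
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
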
@@ -1419,22 +1422,24 @@ static void refill_rx (struct net_device *dev)
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
+		dma_addr_t addr;
+
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			np->rx_ring[entry].frag.addr = cpu_to_le32(
-				dma_map_single(&np->pci_dev->dev, skb->data,
-					np->rx_buf_sz, DMA_FROM_DEVICE));
-			if (dma_mapping_error(&np->pci_dev->dev,
-				np->rx_ring[entry].frag.addr)) {
+			addr = dma_map_single(&np->pci_dev->dev, skb->data,
+					      np->rx_buf_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
+
+			np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag.length =