@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/timer.h>
+#include <net/netdev_queues.h>
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Kvaser AB <[email protected]>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
 	void __iomem *reg_base;
 	struct can_berr_counter bec;
 	u8 cmd_seq;
+	u8 tx_max_count;
+	u8 tx_idx;
+	u8 ack_idx;
 	int err_rep_cnt;
-	int echo_idx;
+	unsigned int completed_tx_pkts;
+	unsigned int completed_tx_bytes;
 	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
-	spinlock_t echo_lock; /* Locks the message echo buffer */
 	struct timer_list bec_poll_timer;
 	struct completion start_comp, flush_comp;
 };
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
 	int ret;
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
 
+	can->tx_idx = 0;
+	can->ack_idx = 0;
+
 	ret = open_candev(netdev);
 	if (ret)
 		return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
 		timer_delete(&can->bec_poll_timer);
 	}
 	can->can.state = CAN_STATE_STOPPED;
+	netdev_reset_queue(netdev);
 	close_candev(netdev);
 
 	return ret;
 }
 
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+	return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
 static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
-					   struct kvaser_pciefd_can *can,
+					   struct can_priv *can, u8 seq,
 					   struct sk_buff *skb)
 {
 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
 	int packet_size;
-	int seq = can->echo_idx;
 
 	memset(p, 0, sizeof(*p));
-	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+	if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
 		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
 
 	if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
 	} else {
 		p->header[1] |=
 			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
-				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+				   can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
 	}
 
 	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 					    struct net_device *netdev)
 {
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
-	unsigned long irq_flags;
 	struct kvaser_pciefd_tx_packet packet;
+	unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+	unsigned int frame_len;
 	int nr_words;
-	u8 count;
 
 	if (can_dev_dropped_skb(netdev, skb))
 		return NETDEV_TX_OK;
+	if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+		return NETDEV_TX_BUSY;
 
-	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
 
-	spin_lock_irqsave(&can->echo_lock, irq_flags);
 	/* Prepare and save echo skb in internal slot */
-	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
-	/* Move echo index to the next slot */
-	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+	WRITE_ONCE(can->can.echo_skb[seq], NULL);
+	frame_len = can_skb_get_frame_len(skb);
+	can_put_echo_skb(skb, netdev, seq, frame_len);
+	netdev_sent_queue(netdev, frame_len);
+	WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
 
 	/* Write header to fifo */
 	iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 			  KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
 	}
 
-	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
-	/* No room for a new message, stop the queue until at least one
-	 * successful transmit
-	 */
-	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
-		netif_stop_queue(netdev);
-	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+	netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
 
 	return NETDEV_TX_OK;
 }
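
The stop/wake scheme above is the core of the patch: echo_lock is gone, and the xmit path instead keeps a free-running tx_idx that only ndo_start_xmit() advances, while the ACK path advances ack_idx. The in-flight count is simply their difference, which stays correct across counter wraparound thanks to unsigned arithmetic. A minimal userspace sketch of that accounting (unsigned int counters and a stand-in tx_avail(); an illustration, not the driver code):

	#include <assert.h>

	#define TX_MAX_COUNT 17u		/* capacity, cf. can->tx_max_count */

	static unsigned int tx_idx;		/* written by the xmit path only */
	static unsigned int ack_idx;		/* written by the ack path only */

	static unsigned int tx_avail(void)
	{
		/* Unsigned wraparound keeps the difference correct */
		return TX_MAX_COUNT - (tx_idx - ack_idx);
	}

	int main(void)
	{
		unsigned int i;

		tx_idx = ack_idx = 0xfffffff0u;	/* force a wrap soon */
		for (i = 0; i < TX_MAX_COUNT; i++)
			tx_idx++;		/* queue packets */
		assert(tx_avail() == 0);	/* full: queue would be stopped */
		for (i = 0; i < TX_MAX_COUNT; i++)
			ack_idx++;		/* complete them */
		assert(tx_avail() == TX_MAX_COUNT);
		return 0;
	}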
@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->kv_pcie = pcie;
 		can->cmd_seq = 0;
 		can->err_rep_cnt = 0;
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
 		can->bec.txerr = 0;
 		can->bec.rxerr = 0;
 
@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		tx_nr_packets_max =
 			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
 				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+		can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
 
 		can->can.clock.freq = pcie->freq;
-		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
-		can->echo_idx = 0;
-		spin_lock_init(&can->echo_lock);
+		can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
 		spin_lock_init(&can->lock);
 
 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
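
Rounding echo_skb_max up to a power of two lets both the xmit and ACK paths derive the echo slot from a free-running index with a mask, as in tx_idx & (echo_skb_max - 1). Masking is only equivalent to a modulo for power-of-two sizes; a tiny standalone check of that identity (illustrative only, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned int size = 32;	/* e.g. roundup_pow_of_two(17) */
		unsigned int idx;

		for (idx = 0; idx < 4 * size; idx++)
			assert((idx & (size - 1)) == (idx % size));
		return 0;
	}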
@@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
 		skb = alloc_canfd_skb(priv->dev, &cf);
 		if (!skb) {
 			priv->dev->stats.rx_dropped++;
-			return -ENOMEM;
+			return 0;
 		}
 
 		cf->len = can_fd_dlc2len(dlc);
@@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
 		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
 		if (!skb) {
 			priv->dev->stats.rx_dropped++;
-			return -ENOMEM;
+			return 0;
 		}
 		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
 	}
@@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
 	priv->dev->stats.rx_packets++;
 	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
 
-	return netif_rx(skb);
+	netif_rx(skb);
+
+	return 0;
 }
 
 static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
 		netdev_dbg(can->can.dev, "Packet was flushed\n");
 	} else {
 		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
-		int len;
-		u8 count;
+		unsigned int len, frame_len = 0;
 		struct sk_buff *skb;
 
+		if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+			return 0;
 		skb = can->can.echo_skb[echo_idx];
-		if (skb)
-			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
-		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
-		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+		if (!skb)
+			return 0;
+		kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+		len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
 
-		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
-			netif_wake_queue(can->can.dev);
+		/* Pairs with barrier in kvaser_pciefd_start_xmit() */
+		smp_store_release(&can->ack_idx, can->ack_idx + 1);
+		can->completed_tx_pkts++;
+		can->completed_tx_bytes += frame_len;
 
 		if (!one_shot_fail) {
 			can->can.dev->stats.tx_bytes += len;
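
The smp_store_release() above publishes the freed echo slot before the new ack_idx becomes visible, so an xmit path that observes the bumped index is guaranteed to also observe the slot as free. A sketch of the same release/acquire pairing using C11 atomics in place of the kernel primitives (an analogy under that assumption, not the driver's actual code):

	#include <stdatomic.h>
	#include <stddef.h>

	#define RING_SIZE 32		/* power of two, cf. echo_skb_max */

	static void *echo_skb[RING_SIZE];
	static _Atomic unsigned int ack_idx;

	/* Consumer: complete the packet occupying slot (idx & mask) */
	static void complete_one(unsigned int idx)
	{
		echo_skb[idx & (RING_SIZE - 1)] = NULL;	/* free the slot... */
		/* ...then publish: release orders the write above first */
		atomic_store_explicit(&ack_idx, idx + 1, memory_order_release);
	}

	/* Producer: seeing the new ack_idx implies seeing the freed slot */
	static unsigned int avail(unsigned int tx_idx, unsigned int cap)
	{
		unsigned int ack = atomic_load_explicit(&ack_idx,
							memory_order_acquire);

		return cap - (tx_idx - ack);
	}

	int main(void)
	{
		complete_one(0);
		return avail(1, RING_SIZE) == RING_SIZE ? 0 : 1;
	}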
@@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
 {
 	int pos = 0;
 	int res = 0;
+	unsigned int i;
 
 	do {
 		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
 	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
 
+	/* Report ACKs in this buffer to BQL en masse for correct periods */
+	for (i = 0; i < pcie->nr_channels; ++i) {
+		struct kvaser_pciefd_can *can = pcie->can[i];
+
+		if (!can->completed_tx_pkts)
+			continue;
+		netif_subqueue_completed_wake(can->can.dev, 0,
+					      can->completed_tx_pkts,
+					      can->completed_tx_bytes,
+					      kvaser_pciefd_tx_avail(can), 1);
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
+	}
+
 	return res;
 }
 
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
 {
+	void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
 	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
 		kvaser_pciefd_read_buffer(pcie, 0);
+		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+	}
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
 		kvaser_pciefd_read_buffer(pcie, 1);
+		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+	}
 
 	if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
 		     irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
 		     irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
 		     irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
 		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
-	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
-	return irq;
 }
 
 static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
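
Note how ACK completions are only accumulated per channel (completed_tx_pkts/completed_tx_bytes) while a DMA buffer is drained, then handed to BQL once per buffer via netif_subqueue_completed_wake(), instead of waking the queue per packet. A userspace sketch of that batching, with report_completed() standing in for the helper (hypothetical names, illustration only):

	#include <stdio.h>

	#define NR_CHANNELS 4

	struct chan {
		unsigned int completed_tx_pkts;
		unsigned int completed_tx_bytes;
	};

	static struct chan chans[NR_CHANNELS];

	/* Stand-in for netif_subqueue_completed_wake() */
	static void report_completed(int ch, unsigned int pkts, unsigned int bytes)
	{
		printf("ch%d: %u pkts / %u bytes completed\n", ch, pkts, bytes);
	}

	static void ack_one(int ch, unsigned int frame_len)
	{
		chans[ch].completed_tx_pkts++;
		chans[ch].completed_tx_bytes += frame_len;
	}

	static void buffer_drained(void)
	{
		for (int i = 0; i < NR_CHANNELS; i++) {
			if (!chans[i].completed_tx_pkts)
				continue;
			report_completed(i, chans[i].completed_tx_pkts,
					 chans[i].completed_tx_bytes);
			chans[i].completed_tx_pkts = 0;
			chans[i].completed_tx_bytes = 0;
		}
	}

	int main(void)
	{
		ack_one(0, 16);
		ack_one(0, 64);
		ack_one(2, 8);
		buffer_drained();	/* one report per active channel */
		return 0;
	}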
@@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
 	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
 	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
 	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
-	u32 srb_irq = 0;
-	u32 srb_release = 0;
 	int i;
 
 	if (!(pci_irq & irq_mask->all))
 		return IRQ_NONE;
 
+	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
 	if (pci_irq & irq_mask->kcan_rx0)
-		srb_irq = kvaser_pciefd_receive_irq(pcie);
+		kvaser_pciefd_receive_irq(pcie);
 
 	for (i = 0; i < pcie->nr_channels; i++) {
 		if (pci_irq & irq_mask->kcan_tx[i])
 			kvaser_pciefd_transmit_irq(pcie->can[i]);
 	}
 
-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
-	if (srb_release)
-		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
 
 	return IRQ_HANDLED;
 }
@@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
 	}
 }
 
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+	unsigned int i;
+
+	/* Masking PCI_IRQ is insufficient as running ISR will unmask it */
+	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+	for (i = 0; i < pcie->nr_channels; ++i)
+		iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
 static int kvaser_pciefd_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
 	int ret;
 	struct kvaser_pciefd *pcie;
 	const struct kvaser_pciefd_irq_mask *irq_mask;
-	void __iomem *irq_en_base;
 
 	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
@@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
 
 	/* Enable PCI interrupts */
-	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
-	iowrite32(irq_mask->all, irq_en_base);
+	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
 	/* Ready the DMA buffers */
 	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 	return 0;
 
 err_free_irq:
-	/* Disable PCI interrupts */
-	iowrite32(0, irq_en_base);
+	kvaser_pciefd_disable_irq_srcs(pcie);
 	free_irq(pcie->pci->irq, pcie);
 
 err_pci_free_irq_vectors:
@@ -1844,35 +1875,26 @@
 	return ret;
 }
 
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
-	int i;
-
-	for (i = 0; i < pcie->nr_channels; i++) {
-		struct kvaser_pciefd_can *can = pcie->can[i];
-
-		if (can) {
-			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
-			unregister_candev(can->can.dev);
-			timer_delete(&can->bec_poll_timer);
-			kvaser_pciefd_pwm_stop(can);
-			free_candev(can->can.dev);
-		}
-	}
-}
-
 static void kvaser_pciefd_remove(struct pci_dev *pdev)
 {
 	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+	unsigned int i;
 
-	kvaser_pciefd_remove_all_ctrls(pcie);
+	for (i = 0; i < pcie->nr_channels; ++i) {
+		struct kvaser_pciefd_can *can = pcie->can[i];
 
-	/* Disable interrupts */
-	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
-	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+		unregister_candev(can->can.dev);
+		timer_delete(&can->bec_poll_timer);
+		kvaser_pciefd_pwm_stop(can);
+	}
 
+	kvaser_pciefd_disable_irq_srcs(pcie);
 	free_irq(pcie->pci->irq, pcie);
 	pci_free_irq_vectors(pcie->pci);
+
+	for (i = 0; i < pcie->nr_channels; ++i)
+		free_candev(pcie->can[i]->can.dev);
+
 	pci_iounmap(pdev, pcie->reg_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);