/* Miscellaneous interrupt registers (mapped via the CPM SLCR block) */
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1)

/* CPM5-only local interrupt registers */
#define XILINX_CPM_PCIE_IR_STATUS	0x000002A0
#define XILINX_CPM_PCIE_IR_ENABLE	0x000002A8
#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)

/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN	0
#define XILINX_CPM_PCIE_INTR_HOT_RESET	3

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)
105
/* Supported generations of the CPM hardware block. */
enum xilinx_cpm_version {
	CPM,
	CPM5,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 */
struct xilinx_cpm_variant {
	enum xilinx_cpm_version version;
};
117
+
101
118
/**
102
119
* struct xilinx_cpm_pcie - PCIe port information
103
120
* @dev: Device pointer
109
126
* @intx_irq: legacy interrupt number
110
127
* @irq: Error interrupt number
111
128
* @lock: lock protecting shared register access
129
+ * @variant: CPM version check pointer
112
130
*/
113
131
struct xilinx_cpm_pcie {
114
132
struct device * dev ;
@@ -120,6 +138,7 @@ struct xilinx_cpm_pcie {
120
138
int intx_irq ;
121
139
int irq ;
122
140
raw_spinlock_t lock ;
141
+ const struct xilinx_cpm_variant * variant ;
123
142
};
124
143
125
144
static u32 pcie_read (struct xilinx_cpm_pcie * port , u32 reg )
@@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
285
304
generic_handle_domain_irq (port -> cpm_domain , i );
286
305
pcie_write (port , val , XILINX_CPM_PCIE_REG_IDR );
287
306
307
+ if (port -> variant -> version == CPM5 ) {
308
+ val = readl_relaxed (port -> cpm_base + XILINX_CPM_PCIE_IR_STATUS );
309
+ if (val )
310
+ writel_relaxed (val , port -> cpm_base +
311
+ XILINX_CPM_PCIE_IR_STATUS );
312
+ }
313
+
288
314
/*
289
315
* XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
290
316
* CPM SLCR block.
@@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
484
510
*/
485
511
writel (XILINX_CPM_PCIE_MISC_IR_LOCAL ,
486
512
port -> cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE );
513
+
514
+ if (port -> variant -> version == CPM5 ) {
515
+ writel (XILINX_CPM_PCIE_IR_LOCAL ,
516
+ port -> cpm_base + XILINX_CPM_PCIE_IR_ENABLE );
517
+ }
518
+
487
519
/* Enable the Bridge enable bit */
488
520
pcie_write (port , pcie_read (port , XILINX_CPM_PCIE_REG_RPSC ) |
489
521
XILINX_CPM_PCIE_REG_RPSC_BEN ,
@@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
518
550
if (IS_ERR (port -> cfg ))
519
551
return PTR_ERR (port -> cfg );
520
552
521
- port -> reg_base = port -> cfg -> win ;
553
+ if (port -> variant -> version == CPM5 ) {
554
+ port -> reg_base = devm_platform_ioremap_resource_byname (pdev ,
555
+ "cpm_csr" );
556
+ if (IS_ERR (port -> reg_base ))
557
+ return PTR_ERR (port -> reg_base );
558
+ } else {
559
+ port -> reg_base = port -> cfg -> win ;
560
+ }
522
561
523
562
return 0 ;
524
563
}
@@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
559
598
if (!bus )
560
599
return - ENODEV ;
561
600
601
+ port -> variant = of_device_get_match_data (dev );
602
+
562
603
err = xilinx_cpm_pcie_parse_dt (port , bus -> res );
563
604
if (err ) {
564
605
dev_err (dev , "Parsing DT failed\n" );
@@ -591,8 +632,23 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
591
632
return err ;
592
633
}
593
634
635
+ static const struct xilinx_cpm_variant cpm_host = {
636
+ .version = CPM ,
637
+ };
638
+
639
+ static const struct xilinx_cpm_variant cpm5_host = {
640
+ .version = CPM5 ,
641
+ };
642
+
594
643
static const struct of_device_id xilinx_cpm_pcie_of_match [] = {
595
- { .compatible = "xlnx,versal-cpm-host-1.00" , },
644
+ {
645
+ .compatible = "xlnx,versal-cpm-host-1.00" ,
646
+ .data = & cpm_host ,
647
+ },
648
+ {
649
+ .compatible = "xlnx,versal-cpm5-host" ,
650
+ .data = & cpm5_host ,
651
+ },
596
652
{}
597
653
};
598
654
0 commit comments