 * PLDA PCIe XpressRich host controller driver
 *
 * Copyright (C) 2023 Microchip Co. Ltd
+ *		      StarFive Co. Ltd
 *
 * Author: Daire McNamara <[email protected]>
 */

#include "pcie-plda.h"

+ void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				  int where)
+ {
+	struct plda_pcie_rp *pcie = bus->sysdata;
+
+	return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+ }
+ EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
static void plda_handle_msi(struct irq_desc *desc)
{
	struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
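The added plda_pcie_map_bus() is an ECAM-style map_bus helper that glue drivers can drop into their pci_ops. Below is a minimal sketch, not part of this commit, of how such a table might look, assuming the hardware works with the kernel's generic config accessors (pci_generic_config_read/write from <linux/pci.h>); the table name is made up for illustration and is reused in the probe sketch at the end of this diff.

#include <linux/pci.h>
#include "pcie-plda.h"

/* Hypothetical pci_ops table for a PLDA-based glue driver. */
static struct pci_ops example_plda_pcie_ops = {
	.map_bus	= plda_pcie_map_bus,	/* resolves bus/devfn/where via pcie->config_base */
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};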
@@ -420,9 +430,7 @@ int plda_init_interrupts(struct platform_device *pdev,
				 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
-	int irq;
-	int intx_irq, msi_irq, event_irq;
-	int ret;
+	int event_irq, ret;
	u32 i;

	if (!port->event_ops)
@@ -437,8 +445,8 @@ int plda_init_interrupts(struct platform_device *pdev,
		return ret;
	}

-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
+	port->irq = platform_get_irq(pdev, 0);
+	if (port->irq < 0)
		return -ENODEV;

	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
@@ -461,26 +469,26 @@ int plda_init_interrupts(struct platform_device *pdev,
		}
	}

-	intx_irq = irq_create_mapping(port->event_domain,
-				      event->intx_event);
-	if (!intx_irq) {
+	port->intx_irq = irq_create_mapping(port->event_domain,
+					    event->intx_event);
+	if (!port->intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
-	irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);
+	irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);

-	msi_irq = irq_create_mapping(port->event_domain,
-				     event->msi_event);
-	if (!msi_irq)
+	port->msi_irq = irq_create_mapping(port->event_domain,
+					   event->msi_event);
+	if (!port->msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
-	irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);
+	irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
-	irq_set_chained_handler_and_data(irq, plda_handle_event, port);
+	irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);

	return 0;
}
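In the hunks above, the raw irq, intx_irq and msi_irq locals become fields of struct plda_pcie_rp; keeping the Linux IRQ numbers in the port structure is what lets the new plda_pcie_irq_domain_deinit() below unhook the chained handlers and remove the IRQ domains again on an error path or at driver removal.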
@@ -547,3 +555,98 @@ int plda_pcie_setup_iomems(struct platform_device *pdev,
	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
+
+ static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+ {
+	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+	irq_domain_remove(pcie->msi.msi_domain);
+	irq_domain_remove(pcie->msi.dev_domain);
+
+	irq_domain_remove(pcie->intx_domain);
+	irq_domain_remove(pcie->event_domain);
+ }
+
+ int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+			 const struct plda_event *plda_event)
+ {
+	struct device *dev = port->dev;
+	struct pci_host_bridge *bridge;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *cfg_res;
+	int ret;
+
+	pdev = to_platform_device(dev);
+
+	port->bridge_addr =
+		devm_platform_ioremap_resource_byname(pdev, "apb");
+
+	if (IS_ERR(port->bridge_addr))
+		return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+				     "failed to map reg memory\n");
+
+	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+	if (!cfg_res)
+		return dev_err_probe(dev, -ENODEV,
+				     "failed to get config memory\n");
+
+	port->config_base = devm_ioremap_resource(dev, cfg_res);
+	if (IS_ERR(port->config_base))
+		return dev_err_probe(dev, PTR_ERR(port->config_base),
+				     "failed to map config memory\n");
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return dev_err_probe(dev, -ENOMEM,
+				     "failed to alloc bridge\n");
+
+	if (port->host_ops && port->host_ops->host_init) {
+		ret = port->host_ops->host_init(port);
+		if (ret)
+			return ret;
+	}
+
+	port->bridge = bridge;
+	plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+			       resource_size(cfg_res));
+	plda_pcie_setup_iomems(bridge, port);
+	plda_set_default_msi(&port->msi);
+	ret = plda_init_interrupts(pdev, port, plda_event);
+	if (ret)
+		goto err_host;
+
+	/* Set default bus ops */
+	bridge->ops = ops;
+	bridge->sysdata = port;
+
+	ret = pci_host_probe(bridge);
+	if (ret < 0) {
+		dev_err_probe(dev, ret, "failed to probe pci host\n");
+		goto err_probe;
+	}
+
+	return ret;
+
+ err_probe:
+	plda_pcie_irq_domain_deinit(port);
+ err_host:
+	if (port->host_ops && port->host_ops->host_deinit)
+		port->host_ops->host_deinit(port);
+
+	return ret;
+ }
+ EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+ void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+ {
+	pci_stop_root_bus(port->bridge->bus);
+	pci_remove_root_bus(port->bridge->bus);
+
+	plda_pcie_irq_domain_deinit(port);
+
+	if (port->host_ops && port->host_ops->host_deinit)
+		port->host_ops->host_deinit(port);
+ }
+ EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
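For context, here is a hypothetical glue-driver skeleton, not part of this commit, showing the call sequence the new helpers expect. The names example_pcie_probe(), example_pcie_remove(), the event numbers, and the reuse of the example_plda_pcie_ops table sketched earlier are all illustrative assumptions; a real driver would also fill in events_bitmap, num_events and host_ops before calling plda_pcie_host_init().

#include <linux/platform_device.h>
#include <linux/pci.h>
#include "pcie-plda.h"

/* Placeholder event indices; real drivers use their controller's event map. */
static const struct plda_event example_events = {
	.intx_event	= 0,
	.msi_event	= 1,
};

static int example_pcie_probe(struct platform_device *pdev)
{
	struct plda_pcie_rp *port;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = &pdev->dev;
	platform_set_drvdata(pdev, port);

	/* Maps the "apb"/"cfg" resources, wires up the IRQs and probes the bus. */
	return plda_pcie_host_init(port, &example_plda_pcie_ops, &example_events);
}

static void example_pcie_remove(struct platform_device *pdev)
{
	struct plda_pcie_rp *port = platform_get_drvdata(pdev);

	/* Undoes pci_host_probe() and the IRQ domain setup. */
	plda_pcie_host_deinit(port);
}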