@@ -318,7 +318,7 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
                        ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
 }
 
-static void mc_handle_msi(struct irq_desc *desc)
+static void plda_handle_msi(struct irq_desc *desc)
 {
         struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
         struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -346,7 +346,7 @@ static void mc_handle_msi(struct irq_desc *desc)
         chained_irq_exit(chip, desc);
 }
 
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
 {
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
         void __iomem *bridge_base_addr = port->bridge_addr;
@@ -355,7 +355,7 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
         writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
 }
 
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
         phys_addr_t addr = port->msi.vector_phy;
@@ -368,21 +368,23 @@ static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                 (int)data->hwirq, msg->address_hi, msg->address_lo);
 }
 
-static int mc_msi_set_affinity(struct irq_data *irq_data,
-                               const struct cpumask *mask, bool force)
+static int plda_msi_set_affinity(struct irq_data *irq_data,
+                                 const struct cpumask *mask, bool force)
 {
         return -EINVAL;
 }
 
-static struct irq_chip mc_msi_bottom_irq_chip = {
-        .name = "Microchip MSI",
-        .irq_ack = mc_msi_bottom_irq_ack,
-        .irq_compose_msi_msg = mc_compose_msi_msg,
-        .irq_set_affinity = mc_msi_set_affinity,
+static struct irq_chip plda_msi_bottom_irq_chip = {
+        .name = "PLDA MSI",
+        .irq_ack = plda_msi_bottom_irq_ack,
+        .irq_compose_msi_msg = plda_compose_msi_msg,
+        .irq_set_affinity = plda_msi_set_affinity,
 };
 
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
-                                   unsigned int nr_irqs, void *args)
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+                                     unsigned int virq,
+                                     unsigned int nr_irqs,
+                                     void *args)
 {
         struct plda_pcie_rp *port = domain->host_data;
         struct plda_msi *msi = &port->msi;
@@ -397,16 +399,17 @@ static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
         set_bit(bit, msi->used);
 
-        irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
+        irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
                             domain->host_data, handle_edge_irq, NULL, NULL);
 
         mutex_unlock(&msi->lock);
 
         return 0;
 }
 
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
-                                   unsigned int nr_irqs)
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+                                     unsigned int virq,
+                                     unsigned int nr_irqs)
 {
         struct irq_data *d = irq_domain_get_irq_data(domain, virq);
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
@@ -423,24 +426,24 @@ static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops msi_domain_ops = {
-        .alloc  = mc_irq_msi_domain_alloc,
-        .free   = mc_irq_msi_domain_free,
+        .alloc  = plda_irq_msi_domain_alloc,
+        .free   = plda_irq_msi_domain_free,
 };
 
-static struct irq_chip mc_msi_irq_chip = {
-        .name = "Microchip PCIe MSI",
+static struct irq_chip plda_msi_irq_chip = {
+        .name = "PLDA PCIe MSI",
         .irq_ack = irq_chip_ack_parent,
         .irq_mask = pci_msi_mask_irq,
         .irq_unmask = pci_msi_unmask_irq,
 };
 
-static struct msi_domain_info mc_msi_domain_info = {
+static struct msi_domain_info plda_msi_domain_info = {
         .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX),
-        .chip = &mc_msi_irq_chip,
+        .chip = &plda_msi_irq_chip,
 };
 
-static int mc_allocate_msi_domains(struct plda_pcie_rp *port)
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
 {
         struct device *dev = port->dev;
         struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
@@ -455,7 +458,8 @@ static int mc_allocate_msi_domains(struct plda_pcie_rp *port)
                 return -ENOMEM;
         }
 
-        msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
+        msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+                                                    &plda_msi_domain_info,
                                                     msi->dev_domain);
         if (!msi->msi_domain) {
                 dev_err(dev, "failed to create MSI domain\n");
@@ -466,7 +470,7 @@ static int mc_allocate_msi_domains(struct plda_pcie_rp *port)
         return 0;
 }
 
-static void mc_handle_intx(struct irq_desc *desc)
+static void plda_handle_intx(struct irq_desc *desc)
 {
         struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
         struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -493,7 +497,7 @@ static void mc_handle_intx(struct irq_desc *desc)
         chained_irq_exit(chip, desc);
 }
 
-static void mc_ack_intx_irq(struct irq_data *data)
+static void plda_ack_intx_irq(struct irq_data *data)
 {
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
         void __iomem *bridge_base_addr = port->bridge_addr;
@@ -502,7 +506,7 @@ static void mc_ack_intx_irq(struct irq_data *data)
         writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
 }
 
-static void mc_mask_intx_irq(struct irq_data *data)
+static void plda_mask_intx_irq(struct irq_data *data)
 {
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
         void __iomem *bridge_base_addr = port->bridge_addr;
@@ -517,7 +521,7 @@ static void mc_mask_intx_irq(struct irq_data *data)
         raw_spin_unlock_irqrestore(&port->lock, flags);
 }
 
-static void mc_unmask_intx_irq(struct irq_data *data)
+static void plda_unmask_intx_irq(struct irq_data *data)
 {
         struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
         void __iomem *bridge_base_addr = port->bridge_addr;
@@ -532,24 +536,24 @@ static void mc_unmask_intx_irq(struct irq_data *data)
         raw_spin_unlock_irqrestore(&port->lock, flags);
 }
 
-static struct irq_chip mc_intx_irq_chip = {
-        .name = "Microchip PCIe INTx",
-        .irq_ack = mc_ack_intx_irq,
-        .irq_mask = mc_mask_intx_irq,
-        .irq_unmask = mc_unmask_intx_irq,
+static struct irq_chip plda_intx_irq_chip = {
+        .name = "PLDA PCIe INTx",
+        .irq_ack = plda_ack_intx_irq,
+        .irq_mask = plda_mask_intx_irq,
+        .irq_unmask = plda_unmask_intx_irq,
 };
 
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
-                            irq_hw_number_t hwirq)
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+                              irq_hw_number_t hwirq)
 {
-        irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
+        irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
         irq_set_chip_data(irq, domain->host_data);
 
         return 0;
 }
 
 static const struct irq_domain_ops intx_domain_ops = {
-        .map = mc_pcie_intx_map,
+        .map = plda_pcie_intx_map,
 };
 
 static inline u32 reg_to_event(u32 reg, struct event_map field)
@@ -609,7 +613,7 @@ static u32 local_events(struct mc_pcie *port)
         return val;
 }
 
-static u32 get_events(struct plda_pcie_rp *port)
+static u32 mc_get_events(struct plda_pcie_rp *port)
 {
         struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
         u32 events = 0;
@@ -638,7 +642,7 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-static void mc_handle_event(struct irq_desc *desc)
+static void plda_handle_event(struct irq_desc *desc)
 {
         struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
         unsigned long events;
@@ -647,7 +651,7 @@ static void mc_handle_event(struct irq_desc *desc)
 
         chained_irq_enter(chip, desc);
 
-        events = get_events(port);
+        events = mc_get_events(port);
 
         for_each_set_bit(bit, &events, NUM_EVENTS)
                 generic_handle_domain_irq(port->event_domain, bit);
@@ -741,17 +745,17 @@ static struct irq_chip mc_event_irq_chip = {
         .irq_unmask = mc_unmask_event_irq,
 };
 
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
-                             irq_hw_number_t hwirq)
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+                               irq_hw_number_t hwirq)
 {
         irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
         irq_set_chip_data(irq, domain->host_data);
 
         return 0;
 }
 
-static const struct irq_domain_ops event_domain_ops = {
-        .map = mc_pcie_event_map,
+static const struct irq_domain_ops plda_event_domain_ops = {
+        .map = plda_pcie_event_map,
 };
 
 static inline void mc_pcie_deinit_clk(void *data)
@@ -799,7 +803,7 @@ static int mc_pcie_init_clks(struct device *dev)
         return 0;
 }
 
-static int mc_pcie_init_irq_domains(struct plda_pcie_rp *port)
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
 {
         struct device *dev = port->dev;
         struct device_node *node = dev->of_node;
@@ -813,7 +817,8 @@ static int mc_pcie_init_irq_domains(struct plda_pcie_rp *port)
         }
 
         port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
-                                                   &event_domain_ops, port);
+                                                   &plda_event_domain_ops,
+                                                   port);
         if (!port->event_domain) {
                 dev_err(dev, "failed to get event domain\n");
                 of_node_put(pcie_intc_node);
@@ -835,7 +840,7 @@ static int mc_pcie_init_irq_domains(struct plda_pcie_rp *port)
         of_node_put(pcie_intc_node);
         raw_spin_lock_init(&port->lock);
 
-        return mc_allocate_msi_domains(port);
+        return plda_allocate_msi_domains(port);
 }
 
 static inline void mc_clear_secs(struct mc_pcie *port)
@@ -898,14 +903,14 @@ static void mc_disable_interrupts(struct mc_pcie *port)
         writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
 }
 
-static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp *port)
+static int plda_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp *port)
 {
         struct device *dev = &pdev->dev;
         int irq;
         int i, intx_irq, msi_irq, event_irq;
         int ret;
 
-        ret = mc_pcie_init_irq_domains(port);
+        ret = plda_pcie_init_irq_domains(port);
         if (ret) {
                 dev_err(dev, "failed creating IRQ domains\n");
                 return ret;
@@ -938,18 +943,18 @@ static int mc_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp
         }
 
         /* Plug the INTx chained handler */
-        irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
+        irq_set_chained_handler_and_data(intx_irq, plda_handle_intx, port);
 
         msi_irq = irq_create_mapping(port->event_domain,
                                      EVENT_LOCAL_PM_MSI_INT_MSI);
         if (!msi_irq)
                 return -ENXIO;
 
         /* Plug the MSI chained handler */
-        irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
+        irq_set_chained_handler_and_data(msi_irq, plda_handle_msi, port);
 
         /* Plug the main event chained handler */
-        irq_set_chained_handler_and_data(irq, mc_handle_event, port);
+        irq_set_chained_handler_and_data(irq, plda_handle_event, port);
 
         return 0;
 }
@@ -976,7 +981,7 @@ static int mc_platform_init(struct pci_config_window *cfg)
                 return ret;
 
         /* Address translation is up; safe to enable interrupts */
-        ret = mc_init_interrupts(pdev, &port->plda);
+        ret = plda_init_interrupts(pdev, &port->plda);
         if (ret)
                 return ret;
 