@@ -481,6 +481,34 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
 	spin_unlock(&zpci_iomap_lock);
 }
 
+static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
+{
+	int bar, idx;
+
+	spin_lock(&zpci_iomap_lock);
+	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+		if (!zdev->bars[bar].size)
+			continue;
+		idx = zdev->bars[bar].map_idx;
+		if (!zpci_iomap_start[idx].count)
+			continue;
+		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
+	}
+	spin_unlock(&zpci_iomap_lock);
+}
+
+void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
+{
+	if (!fh || zdev->fh == fh)
+		return;
+
+	zdev->fh = fh;
+	if (zpci_use_mio(zdev))
+		return;
+	if (zdev->has_resources && zdev_enabled(zdev))
+		zpci_do_update_iomap_fh(zdev, fh);
+}
+
 static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
 				    unsigned long size, unsigned long flags)
 {
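
For context: on systems without MIO instructions, a read or write through an ioremap'ed BAR is turned into a PCI load/store request that carries the function handle stored in the corresponding zpci_iomap_start[] entry, which is why zpci_do_update_iomap_fh() above pushes a refreshed handle into every live entry with WRITE_ONCE(). The standalone C model below is only an illustration of that writer/reader pairing; the demo_* names and the fake load routine are made up for this sketch and are not the kernel's actual access path, which would pick up the entry's fh with READ_ONCE() on the read side (not shown in this excerpt).

/* Standalone model of the non-MIO access path; illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct demo_iomap_entry {
	uint32_t fh;	/* function handle, refreshed by the writer */
	uint8_t  bar;	/* BAR this mapping belongs to */
	uint16_t count;	/* number of active mappings for this entry */
};

static struct demo_iomap_entry demo_iomap_start[16];

/* Stand-in for the privileged PCI-load operation keyed by (fh, bar, offset). */
static uint64_t demo_pci_load(uint32_t fh, uint8_t bar, uint64_t offset)
{
	return ((uint64_t)fh << 32) | ((uint64_t)bar << 24) | offset;
}

/* Reader: fetches the (possibly refreshed) handle for every access, so a
 * writer that atomically updates only .fh never produces a torn request. */
static uint64_t demo_mmio_read(unsigned int idx, uint64_t offset)
{
	uint32_t fh = *(volatile uint32_t *)&demo_iomap_start[idx].fh;

	return demo_pci_load(fh, demo_iomap_start[idx].bar, offset);
}

int main(void)
{
	demo_iomap_start[0] = (struct demo_iomap_entry){ .fh = 0x1234, .bar = 1, .count = 1 };
	printf("req %#llx\n", (unsigned long long)demo_mmio_read(0, 0x10));

	/* A handle refresh (e.g. after disable/enable) changes only .fh. */
	demo_iomap_start[0].fh = 0x5678;
	printf("req %#llx\n", (unsigned long long)demo_mmio_read(0, 0x10));
	return 0;
}

The only point of the WRITE_ONCE()/READ_ONCE() pairing is that an in-flight access observes either the old or the new handle, never a mix of the two.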
@@ -668,7 +696,7 @@ int zpci_enable_device(struct zpci_dev *zdev)
 	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
 		rc = -EIO;
 	else
-		zdev->fh = fh;
+		zpci_update_fh(zdev, fh);
 	return rc;
 }
 
@@ -679,14 +707,14 @@ int zpci_disable_device(struct zpci_dev *zdev)
 
 	cc = clp_disable_fh(zdev, &fh);
 	if (!cc) {
-		zdev->fh = fh;
+		zpci_update_fh(zdev, fh);
 	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
 		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
 			zdev->fid);
 		/* Function is already disabled - update handle */
 		rc = clp_refresh_fh(zdev->fid, &fh);
 		if (!rc) {
-			zdev->fh = fh;
+			zpci_update_fh(zdev, fh);
 			rc = -EINVAL;
 		}
 	} else {
@@ -695,6 +723,65 @@ int zpci_disable_device(struct zpci_dev *zdev)
 	return rc;
 }
 
+/**
+ * zpci_hot_reset_device - perform a reset of the given zPCI function
+ * @zdev: the slot which should be reset
+ *
+ * Performs a low level reset of the zPCI function. The reset is low level in
+ * the sense that the zPCI function can be reset without detaching it from the
+ * common PCI subsystem. The reset may be performed while under control of
+ * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
+ * table is reinstated at the end of the reset.
+ *
+ * After the reset the function's internal state is reset to an initial state
+ * equivalent to its state during boot when first probing a driver.
+ * Consequently, after reset the PCI function requires re-initialization via
+ * the common PCI code, including re-enabling IRQs via pci_alloc_irq_vectors()
+ * and enabling the function via e.g. pci_enable_device_flags(). The caller
+ * must guard against concurrent reset attempts.
+ *
+ * In most cases this function should not be called directly but through
+ * pci_reset_function() or pci_reset_bus() which handle the save/restore and
+ * locking.
+ *
+ * Return: 0 on success and an error value otherwise
+ */
+int zpci_hot_reset_device(struct zpci_dev *zdev)
+{
+	int rc;
+
+	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
+	if (zdev_enabled(zdev)) {
+		/* Disables device access, DMAs and IRQs (reset state) */
+		rc = zpci_disable_device(zdev);
+		/*
+		 * Due to a z/VM vs LPAR inconsistency in the error state the
+		 * FH may indicate an enabled device but disable says the
+		 * device is already disabled; don't treat it as an error here.
+		 */
+		if (rc == -EINVAL)
+			rc = 0;
+		if (rc)
+			return rc;
+	}
+
+	rc = zpci_enable_device(zdev);
+	if (rc)
+		return rc;
+
+	if (zdev->dma_table)
+		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+					(u64)zdev->dma_table);
+	else
+		rc = zpci_dma_init_device(zdev);
+	if (rc) {
+		zpci_disable_device(zdev);
+		return rc;
+	}
+
+	return 0;
+}
+
 /**
  * zpci_create_device() - Create a new zpci_dev and add it to the zbus
  * @fid: Function ID of the device to be created
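
Since @zdev is documented above as "the slot which should be reset", an obvious consumer of zpci_hot_reset_device() is a hotplug slot's reset_slot() callback. The sketch below is a hypothetical illustration of such a caller, not part of this patch: it assumes struct zpci_dev embeds its struct hotplug_slot as ->hotplug_slot and tracks a ->state of ZPCI_FN_STATE_CONFIGURED, and it leaves locking to the higher-level pci_reset_function()/pci_reset_bus() paths mentioned in the kernel-doc.

/* Hypothetical hotplug reset_slot() built on zpci_hot_reset_device(). */
#include <linux/pci_hotplug.h>
#include <asm/pci.h>

static int demo_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
	struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
					     hotplug_slot);

	/* Only a configured function can be meaningfully reset. */
	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
		return -EIO;

	/* A probe call only asks whether a reset is possible. */
	if (probe)
		return 0;

	/*
	 * No locking here on purpose: callers are expected to come through
	 * pci_reset_function()/pci_reset_bus(), which hold the PCI device
	 * lock and thereby serialize reset attempts as required above.
	 */
	return zpci_hot_reset_device(zdev);
}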
@@ -776,7 +863,7 @@ int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
 {
 	int rc;
 
-	zdev->fh = fh;
+	zpci_update_fh(zdev, fh);
 	/* the PCI function will be scanned once function 0 appears */
 	if (!zdev->zbus->bus)
 		return 0;
@@ -903,6 +990,59 @@ int zpci_report_error(struct pci_dev *pdev,
 }
 EXPORT_SYMBOL(zpci_report_error);
 
+/**
+ * zpci_clear_error_state() - Clears the zPCI error state of the device
+ * @zdev: The zdev for which the zPCI error state should be reset
+ *
+ * Clear the zPCI error state of the device. If clearing the zPCI error state
+ * fails the device is left in the error state. In this case it may make sense
+ * to call zpci_io_perm_failure() on the associated pdev if it exists.
+ *
+ * Returns: 0 on success, -EIO otherwise
+ */
+int zpci_clear_error_state(struct zpci_dev *zdev)
+{
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
+	struct zpci_fib fib = {0};
+	u8 status;
+	int cc;
+
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc) {
+		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * zpci_reset_load_store_blocked() - Re-enables L/S from error state
+ * @zdev: The zdev for which to unblock load/store access
+ *
+ * Re-enables load/store access for a PCI function in the error state while
+ * keeping DMA blocked. In this state drivers can poke MMIO space to determine
+ * if error recovery is possible while catching any rogue DMA access from the
+ * device.
+ *
+ * Returns: 0 on success, -EIO otherwise
+ */
+int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
+{
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
+	struct zpci_fib fib = {0};
+	u8 status;
+	int cc;
+
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc) {
+		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int zpci_mem_init(void)
 {
 	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
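
The two helpers above are building blocks for PCI error recovery: zpci_reset_load_store_blocked() lets a driver poke MMIO space while DMA stays blocked, and zpci_clear_error_state() takes the function out of the error state, typically followed by a full re-initialization such as zpci_hot_reset_device(). The sketch below strings them together into one hypothetical recovery helper; the function name and the exact ordering are an illustration of how the pieces could fit, not the recovery flow this patch itself adds.

/* Hypothetical recovery sequence composed from the helpers added above. */
#include <asm/pci.h>	/* struct zpci_dev and the zpci_* declarations on s390 */

static int demo_recover_zpci_function(struct zpci_dev *zdev)
{
	int rc;

	/* Unblock load/store so the driver can inspect MMIO space while DMA
	 * remains blocked, containing a potentially misbehaving device. */
	rc = zpci_reset_load_store_blocked(zdev);
	if (rc)
		return rc;

	/* Leave the error state entirely... */
	rc = zpci_clear_error_state(zdev);
	if (rc)
		return rc;

	/*
	 * ...and bring the function back to a freshly initialized state,
	 * re-registering an existing DMA translation table if present.
	 * Per the kernel-doc above, if these steps fail it may make sense
	 * to call zpci_io_perm_failure() on the associated pdev.
	 */
	return zpci_hot_reset_device(zdev);
}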