@@ -651,55 +651,70 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 }
 
 /**
- * ice_ena_vf_mappings
- * @vf: pointer to the VF structure
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
  *
- * Enable VF vectors and queues allocation by writing the details into
- * respective registers.
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
  */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 {
-	int abs_vf_id, abs_first, abs_last;
+	int device_based_first_msix, device_based_last_msix;
+	int pf_based_first_msix, pf_based_last_msix, v;
 	struct ice_pf *pf = vf->pf;
-	struct ice_vsi *vsi;
-	struct device *dev;
-	int first, last, v;
+	int device_based_vf_id;
 	struct ice_hw *hw;
 	u32 reg;
 
-	dev = ice_pf_to_dev(pf);
 	hw = &pf->hw;
-	vsi = pf->vsi[vf->lan_vsi_idx];
-	first = vf->first_vector_idx;
-	last = (first + pf->num_msix_per_vf) - 1;
-	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
-	abs_last = (abs_first + pf->num_msix_per_vf) - 1;
-	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
-	/* VF Vector allocation */
-	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
-	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
-	       VPINT_ALLOC_VALID_M);
+	pf_based_first_msix = vf->first_vector_idx;
+	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
+
+	device_based_first_msix = pf_based_first_msix +
+		pf->hw.func_caps.common_cap.msix_vector_first_id;
+	device_based_last_msix =
+		(device_based_first_msix + pf->num_msix_per_vf) - 1;
+	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+		VPINT_ALLOC_FIRST_M) |
+	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 
-	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
 		 & VPINT_ALLOC_PCI_FIRST_M) |
-	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
-	       VPINT_ALLOC_PCI_VALID_M);
+	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
 	/* map the interrupts to its functions */
-	for (v = first; v <= last; v++) {
-		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
 			GLINT_VECT2FUNC_VF_NUM_M) |
 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 			GLINT_VECT2FUNC_PF_NUM_M));
 		wr32(hw, GLINT_VECT2FUNC(v), reg);
 	}
 
-	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
-	 * VF admin queue interrupts will go to VF MSI-X vector 0.
-	 */
-	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
+	/* Map mailbox interrupt to VF MSI-X vector 0 */
+	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_hw *hw = &vf->pf->hw;
+	u32 reg;
+
 	/* set regardless of mapping mode */
 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
 
@@ -711,7 +726,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 		 */
 		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
 			VPLAN_TX_QBASE_VFFIRSTQ_M) |
-		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
 			VPLAN_TX_QBASE_VFNUMQ_M));
 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 	} else {
@@ -729,14 +744,26 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
 		 */
 		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
 			VPLAN_RX_QBASE_VFFIRSTQ_M) |
-		       (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
 			VPLAN_RX_QBASE_VFNUMQ_M));
 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 	} else {
 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
 	}
 }
 
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+	ice_ena_vf_msix_mappings(vf);
+	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
 /**
  * ice_determine_res
  * @pf: pointer to the PF structure
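
A minimal usage sketch of what the split enables; it is not part of this diff. Only ice_ena_vf_msix_mappings() and ice_ena_vf_q_mappings() come from the patch above; the caller name and the queue-reconfiguration scenario are hypothetical.

/* Hypothetical caller: with the queue mappings split out, a path that
 * changes a VF's queue allocation could rewrite only the VPLAN_TX/RX
 * queue base registers via ice_ena_vf_q_mappings() and leave the MSIX
 * mappings programmed by ice_ena_vf_msix_mappings() untouched.
 */
static void remap_vf_queues(struct ice_vf *vf, u16 new_txq, u16 new_rxq)
{
	ice_ena_vf_q_mappings(vf, new_txq, new_rxq);
}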