@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                 (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
         write_vreg(vgpu, offset, p_data, bytes);
 
-        if (data & _MASKED_BIT_ENABLE(1)) {
+        if (IS_MASKED_BITS_ENABLED(data, 1)) {
                 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                 return 0;
         }
 
         if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
-            data & _MASKED_BIT_ENABLE(2)) {
+            IS_MASKED_BITS_ENABLED(data, 2)) {
                 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                 return 0;
         }
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
          * pvinfo, if not, we will treat this guest as non-gvtg-aware
          * guest, and stop emulating its cfg space, mmio, gtt, etc.
          */
-        if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-                        (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-                        && !vgpu->pv_notified) {
+        if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+             IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+            !vgpu->pv_notified) {
                 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                 return 0;
         }
-        if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-                        || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+        if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+            IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
                 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
                 gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
         write_vreg(vgpu, offset, p_data, bytes);
         data = vgpu_vreg(vgpu, offset);
 
-        if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+        if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
                 data |= RESET_CTL_READY_TO_RESET;
         else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
                 data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
         (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
         write_vreg(vgpu, offset, p_data, bytes);
 
-        if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+        if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+            IS_MASKED_BITS_ENABLED(data, 0x8))
                 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
         return 0;
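
For context: i915 "masked" registers carry a write-enable mask in their upper 16 bits, so a bit is only actually being enabled when both its mask bit and its value bit are set in the written data. The old data & _MASKED_BIT_ENABLE(bit) test also fires on a masked *disable* (mask bit set, value bit clear). A minimal sketch of the helpers this diff relies on — the _MASKED_BIT_* macros match i915's long-standing definitions, and the IS_MASKED_BITS_* helpers mirror what this patch introduces in handlers.c:

/* Upper 16 bits select which low bits a masked write may change. */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

/* Enabled only when both the mask bit and the value bit are set. */
#define IS_MASKED_BITS_ENABLED(_val, _b) \
        (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
        ((_val) & _MASKED_BIT_DISABLE(_b))

/*
 * Worked example: a guest write disabling bit 0x2 is data = 0x00020000.
 * Old test:  data & _MASKED_BIT_ENABLE(2)      -> 0x00020000, truthy:
 *            the disable is misread as an enable.
 * New test:  (data & 0x00020002) == 0x00020002 -> false: correct.
 */
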
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
         MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
         MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
         MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+        MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
         MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
         MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
         MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                  NULL, NULL);
 
-        MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
-        MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+        MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+        MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
         return 0;
 }
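
The two MMIO_D() scope fixes above hinge on gvt's device-scope bitmask: each MMIO entry is tagged with the set of platforms it applies to. A rough sketch, assuming the flag layout in gvt/mmio.h (bit positions illustrative, not verbatim):

/* Each D_* flag names a platform an MMIO entry applies to. */
#define D_BDW           (1 << 0)
#define D_SKL           (1 << 1)
#define D_KBL           (1 << 2)
#define D_BXT           (1 << 3)
#define D_CFL           (1 << 4)

/* Union of the Gen9+ platforms GVT-g supports. */
#define D_SKL_PLUS      (D_SKL | D_KBL | D_BXT | D_CFL)

Under that layout, D_KBL | D_CFL extends GAMT_CHKN_BIT_REG emulation to Coffee Lake, and switching GEN9_CTX_PREEMPT_REG to D_SKL_PLUS widens it from KBL/SKL only to Broxton and Coffee Lake as well.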