Skip to content

Commit 196d9e7

Browse files
committed
Merge tag 'kvm-s390-next-6.17-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
RCU wakeup fix for KVM s390 guest entry
2 parents 6836e1f + 57d88f0 commit 196d9e7

File tree

6 files changed

+77
-23
lines changed

6 files changed

+77
-23
lines changed

arch/s390/include/asm/entry-common.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,4 +59,14 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
5959

6060
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
6161

+static __always_inline bool arch_in_rcu_eqs(void)
+{
+	if (IS_ENABLED(CONFIG_KVM))
+		return current->flags & PF_VCPU;
+
+	return false;
+}
+
+#define arch_in_rcu_eqs arch_in_rcu_eqs
+
6272
#endif

arch/s390/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -716,6 +716,9 @@ extern char sie_exit;
716716
bool kvm_s390_pv_is_protected(struct kvm *kvm);
717717
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
718718

+extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
+				   u64 *gprs, unsigned long gasce);
+
719722
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
720723
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
721724

arch/s390/kvm/kvm-s390.c

Lines changed: 39 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -5062,6 +5062,30 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
50625062
return vcpu_post_run_handle_fault(vcpu);
50635063
}
50645064

+int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
+				    u64 *gprs, unsigned long gasce)
+{
+	int ret;
+
+	guest_state_enter_irqoff();
+
+	/*
+	 * The guest_state_{enter,exit}_irqoff() functions inform lockdep and
+	 * tracing that entry to the guest will enable host IRQs, and exit from
+	 * the guest will disable host IRQs.
+	 *
+	 * We must not use lockdep/tracing/RCU in this critical section, so we
+	 * use the low-level arch_local_irq_*() helpers to enable/disable IRQs.
+	 */
+	arch_local_irq_enable();
+	ret = sie64a(scb, gprs, gasce);
+	arch_local_irq_disable();
+
+	guest_state_exit_irqoff();
+
+	return ret;
+}
+
50655089
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
50665090
static int __vcpu_run(struct kvm_vcpu *vcpu)
50675091
{
@@ -5082,20 +5106,27 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
50825106
kvm_vcpu_srcu_read_unlock(vcpu);
 	/*
 	 * As PF_VCPU will be used in fault handler, between
-	 * guest_enter and guest_exit should be no uaccess.
+	 * guest_timing_enter_irqoff and guest_timing_exit_irqoff
+	 * should be no uaccess.
 	 */
-	local_irq_disable();
-	guest_enter_irqoff();
-	__disable_cpu_timer_accounting(vcpu);
-	local_irq_enable();
 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
 		memcpy(sie_page->pv_grregs,
 		       vcpu->run->s.regs.gprs,
 		       sizeof(sie_page->pv_grregs));
 	}
-	exit_reason = sie64a(vcpu->arch.sie_block,
-			     vcpu->run->s.regs.gprs,
-			     vcpu->arch.gmap->asce);
+
+	local_irq_disable();
+	guest_timing_enter_irqoff();
+	__disable_cpu_timer_accounting(vcpu);
+
+	exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
+					      vcpu->run->s.regs.gprs,
+					      vcpu->arch.gmap->asce);
+
+	__enable_cpu_timer_accounting(vcpu);
+	guest_timing_exit_irqoff();
+	local_irq_enable();
+
50995130
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
51005131
memcpy(vcpu->run->s.regs.gprs,
51015132
sie_page->pv_grregs,
@@ -5111,10 +5142,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
51115142
vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
51125143
}
51135144
}
-	local_irq_disable();
-	__enable_cpu_timer_accounting(vcpu);
-	guest_exit_irqoff();
-	local_irq_enable();
51185145
kvm_vcpu_srcu_read_lock(vcpu);
51195146

51205147
rc = vcpu_post_run(vcpu, exit_reason);

arch/s390/kvm/vsie.c

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1170,10 +1170,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
11701170
vcpu->arch.sie_block->fpf & FPF_BPBC)
11711171
set_thread_flag(TIF_ISOLATE_BP_GUEST);
11721172

-	local_irq_disable();
-	guest_enter_irqoff();
-	local_irq_enable();
-
11771173
/*
11781174
* Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
11791175
* and VCPU requests also hinder the vSIE from running and lead
@@ -1183,15 +1179,16 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
11831179
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
11841180
current->thread.gmap_int_code = 0;
11851181
barrier();
-	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
-		rc = sie64a(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
+	if (!kvm_s390_vcpu_sie_inhibited(vcpu)) {
+		local_irq_disable();
+		guest_timing_enter_irqoff();
+		rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
+		guest_timing_exit_irqoff();
+		local_irq_enable();
+	}
11881189
barrier();
11891190
vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
11901191

-	local_irq_disable();
-	guest_exit_irqoff();
-	local_irq_enable();
-
11951192
/* restore guest state for bp isolation override */
11961193
if (!guest_bp_isolation)
11971194
clear_thread_flag(TIF_ISOLATE_BP_GUEST);

include/linux/entry-common.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,22 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
8686
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
8787
#endif
8888

+/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ *		     states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
89105
/**
90106
* enter_from_user_mode - Establish state when coming from user mode
91107
*

kernel/entry/common.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,8 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
220220
* TINY_RCU does not support EQS, so let the compiler eliminate
221221
* this part when enabled.
222222
*/
-	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+	if (!IS_ENABLED(CONFIG_TINY_RCU) &&
+	    (is_idle_task(current) || arch_in_rcu_eqs())) {
224225
/*
225226
* If RCU is not watching then the same careful
226227
* sequence vs. lockdep and tracing is required

0 commit comments

Comments (0)