Skip to content

Commit 12ad6cf

Browse files
jgross1 authored and bostrovs committed
x86/xen: remove xen_have_vcpu_info_placement flag
The flag xen_have_vcpu_info_placement was needed to support Xen hypervisors older than version 3.4, which didn't support the VCPUOP_register_vcpu_info hypercall. Today the Linux kernel requires at least Xen 4.0 to be able to run, so xen_have_vcpu_info_placement can be dropped (in theory the flag was used to ensure a working kernel even in case of the VCPUOP_register_vcpu_info hypercall failing for other reasons than the hypercall not being supported, but the only cases covered by the flag would be parameter errors, which ought not to be made anyway). This allows to let some functions return void now, as they can never fail. Signed-off-by: Juergen Gross <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Boris Ostrovsky <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Boris Ostrovsky <[email protected]>
1 parent 7672167 commit 12ad6cf

File tree

5 files changed

+33
-126
lines changed

5 files changed

+33
-126
lines changed

arch/x86/xen/enlighten.c

Lines changed: 24 additions & 73 deletions
Original file line number | Diff line number | Diff line change
@@ -84,21 +84,6 @@ EXPORT_SYMBOL(xen_start_flags);
  */
 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
 
-/*
- * Flag to determine whether vcpu info placement is available on all
- * VCPUs.  We assume it is to start with, and then set it to zero on
- * the first failure.  This is because it can succeed on some VCPUs
- * and not others, since it can involve hypervisor memory allocation,
- * or because the guest failed to guarantee all the appropriate
- * constraints on all VCPUs (ie buffer can't cross a page boundary).
- *
- * Note that any particular CPU may be using a placed vcpu structure,
- * but we can only optimise if the all are.
- *
- * 0: not available, 1: available
- */
-int xen_have_vcpu_info_placement = 1;
-
 static int xen_cpu_up_online(unsigned int cpu)
 {
 	xen_init_lock_cpu(cpu);
@@ -124,10 +109,8 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
 	return rc >= 0 ? 0 : rc;
 }
 
-static int xen_vcpu_setup_restore(int cpu)
+static void xen_vcpu_setup_restore(int cpu)
 {
-	int rc = 0;
-
 	/* Any per_cpu(xen_vcpu) is stale, so reset it */
 	xen_vcpu_info_reset(cpu);
 
@@ -136,11 +119,8 @@ static int xen_vcpu_setup_restore(int cpu)
 	 * be handled by hotplug.
 	 */
 	if (xen_pv_domain() ||
-	    (xen_hvm_domain() && cpu_online(cpu))) {
-		rc = xen_vcpu_setup(cpu);
-	}
-
-	return rc;
+	    (xen_hvm_domain() && cpu_online(cpu)))
+		xen_vcpu_setup(cpu);
 }
 
 /*
@@ -150,7 +130,7 @@ static int xen_vcpu_setup_restore(int cpu)
  */
 void xen_vcpu_restore(void)
 {
-	int cpu, rc;
+	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		bool other_cpu = (cpu != smp_processor_id());
@@ -170,20 +150,9 @@ void xen_vcpu_restore(void)
 		if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
 			xen_setup_runstate_info(cpu);
 
-		rc = xen_vcpu_setup_restore(cpu);
-		if (rc)
-			pr_emerg_once("vcpu restore failed for cpu=%d err=%d. "
-					"System will hang.\n", cpu, rc);
-		/*
-		 * In case xen_vcpu_setup_restore() fails, do not bring up the
-		 * VCPU. This helps us avoid the resulting OOPS when the VCPU
-		 * accesses pvclock_vcpu_time via xen_vcpu (which is NULL.)
-		 * Note that this does not improve the situation much -- now the
-		 * VM hangs instead of OOPSing -- with the VCPUs that did not
-		 * fail, spinning in stop_machine(), waiting for the failed
-		 * VCPUs to come up.
-		 */
-		if (other_cpu && is_up && (rc == 0) &&
+		xen_vcpu_setup_restore(cpu);
+
+		if (other_cpu && is_up &&
 		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
 			BUG();
 	}
@@ -200,7 +169,7 @@ void xen_vcpu_info_reset(int cpu)
 	}
 }
 
-int xen_vcpu_setup(int cpu)
+void xen_vcpu_setup(int cpu)
 {
 	struct vcpu_register_vcpu_info info;
 	int err;
@@ -221,44 +190,26 @@ int xen_vcpu_setup(int cpu)
 	 */
 	if (xen_hvm_domain()) {
 		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
-			return 0;
+			return;
 	}
 
-	if (xen_have_vcpu_info_placement) {
-		vcpup = &per_cpu(xen_vcpu_info, cpu);
-		info.mfn = arbitrary_virt_to_mfn(vcpup);
-		info.offset = offset_in_page(vcpup);
+	vcpup = &per_cpu(xen_vcpu_info, cpu);
+	info.mfn = arbitrary_virt_to_mfn(vcpup);
+	info.offset = offset_in_page(vcpup);
 
-		/*
-		 * Check to see if the hypervisor will put the vcpu_info
-		 * structure where we want it, which allows direct access via
-		 * a percpu-variable.
-		 * N.B. This hypercall can _only_ be called once per CPU.
-		 * Subsequent calls will error out with -EINVAL. This is due to
-		 * the fact that hypervisor has no unregister variant and this
-		 * hypercall does not allow to over-write info.mfn and
-		 * info.offset.
-		 */
-		err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
-					 xen_vcpu_nr(cpu), &info);
-
-		if (err) {
-			pr_warn_once("register_vcpu_info failed: cpu=%d err=%d\n",
-				     cpu, err);
-			xen_have_vcpu_info_placement = 0;
-		} else {
-			/*
-			 * This cpu is using the registered vcpu info, even if
-			 * later ones fail to.
-			 */
-			per_cpu(xen_vcpu, cpu) = vcpup;
-		}
-	}
-
-	if (!xen_have_vcpu_info_placement)
-		xen_vcpu_info_reset(cpu);
+	/*
+	 * N.B. This hypercall can _only_ be called once per CPU.
+	 * Subsequent calls will error out with -EINVAL. This is due to
+	 * the fact that hypervisor has no unregister variant and this
+	 * hypercall does not allow to over-write info.mfn and
+	 * info.offset.
+	 */
+	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
+				 &info);
+	if (err)
+		panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err);
 
-	return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
+	per_cpu(xen_vcpu, cpu) = vcpup;
 }
 
 void __init xen_banner(void)

arch/x86/xen/enlighten_hvm.c

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -163,9 +163,9 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
 		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
 	else
 		per_cpu(xen_vcpu_id, cpu) = cpu;
-	rc = xen_vcpu_setup(cpu);
-	if (rc || !xen_have_vector_callback)
-		return rc;
+	xen_vcpu_setup(cpu);
+	if (!xen_have_vector_callback)
+		return 0;
 
 	if (xen_feature(XENFEAT_hvm_safe_pvclock))
 		xen_setup_timer(cpu);

arch/x86/xen/enlighten_pv.c

Lines changed: 5 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -993,31 +993,13 @@ void __init xen_setup_vcpu_info_placement(void)
 	for_each_possible_cpu(cpu) {
 		/* Set up direct vCPU id mapping for PV guests. */
 		per_cpu(xen_vcpu_id, cpu) = cpu;
-
-		/*
-		 * xen_vcpu_setup(cpu) can fail -- in which case it
-		 * falls back to the shared_info version for cpus
-		 * where xen_vcpu_nr(cpu) < MAX_VIRT_CPUS.
-		 *
-		 * xen_cpu_up_prepare_pv() handles the rest by failing
-		 * them in hotplug.
-		 */
-		(void) xen_vcpu_setup(cpu);
+		xen_vcpu_setup(cpu);
 	}
 
-	/*
-	 * xen_vcpu_setup managed to place the vcpu_info within the
-	 * percpu area for all cpus, so make use of it.
-	 */
-	if (xen_have_vcpu_info_placement) {
-		pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
-		pv_ops.irq.irq_disable =
-			__PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
-		pv_ops.irq.irq_enable =
-			__PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
-		pv_ops.mmu.read_cr2 =
-			__PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
-	}
+	pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+	pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+	pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
+	pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct);
 }
 
 static const struct pv_info xen_info __initconst = {

arch/x86/xen/smp.c

Lines changed: 0 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -121,34 +121,10 @@ int xen_smp_intr_init(unsigned int cpu)
 
 void __init xen_smp_cpus_done(unsigned int max_cpus)
 {
-	int cpu, rc, count = 0;
-
 	if (xen_hvm_domain())
 		native_smp_cpus_done(max_cpus);
 	else
 		calculate_max_logical_packages();
-
-	if (xen_have_vcpu_info_placement)
-		return;
-
-	for_each_online_cpu(cpu) {
-		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
-			continue;
-
-		rc = remove_cpu(cpu);
-
-		if (rc == 0) {
-			/*
-			 * Reset vcpu_info so this cpu cannot be onlined again.
-			 */
-			xen_vcpu_info_reset(cpu);
-			count++;
-		} else {
-			pr_warn("%s: failed to bring CPU %d down, error %d\n",
-				__func__, cpu, rc);
-		}
-	}
-	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
 }
 
 void xen_smp_send_reschedule(int cpu)

arch/x86/xen/xen-ops.h

Lines changed: 1 addition & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -76,9 +76,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
 
 bool xen_vcpu_stolen(int vcpu);
 
-extern int xen_have_vcpu_info_placement;
-
-int xen_vcpu_setup(int cpu);
+void xen_vcpu_setup(int cpu);
 void xen_vcpu_info_reset(int cpu);
 void xen_setup_vcpu_info_placement(void);

0 commit comments

Comments (0)