
Commit 98265c1

yyu-intel-com authored and suryasaimadhu committed
x86/fpu/xstate: Preserve supervisor states for the slow path in __fpu__restore_sig()
The signal return code is responsible for taking an XSAVE buffer present in user memory and loading it into the hardware registers. This operation only affects user XSAVE state and never affects supervisor state.

The fast path through this code simply points XRSTOR directly at the user buffer. However, since user memory is not guaranteed to be always mapped, this XRSTOR can fail. If it fails, the signal return code falls back to a slow path which can tolerate page faults.

That slow path copies the xfeatures one by one out of the user buffer into the task's fpu state area. However, by being in a context where it can handle page faults, the code can also schedule.

The lazy-fpu-load code would think it has an up-to-date fpstate and would fail to save the supervisor state when scheduling the task out. When scheduling back in, it would likely restore stale supervisor state.

To fix that, preserve supervisor state before the slow path: modify copy_user_to_fpregs_zeroing() so that if it fails, fpregs are not zeroed; then there is no need for fpregs_deactivate(), and supervisor states are preserved.

Move set_thread_flag(TIF_NEED_FPU_LOAD) to the slow path. Without doing this, the fast path would also need supervisor states to be saved first.

Signed-off-by: Yu-cheng Yu <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
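For readers less familiar with the lazy-FPU machinery: the hazard described above comes from the context-switch path skipping the register save once TIF_NEED_FPU_LOAD is set. A condensed sketch of that logic, for orientation only (the real code lives in __switch_to() and switch_fpu_prepare(); this simplification is not the literal kernel source):

	/* Condensed sketch of scheduling a task out; not literal kernel code. */
	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);	/* saves fpregs, supervisor state included */
	/*
	 * With TIF_NEED_FPU_LOAD already set, the registers are treated as
	 * stale and are NOT saved. Supervisor state living only in the
	 * registers is then lost across the reschedule -- which is why the
	 * flag must not be set while unsaved supervisor state is live.
	 */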
1 parent eeedf15 commit 98265c1

File tree

1 file changed: +28 −25 lines

arch/x86/kernel/fpu/signal.c

Lines changed: 28 additions & 25 deletions
@@ -262,19 +262,23 @@ sanitize_restored_user_xstate(union fpregs_state *state,
 static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 {
 	u64 init_bv;
+	int r;
 
 	if (use_xsave()) {
 		if (fx_only) {
 			init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
 
-			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-			return copy_user_to_fxregs(buf);
+			r = copy_user_to_fxregs(buf);
+			if (!r)
+				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+			return r;
 		} else {
 			init_bv = xfeatures_mask_user() & ~xbv;
 
-			if (unlikely(init_bv))
+			r = copy_user_to_xregs(buf, xbv);
+			if (!r && unlikely(init_bv))
 				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-			return copy_user_to_xregs(buf, xbv);
+			return r;
 		}
 	} else if (use_fxsr()) {
 		return copy_user_to_fxregs(buf);
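The hunk above changes the failure contract of copy_user_to_fpregs_zeroing(): the fallible user-memory copy now runs before any register is initialized, so a nonzero return means no register, supervisor state included, was modified. A condensed caller-side view of the new contract (a sketch, not literal kernel code):

	ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
	if (ret) {
		/*
		 * The copy faulted before touching any register: no partial
		 * restore, no zeroed features, supervisor state intact. The
		 * caller can fall back to the slow path without calling
		 * fpregs_deactivate() first.
		 */
	}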
@@ -327,28 +331,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		}
 	}
 
-	/*
-	 * The current state of the FPU registers does not matter. By setting
-	 * TIF_NEED_FPU_LOAD unconditionally it is ensured that the our xstate
-	 * is not modified on context switch and that the xstate is considered
-	 * to be loaded again on return to userland (overriding last_cpu avoids
-	 * the optimisation).
-	 */
-	set_thread_flag(TIF_NEED_FPU_LOAD);
-	__fpu_invalidate_fpregs_state(fpu);
-
 	if ((unsigned long)buf_fx % 64)
 		fx_only = 1;
-	/*
-	 * For 32-bit frames with fxstate, copy the fxstate so it can be
-	 * reconstructed later.
-	 */
-	if (ia32_fxstate) {
-		ret = __copy_from_user(&env, buf, sizeof(env));
-		if (ret)
-			goto err_out;
-		envp = &env;
-	} else {
+
+	if (!ia32_fxstate) {
 		/*
 		 * Attempt to restore the FPU registers directly from user
 		 * memory. For that to succeed, the user access cannot cause
@@ -365,10 +351,27 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 			fpregs_unlock();
 			return 0;
 		}
-		fpregs_deactivate(fpu);
 		fpregs_unlock();
+	} else {
+		/*
+		 * For 32-bit frames with fxstate, copy the fxstate so it can
+		 * be reconstructed later.
+		 */
+		ret = __copy_from_user(&env, buf, sizeof(env));
+		if (ret)
+			goto err_out;
+		envp = &env;
 	}
 
+	/*
+	 * The current state of the FPU registers does not matter. By setting
+	 * TIF_NEED_FPU_LOAD unconditionally it is ensured that the our xstate
+	 * is not modified on context switch and that the xstate is considered
+	 * to be loaded again on return to userland (overriding last_cpu avoids
+	 * the optimisation).
+	 */
+	set_thread_flag(TIF_NEED_FPU_LOAD);
+	__fpu_invalidate_fpregs_state(fpu);
 
 	if (use_xsave() && !fx_only) {
 		u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
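Taken together, the hunks defer marking the registers stale until the fast path has actually failed. A condensed sketch of the resulting control flow in __fpu__restore_sig() (error handling and the 32-bit fxstate branch elided; a simplification, not the literal kernel source):

	fpregs_lock();
	pagefault_disable();
	ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
	pagefault_enable();
	if (!ret) {				/* fast path: registers are now live */
		fpregs_mark_activate();
		fpregs_unlock();
		return 0;
	}
	fpregs_unlock();			/* the failed attempt left fpregs untouched */

	/*
	 * Only now, entering the slow path, is the in-register state marked
	 * stale. Up to this point the flag was clear, so any reschedule
	 * saved the registers -- supervisor state included -- the normal way.
	 */
	set_thread_flag(TIF_NEED_FPU_LOAD);
	__fpu_invalidate_fpregs_state(fpu);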
