
Commit 1e79a9e

Merge tag 'kvm-s390-next-6.2-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- Second batch of the lazy destroy patches
- First batch of KVM changes for kernel virtual != physical address support
- Removal of an unused function
2 parents: 29c4697 + 99b63f5

19 files changed: +603 −162 lines


Documentation/virt/kvm/api.rst

Lines changed: 37 additions & 4 deletions
@@ -5163,10 +5163,13 @@ KVM_PV_ENABLE
 ===== =============================
 
 KVM_PV_DISABLE
-  Deregister the VM from the Ultravisor and reclaim the memory that
-  had been donated to the Ultravisor, making it usable by the kernel
-  again. All registered VCPUs are converted back to non-protected
-  ones.
+  Deregister the VM from the Ultravisor and reclaim the memory that had
+  been donated to the Ultravisor, making it usable by the kernel again.
+  All registered VCPUs are converted back to non-protected ones. If a
+  previous protected VM had been prepared for asynchronous teardown with
+  KVM_PV_ASYNC_CLEANUP_PREPARE and not subsequently torn down with
+  KVM_PV_ASYNC_CLEANUP_PERFORM, it will be torn down in this call
+  together with the current protected VM.
 
 KVM_PV_VM_SET_SEC_PARMS
   Pass the image header from VM memory to the Ultravisor in
@@ -5289,6 +5292,36 @@ KVM_PV_DUMP
   authentication tag all of which are needed to decrypt the dump at a
   later time.
 
+KVM_PV_ASYNC_CLEANUP_PREPARE
+  :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+  Prepare the current protected VM for asynchronous teardown. Most
+  resources used by the current protected VM will be set aside for a
+  subsequent asynchronous teardown. The current protected VM will then
+  resume execution immediately as non-protected. There can be at most
+  one protected VM prepared for asynchronous teardown at any time. If
+  a protected VM had already been prepared for teardown without
+  subsequently calling KVM_PV_ASYNC_CLEANUP_PERFORM, this call will
+  fail. In that case, the userspace process should issue a normal
+  KVM_PV_DISABLE. The resources set aside with this call will need to
+  be cleaned up with a subsequent call to KVM_PV_ASYNC_CLEANUP_PERFORM
+  or KVM_PV_DISABLE, otherwise they will be cleaned up when KVM
+  terminates. KVM_PV_ASYNC_CLEANUP_PREPARE can be called again as soon
+  as cleanup starts, i.e. before KVM_PV_ASYNC_CLEANUP_PERFORM finishes.
+
+KVM_PV_ASYNC_CLEANUP_PERFORM
+  :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+  Tear down the protected VM previously prepared for teardown with
+  KVM_PV_ASYNC_CLEANUP_PREPARE. The resources that had been set aside
+  will be freed during the execution of this command. This PV command
+  should ideally be issued by userspace from a separate thread. If a
+  fatal signal is received (or the process terminates naturally), the
+  command will terminate immediately without completing, and the normal
+  KVM shutdown procedure will take care of cleaning up all remaining
+  protected VMs, including the ones whose teardown was interrupted by
+  process termination.
+
 4.126 KVM_XEN_HVM_SET_ATTR
 --------------------------
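For context, both new commands are reached through the existing KVM_S390_PV_COMMAND vm ioctl. Below is a minimal userspace sketch of the intended reboot flow; it is not code from this commit, and the vm_fd plumbing, function names, and pthread wiring are illustrative assumptions.

/*
 * Illustrative userspace sketch (not from this commit): drive asynchronous
 * teardown via the KVM_S390_PV_COMMAND vm ioctl.
 */
#include <stdint.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pv_command(int vm_fd, uint32_t cmd)
{
	struct kvm_pv_cmd pv = { .cmd = cmd };

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &pv);
}

static void *teardown_thread(void *arg)
{
	/* Frees everything set aside by the PREPARE step. */
	pv_command((int)(intptr_t)arg, KVM_PV_ASYNC_CLEANUP_PERFORM);
	return NULL;
}

static void reboot_protected_vm(int vm_fd)
{
	pthread_t tid;

	/*
	 * Set the current protected VM's resources aside; the VM continues
	 * as non-protected and can be rebooted right away.
	 */
	if (pv_command(vm_fd, KVM_PV_ASYNC_CLEANUP_PREPARE)) {
		/* PREPARE failed (e.g. a teardown is already pending):
		 * fall back to the synchronous path. */
		pv_command(vm_fd, KVM_PV_DISABLE);
		return;
	}
	/* Free the set-aside resources from a separate thread, as the
	 * documentation above recommends. */
	pthread_create(&tid, NULL, teardown_thread, (void *)(intptr_t)vm_fd);
}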

arch/s390/include/asm/kvm_host.h

Lines changed: 11 additions & 3 deletions
@@ -142,8 +142,7 @@ struct mcck_volatile_info {
 			       CR14_EXTERNAL_DAMAGE_SUBMASK)
 
 #define SIDAD_SIZE_MASK		0xff
-#define sida_origin(sie_block) \
-	((sie_block)->sidad & PAGE_MASK)
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
 #define sida_size(sie_block) \
 	((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
 
@@ -276,6 +275,7 @@ struct kvm_s390_sie_block {
 #define ECB3_AES	0x04
 #define ECB3_RI		0x01
 	__u8	ecb3;			/* 0x0063 */
+#define ESCA_SCAOL_MASK	~0x3fU
 	__u32	scaol;			/* 0x0064 */
 	__u8	sdf;			/* 0x0068 */
 	__u8	epdx;			/* 0x0069 */
@@ -942,6 +942,8 @@ struct kvm_s390_pv {
 	unsigned long stor_base;
 	void *stor_var;
 	bool dumping;
+	void *set_aside;
+	struct list_head need_cleanup;
 	struct mmu_notifier mmu_notifier;
 };
 
@@ -1017,7 +1019,13 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
 			       unsigned long *aqm, unsigned long *adm);
 
-extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);
+
+static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
+{
+	return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
+}
+
 extern char sie_exit;
 
 extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
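Existing callers of sie64a() are unaffected by this change: the inline wrapper derives the control block's physical address itself and forwards both addresses to the assembly entry point. A simplified, hypothetical call site for illustration only (the real callers live elsewhere in arch/s390/kvm and are not part of this hunk):

#include <linux/kvm_host.h>

/* Hypothetical helper, for illustration only. */
static int run_guest_once(struct kvm_vcpu *vcpu)
{
	/* virt_to_phys() of the control block happens inside sie64a(). */
	return sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
}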

arch/s390/include/asm/mem_encrypt.h

Lines changed: 2 additions & 2 deletions
@@ -4,8 +4,8 @@
 
 #ifndef __ASSEMBLY__
 
-int set_memory_encrypted(unsigned long addr, int numpages);
-int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long vaddr, int numpages);
+int set_memory_decrypted(unsigned long vaddr, int numpages);
 
 #endif /* __ASSEMBLY__ */

arch/s390/include/asm/stacktrace.h

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ struct stack_frame {
 			unsigned long sie_savearea;
 			unsigned long sie_reason;
 			unsigned long sie_flags;
+			unsigned long sie_control_block_phys;
 		};
 	};
 	unsigned long gprs[10];

arch/s390/include/asm/uv.h

Lines changed: 10 additions & 0 deletions
@@ -34,6 +34,7 @@
 #define UVC_CMD_INIT_UV			0x000f
 #define UVC_CMD_CREATE_SEC_CONF		0x0100
 #define UVC_CMD_DESTROY_SEC_CONF	0x0101
+#define UVC_CMD_DESTROY_SEC_CONF_FAST	0x0102
 #define UVC_CMD_CREATE_SEC_CPU		0x0120
 #define UVC_CMD_DESTROY_SEC_CPU		0x0121
 #define UVC_CMD_CONV_TO_SEC_STOR	0x0200
@@ -81,6 +82,7 @@ enum uv_cmds_inst {
 	BIT_UVC_CMD_UNSHARE_ALL = 20,
 	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
 	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
+	BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
 	BIT_UVC_CMD_DUMP_INIT = 24,
 	BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
 	BIT_UVC_CMD_DUMP_CPU = 26,
@@ -230,6 +232,14 @@ struct uv_cb_nodata {
 	u64 reserved20[4];
 } __packed __aligned(8);
 
+/* Destroy Configuration Fast */
+struct uv_cb_destroy_fast {
+	struct uv_cb_header header;
+	u64 reserved08[2];
+	u64 handle;
+	u64 reserved20[5];
+} __packed __aligned(8);
+
 /* Set Shared Access */
 struct uv_cb_share {
 	struct uv_cb_header header;
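The new control block parallels struct uv_cb_nodata, with the configuration handle at offset 0x18. A hedged sketch of how the fast-destroy call could be issued through the existing uv_call() helper; the wrapper name and error mapping are assumptions, and the real callers belong to the lazy-destroy series and are not shown in this excerpt:

#include <linux/errno.h>
#include <asm/uv.h>

/* Illustrative wrapper; name and error mapping are assumptions. */
static int destroy_sec_conf_fast(u64 handle, u16 *rc, u16 *rrc)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	/* uv_call() issues the Ultravisor call described by the control block. */
	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EIO : 0;
}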

arch/s390/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
@@ -62,6 +62,7 @@ int main(void)
 	OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
 	OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
 	OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
+	OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
 	DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
 	BLANK();
 	/* idle data offsets */

arch/s390/kernel/entry.S

Lines changed: 15 additions & 11 deletions
@@ -225,18 +225,20 @@ ENDPROC(__switch_to)
 
 #if IS_ENABLED(CONFIG_KVM)
 /*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
+ * __sie64a calling convention:
+ * %r2 pointer to sie control block phys
+ * %r3 pointer to sie control block virt
+ * %r4 guest register save area
  */
-ENTRY(sie64a)
+ENTRY(__sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	lg	%r12,__LC_CURRENT
-	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
-	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
+	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
+	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
+	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
 	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
 	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
-	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
+	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
 	lg	%r14,__LC_GMAP			# get gmap pointer
 	ltgr	%r14,%r14
 	jz	.Lsie_gmap
@@ -248,6 +250,7 @@ ENTRY(sie64a)
 	jnz	.Lsie_skip
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
+	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
 	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_entry:
 	sie	0(%r14)
@@ -258,13 +261,14 @@ ENTRY(sie64a)
 	BPOFF
 	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 .Lsie_skip:
+	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
 # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
 # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
-# Other instructions between sie64a and .Lsie_done should not cause program
+# Other instructions between __sie64a and .Lsie_done should not cause program
 # interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 .Lrewind_pad6:
 	nopr	7
@@ -293,8 +297,8 @@ sie_exit:
 	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
 	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
-ENDPROC(sie64a)
-EXPORT_SYMBOL(sie64a)
+ENDPROC(__sie64a)
+EXPORT_SYMBOL(__sie64a)
 EXPORT_SYMBOL(sie_exit)
 #endif
 
@@ -373,7 +377,7 @@ ENTRY(pgm_check_handler)
 	j	3f			# -> fault in user space
 .Lpgm_skip_asce:
 #if IS_ENABLED(CONFIG_KVM)
-	# cleanup critical section for program checks in sie64a
+	# cleanup critical section for program checks in __sie64a
 	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
 	SIEEXIT
 	lghi	%r10,_PIF_GUEST_FAULT

arch/s390/kernel/uv.c

Lines changed: 7 additions & 0 deletions
@@ -255,6 +255,13 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
  */
 static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
 {
+	/*
+	 * The misc feature indicates, among other things, that importing a
+	 * shared page from a different protected VM will automatically also
+	 * transfer its ownership.
+	 */
+	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
+		return false;
 	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
 		return false;
 	return atomic_read(&mm->context.protected_count) > 1;

arch/s390/kvm/intercept.c

Lines changed: 4 additions & 5 deletions
@@ -217,7 +217,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
 		return 0;
 	if (current->thread.per_flags & PER_FLAG_NO_TE)
 		return 0;
-	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
+	itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
 	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
 	if (rc)
 		return rc;
@@ -409,8 +409,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 out:
 	if (!cc) {
 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
-			memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
-			       sctns, PAGE_SIZE);
+			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
 		} else {
 			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
 			if (r) {
@@ -464,7 +463,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
 
 static int handle_pv_spx(struct kvm_vcpu *vcpu)
 {
-	u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
+	u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
 
 	kvm_s390_set_prefix(vcpu, pref);
 	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
@@ -497,7 +496,7 @@ static int handle_pv_sclp(struct kvm_vcpu *vcpu)
 
 static int handle_pv_uvc(struct kvm_vcpu *vcpu)
 {
-	struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
+	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
 	struct uv_cb_cts uvcb = {
 		.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
 		.header.len = sizeof(uvcb),

arch/s390/kvm/interrupt.c

Lines changed: 0 additions & 5 deletions
@@ -314,11 +314,6 @@ static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
 	return READ_ONCE(gisa->ipm);
 }
 
-static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
-{
-	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
-}
-
 static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
 	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
