16
16
#include <linux/capability.h>
17
17
#include <linux/sizes.h>
18
18
19
/* DAT program interruption code for a segment-translation exception */
#define PGM_SEGMENT_TRANSLATION 0x10

/* guest memory initially backed by the fixture */
#define VM_MEM_SIZE (4 * SZ_1M)
/* additional guest memory mapped/unmapped by the ucas tests */
#define VM_MEM_EXT_SIZE (2 * SZ_1M)
/* total size of the host backing allocation, in MiB (base + extension) */
#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)
20
24
21
25
/* so directly declare capget to check caps without libcap */
22
26
int capget (cap_user_header_t header , cap_user_data_t data );
@@ -58,6 +62,23 @@ asm("test_gprs_asm:\n"
58
62
" j 0b\n"
59
63
);
60
64
65
/* Test program manipulating memory
 *
 * Register contract (the host sets these via sync_regs before entry):
 *   %r1 - value to store and reload
 *   %r5, %r6 - base and index of the memory word to access
 * %r0 counts the accesses performed, so the host can assert how far
 * the guest progressed before each exit.
 */
extern char test_mem_asm[];
asm("test_mem_asm:\n"
/* clear the progress counter */
"xgr %r0, %r0\n"

"0:\n"
"	ahi %r0,1\n"
/* store %r1 to 0(%r5,%r6) — faults while the segment is unmapped */
"	st %r1,0(%r5,%r6)\n"

/* clear %r1 and read the value back from the same location */
"	xgr %r1,%r1\n"
"	l %r1,0(%r5,%r6)\n"
"	ahi %r0,1\n"
/* diag 0x44: voluntary yield, causes an intercept back to the host */
"	diag 0,0,0x44\n"

"	j 0b\n"
);
81
+
61
82
FIXTURE (uc_kvm )
62
83
{
63
84
struct kvm_s390_sie_block * sie_block ;
@@ -67,6 +88,7 @@ FIXTURE(uc_kvm)
67
88
uintptr_t base_hva ;
68
89
uintptr_t code_hva ;
69
90
int kvm_run_size ;
91
+ vm_paddr_t pgd ;
70
92
void * vm_mem ;
71
93
int vcpu_fd ;
72
94
int kvm_fd ;
@@ -116,7 +138,7 @@ FIXTURE_SETUP(uc_kvm)
116
138
self -> base_gpa = 0 ;
117
139
self -> code_gpa = self -> base_gpa + (3 * SZ_1M );
118
140
119
- self -> vm_mem = aligned_alloc (SZ_1M , VM_MEM_SIZE );
141
+ self -> vm_mem = aligned_alloc (SZ_1M , VM_MEM_MAX_M * SZ_1M );
120
142
ASSERT_NE (NULL , self -> vm_mem ) TH_LOG ("malloc failed %u" , errno );
121
143
self -> base_hva = (uintptr_t )self -> vm_mem ;
122
144
self -> code_hva = self -> base_hva - self -> base_gpa + self -> code_gpa ;
@@ -222,6 +244,60 @@ TEST(uc_cap_hpage)
222
244
close (kvm_fd );
223
245
}
224
246
247
+ /* calculate host virtual addr from guest physical addr */
248
+ static void * gpa2hva (FIXTURE_DATA (uc_kvm ) * self , u64 gpa )
249
+ {
250
+ return (void * )(self -> base_hva - self -> base_gpa + gpa );
251
+ }
252
+
253
+ /* map / make additional memory available */
254
+ static int uc_map_ext (FIXTURE_DATA (uc_kvm ) * self , u64 vcpu_addr , u64 length )
255
+ {
256
+ struct kvm_s390_ucas_mapping map = {
257
+ .user_addr = (u64 )gpa2hva (self , vcpu_addr ),
258
+ .vcpu_addr = vcpu_addr ,
259
+ .length = length ,
260
+ };
261
+ pr_info ("ucas map %p %p 0x%llx" ,
262
+ (void * )map .user_addr , (void * )map .vcpu_addr , map .length );
263
+ return ioctl (self -> vcpu_fd , KVM_S390_UCAS_MAP , & map );
264
+ }
265
+
266
+ /* unmap previously mapped memory */
267
+ static int uc_unmap_ext (FIXTURE_DATA (uc_kvm ) * self , u64 vcpu_addr , u64 length )
268
+ {
269
+ struct kvm_s390_ucas_mapping map = {
270
+ .user_addr = (u64 )gpa2hva (self , vcpu_addr ),
271
+ .vcpu_addr = vcpu_addr ,
272
+ .length = length ,
273
+ };
274
+ pr_info ("ucas unmap %p %p 0x%llx" ,
275
+ (void * )map .user_addr , (void * )map .vcpu_addr , map .length );
276
+ return ioctl (self -> vcpu_fd , KVM_S390_UCAS_UNMAP , & map );
277
+ }
278
+
279
+ /* handle ucontrol exit by mapping the accessed segment */
280
+ static void uc_handle_exit_ucontrol (FIXTURE_DATA (uc_kvm ) * self )
281
+ {
282
+ struct kvm_run * run = self -> run ;
283
+ u64 seg_addr ;
284
+ int rc ;
285
+
286
+ TEST_ASSERT_EQ (KVM_EXIT_S390_UCONTROL , run -> exit_reason );
287
+ switch (run -> s390_ucontrol .pgm_code ) {
288
+ case PGM_SEGMENT_TRANSLATION :
289
+ seg_addr = run -> s390_ucontrol .trans_exc_code & ~(SZ_1M - 1 );
290
+ pr_info ("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n" ,
291
+ run -> s390_ucontrol .trans_exc_code , seg_addr );
292
+ /* map / make additional memory available */
293
+ rc = uc_map_ext (self , seg_addr , SZ_1M );
294
+ TEST_ASSERT_EQ (0 , rc );
295
+ break ;
296
+ default :
297
+ TEST_FAIL ("UNEXPECTED PGM CODE %d" , run -> s390_ucontrol .pgm_code );
298
+ }
299
+ }
300
+
225
301
/* verify SIEIC exit
226
302
* * fail on codes not expected in the test cases
227
303
*/
@@ -255,6 +331,12 @@ static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
255
331
struct kvm_run * run = self -> run ;
256
332
257
333
switch (run -> exit_reason ) {
334
+ case KVM_EXIT_S390_UCONTROL :
335
+ /** check program interruption code
336
+ * handle page fault --> ucas map
337
+ */
338
+ uc_handle_exit_ucontrol (self );
339
+ break ;
258
340
case KVM_EXIT_S390_SIEIC :
259
341
return uc_handle_sieic (self );
260
342
default :
@@ -286,6 +368,67 @@ static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
286
368
TEST_ASSERT_EQ (0x440000 , sie_block -> ipb );
287
369
}
288
370
371
/* Exercise KVM_S390_UCAS_MAP / KVM_S390_UCAS_UNMAP against a guest that
 * stores to and loads from a segment beyond the initially mapped memory.
 * Verifies: fault on unmapped access, rejection of unaligned mappings,
 * successful access after mapping, and a fresh fault after unmapping.
 */
TEST_F(uc_kvm, uc_map_unmap)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	const u64 disp = 1;	/* displacement into the not-yet-mapped segment */
	int rc;

	/* copy test_mem_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* set register content for test_mem_asm to access not mapped memory*/
	sync_regs->gprs[1] = 0x55;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = VM_MEM_SIZE + disp;	/* target = base + 4M + 1 */
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* run and expect to fail with ucontrol pic segment translation */
	ASSERT_EQ(0, uc_run_once(self));
	/* gprs[0] == 1: guest incremented once, then faulted on the store */
	ASSERT_EQ(1, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);

	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);

	/* fail to map memory with not segment aligned address */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
	ASSERT_GT(0, rc)
		TH_LOG("ucas map for non segment address should fail but didn't; "
		       "result %d not expected, %s", rc, strerror(errno));

	/* map / make additional memory available */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	/* diag44 intercept is not handled by uc_handle_exit -> returns false */
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* assert registers and memory are in expected state */
	/* gprs[0] == 2: both the store and the reload completed this time */
	ASSERT_EQ(2, sync_regs->gprs[0]);
	/* gprs[1] was cleared, then reloaded from the freshly mapped memory */
	ASSERT_EQ(0x55, sync_regs->gprs[1]);
	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));

	/* unmap and run loop again */
	rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	/* gprs[0] == 3: one more increment before the store faults again */
	ASSERT_EQ(3, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	/* handle ucontrol exit and remap memory after previous map and unmap */
	ASSERT_EQ(true, uc_handle_exit(self));
}
431
+
289
432
TEST_F (uc_kvm , uc_gprs )
290
433
{
291
434
struct kvm_sync_regs * sync_regs = & self -> run -> s .regs ;
0 commit comments