49
49
/* Hypervisor steal-clock state values, as returned by the STEALCLOCK hypercall. */
#define STEALCLOCK_DISABLED	0
#define STEALCLOCK_ENABLED	1
51
51
52
- #define VMWARE_PORT (cmd , eax , ebx , ecx , edx ) \
53
- __asm__("inl (%%dx), %%eax" : \
54
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
55
- "a"(VMWARE_HYPERVISOR_MAGIC), \
56
- "c"(VMWARE_CMD_##cmd), \
57
- "d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) : \
58
- "memory")
59
-
60
- #define VMWARE_VMCALL (cmd , eax , ebx , ecx , edx ) \
61
- __asm__("vmcall" : \
62
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
63
- "a"(VMWARE_HYPERVISOR_MAGIC), \
64
- "c"(VMWARE_CMD_##cmd), \
65
- "d"(0), "b"(UINT_MAX) : \
66
- "memory")
67
-
68
- #define VMWARE_VMMCALL (cmd , eax , ebx , ecx , edx ) \
69
- __asm__("vmmcall" : \
70
- "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
71
- "a"(VMWARE_HYPERVISOR_MAGIC), \
72
- "c"(VMWARE_CMD_##cmd), \
73
- "d"(0), "b"(UINT_MAX) : \
74
- "memory")
75
-
76
- #define VMWARE_CMD (cmd , eax , ebx , ecx , edx ) do { \
77
- switch (vmware_hypercall_mode) { \
78
- case CPUID_VMWARE_FEATURES_ECX_VMCALL: \
79
- VMWARE_VMCALL(cmd, eax, ebx, ecx, edx); \
80
- break; \
81
- case CPUID_VMWARE_FEATURES_ECX_VMMCALL: \
82
- VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx); \
83
- break; \
84
- default: \
85
- VMWARE_PORT(cmd, eax, ebx, ecx, edx); \
86
- break; \
87
- } \
88
- } while (0)
89
-
90
52
/*
 * Per-vCPU steal-time record shared with the hypervisor (registered via
 * the STEALCLOCK hypercall).  NOTE(review): layout appears to be part of
 * the guest/hypervisor ABI (fixed-width fields plus reserved padding) —
 * do not reorder or resize fields; confirm against the VMware interface
 * specification.
 */
struct vmware_steal_time {
	union {
		u64 clock;	/* stolen time counter in units of vtsc */
		struct {
			/*
			 * Split view of the 64-bit counter, used for a
			 * tear-free read on 32-bit kernels.
			 * Only valid for little-endian layouts.
			 */
			u32 clock_low;
			u32 clock_high;
		};
	};
	u64 reserved[7];	/* pads the record; presumably reserved by the ABI */
};
101
63
102
64
/*
 * TSC frequency in kHz; __ro_after_init: written only during boot.
 * Presumably populated from the hypervisor's GETHZ reply — confirm in
 * vmware_platform_setup().
 */
static unsigned long vmware_tsc_khz __ro_after_init;
@@ -166,9 +128,10 @@ unsigned long vmware_hypercall_slow(unsigned long cmd,
166
128
167
129
/*
 * Probe for the VMware hypervisor via the GETVERSION hypercall.
 *
 * Returns non-zero when running on VMware: the hypervisor answers with a
 * valid version in EAX (anything but UINT_MAX) and its magic value in EBX.
 * ECX output is required by the hypercall ABI but unused here.
 */
static inline int __vmware_platform(void)
{
	u32 eax, ebx, ecx;

	eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
	return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
}
173
136
174
137
static unsigned long vmware_get_tsc_khz (void )
@@ -220,21 +183,12 @@ static void __init vmware_cyc2ns_setup(void)
220
183
pr_info ("using clock offset of %llu ns\n" , d -> cyc2ns_offset );
221
184
}
222
185
223
/*
 * Issue the STEALCLOCK hypercall, passing the guest-physical address of
 * the per-vCPU steal-time structure split into its high and low 32-bit
 * halves (@addr_hi/@addr_lo).
 *
 * Returns the hypercall's primary result (interpreted by the caller,
 * presumably against STEALCLOCK_ENABLED/STEALCLOCK_DISABLED — confirm in
 * stealclock_enable()).  The secondary output in @info is discarded.
 */
static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{
	u32 info;

	return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
				 &info);
}
239
193
240
194
static bool stealclock_enable (phys_addr_t pa )
@@ -269,15 +223,15 @@ static bool vmware_is_stealclock_available(void)
269
223
* Return:
270
224
* The steal clock reading in ns.
271
225
*/
272
- static uint64_t vmware_steal_clock (int cpu )
226
+ static u64 vmware_steal_clock (int cpu )
273
227
{
274
228
struct vmware_steal_time * steal = & per_cpu (vmw_steal_time , cpu );
275
- uint64_t clock ;
229
+ u64 clock ;
276
230
277
231
if (IS_ENABLED (CONFIG_64BIT ))
278
232
clock = READ_ONCE (steal -> clock );
279
233
else {
280
- uint32_t initial_high , low , high ;
234
+ u32 initial_high , low , high ;
281
235
282
236
do {
283
237
initial_high = READ_ONCE (steal -> clock_high );
@@ -289,7 +243,7 @@ static uint64_t vmware_steal_clock(int cpu)
289
243
high = READ_ONCE (steal -> clock_high );
290
244
} while (initial_high != high );
291
245
292
- clock = ((uint64_t )high << 32 ) | low ;
246
+ clock = ((u64 )high << 32 ) | low ;
293
247
}
294
248
295
249
return mul_u64_u32_shr (clock , vmware_cyc2ns .cyc2ns_mul ,
@@ -443,13 +397,13 @@ static void __init vmware_set_capabilities(void)
443
397
444
398
static void __init vmware_platform_setup (void )
445
399
{
446
- uint32_t eax , ebx , ecx , edx ;
447
- uint64_t lpj , tsc_khz ;
400
+ u32 eax , ebx , ecx ;
401
+ u64 lpj , tsc_khz ;
448
402
449
- VMWARE_CMD ( GETHZ , eax , ebx , ecx , edx );
403
+ eax = vmware_hypercall3 ( VMWARE_CMD_GETHZ , UINT_MAX , & ebx , & ecx );
450
404
451
405
if (ebx != UINT_MAX ) {
452
- lpj = tsc_khz = eax | (((uint64_t )ebx ) << 32 );
406
+ lpj = tsc_khz = eax | (((u64 )ebx ) << 32 );
453
407
do_div (tsc_khz , 1000 );
454
408
WARN_ON (tsc_khz >> 32 );
455
409
pr_info ("TSC freq read from hypervisor : %lu.%03lu MHz\n" ,
@@ -500,7 +454,7 @@ static u8 __init vmware_select_hypercall(void)
500
454
* If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
501
455
* intentionally defaults to 0.
502
456
*/
503
- static uint32_t __init vmware_platform (void )
457
+ static u32 __init vmware_platform (void )
504
458
{
505
459
if (boot_cpu_has (X86_FEATURE_HYPERVISOR )) {
506
460
unsigned int eax ;
@@ -528,8 +482,9 @@ static uint32_t __init vmware_platform(void)
528
482
/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
	u32 eax;

	/*
	 * GETVCPU_INFO returns a bitmask of vCPU capabilities in EAX.
	 * x2apic is usable without interrupt remapping only when the
	 * reserved bit is clear and the legacy-x2apic bit is set.
	 */
	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
	return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
	       (eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
}
0 commit comments