@@ -400,68 +400,33 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
 
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
-static inline bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		return test_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-	return true;
-}
-
-static inline bool vmx_test_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		return test_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-	return true;
-}
-
-static inline void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__clear_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__clear_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
-
-static inline void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__set_bit(msr, msr_bitmap + 0x000 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-	int f = sizeof(unsigned long);
-
-	if (msr <= 0x1fff)
-		__set_bit(msr, msr_bitmap + 0x800 / f);
-	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-		__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
+/*
+ * Note, early Intel manuals have the write-low and read-high bitmap offsets
+ * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
+ * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
+ * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
+ * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
+ * VM-Exit.
+ */
+#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
+static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
+						       u32 msr)		       \
+{									       \
+	int f = sizeof(unsigned long);					       \
+									       \
+	if (msr <= 0x1fff)						       \
+		return bitop##_bit(msr, bitmap + base / f);		       \
+	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
+		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
+	return (rtype)true;						       \
 }
+#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
+	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)    \
+	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
 
+BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
 
 static inline u8 vmx_get_rvi(void)
 {
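
For reference, each BUILD_VMX_MSR_BITMAP_HELPERS() instantiation regenerates one read/write pair of the helpers the patch deletes, with access selecting the base offset and bitop selecting the bit operation. A hand-expanded sketch (whitespace adjusted) of the read variant produced by BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test):

static inline bool vmx_test_msr_bitmap_read(unsigned long *bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	/* Low MSRs: read bits live in bytes 0-0x3ff of the bitmap page. */
	if (msr <= 0x1fff)
		return test_bit(msr, bitmap + 0x0 / f);
	/* High MSRs: read bits live 0x400 bytes past the read base. */
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff, bitmap + (0x0 + 0x400) / f);
	/* Out-of-range MSRs always VM-Exit, i.e. are always "intercepted". */
	return (bool)true;
}

The (rtype)true cast on the tail return is what lets one macro body serve both return types: for the set/clear helpers it expands to return (void)true;, which the kernel's supported compilers accept in a void-returning function, so no second macro variant is needed for the non-bool case.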
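As a concrete check on the layout described in the new comment, here is a small illustrative helper, not part of the patch or of kernel API (the name msr_bitmap_offset and the out-parameter shape are assumptions), that maps an MSR to the byte/bit position of its intercept bit in the 4 KiB bitmap page:

/* Illustrative only: mirrors the 0x0 (read) and 0x800 (write) bases and
 * the +0x400 bump for the high MSR range; returns false for MSRs outside
 * both ranges, which always VM-Exit regardless of the bitmap contents.
 */
static inline bool msr_bitmap_offset(u32 msr, bool write, u32 *byte, u32 *bit)
{
	u32 base = write ? 0x800 : 0x0;

	if (msr <= 0x1fff)
		; /* low range: base is used as-is */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base += 0x400; /* high range: second (read) or fourth (write) quarter */
	else
		return false;

	*byte = base + (msr & 0x1fff) / 8;
	*bit = msr & 7;
	return true;
}

Plugging in IA32_EFER (MSR 0xc0000080) for a write: the 0x800 base is bumped to 0xc00, the bit index is 0xc0000080 & 0x1fff = 0x80, and the intercept bit lands at byte 0xc10, bit 0, squarely inside the 0xc00-0xfff write-high region the comment describes.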