Commit 0a2b64c

Ben Gardon authored and bonzini committed
kvm: mmu: Replace unsigned with unsigned int for PTE access
There are several functions which pass an access permission mask for
SPTEs as an unsigned. This works, but checkpatch complains about it.
Switch the occurrences of unsigned to unsigned int to satisfy
checkpatch.

No functional change expected. Tested by running kvm-unit-tests on an
Intel Haswell machine. This commit introduced no new failures.

Signed-off-by: Ben Gardon <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Reviewed-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
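For context, the checkpatch complaint in question is its "Prefer 'unsigned int' to bare use of 'unsigned'" warning, which can be reproduced on this file with scripts/checkpatch.pl -f arch/x86/kvm/mmu/mmu.c. A minimal standalone sketch of the pattern follows; the function names are hypothetical illustrations, not code from mmu.c:

	/*
	 * Flagged by checkpatch: bare 'unsigned' as a declaration type.
	 * WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
	 */
	static unsigned example_spte_access_old(unsigned access)
	{
		return access & 0x7;	/* illustrative access mask */
	}

	/* Preferred spelling of the same declaration. */
	static unsigned int example_spte_access_new(unsigned int access)
	{
		return access & 0x7;
	}

Since 'unsigned' and 'unsigned int' name the same type in C, the substitution is purely stylistic, which is why no functional change is expected.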
1 parent ea79a75 · commit 0a2b64c

File tree

1 file changed: +13 −11 lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 13 additions & 11 deletions
@@ -452,7 +452,7 @@ static u64 get_mmio_spte_generation(u64 spte)
 }
 
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
-			   unsigned access)
+			   unsigned int access)
 {
 	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
 	u64 mask = generation_mmio_spte_mask(gen);
@@ -484,7 +484,7 @@ static unsigned get_mmio_spte_access(u64 spte)
 }
 
 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			  kvm_pfn_t pfn, unsigned access)
+			  kvm_pfn_t pfn, unsigned int access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
 		mark_mmio_spte(vcpu, sptep, gfn, access);
@@ -2475,7 +2475,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gva_t gaddr,
 					     unsigned level,
 					     int direct,
-					     unsigned access)
+					     unsigned int access)
 {
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
@@ -2990,7 +2990,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-		    unsigned pte_access, int level,
+		    unsigned int pte_access, int level,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
@@ -3081,9 +3081,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	return ret;
 }
 
-static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
-			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
-			bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+			unsigned int pte_access, int write_fault, int level,
+			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
+			bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -3165,7 +3166,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 {
 	struct page *pages[PTE_PREFETCH_NUM];
 	struct kvm_memory_slot *slot;
-	unsigned access = sp->role.access;
+	unsigned int access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
@@ -3400,7 +3401,8 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 }
 
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
-				kvm_pfn_t pfn, unsigned access, int *ret_val)
+				kvm_pfn_t pfn, unsigned int access,
+				int *ret_val)
 {
 	/* The pfn is invalid, report the error! */
 	if (unlikely(is_error_pfn(pfn))) {
@@ -4005,7 +4007,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
 	if (is_mmio_spte(spte)) {
 		gfn_t gfn = get_mmio_spte_gfn(spte);
-		unsigned access = get_mmio_spte_access(spte);
+		unsigned int access = get_mmio_spte_access(spte);
 
 		if (!check_mmio_spte(vcpu, spte))
 			return RET_PF_INVALID;
@@ -4349,7 +4351,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			   unsigned access, int *nr_present)
+			   unsigned int access, int *nr_present)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
 		if (gfn != get_mmio_spte_gfn(*sptep)) {
