@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
 }
 
-static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 struct kvm_book3e_206_tlb_entry *gtlbe,
 					 kvm_pfn_t pfn, unsigned int wimg)
 {
@@ -252,7 +252,11 @@ static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 	/* Use guest supplied MAS2_G and MAS2_E */
 	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
 
-	return tlbe_is_writable(gtlbe);
+	/* Mark the page accessed */
+	kvm_set_pfn_accessed(pfn);
+
+	if (tlbe_is_writable(gtlbe))
+		kvm_set_pfn_dirty(pfn);
 }
 
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
@@ -333,7 +337,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned int wimg = 0;
 	pgd_t *pgdir;
 	unsigned long flags;
-	bool writable = false;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_invalidate_seq;
@@ -487,9 +490,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 			goto out;
 		}
 	}
-	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-	if (writable)
-		kvm_set_pfn_dirty(pfn);
+	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);