
Commit 75841d8

bjorn-helgaas authored and oupton committed
KVM: arm64: Fix typos
Fix typos, most reported by "codespell arch/arm64". Only touches comments,
no code changes.

Signed-off-by: Bjorn Helgaas <[email protected]>
Cc: James Morse <[email protected]>
Cc: Suzuki K Poulose <[email protected]>
Cc: Zenghui Yu <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Reviewed-by: Zenghui Yu <[email protected]>
Reviewed-by: Randy Dunlap <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent 284851e commit 75841d8

File tree

8 files changed, +10 -10 lines changed


arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 1 deletion

@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
  */
 #define __kvm_swab32(x)	___constant_swab32(x)
 

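A side note on the comment itself: the reason KVM insists on the macro is that a macro expansion can never become an out-of-line call, which matters for hyp code that cannot safely call back into kernel text. A minimal userspace sketch in the style of ___constant_swab32() (illustrative, not the kernel's definition verbatim):

#include <stdint.h>
#include <stdio.h>

/*
 * Pure-macro 32-bit byte swap in the style of ___constant_swab32().
 * Because it is a macro, every use is expanded at the call site; there
 * is no function body the compiler could choose to keep out of line.
 */
#define SWAB32(x) ((uint32_t)(				\
	(((uint32_t)(x) & 0x000000ffUL) << 24) |	\
	(((uint32_t)(x) & 0x0000ff00UL) <<  8) |	\
	(((uint32_t)(x) & 0x00ff0000UL) >>  8) |	\
	(((uint32_t)(x) & 0xff000000UL) >> 24)))

int main(void)
{
	printf("%08x\n", SWAB32(0x12345678u));	/* prints 78563412 */
	return 0;
}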
arch/arm64/kvm/arch_timer.c

Lines changed: 1 addition & 1 deletion

@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
 		WARN_ON_ONCE(ret);
 
 		/*
-		 * The virtual offset behaviour is "interresting", as it
+		 * The virtual offset behaviour is "interesting", as it
		 * always applies when HCR_EL2.E2H==0, but only when
		 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
		 * track E2H when putting the HV timer in "direct" mode.

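The corrected comment encodes a rule that is easy to state as a predicate. A hypothetical helper, for illustration only (virt_offset_applies() does not exist in the kernel):

#include <stdbool.h>

/*
 * The virtual counter offset applies unconditionally when
 * HCR_EL2.E2H == 0, but only to EL1 accesses when HCR_EL2.E2H == 1.
 */
static bool virt_offset_applies(bool e2h, bool access_from_el1)
{
	return !e2h || access_from_el1;
}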
arch/arm64/kvm/fpsimd.c

Lines changed: 1 addition & 1 deletion

@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
  * and interrupts are disabled. If we have managed to run anything using
  * FP while we were preemptible (such as off the back of an interrupt),
  * then neither the host nor the guest own the FP hardware (and it was the

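The ownership rule this comment describes can be modeled in a few lines. The sketch below uses hypothetical names (fp_owner, fp_owner_at_guest_entry); the kernel tracks this differently, but the logic of "FP use while preemptible means nobody owns the registers" is the same:

#include <stdbool.h>

/*
 * Toy model, not the kernel's actual state machine: any FP use while we
 * were still preemptible means the registers belong to neither the host
 * nor the guest by the time we reach the non-preemptible, interrupts-off
 * entry window, so the guest's state must be reloaded before entry.
 */
enum fp_owner { FP_OWNER_NONE, FP_OWNER_HOST, FP_OWNER_GUEST };

static enum fp_owner fp_owner_at_guest_entry(enum fp_owner owner,
					     bool fp_used_while_preemptible)
{
	return fp_used_while_preemptible ? FP_OWNER_NONE : owner;
}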
arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 1 addition & 1 deletion

@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
  * u64 elr, u64 par);
  */
 SYM_FUNC_START(__hyp_do_panic)
-	/* Prepare and exit to the host's panic funciton. */
+	/* Prepare and exit to the host's panic function. */
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
 	msr	spsr_el2, lr

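For readers unfamiliar with the PSTATE bits being assembled here: the mov builds an SPSR_EL2 value that masks all DAIF exceptions and selects EL1h, so the subsequent exception return lands in the host's panic path with interrupts off. A standalone sketch of that value (illustrative constants mirroring, but not taken from, the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Bit positions per the AArch64 SPSR layout; userspace sketch only. */
#define PSR_F_BIT	(1u << 6)	/* mask FIQ */
#define PSR_I_BIT	(1u << 7)	/* mask IRQ */
#define PSR_A_BIT	(1u << 8)	/* mask SError */
#define PSR_D_BIT	(1u << 9)	/* mask debug exceptions */
#define PSR_MODE_EL1h	0x5u		/* return to EL1, using SP_EL1 */

int main(void)
{
	uint32_t spsr = PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |
			PSR_MODE_EL1h;

	printf("0x%03x\n", spsr);	/* 0x3c5: everything masked, EL1h */
	return 0;
}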
arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 2 additions & 2 deletions

@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
 		start = hyp_memory[i].base;
 		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
 		/*
-		 * The begining of the hyp_vmemmap region for the current
+		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * the previous region, so avoid mapping it twice.
		 */
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
 	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
 }
 
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc)
 {

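The memcache being refilled here is an intrusive free list of pages donated by the host; "popping" takes the head page and follows a link stored in the page itself. A toy userspace model of the refill pattern, with all toy_* names hypothetical (the kernel's kvm_hyp_memcache differs in detail, linking physical addresses rather than pointers):

#include <stddef.h>

/* Free pages form a singly linked list whose "next" link lives in the
 * first word of the page itself. */
struct toy_memcache {
	void *head;
	unsigned long nr_pages;
};

static void *toy_pop(struct toy_memcache *mc)
{
	void *page = mc->head;

	if (!page)
		return NULL;
	mc->head = *(void **)page;	/* follow the in-page link */
	mc->nr_pages--;
	return page;
}

static void toy_push(struct toy_memcache *mc, void *page)
{
	*(void **)page = mc->head;
	mc->head = page;
	mc->nr_pages++;
}

/* Refill 'mc' to at least min_pages by popping pages from 'host_mc'. */
static int toy_refill(struct toy_memcache *mc, unsigned long min_pages,
		      struct toy_memcache *host_mc)
{
	while (mc->nr_pages < min_pages) {
		void *page = toy_pop(host_mc);

		if (!page)
			return -1;	/* host did not donate enough pages */
		toy_push(mc, page);
	}
	return 0;
}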
arch/arm64/kvm/inject_fault.c

Lines changed: 1 addition & 1 deletion

@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
 		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
 	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
 		fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
 

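The comment alludes to a quirk of the short-descriptor DFSR layout: the five-bit fault status FS is split, with FS[3:0] in DFSR[3:0] and FS[4] in DFSR[10]. A generic encoder would have to place both parts, as in this hypothetical helper; inject_abt32() can skip that step because the non-LPAE external abort code (0b01000) has FS[4] clear:

#include <stdint.h>

/*
 * Hypothetical helper: pack a 5-bit short-descriptor fault status into
 * DFSR, where FS[3:0] sit in DFSR[3:0] and FS[4] sits in DFSR[10].
 */
static uint32_t dfsr_encode_fs(uint32_t fs)
{
	return (fs & 0xf) | (((fs >> 4) & 0x1) << 10);
}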
arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion

@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
 		vgic_lpi_translation_cache_init(kvm);
 
 	/*
-	 * If we have GICv4.1 enabled, unconditionnaly request enable the
+	 * If we have GICv4.1 enabled, unconditionally request enable the
	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
	 * enable it if we present a virtual ITS to the guest.
	 */

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 2 additions & 2 deletions

@@ -1342,8 +1342,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
 }
 
 /**
- * vgic_its_invall - invalidate all LPIs targetting a given vcpu
- * @vcpu: the vcpu for which the RD is targetted by an invalidation
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
  *
  * Contrary to the INVALL command, this targets a RD instead of a
  * collection, and we don't need to hold the its_lock, since no ITS is
