Skip to content

Commit 656012c

Browse files
Fuad Tabba authored and Marc Zyngier committed
KVM: Fix spelling in code comments
Fix spelling and typos (e.g., repeated words) in comments. Signed-off-by: Fuad Tabba <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent ce6f8f0 commit 656012c

File tree

12 files changed

+23
-23
lines changed

12 files changed

+23
-23
lines changed

arch/arm64/kvm/arm.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -455,9 +455,9 @@ void force_vm_exit(const cpumask_t *mask)
455455
*
456456
* The hardware supports a limited set of values with the value zero reserved
457457
* for the host, so we check if an assigned value belongs to a previous
458-
* generation, which which requires us to assign a new value. If we're the
459-
* first to use a VMID for the new generation, we must flush necessary caches
460-
* and TLBs on all CPUs.
458+
* generation, which requires us to assign a new value. If we're the first to
459+
* use a VMID for the new generation, we must flush necessary caches and TLBs
460+
* on all CPUs.
461461
*/
462462
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
463463
{

arch/arm64/kvm/guest.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
267267
/*
268268
* Vector lengths supported by the host can't currently be
269269
* hidden from the guest individually: instead we can only set a
270-
* maxmium via ZCR_EL2.LEN. So, make sure the available vector
270+
* maximum via ZCR_EL2.LEN. So, make sure the available vector
271271
* lengths match the set requested exactly up to the requested
272272
* maximum:
273273
*/
@@ -337,7 +337,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
337337
unsigned int reg_num;
338338

339339
unsigned int reqoffset, reqlen; /* User-requested offset and length */
340-
unsigned int maxlen; /* Maxmimum permitted length */
340+
unsigned int maxlen; /* Maximum permitted length */
341341

342342
size_t sve_state_size;
343343

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -577,7 +577,7 @@ static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
577577

578578
/*
579579
* The priority value is independent of any of the BPR values, so we
580-
* normalize it using the minumal BPR value. This guarantees that no
580+
* normalize it using the minimal BPR value. This guarantees that no
581581
* matter what the guest does with its BPR, we can always set/get the
582582
* same value of a priority.
583583
*/

arch/arm64/kvm/mmio.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
131131

132132
/*
133133
* No valid syndrome? Ask userspace for help if it has
134-
* voluntered to do so, and bail out otherwise.
134+
* volunteered to do so, and bail out otherwise.
135135
*/
136136
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
137137
if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {

arch/arm64/kvm/mmu.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -784,7 +784,7 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
784784
mutex_lock(&kvm_hyp_pgd_mutex);
785785

786786
/*
787-
* This assumes that we we have enough space below the idmap
787+
* This assumes that we have enough space below the idmap
788788
* page to allocate our VAs. If not, the check below will
789789
* kick. A potential alternative would be to detect that
790790
* overflow and switch to an allocation above the idmap.
@@ -964,7 +964,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
964964
* stage2_unmap_vm - Unmap Stage-2 RAM mappings
965965
* @kvm: The struct kvm pointer
966966
*
967-
* Go through the memregions and unmap any reguler RAM
967+
* Go through the memregions and unmap any regular RAM
968968
* backing memory already mapped to the VM.
969969
*/
970970
void stage2_unmap_vm(struct kvm *kvm)
@@ -2262,7 +2262,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
22622262
{
22632263
/*
22642264
* At this point memslot has been committed and there is an
2265-
* allocated dirty_bitmap[], dirty pages will be be tracked while the
2265+
* allocated dirty_bitmap[], dirty pages will be tracked while the
22662266
* memory slot is write protected.
22672267
*/
22682268
if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)

arch/arm64/kvm/psci.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
9494

9595
/*
9696
* NOTE: We always update r0 (or x0) because for PSCI v0.1
97-
* the general puspose registers are undefined upon CPU_ON.
97+
* the general purpose registers are undefined upon CPU_ON.
9898
*/
9999
reset_state->r0 = smccc_get_arg3(source_vcpu);
100100

@@ -265,10 +265,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
265265
case PSCI_0_2_FN_SYSTEM_OFF:
266266
kvm_psci_system_off(vcpu);
267267
/*
268-
* We should'nt be going back to guest VCPU after
268+
* We shouldn't be going back to guest VCPU after
269269
* receiving SYSTEM_OFF request.
270270
*
271-
* If user space accidently/deliberately resumes
271+
* If user space accidentally/deliberately resumes
272272
* guest VCPU after SYSTEM_OFF request then guest
273273
* VCPU should see internal failure from PSCI return
274274
* value. To achieve this, we preload r0 (or x0) with

arch/arm64/kvm/reset.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
163163
vl = vcpu->arch.sve_max_vl;
164164

165165
/*
166-
* Resposibility for these properties is shared between
166+
* Responsibility for these properties is shared between
167167
* kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
168168
* set_sve_vls(). Double-check here just to be sure:
169169
*/
@@ -249,7 +249,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
249249
* ioctl or as part of handling a request issued by another VCPU in the PSCI
250250
* handling code. In the first case, the VCPU will not be loaded, and in the
251251
* second case the VCPU will be loaded. Because this function operates purely
252-
* on the memory-backed valus of system registers, we want to do a full put if
252+
* on the memory-backed values of system registers, we want to do a full put if
253253
* we were loaded (handling a request) and load the values back at the end of
254254
* the function. Otherwise we leave the state alone. In both cases, we
255255
* disable preemption around the vcpu reset as we would otherwise race with
@@ -357,7 +357,7 @@ void kvm_set_ipa_limit(void)
357357
*
358358
* So clamp the ipa limit further down to limit the number of levels.
359359
* Since we can concatenate upto 16 tables at entry level, we could
360-
* go upto 4bits above the maximum VA addressible with the current
360+
* go upto 4bits above the maximum VA addressable with the current
361361
* number of levels.
362362
*/
363363
va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;

arch/arm64/kvm/sys_regs.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
#include "trace.h"
3535

3636
/*
37-
* All of this file is extremly similar to the ARM coproc.c, but the
37+
* All of this file is extremely similar to the ARM coproc.c, but the
3838
* types are different. My gut feeling is that it should be pretty
3939
* easy to merge, but that would be an ABI breakage -- again. VFP
4040
* would also need to be abstracted.
@@ -118,8 +118,8 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
118118
* entry to the guest but are only restored on vcpu_load.
119119
*
120120
* Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
121-
* should never be listed below, because the the MPIDR should only be
122-
* set once, before running the VCPU, and never changed later.
121+
* should never be listed below, because the MPIDR should only be set
122+
* once, before running the VCPU, and never changed later.
123123
*/
124124
switch (reg) {
125125
case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return;

arch/arm64/kvm/vgic/vgic-v3.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -587,7 +587,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
587587
int ret;
588588

589589
/*
590-
* The ListRegs field is 5 bits, but there is a architectural
590+
* The ListRegs field is 5 bits, but there is an architectural
591591
* maximum of 16 list registers. Just ignore bit 4...
592592
*/
593593
kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;

virt/kvm/coalesced_mmio.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
119119

120120
/*
121121
* We're using this spinlock to sync access to the coalesced ring.
122-
* The list doesn't need it's own lock since device registration and
122+
* The list doesn't need its own lock since device registration and
123123
* unregistration should only happen when kvm->slots_lock is held.
124124
*/
125125
spin_lock_init(&kvm->ring_lock);

0 commit comments

Comments (0)