Skip to content

Commit f0953a1

Browse files
Ingo Molnar authored and
torvalds committed
mm: fix typos in comments
Fix ~94 single-word typos in locking code comments, plus a few very obvious grammar mistakes. Link: https://lkml.kernel.org/r/[email protected] Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]> Reviewed-by: Randy Dunlap <[email protected]> Cc: Bhaskar Chowdhury <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent fa60ce2 commit f0953a1

39 files changed

+83
-83
lines changed

include/linux/mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
106106
* embedding these tags into addresses that point to these memory regions, and
107107
* checking that the memory and the pointer tags match on memory accesses)
108108
* redefine this macro to strip tags from pointers.
109-
* It's defined as noop for arcitectures that don't support memory tagging.
109+
* It's defined as noop for architectures that don't support memory tagging.
110110
*/
111111
#ifndef untagged_addr
112112
#define untagged_addr(addr) (addr)

include/linux/vmalloc.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ struct notifier_block; /* in notifier.h */
3333
*
3434
* If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
3535
* shadow memory has been mapped. It's used to handle allocation errors so that
36-
* we don't try to poision shadow on free if it was never allocated.
36+
* we don't try to poison shadow on free if it was never allocated.
3737
*
3838
* Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
3939
* determine which allocations need the module shadow freed.
@@ -43,7 +43,7 @@ struct notifier_block; /* in notifier.h */
4343

4444
/*
4545
* Maximum alignment for ioremap() regions.
46-
* Can be overriden by arch-specific value.
46+
* Can be overridden by arch-specific value.
4747
*/
4848
#ifndef IOREMAP_MAX_ORDER
4949
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */

mm/balloon_compaction.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
5858
/**
5959
* balloon_page_list_dequeue() - removes pages from balloon's page list and
6060
* returns a list of the pages.
61-
* @b_dev_info: balloon device decriptor where we will grab a page from.
61+
* @b_dev_info: balloon device descriptor where we will grab a page from.
6262
* @pages: pointer to the list of pages that would be returned to the caller.
6363
* @n_req_pages: number of requested pages.
6464
*
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
157157
/*
158158
* balloon_page_dequeue - removes a page from balloon's page list and returns
159159
* its address to allow the driver to release the page.
160-
* @b_dev_info: balloon device decriptor where we will grab a page from.
160+
* @b_dev_info: balloon device descriptor where we will grab a page from.
161161
*
162162
* Driver must call this function to properly dequeue a previously enqueued page
163163
* before definitively releasing it back to the guest system.

mm/compaction.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2012,8 +2012,8 @@ static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
20122012
unsigned int wmark_low;
20132013

20142014
/*
2015-
* Cap the low watermak to avoid excessive compaction
2016-
* activity in case a user sets the proactivess tunable
2015+
* Cap the low watermark to avoid excessive compaction
2016+
* activity in case a user sets the proactiveness tunable
20172017
* close to 100 (maximum).
20182018
*/
20192019
wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);

mm/filemap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2755,7 +2755,7 @@ unsigned int seek_page_size(struct xa_state *xas, struct page *page)
27552755
* entirely memory-based such as tmpfs, and filesystems which support
27562756
* unwritten extents.
27572757
*
2758-
* Return: The requested offset on successs, or -ENXIO if @whence specifies
2758+
* Return: The requested offset on success, or -ENXIO if @whence specifies
27592759
* SEEK_DATA and there is no data after @start. There is an implicit hole
27602760
* after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
27612761
* and @end contain data.

mm/gup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1575,7 +1575,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
15751575
* Returns NULL on any kind of failure - a hole must then be inserted into
15761576
* the corefile, to preserve alignment with its headers; and also returns
15771577
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1578-
* allowing a hole to be left in the corefile to save diskspace.
1578+
* allowing a hole to be left in the corefile to save disk space.
15791579
*
15801580
* Called without mmap_lock (takes and releases the mmap_lock by itself).
15811581
*/

mm/highmem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -519,7 +519,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
519519

520520
/*
521521
* Disable migration so resulting virtual address is stable
522-
* accross preemption.
522+
* across preemption.
523523
*/
524524
migrate_disable();
525525
preempt_disable();

mm/huge_memory.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1792,8 +1792,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
17921792
/*
17931793
* Returns
17941794
* - 0 if PMD could not be locked
1795-
* - 1 if PMD was locked but protections unchange and TLB flush unnecessary
1796-
* - HPAGE_PMD_NR is protections changed and TLB flush necessary
1795+
* - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1796+
* - HPAGE_PMD_NR if protections changed and TLB flush necessary
17971797
*/
17981798
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
17991799
unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
@@ -2469,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
24692469
xa_lock(&swap_cache->i_pages);
24702470
}
24712471

2472-
/* lock lru list/PageCompound, ref freezed by page_ref_freeze */
2472+
/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
24732473
lruvec = lock_page_lruvec(head);
24742474

24752475
for (i = nr - 1; i >= 1; i--) {

mm/hugetlb.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -466,7 +466,7 @@ static int allocate_file_region_entries(struct resv_map *resv,
466466
resv->region_cache_count;
467467

468468
/* At this point, we should have enough entries in the cache
469-
* for all the existings adds_in_progress. We should only be
469+
* for all the existing adds_in_progress. We should only be
470470
* needing to allocate for regions_needed.
471471
*/
472472
VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
@@ -5536,8 +5536,8 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
55365536
v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
55375537

55385538
/*
5539-
* vma need span at least one aligned PUD size and the start,end range
5540-
* must at least partialy within it.
5539+
* vma needs to span at least one aligned PUD size, and the range
5540+
* must be at least partially within in.
55415541
*/
55425542
if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
55435543
(*end <= v_start) || (*start >= v_end))

mm/internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -334,7 +334,7 @@ static inline bool is_exec_mapping(vm_flags_t flags)
334334
}
335335

336336
/*
337-
* Stack area - atomatically grows in one direction
337+
* Stack area - automatically grows in one direction
338338
*
339339
* VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
340340
* do_mmap() forbids all other combinations.

0 commit comments

Comments
 (0)