6 changes: 3 additions & 3 deletions include/linux/hugetlb.h
@@ -175,7 +175,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud);

-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -298,8 +298,8 @@ static inline unsigned long hugetlb_total_pages(void)
	return 0;
}

-static inline struct address_space *hugetlb_page_mapping_lock_write(
-		struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+		struct folio *folio)
{
	return NULL;
}
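The stub's rename above mirrors the declaration change. For illustration, here is a minimal caller-side sketch of the new folio API (not from this PR; example_lock_hugetlb_mapping() is a hypothetical name). It assumes the folio lock is already held, which is what keeps folio_mapping() stable:

static void example_lock_hugetlb_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* Caller must hold the folio lock; that is what pins ->mapping. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	mapping = hugetlb_folio_mapping_lock_write(folio);
	if (!mapping)
		return;	/* unmapped, or i_mmap_rwsem was contended */

	/* i_mmap_rwsem is now held for write; safe to walk the rmap. */

	i_mmap_unlock_write(mapping);
}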
14 changes: 3 additions & 11 deletions include/linux/ksm.h
@@ -91,15 +91,9 @@ struct page *ksm_might_need_to_copy(struct page *page,

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */

#else /* !CONFIG_KSM */

@@ -129,12 +123,10 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
+static inline void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early)
{
}
-#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1 change: 0 additions & 1 deletion include/linux/mm.h
@@ -3913,7 +3913,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
-extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
6 changes: 6 additions & 0 deletions include/linux/page-flags.h
@@ -1046,6 +1046,12 @@ static inline bool is_page_hwpoison(struct page *page)
return PageHuge(page) && PageHWPoison(compound_head(page));
}

+static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
+{
+	return folio_test_hwpoison(folio) ||
+		(folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
+}
+
extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);
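The new helper folds two poison encodings into one test: PG_hwpoison on the folio itself (the order-0 or head-page case) and the has_hwpoisoned compound flag, which marks a poisoned subpage inside a large folio. A hedged usage sketch (example_can_migrate() is a hypothetical name):

static bool example_can_migrate(struct folio *folio)
{
	/*
	 * folio_test_hwpoison() catches poison recorded on the head page;
	 * folio_test_has_hwpoisoned() catches a poisoned tail page of a
	 * large folio. Either way the folio should not be migrated.
	 */
	return !folio_contain_hwpoisoned_page(folio);
}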
2 changes: 1 addition & 1 deletion include/linux/rmap.h
@@ -441,7 +441,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,

void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);

-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
* rmap_walk_control: To control rmap traversing for specific needs
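The return type widens because page_mapped_in_vma() now reports where the page is mapped instead of merely whether it is mapped, with -EFAULT as the not-mapped sentinel (matching the vma_address() contract later in this PR). A hedged caller sketch, with example_report_mapping() as a hypothetical name:

static void example_report_mapping(struct page *page,
		struct vm_area_struct *vma)
{
	unsigned long addr = page_mapped_in_vma(page, vma);

	if (addr == -EFAULT)
		return;	/* not mapped (or not present) in this VMA */

	pr_info("page mapped at %#lx\n", addr);
}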
6 changes: 3 additions & 3 deletions mm/hugetlb.c
@@ -2087,13 +2087,13 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
/*
* Find and lock address space (mapping) in write mode.
*
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
* stable. Due to locking order, we can only trylock_write. If we can
* not get the lock, simply return NULL to caller.
*/
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
{
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);

if (!mapping)
return mapping;
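The hunk is truncated before the lock attempt itself. As context, a hedged sketch of how the function completes, reconstructed from the upstream source rather than quoted from this diff: because the fault path acquires i_mmap_rwsem before the folio lock, this path (which already holds the folio lock) may only trylock:

	/*
	 * The fault path takes i_mmap_rwsem and then the folio lock, so
	 * blocking here while holding the folio lock could deadlock.
	 */
	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;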
11 changes: 6 additions & 5 deletions mm/hwpoison-inject.c
@@ -15,7 +15,7 @@ static int hwpoison_inject(void *data, u64 val)
{
	unsigned long pfn = val;
	struct page *p;
-	struct page *hpage;
+	struct folio *folio;
	int err;

if (!capable(CAP_SYS_ADMIN))
@@ -25,24 +25,25 @@
return -ENXIO;

	p = pfn_to_page(pfn);
-	hpage = compound_head(p);
+	folio = page_folio(p);

if (!hwpoison_filter_enable)
goto inject;

-	shake_page(hpage);
+	shake_folio(folio);
/*
* This implies unable to support non-LRU pages except free page.
*/
-	if (!PageLRU(hpage) && !PageHuge(p) && !is_free_buddy_page(p))
+	if (!folio_test_lru(folio) && !folio_test_hugetlb(folio) &&
+	    !is_free_buddy_page(p))
		return 0;

/*
* do a racy check to make sure PG_hwpoison will only be set for
* the targeted owner (or on a free page).
* memory_failure() will redo the check reliably inside page lock.
*/
-	err = hwpoison_filter(hpage);
+	err = hwpoison_filter(&folio->page);
if (err)
return 0;

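The function tail is also truncated here. For orientation, a hedged reconstruction of the upstream ending (not part of this hunk): once the filters pass, the injector hands the pfn to memory_failure(), flagged as software-simulated:

inject:
	pr_info("Injecting memory failure at pfn %#lx\n", pfn);
	err = memory_failure(pfn, MF_SW_SIMULATED);
	return (err == -EOPNOTSUPP) ? 0 : err;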
37 changes: 19 additions & 18 deletions mm/internal.h
@@ -754,13 +754,17 @@ void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

-/*
- * Return the start of user virtual address at the specific offset within
- * a vma.
+/**
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
+ * @pgoff: The page offset within its object.
+ * @nr_pages: The number of pages to consider.
+ *
+ * If any page in this range is mapped by this VMA, return the first address
+ * where any of these pages appear. Otherwise, return -EFAULT.
 */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-		struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+		pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;

@@ -779,18 +783,6 @@ vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
return address;
}
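The body of the renamed vma_address() is collapsed above. Since the -EFAULT contract in the new kernel-doc depends on it, here is a hedged sketch of the computation, reconstructed from the kernel rather than taken from this diff:

	/* First address at which the pgoff range and the VMA overlap. */
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Beyond (or wrapped past) the end of the VMA? No overlap. */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* The range straddles vm_start; the first mapped page is there. */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}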

-/*
- * Return the start of user virtual address of a page within a vma.
- * Returns -EFAULT if all of the page is outside the range of vma.
- * If page is a compound head, the entire compound page is considered.
- */
-static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
-{
-	VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
-	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
-}

/*
* Then at what user virtual address will none of the range be found in vma?
* Assumes that vma_address() already returned a good starting address.
@@ -912,6 +904,9 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
/*
* mm/memory-failure.c
*/
+#ifdef CONFIG_MEMORY_FAILURE
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
+void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
@@ -921,6 +916,12 @@ extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

+#else
+static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+}
+#endif

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
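The empty !CONFIG_MEMORY_FAILURE stub lets call sites invoke unmap_poisoned_folio() unconditionally, with the call compiling away when memory-failure support is disabled. A hedged sketch of the kind of caller this enables (example_offline_folio() is hypothetical; the TTU flag choice is illustrative):

static void example_offline_folio(struct folio *folio)
{
	if (folio_contain_hwpoisoned_page(folio)) {
		/* Becomes a no-op when CONFIG_MEMORY_FAILURE=n. */
		if (folio_mapped(folio))
			unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
		return;
	}
	/* ... otherwise isolate and migrate the folio as usual ... */
}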
5 changes: 2 additions & 3 deletions mm/ksm.c
@@ -2906,12 +2906,11 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
/*
* Collect processes when the error hit an ksm page.
*/
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early)
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early)
{
	struct ksm_stable_node *stable_node;
	struct ksm_rmap_item *rmap_item;
-	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma;
	struct task_struct *tsk;

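Why both a folio and a page: the rmap walk operates on the folio, while the precise page is still needed to compute each process's faulting address. A hedged sketch of the dispatch on the memory-failure side, reconstructed rather than quoted from this PR:

static void collect_procs(struct folio *folio, struct page *page,
		struct list_head *tokill, int force_early)
{
	if (!folio->mapping)
		return;
	if (unlikely(folio_test_ksm(folio)))
		collect_procs_ksm(folio, page, tokill, force_early);
	else if (folio_test_anon(folio))
		collect_procs_anon(folio, page, tokill, force_early);
	else
		collect_procs_file(folio, page, tokill, force_early);
}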