
Commit 344ef45

yangge1116-cpu authored and akpm00 committed
mm/hugetlb: remove unnecessary holding of hugetlb_lock
In isolate_or_dissolve_huge_folio(), the hugetlb_lock is acquired only to obtain the correct hstate, which is then passed to alloc_and_dissolve_hugetlb_folio(). Since alloc_and_dissolve_hugetlb_folio() itself also acquires the hugetlb_lock, we can have it look up the hstate on its own, so that isolate_or_dissolve_huge_folio() no longer needs to take the lock at all. In addition, we keep the folio_test_hugetlb() check within isolate_or_dissolve_huge_folio(); by doing so, we avoid disrupting the normal path by vainly holding the hugetlb_lock. replace_free_hugepage_folios() has the same issue, and we address it as well.

This addresses a possible performance problem introduced by the hotfix 113ed54 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios").

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 113ed54 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
Signed-off-by: Ge Yang <[email protected]>
Suggested-by: Oscar Salvador <[email protected]>
Reviewed-by: Muchun Song <[email protected]>
Cc: Baolin Wang <[email protected]>
Cc: Barry Song <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
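The pattern behind this change is a racy, lockless check on the fast path, with the callee re-validating the state under the lock before acting. Below is a minimal user-space sketch of that pattern; the names (struct slot, dissolve_slot, isolate_or_dissolve_slot) are hypothetical stand-ins for the folio and hstate machinery, not kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct slot {
        bool is_huge;   /* stand-in for the folio_test_hugetlb() state */
        int order;      /* stand-in for the hstate derived under the lock */
};

/* Callee: validates the state and derives 'order' under the lock itself,
 * so callers no longer need to take the lock just to pass 'order' in. */
static int dissolve_slot(struct slot *s)
{
        int order;

        pthread_mutex_lock(&lock);
        if (!s->is_huge) {
                /* Dissolved under our feet; treat it as already done. */
                pthread_mutex_unlock(&lock);
                return 0;
        }
        order = s->order;
        s->is_huge = false;
        pthread_mutex_unlock(&lock);

        printf("dissolved slot of order %d\n", order);
        return 0;
}

/* Caller: the lockless check may be stale, but both stale outcomes are
 * safe. A stale "huge" just enters dissolve_slot(), which rechecks under
 * the lock; a stale "not huge" is treated as if the dissolve had already
 * happened. The normal path never touches the lock. */
static int isolate_or_dissolve_slot(struct slot *s)
{
        if (!s->is_huge)
                return 0;
        return dissolve_slot(s);
}

int main(void)
{
        struct slot s = { .is_huge = true, .order = 9 };

        return isolate_or_dissolve_slot(&s);
}

(A strict C11 program would read is_huge through an atomic; the point here is the control flow.) After this patch the kernel has the same shape in two places: isolate_or_dissolve_huge_folio() and replace_free_hugepage_folios() both perform the cheap folio_test_hugetlb() check locklessly and leave the careful re-check to alloc_and_dissolve_hugetlb_folio().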
1 parent 3746351 commit 344ef45

File tree

1 file changed: +17 -37 lines changed


mm/hugetlb.c

Lines changed: 17 additions & 37 deletions
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
- * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-                struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+                struct list_head *list)
 {
-        gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+        gfp_t gfp_mask;
+        struct hstate *h;
         int nid = folio_nid(old_folio);
         struct folio *new_folio = NULL;
         int ret = 0;
 
 retry:
+        /*
+         * The old_folio might have been dissolved from under our feet, so make sure
+         * to carefully check the state under the lock.
+         */
         spin_lock_irq(&hugetlb_lock);
         if (!folio_test_hugetlb(old_folio)) {
                 /*
@@ -2829,8 +2833,10 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
                 cond_resched();
                 goto retry;
         } else {
+                h = folio_hstate(old_folio);
                 if (!new_folio) {
                         spin_unlock_irq(&hugetlb_lock);
+                        gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
                         new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
                                         NULL, NULL);
                         if (!new_folio)
@@ -2874,35 +2880,24 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-        struct hstate *h;
         int ret = -EBUSY;
 
-        /*
-         * The page might have been dissolved from under our feet, so make sure
-         * to carefully check the state under the lock.
-         * Return success when racing as if we dissolved the page ourselves.
-         */
-        spin_lock_irq(&hugetlb_lock);
-        if (folio_test_hugetlb(folio)) {
-                h = folio_hstate(folio);
-        } else {
-                spin_unlock_irq(&hugetlb_lock);
+        /* Not to disrupt normal path by vainly holding hugetlb_lock */
+        if (!folio_test_hugetlb(folio))
                 return 0;
-        }
-        spin_unlock_irq(&hugetlb_lock);
 
         /*
          * Fence off gigantic pages as there is a cyclic dependency between
          * alloc_contig_range and them. Return -ENOMEM as this has the effect
          * of bailing out right away without further retrying.
          */
-        if (hstate_is_gigantic(h))
+        if (folio_order(folio) > MAX_PAGE_ORDER)
                 return -ENOMEM;
 
         if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
                 ret = 0;
         else if (!folio_ref_count(folio))
-                ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+                ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
         return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-        struct hstate *h;
         struct folio *folio;
         int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
         while (start_pfn < end_pfn) {
                 folio = pfn_folio(start_pfn);
 
-                /*
-                 * The folio might have been dissolved from under our feet, so make sure
-                 * to carefully check the state under the lock.
-                 */
-                spin_lock_irq(&hugetlb_lock);
-                if (folio_test_hugetlb(folio)) {
-                        h = folio_hstate(folio);
-                } else {
-                        spin_unlock_irq(&hugetlb_lock);
-                        start_pfn++;
-                        continue;
-                }
-                spin_unlock_irq(&hugetlb_lock);
-
-                if (!folio_ref_count(folio)) {
-                        ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-                                        &isolate_list);
+                /* Not to disrupt normal path by vainly holding hugetlb_lock */
+                if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+                        ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
                         if (ret)
                                 break;