@@ -1048,6 +1048,7 @@ __always_inline bool free_pages_prepare(struct page *page,
 	bool skip_kasan_poison = should_skip_kasan_poison(page);
 	bool init = want_init_on_free();
 	bool compound = PageCompound(page);
+	struct folio *folio = page_folio(page);

 	VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1057,6 +1058,20 @@ __always_inline bool free_pages_prepare(struct page *page,
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);

+	/*
+	 * In rare cases, when truncation or holepunching raced with
+	 * munlock after VM_LOCKED was cleared, Mlocked may still be
+	 * found set here. This does not indicate a problem, unless
+	 * "unevictable_pgs_cleared" appears worryingly large.
+	 */
+	if (unlikely(folio_test_mlocked(folio))) {
+		long nr_pages = folio_nr_pages(folio);
+
+		__folio_clear_mlocked(folio);
+		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
+	}
+
 	if (unlikely(PageHWPoison(page)) && !order) {
 		/* Do not let hwpoison pages hit pcplists/buddy */
 		reset_page_owner(page, order);
0 commit comments