@@ -2401,10 +2401,16 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
 	}
 }
 
-static void __split_huge_page_tail(struct page *head, int tail,
+static void __split_huge_page_tail(struct folio *folio, int tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
+	struct page *head = &folio->page;
 	struct page *page_tail = head + tail;
+	/*
+	 * Careful: new_folio is not a "real" folio before we cleared PageTail.
+	 * Don't pass it around before clear_compound_head().
+	 */
+	struct folio *new_folio = (struct folio *)page_tail;
 
 	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
 
@@ -2453,8 +2459,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
 		VM_WARN_ON_ONCE_PAGE(true, page_tail);
 		page_tail->private = 0;
 	}
-	if (PageSwapCache(head))
-		set_page_private(page_tail, (unsigned long)head->private + tail);
+	if (folio_test_swapcache(folio))
+		new_folio->swap.val = folio->swap.val + tail;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
@@ -2500,11 +2506,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* complete memcg works before add pages to LRU */
 	split_page_memcg(head, nr);
 
-	if (PageAnon(head) && PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		offset = swp_offset(entry);
-		swap_cache = swap_address_space(entry);
+	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
+		offset = swp_offset(folio->swap);
+		swap_cache = swap_address_space(folio->swap);
 		xa_lock(&swap_cache->i_pages);
 	}
 
@@ -2514,7 +2518,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	ClearPageHasHWPoisoned(head);
 
 	for (i = nr - 1; i >= 1; i--) {
-		__split_huge_page_tail(head, i, lruvec, list);
+		__split_huge_page_tail(folio, i, lruvec, list);
 		/* Some pages can be beyond EOF: drop them from page cache */
 		if (head[i].index >= end) {
 			struct folio *tail = page_folio(head + i);
@@ -2559,11 +2563,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	remap_page(folio, nr);
 
-	if (PageSwapCache(head)) {
-		swp_entry_t entry = { .val = page_private(head) };
-
-		split_swap_cluster(entry);
-	}
+	if (folio_test_swapcache(folio))
+		split_swap_cluster(folio->swap);
 
 	for (i = 0; i < nr; i++) {
 		struct page *subpage = head + i;