@@ -1636,6 +1636,7 @@ bool zswap_load(struct folio *folio)
 	swp_entry_t swp = folio->swap;
 	pgoff_t offset = swp_offset(swp);
 	struct page *page = &folio->page;
+	bool swapcache = folio_test_swapcache(folio);
 	struct zswap_tree *tree = swap_zswap_tree(swp);
 	struct zswap_entry *entry;
 	u8 *dst;
@@ -1648,7 +1649,20 @@ bool zswap_load(struct folio *folio)
 		spin_unlock(&tree->lock);
 		return false;
 	}
-	zswap_rb_erase(&tree->rbroot, entry);
+	/*
+	 * When reading into the swapcache, invalidate our entry. The
+	 * swapcache can be the authoritative owner of the page and
+	 * its mappings, and the pressure that results from having two
+	 * in-memory copies outweighs any benefits of caching the
+	 * compression work.
+	 *
+	 * (Most swapins go through the swapcache. The notable
+	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
+	 * files, which reads into a private page and may free it if
+	 * the fault fails. We remain the primary owner of the entry.)
+	 */
+	if (swapcache)
+		zswap_rb_erase(&tree->rbroot, entry);
 	spin_unlock(&tree->lock);
 
 	if (entry->length)
@@ -1663,9 +1677,10 @@ bool zswap_load(struct folio *folio)
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
-	zswap_entry_free(entry);
-
-	folio_mark_dirty(folio);
+	if (swapcache) {
+		zswap_entry_free(entry);
+		folio_mark_dirty(folio);
+	}
 
 	return true;
 }
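
For readers tracing the control flow, the sketch below is a minimal userspace model of the ownership rule this patch introduces, not kernel code: every name in it (demo_entry, demo_folio, demo_load) is a hypothetical stand-in, and only the branching on the swapcache flag mirrors the change. A load into the swapcache is treated as exclusive (the entry is erased from the tree, freed, and the folio marked dirty), while a singleton SWP_SYNCHRONOUS_IO fault leaves zswap as the primary owner of the compressed copy.

/*
 * Userspace model of the swapcache-exclusive load decision.
 * Hypothetical types and helpers; only the 'swapcache' branch
 * reflects the kernel patch above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_entry {
	size_t length;		/* compressed size */
	bool in_tree;		/* still indexed in the zswap tree? */
};

struct demo_folio {
	bool swapcache;		/* folio_test_swapcache() analogue */
	bool dirty;		/* folio_mark_dirty() analogue */
};

/* Decompression is stubbed out; the point is the bookkeeping. */
static bool demo_load(struct demo_folio *folio, struct demo_entry **slot)
{
	struct demo_entry *entry = *slot;

	if (!entry)
		return false;

	if (folio->swapcache)
		entry->in_tree = false;	/* erase: swapcache owns the data now */

	/* ... decompress the entry into the folio here ... */

	if (folio->swapcache) {
		free(entry);		/* zswap_entry_free() analogue */
		*slot = NULL;
		folio->dirty = true;	/* must be written out on next swapout */
	}
	return true;
}

int main(void)
{
	struct demo_entry *e = calloc(1, sizeof(*e));
	struct demo_folio swapcache_folio = { .swapcache = true };

	e->length = 4096;
	e->in_tree = true;
	demo_load(&swapcache_folio, &e);
	printf("swapcache load: entry %s, folio %s\n",
	       e ? "kept" : "freed",
	       swapcache_folio.dirty ? "dirty" : "clean");

	/* Singleton SWP_SYNCHRONOUS_IO fault: zswap keeps the entry. */
	e = calloc(1, sizeof(*e));
	e->in_tree = true;
	struct demo_folio sync_folio = { .swapcache = false };

	demo_load(&sync_folio, &e);
	printf("sync-io load:   entry %s, folio %s\n",
	       e ? "kept" : "freed",
	       sync_folio.dirty ? "dirty" : "clean");

	free(e);
	return 0;
}

The folio_mark_dirty() call pairs with freeing the entry: once the compressed copy is dropped, the page has to be treated as dirty so a later swapout writes it out again instead of relying on a zswap entry that no longer exists.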