@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	obj_to_location(obj, &page, &obj_idx);
 	zspage = get_zspage(page);
 
-#ifdef CONFIG_ZPOOL
-	/*
-	 * Move the zspage to front of pool's LRU.
-	 *
-	 * Note that this is swap-specific, so by definition there are no ongoing
-	 * accesses to the memory while the page is swapped out that would make
-	 * it "hot". A new entry is hot, then ages to the tail until it gets either
-	 * written back or swaps back in.
-	 *
-	 * Furthermore, map is also called during writeback. We must not put an
-	 * isolated page on the LRU mid-reclaim.
-	 *
-	 * As a result, only update the LRU when the page is mapped for write
-	 * when it's first instantiated.
-	 *
-	 * This is a deviation from the other backends, which perform this update
-	 * in the allocation function (zbud_alloc, z3fold_alloc).
-	 */
-	if (mm == ZS_MM_WO) {
-		if (!list_empty(&zspage->lru))
-			list_del(&zspage->lru);
-		list_add(&zspage->lru, &pool->lru);
-	}
-#endif
-
 	/*
 	 * migration cannot move any zpages in this zspage. Here, pool->lock
 	 * is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 		fix_fullness_group(class, zspage);
 		record_obj(handle, obj);
 		class_stat_inc(class, ZS_OBJS_INUSE, 1);
-		spin_unlock(&pool->lock);
 
-		return handle;
+		goto out;
 	}
 
 	spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
+out:
+#ifdef CONFIG_ZPOOL
+	/* Add/move zspage to beginning of LRU */
+	if (!list_empty(&zspage->lru))
+		list_del(&zspage->lru);
+	list_add(&zspage->lru, &pool->lru);
+#endif
+
 	spin_unlock(&pool->lock);
 
 	return handle;
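
Both the removed hunk and the added one rely on the same move-to-front idiom on the kernel's intrusive circular list: unlink the zspage if it is already queued, then splice it in at the head of pool->lru. For illustration only, here is a minimal, self-contained userspace sketch of that idiom. The helpers mirror the kernel API names (list_empty, list_del, list_add) but are simplified reimplementations (this list_del re-initializes the entry instead of poisoning it, so list_empty() keeps working afterwards), and struct zspage_demo is a hypothetical stand-in for the real struct zspage.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

/* Unlink an entry and re-initialize it (the kernel's list_del poisons
 * the pointers instead; re-initializing keeps list_empty() usable). */
static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

/* Insert e right after head, i.e. at the front of the list. */
static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

/* Hypothetical stand-in for struct zspage: an id plus the lru link. */
struct zspage_demo { int id; struct list_head lru; };

/* The idiom from the hunks above: unlink if already queued, then add at
 * the head so the most recently allocated zspage sits at the front. */
static void lru_touch(struct list_head *lru, struct zspage_demo *z)
{
	if (!list_empty(&z->lru))
		list_del(&z->lru);
	list_add(&z->lru, lru);
}

int main(void)
{
	struct list_head lru;
	struct zspage_demo a = { .id = 1 }, b = { .id = 2 };
	struct zspage_demo *front;

	INIT_LIST_HEAD(&lru);
	INIT_LIST_HEAD(&a.lru);
	INIT_LIST_HEAD(&b.lru);

	lru_touch(&lru, &a);	/* LRU: a */
	lru_touch(&lru, &b);	/* LRU: b, a */
	lru_touch(&lru, &a);	/* a moves back to the front: a, b */

	front = (struct zspage_demo *)((char *)lru.next -
				       offsetof(struct zspage_demo, lru));
	printf("front of LRU: zspage %d\n", front->id);	/* prints 1 */
	return 0;
}

Note that in the patched zs_malloc() this update runs before spin_unlock(&pool->lock), since both allocation paths now reach the out: label with the lock still held, so the list manipulation stays serialized by pool->lock.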