
Commit 67f1c9c

minchank authored and torvalds committed
zsmalloc: introduce some helper functions
Patch series "zsmalloc: remove bit_spin_lock", v2.

zsmalloc uses bit_spin_lock to minimize space overhead, since it is a
zspage-granularity lock.  However, it makes zsmalloc non-functional
under PREEMPT_RT and adds too much complication.

This patchset replaces the bit_spin_lock with a per-pool rwlock.  It
also removes the unnecessary zspage isolation logic from size_class,
the other piece that added too much complication to zsmalloc.  The
last patch changes get_cpu_var to local_lock to make it work under
PREEMPT_RT.

This patch (of 9):

get_zspage_mapping returns fullness as well as class_idx, but the
fullness is usually not used since it can be stale in some contexts.
That is misleading and generates unnecessary instructions, so this
patch introduces zspage_class.

obj_to_location also produces both the page and the index, but the
index is not always needed either, so this patch introduces
obj_to_page.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Minchan Kim <[email protected]>
Acked-by: Sebastian Andrzej Siewior <[email protected]>
Tested-by: Sebastian Andrzej Siewior <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 1622ed7 · commit 67f1c9c
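The gist of both helpers is easy to see in miniature. Below is a
self-contained userspace sketch; the struct layouts and the 256-entry
class array are illustrative stand-ins, not the kernel's real
definitions. It contrasts the old get_zspage_mapping() out-parameter
pattern with the new zspage_class():

#include <stdio.h>

/* Illustrative stand-ins for the kernel types; layout is mocked. */
struct size_class { int size; };

struct zs_pool {
        struct size_class *size_class[256];
};

struct zspage {
        unsigned int class:8;    /* class index, constant for zspage's life */
        unsigned int fullness:2; /* may go stale without the class lock */
};

/* Old style: callers receive fullness even when it is unwanted. */
static void get_zspage_mapping(struct zspage *zspage,
                               unsigned int *class_idx,
                               unsigned int *fullness)
{
        *fullness = zspage->fullness;
        *class_idx = zspage->class;
}

/* New helper: go straight from zspage to its size_class. */
static struct size_class *zspage_class(struct zs_pool *pool,
                                       struct zspage *zspage)
{
        return pool->size_class[zspage->class];
}

int main(void)
{
        struct size_class c = { .size = 128 };
        struct zs_pool pool = { { 0 } };
        struct zspage zspage = { .class = 3, .fullness = 1 };

        pool.size_class[3] = &c;

        /* Before: two statements and a dead 'fullness' read. */
        unsigned int class_idx, fullness;
        get_zspage_mapping(&zspage, &class_idx, &fullness);
        struct size_class *class = pool.size_class[class_idx];

        /* After: one call, no stale fullness in sight. */
        struct size_class *class2 = zspage_class(&pool, &zspage);

        printf("same class: %d, size=%d\n", class == class2, class2->size);
        return 0;
}

Because zspage->class is constant for the life of a zspage (the comment
removed from zs_page_isolate below makes the same point), zspage_class()
can read it without the class lock and skip the fullness value entirely.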

File tree

1 file changed (+23, -31 lines)

mm/zsmalloc.c

Lines changed: 23 additions & 31 deletions
@@ -517,6 +517,12 @@ static void get_zspage_mapping(struct zspage *zspage,
         *class_idx = zspage->class;
 }
 
+static struct size_class *zspage_class(struct zs_pool *pool,
+                                       struct zspage *zspage)
+{
+        return pool->size_class[zspage->class];
+}
+
 static void set_zspage_mapping(struct zspage *zspage,
                                unsigned int class_idx,
                                enum fullness_group fullness)
@@ -844,6 +850,12 @@ static void obj_to_location(unsigned long obj, struct page **page,
         *obj_idx = (obj & OBJ_INDEX_MASK);
 }
 
+static void obj_to_page(unsigned long obj, struct page **page)
+{
+        obj >>= OBJ_TAG_BITS;
+        *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+}
+
 /**
  * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
  * @page: page object resides in zspage
@@ -1246,8 +1258,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         unsigned long obj, off;
         unsigned int obj_idx;
 
-        unsigned int class_idx;
-        enum fullness_group fg;
         struct size_class *class;
         struct mapping_area *area;
         struct page *pages[2];
@@ -1270,8 +1280,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         /* migration cannot move any subpage in this zspage */
         migrate_read_lock(zspage);
 
-        get_zspage_mapping(zspage, &class_idx, &fg);
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         off = (class->size * obj_idx) & ~PAGE_MASK;
 
         area = &get_cpu_var(zs_map_area);
@@ -1304,16 +1313,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
         unsigned long obj, off;
         unsigned int obj_idx;
 
-        unsigned int class_idx;
-        enum fullness_group fg;
         struct size_class *class;
         struct mapping_area *area;
 
         obj = handle_to_obj(handle);
         obj_to_location(obj, &page, &obj_idx);
         zspage = get_zspage(page);
-        get_zspage_mapping(zspage, &class_idx, &fg);
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         off = (class->size * obj_idx) & ~PAGE_MASK;
 
         area = this_cpu_ptr(&zs_map_area);
@@ -1491,8 +1497,6 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
         struct zspage *zspage;
         struct page *f_page;
         unsigned long obj;
-        unsigned int f_objidx;
-        int class_idx;
         struct size_class *class;
         enum fullness_group fullness;
         bool isolated;
@@ -1502,13 +1506,11 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 
         pin_tag(handle);
         obj = handle_to_obj(handle);
-        obj_to_location(obj, &f_page, &f_objidx);
+        obj_to_page(obj, &f_page);
         zspage = get_zspage(f_page);
 
         migrate_read_lock(zspage);
-
-        get_zspage_mapping(zspage, &class_idx, &fullness);
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
 
         spin_lock(&class->lock);
         obj_free(class, obj);
@@ -1866,8 +1868,6 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fullness;
         struct zspage *zspage;
         struct address_space *mapping;
 
@@ -1880,15 +1880,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 
         zspage = get_zspage(page);
 
-        /*
-         * Without class lock, fullness could be stale while class_idx is okay
-         * because class_idx is constant unless page is freed so we should get
-         * fullness again under class lock.
-         */
-        get_zspage_mapping(zspage, &class_idx, &fullness);
         mapping = page_mapping(page);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+
+        class = zspage_class(pool, zspage);
 
         spin_lock(&class->lock);
         if (get_zspage_inuse(zspage) == 0) {
@@ -1907,6 +1902,9 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
          * size_class to prevent further object allocation from the zspage.
          */
         if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+                enum fullness_group fullness;
+                unsigned int class_idx;
+
                 get_zspage_mapping(zspage, &class_idx, &fullness);
                 atomic_long_inc(&pool->isolated_pages);
                 remove_zspage(class, zspage, fullness);
@@ -1923,8 +1921,6 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fullness;
         struct zspage *zspage;
         struct page *dummy;
         void *s_addr, *d_addr, *addr;
@@ -1949,9 +1945,8 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 
         /* Concurrent compactor cannot migrate any subpage in zspage */
         migrate_write_lock(zspage);
-        get_zspage_mapping(zspage, &class_idx, &fullness);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
         offset = get_first_obj_offset(page);
 
         spin_lock(&class->lock);
@@ -2049,19 +2044,16 @@ static void zs_page_putback(struct page *page)
 {
         struct zs_pool *pool;
         struct size_class *class;
-        int class_idx;
-        enum fullness_group fg;
         struct address_space *mapping;
         struct zspage *zspage;
 
         VM_BUG_ON_PAGE(!PageMovable(page), page);
         VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
         zspage = get_zspage(page);
-        get_zspage_mapping(zspage, &class_idx, &fg);
         mapping = page_mapping(page);
         pool = mapping->private_data;
-        class = pool->size_class[class_idx];
+        class = zspage_class(pool, zspage);
 
         spin_lock(&class->lock);
         dec_zspage_isolation(zspage);
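As background for obj_to_page(): an object value packs the page frame
number above the object index and tag bits, so recovering only the page
takes two shifts and no mask. The sketch below mimics that packing in
userspace; the bit widths are illustrative, since the kernel derives
OBJ_INDEX_BITS and OBJ_TAG_BITS from the page size and zspage limits.

#include <stdio.h>

/* Illustrative widths, not the kernel's actual values. */
#define OBJ_TAG_BITS    1
#define OBJ_INDEX_BITS  12
#define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

/* Pack (pfn, obj_idx) the way location_to_obj does. */
static unsigned long location_to_obj(unsigned long pfn, unsigned int obj_idx)
{
        unsigned long obj = (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
        return obj << OBJ_TAG_BITS;
}

/* obj_to_page only needs the high bits, so it can skip the index mask. */
static unsigned long obj_to_pfn(unsigned long obj)
{
        obj >>= OBJ_TAG_BITS;
        return obj >> OBJ_INDEX_BITS;
}

int main(void)
{
        unsigned long obj = location_to_obj(0xabcd, 42);
        printf("pfn=%#lx idx=%lu\n", obj_to_pfn(obj),
               (obj >> OBJ_TAG_BITS) & OBJ_INDEX_MASK);
        return 0;
}

zs_free() only needs the page to find the zspage and its class, so
obj_to_page() avoids decoding an index that would be thrown away.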
