Commit 0a5f079

minchank authored and torvalds committed
zsmalloc: decouple class actions from zspage works
This patch moves the class stat update out of obj_malloc() since it is
not related to zspage operation. This is a preparation to introduce a
new lock scheme in the next patch.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Minchan Kim <[email protected]>
Acked-by: Sebastian Andrzej Siewior <[email protected]>
Tested-by: Sebastian Andrzej Siewior <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3828a76 commit 0a5f079
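
For orientation, the change follows this before/after pattern (a condensed sketch with the function bodies elided; the full code is in the diff below):

/* Before: obj_malloc() mixed class bookkeeping into zspage work
 * by bumping the per-class stat itself. */
static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	/* ... carve the object out of the zspage freelist ... */
	mod_zspage_inuse(zspage, 1);
	class_stat_inc(class, OBJ_USED, 1);	/* class action inside zspage work */
	/* ... */
}

/* After: obj_malloc() takes the pool, looks up the class only for
 * the zspage work itself, and leaves the stat to its callers. */
static unsigned long obj_malloc(struct zs_pool *pool,
				struct zspage *zspage, unsigned long handle)
{
	struct size_class *class = pool->size_class[zspage->class];

	/* ... carve the object out of the zspage freelist ... */
	mod_zspage_inuse(zspage, 1);
	/* ... */
}

/* Callers already hold class->lock, so they update the stat, e.g.: */
	spin_lock(&class->lock);
	obj = obj_malloc(pool, zspage, handle);
	record_obj(handle, obj);
	class_stat_inc(class, OBJ_USED, 1);	/* moved out of obj_malloc() */
	spin_unlock(&class->lock);

obj_free() gets the same treatment: it now takes only class->size, so both helpers operate purely on the zspage, and every class_stat_inc()/class_stat_dec() sits directly under class->lock in the callers; that separation is what the next patch's lock scheme builds on.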


mm/zsmalloc.c

Lines changed: 13 additions & 10 deletions
@@ -1360,17 +1360,19 @@ size_t zs_huge_class_size(struct zs_pool *pool)
 }
 EXPORT_SYMBOL_GPL(zs_huge_class_size);
 
-static unsigned long obj_malloc(struct size_class *class,
+static unsigned long obj_malloc(struct zs_pool *pool,
 				struct zspage *zspage, unsigned long handle)
 {
 	int i, nr_page, offset;
 	unsigned long obj;
 	struct link_free *link;
+	struct size_class *class;
 
 	struct page *m_page;
 	unsigned long m_offset;
 	void *vaddr;
 
+	class = pool->size_class[zspage->class];
 	handle |= OBJ_ALLOCATED_TAG;
 	obj = get_freeobj(zspage);
 
@@ -1394,7 +1396,6 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
-	class_stat_inc(class, OBJ_USED, 1);
 
 	obj = location_to_obj(m_page, obj);
 
@@ -1433,10 +1434,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	spin_lock(&class->lock);
 	zspage = find_get_zspage(class);
 	if (likely(zspage)) {
-		obj = obj_malloc(class, zspage, handle);
+		obj = obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
 		record_obj(handle, obj);
+		class_stat_inc(class, OBJ_USED, 1);
 		spin_unlock(&class->lock);
 
 		return handle;
@@ -1451,14 +1453,15 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	}
 
 	spin_lock(&class->lock);
-	obj = obj_malloc(class, zspage, handle);
+	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
 	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage,
 				&pool->pages_allocated);
 	class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
+	class_stat_inc(class, OBJ_USED, 1);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -1468,7 +1471,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-static void obj_free(struct size_class *class, unsigned long obj)
+static void obj_free(int class_size, unsigned long obj)
 {
 	struct link_free *link;
 	struct zspage *zspage;
@@ -1478,7 +1481,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 	void *vaddr;
 
 	obj_to_location(obj, &f_page, &f_objidx);
-	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
 	zspage = get_zspage(f_page);
 
 	vaddr = kmap_atomic(f_page);
@@ -1489,7 +1492,6 @@ static void obj_free(struct size_class *class, unsigned long obj)
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
-	class_stat_dec(class, OBJ_USED, 1);
 }
 
 void zs_free(struct zs_pool *pool, unsigned long handle)
@@ -1513,7 +1515,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	class = zspage_class(pool, zspage);
 
 	spin_lock(&class->lock);
-	obj_free(class, obj);
+	obj_free(class->size, obj);
+	class_stat_dec(class, OBJ_USED, 1);
 	fullness = fix_fullness_group(class, zspage);
 	if (fullness != ZS_EMPTY) {
 		migrate_read_unlock(zspage);
@@ -1671,7 +1674,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		}
 
 		used_obj = handle_to_obj(handle);
-		free_obj = obj_malloc(class, get_zspage(d_page), handle);
+		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
 		/*
@@ -1683,7 +1686,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		free_obj |= BIT(HANDLE_PIN_BIT);
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
-		obj_free(class, used_obj);
+		obj_free(class->size, used_obj);
 	}
 
 	/* Remember last position in this iteration */
