@@ -158,7 +158,7 @@ enum fullness_group {
 	NR_ZS_FULLNESS,
 };
 
-enum zs_stat_type {
+enum class_stat_type {
 	CLASS_EMPTY,
 	CLASS_ALMOST_EMPTY,
 	CLASS_ALMOST_FULL,
@@ -549,21 +549,21 @@ static int get_size_class_index(int size)
 	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
 }
 
-/* type can be of enum type zs_stat_type or fullness_group */
-static inline void zs_stat_inc(struct size_class *class,
+/* type can be of enum type class_stat_type or fullness_group */
+static inline void class_stat_inc(struct size_class *class,
 				int type, unsigned long cnt)
 {
 	class->stats.objs[type] += cnt;
 }
 
-/* type can be of enum type zs_stat_type or fullness_group */
-static inline void zs_stat_dec(struct size_class *class,
+/* type can be of enum type class_stat_type or fullness_group */
+static inline void class_stat_dec(struct size_class *class,
 				int type, unsigned long cnt)
 {
 	class->stats.objs[type] -= cnt;
 }
 
-/* type can be of enum type zs_stat_type or fullness_group */
+/* type can be of enum type class_stat_type or fullness_group */
 static inline unsigned long zs_stat_get(struct size_class *class,
 				int type)
 {
@@ -725,7 +725,7 @@ static void insert_zspage(struct size_class *class,
 {
 	struct zspage *head;
 
-	zs_stat_inc(class, fullness, 1);
+	class_stat_inc(class, fullness, 1);
 	head = list_first_entry_or_null(&class->fullness_list[fullness],
 					struct zspage, list);
 	/*
@@ -750,7 +750,7 @@ static void remove_zspage(struct size_class *class,
 	VM_BUG_ON(is_zspage_isolated(zspage));
 
 	list_del_init(&zspage->list);
-	zs_stat_dec(class, fullness, 1);
+	class_stat_dec(class, fullness, 1);
 }
 
 /*
@@ -964,7 +964,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
 	cache_free_zspage(pool, zspage);
 
-	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
+	class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage,
 			&pool->pages_allocated);
 }
@@ -1394,7 +1394,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
-	zs_stat_inc(class, OBJ_USED, 1);
+	class_stat_inc(class, OBJ_USED, 1);
 
 	obj = location_to_obj(m_page, obj);
 
@@ -1458,7 +1458,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage,
 			&pool->pages_allocated);
-	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
+	class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -1489,7 +1489,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
-	zs_stat_dec(class, OBJ_USED, 1);
+	class_stat_dec(class, OBJ_USED, 1);
 }
 
 void zs_free(struct zs_pool *pool, unsigned long handle)
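For context: the helpers being renamed here are thin wrappers over a per-class counter array, indexed by either a fullness group or a class stat type. Below is a minimal, self-contained userspace sketch of that pattern after the rename. The stripped-down struct size_class and the main() driver are illustrative stand-ins, not the kernel's definitions (the real size_class also carries fullness lists, locking, and per-class geometry, and these counters are updated under the class lock); the enum entries past CLASS_ALMOST_FULL are an assumed completion based on the stat types the hunks above use.

/*
 * Minimal sketch of the renamed per-class stat helpers (userspace, no
 * locking). Struct layout is simplified from mm/zsmalloc.c; the enum
 * tail (CLASS_FULL, NR_ZS_STAT_TYPE) is assumed, not taken from the diff.
 */
#include <stdio.h>

enum class_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,		/* objects backed by zspages in this class */
	OBJ_USED,		/* objects actually handed out to callers */
	NR_ZS_STAT_TYPE,
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

struct size_class {		/* stand-in; the real struct has much more */
	struct zs_size_stat stats;
};

/* type can be of enum type class_stat_type or fullness_group */
static inline void class_stat_inc(struct size_class *class,
				  int type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void class_stat_dec(struct size_class *class,
				  int type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class, int type)
{
	return class->stats.objs[type];
}

int main(void)
{
	struct size_class class = { { { 0 } } };

	/* One zspage's worth of objects becomes available (128 is a
	 * made-up objs_per_zspage), then two are allocated and one freed,
	 * mirroring the zs_malloc()/obj_free() call sites above. */
	class_stat_inc(&class, OBJ_ALLOCATED, 128);
	class_stat_inc(&class, OBJ_USED, 1);
	class_stat_inc(&class, OBJ_USED, 1);
	class_stat_dec(&class, OBJ_USED, 1);

	printf("allocated=%lu used=%lu\n",
	       zs_stat_get(&class, OBJ_ALLOCATED),
	       zs_stat_get(&class, OBJ_USED));	/* allocated=128 used=1 */
	return 0;
}

Note that zs_stat_get() keeps its name: only the modifiers become class_stat_*, since they always operate on a size_class, while the rename leaves the read side untouched.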