121
121
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
122
122
#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
123
123
124
+ #define HUGE_BITS 1
124
125
#define FULLNESS_BITS 2
125
126
#define CLASS_BITS 8
126
127
#define ISOLATED_BITS 3
@@ -213,22 +214,6 @@ struct size_class {
213
214
struct zs_size_stat stats ;
214
215
};
215
216
216
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
/*
 * Old representation (removed by this patch): the "huge object" state was
 * tracked per-page by aliasing the PG_owner_priv_1 page flag.  The patch
 * replaces these with a bit stored directly in struct zspage (see
 * SetZsHugePage()/ZsHugePage()), since the property describes the zspage,
 * not an individual page.
 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}
231
-
232
217
/*
233
218
* Placed within free objects to form a singly linked list.
234
219
* For every zspage, zspage->freeobj gives head of this list.
@@ -278,6 +263,7 @@ struct zs_pool {
278
263
279
264
struct zspage {
280
265
struct {
266
+ unsigned int huge :HUGE_BITS ;
281
267
unsigned int fullness :FULLNESS_BITS ;
282
268
unsigned int class :CLASS_BITS + 1 ;
283
269
unsigned int isolated :ISOLATED_BITS ;
@@ -298,6 +284,17 @@ struct mapping_area {
298
284
enum zs_mapmode vm_mm ; /* mapping mode */
299
285
};
300
286
287
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
/*
 * Mark @zspage as holding a single huge object.  The flag lives in the
 * zspage's HUGE_BITS bitfield, replacing the old per-page
 * PG_owner_priv_1 alias (SetPageHugeObject et al.).  Note there is no
 * clear helper: the bit is only ever set at zspage creation time
 * (create_page_chain()) and dies with the zspage.
 */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}

/* Return true if @zspage holds a single huge object. */
static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}
297
+
301
298
#ifdef CONFIG_COMPACTION
302
299
static int zs_register_migration (struct zs_pool * pool );
303
300
static void zs_unregister_migration (struct zs_pool * pool );
@@ -830,7 +827,9 @@ static struct zspage *get_zspage(struct page *page)
830
827
831
828
static struct page * get_next_page (struct page * page )
832
829
{
833
- if (unlikely (PageHugeObject (page )))
830
+ struct zspage * zspage = get_zspage (page );
831
+
832
+ if (unlikely (ZsHugePage (zspage )))
834
833
return NULL ;
835
834
836
835
return (struct page * )page -> index ;
@@ -880,8 +879,9 @@ static unsigned long handle_to_obj(unsigned long handle)
880
879
static bool obj_allocated (struct page * page , void * obj , unsigned long * phandle )
881
880
{
882
881
unsigned long handle ;
882
+ struct zspage * zspage = get_zspage (page );
883
883
884
- if (unlikely (PageHugeObject ( page ))) {
884
+ if (unlikely (ZsHugePage ( zspage ))) {
885
885
VM_BUG_ON_PAGE (!is_first_page (page ), page );
886
886
handle = page -> index ;
887
887
} else
@@ -920,7 +920,6 @@ static void reset_page(struct page *page)
920
920
ClearPagePrivate (page );
921
921
set_page_private (page , 0 );
922
922
page_mapcount_reset (page );
923
- ClearPageHugeObject (page );
924
923
page -> index = 0 ;
925
924
}
926
925
@@ -1062,7 +1061,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage,
1062
1061
SetPagePrivate (page );
1063
1062
if (unlikely (class -> objs_per_zspage == 1 &&
1064
1063
class -> pages_per_zspage == 1 ))
1065
- SetPageHugeObject ( page );
1064
+ SetZsHugePage ( zspage );
1066
1065
} else {
1067
1066
prev_page -> index = (unsigned long )page ;
1068
1067
}
@@ -1307,7 +1306,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1307
1306
1308
1307
ret = __zs_map_object (area , pages , off , class -> size );
1309
1308
out :
1310
- if (likely (!PageHugeObject ( page )))
1309
+ if (likely (!ZsHugePage ( zspage )))
1311
1310
ret += ZS_HANDLE_SIZE ;
1312
1311
1313
1312
return ret ;
@@ -1395,7 +1394,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
1395
1394
vaddr = kmap_atomic (m_page );
1396
1395
link = (struct link_free * )vaddr + m_offset / sizeof (* link );
1397
1396
set_freeobj (zspage , link -> next >> OBJ_TAG_BITS );
1398
- if (likely (!PageHugeObject ( m_page )))
1397
+ if (likely (!ZsHugePage ( zspage )))
1399
1398
/* record handle in the header of allocated chunk */
1400
1399
link -> handle = handle ;
1401
1400
else
@@ -1496,7 +1495,10 @@ static void obj_free(int class_size, unsigned long obj)
1496
1495
1497
1496
/* Insert this object in containing zspage's freelist */
1498
1497
link = (struct link_free * )(vaddr + f_offset );
1499
- link -> next = get_freeobj (zspage ) << OBJ_TAG_BITS ;
1498
+ if (likely (!ZsHugePage (zspage )))
1499
+ link -> next = get_freeobj (zspage ) << OBJ_TAG_BITS ;
1500
+ else
1501
+ f_page -> index = 0 ;
1500
1502
kunmap_atomic (vaddr );
1501
1503
set_freeobj (zspage , f_objidx );
1502
1504
mod_zspage_inuse (zspage , -1 );
@@ -1867,7 +1869,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1867
1869
1868
1870
create_page_chain (class , zspage , pages );
1869
1871
set_first_obj_offset (newpage , get_first_obj_offset (oldpage ));
1870
- if (unlikely (PageHugeObject ( oldpage )))
1872
+ if (unlikely (ZsHugePage ( zspage )))
1871
1873
newpage -> index = oldpage -> index ;
1872
1874
__SetPageMovable (newpage , page_mapping (oldpage ));
1873
1875
}
0 commit comments