@@ -246,16 +246,16 @@ static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
 {
	thread_id_t thread_id = vdo_get_callback_thread_id();
 
-	ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
-			"%s() must only be called on cache thread %d, not thread %d",
-			function_name, cache->zone->thread_id, thread_id);
+	VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
+			    "%s() must only be called on cache thread %d, not thread %d",
+			    function_name, cache->zone->thread_id, thread_id);
 }
 
 /** assert_io_allowed() - Assert that a page cache may issue I/O. */
 static inline void assert_io_allowed(struct vdo_page_cache *cache)
 {
-	ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
-			"VDO page cache may issue I/O");
+	VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
+			    "VDO page cache may issue I/O");
 }
 
 /** report_cache_pressure() - Log and, if enabled, report cache pressure. */
@@ -287,9 +287,9 @@ static const char * __must_check get_page_state_name(enum vdo_page_buffer_state
 
	BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT);
 
-	result = ASSERT(state < ARRAY_SIZE(state_names),
-			"Unknown page_state value %d", state);
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(state < ARRAY_SIZE(state_names),
+			    "Unknown page_state value %d", state);
+	if (result != VDO_SUCCESS)
 		return "[UNKNOWN PAGE STATE]";
 
 	return state_names[state];
@@ -378,8 +378,8 @@ static int __must_check set_info_pbn(struct page_info *info, physical_block_numb
	struct vdo_page_cache *cache = info->cache;
 
	/* Either the new or the old page number must be NO_PAGE. */
-	int result = ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
-			    "Must free a page before reusing it.");
+	int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
+				"Must free a page before reusing it.");
 	if (result != VDO_SUCCESS)
 		return result;
 
@@ -401,13 +401,13 @@ static int reset_page_info(struct page_info *info)
 {
	int result;
 
-	result = ASSERT(info->busy == 0, "VDO Page must not be busy");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy");
+	if (result != VDO_SUCCESS)
 		return result;
 
-	result = ASSERT(!vdo_waitq_has_waiters(&info->waiting),
-			"VDO Page must not have waiters");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting),
+			    "VDO Page must not have waiters");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = set_info_pbn(info, NO_PAGE);
@@ -592,29 +592,29 @@ static int __must_check validate_completed_page(struct vdo_page_completion *comp
 {
	int result;
 
-	result = ASSERT(completion->ready, "VDO Page completion not ready");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(completion->ready, "VDO Page completion not ready");
+	if (result != VDO_SUCCESS)
 		return result;
 
-	result = ASSERT(completion->info != NULL,
-			"VDO Page Completion must be complete");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(completion->info != NULL,
+			    "VDO Page Completion must be complete");
+	if (result != VDO_SUCCESS)
 		return result;
 
-	result = ASSERT(completion->info->pbn == completion->pbn,
-			"VDO Page Completion pbn must be consistent");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(completion->info->pbn == completion->pbn,
+			    "VDO Page Completion pbn must be consistent");
+	if (result != VDO_SUCCESS)
 		return result;
 
-	result = ASSERT(is_valid(completion->info),
-			"VDO Page Completion page must be valid");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(is_valid(completion->info),
+			    "VDO Page Completion page must be valid");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	if (writable) {
-		result = ASSERT(completion->writable,
-				"VDO Page Completion must be writable");
-		if (result != UDS_SUCCESS)
+		result = VDO_ASSERT(completion->writable,
+				    "VDO Page Completion must be writable");
+		if (result != VDO_SUCCESS)
 			return result;
 	}
 
@@ -776,7 +776,7 @@ static int __must_check launch_page_load(struct page_info *info,
	if (result != VDO_SUCCESS)
 		return result;
 
-	result = ASSERT((info->busy == 0), "Page is not busy before loading.");
+	result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading.");
 	if (result != VDO_SUCCESS)
 		return result;
 
@@ -949,8 +949,8 @@ static void discard_a_page(struct vdo_page_cache *cache)
		return;
 	}
 
-	ASSERT_LOG_ONLY(!is_in_flight(info),
-			"page selected for discard is not in flight");
+	VDO_ASSERT_LOG_ONLY(!is_in_flight(info),
+			    "page selected for discard is not in flight");
 
 	cache->discard_count++;
 	info->write_status = WRITE_STATUS_DISCARD;
@@ -1153,8 +1153,8 @@ void vdo_release_page_completion(struct vdo_completion *completion)
		discard_info = page_completion->info;
 	}
 
-	ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
-			"Page being released after leaving all queues");
+	VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+			    "Page being released after leaving all queues");
 
 	page_completion->info = NULL;
 	cache = page_completion->cache;
@@ -1217,8 +1217,8 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
	struct page_info *info;
 
	assert_on_cache_thread(cache, __func__);
-	ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
-			"New page completion was not already on a wait queue");
+	VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+			    "New page completion was not already on a wait queue");
 
 	*page_completion = (struct vdo_page_completion) {
 		.pbn = pbn,
@@ -1265,7 +1265,7 @@ void vdo_get_page(struct vdo_page_completion *page_completion,
		}
 
		/* Something horrible has gone wrong. */
-		ASSERT_LOG_ONLY(false, "Info found in a usable state.");
+		VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state.");
 	}
 
 	/* The page must be fetched. */
@@ -1334,7 +1334,7 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
 
	/* Make sure we don't throw away any dirty pages. */
	for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
-		int result = ASSERT(!is_dirty(info), "cache must have no dirty pages");
+		int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages");
 
 		if (result != VDO_SUCCESS)
 			return result;
@@ -1440,10 +1440,10 @@ static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
 {
	int result;
 
-	result = ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
-			 in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
-			"generation(s) %u, %u are out of range [%u, %u]",
-			a, b, zone->oldest_generation, zone->generation);
+	result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
+			     in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
+			    "generation(s) %u, %u are out of range [%u, %u]",
+			    a, b, zone->oldest_generation, zone->generation);
 	if (result != VDO_SUCCESS) {
 		enter_zone_read_only_mode(zone, result);
 		return true;
@@ -1456,8 +1456,8 @@ static void release_generation(struct block_map_zone *zone, u8 generation)
 {
	int result;
 
-	result = ASSERT((zone->dirty_page_counts[generation] > 0),
-			"dirty page count underflow for generation %u", generation);
+	result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
+			    "dirty page count underflow for generation %u", generation);
 	if (result != VDO_SUCCESS) {
 		enter_zone_read_only_mode(zone, result);
 		return;
@@ -1482,8 +1482,8 @@ static void set_generation(struct block_map_zone *zone, struct tree_page *page,
 
	page->generation = new_generation;
	new_count = ++zone->dirty_page_counts[new_generation];
-	result = ASSERT((new_count != 0), "dirty page count overflow for generation %u",
-			new_generation);
+	result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
+			    new_generation);
 	if (result != VDO_SUCCESS) {
 		enter_zone_read_only_mode(zone, result);
 		return;
@@ -1698,15 +1698,15 @@ static void release_page_lock(struct data_vio *data_vio, char *what)
	struct tree_lock *lock_holder;
	struct tree_lock *lock = &data_vio->tree_lock;
 
-	ASSERT_LOG_ONLY(lock->locked,
-			"release of unlocked block map page %s for key %llu in tree %u",
-			what, (unsigned long long) lock->key, lock->root_index);
+	VDO_ASSERT_LOG_ONLY(lock->locked,
+			    "release of unlocked block map page %s for key %llu in tree %u",
+			    what, (unsigned long long) lock->key, lock->root_index);
 
 	zone = data_vio->logical.zone->block_map_zone;
 	lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
-	ASSERT_LOG_ONLY((lock_holder == lock),
-			"block map page %s mismatch for key %llu in tree %u",
-			what, (unsigned long long) lock->key, lock->root_index);
+	VDO_ASSERT_LOG_ONLY((lock_holder == lock),
+			    "block map page %s mismatch for key %llu in tree %u",
+			    what, (unsigned long long) lock->key, lock->root_index);
 	lock->locked = false;
 }
 
@@ -2008,8 +2008,8 @@ static void write_expired_elements(struct block_map_zone *zone)
 
		list_del_init(&page->entry);
 
-		result = ASSERT(!vdo_waiter_is_waiting(&page->waiter),
-				"Newly expired page not already waiting to write");
+		result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter),
+				    "Newly expired page not already waiting to write");
 		if (result != VDO_SUCCESS) {
 			enter_zone_read_only_mode(zone, result);
 			continue;
@@ -2867,8 +2867,8 @@ int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical
	BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
		     ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) /
		      sizeof(struct block_map_entry)));
-	result = ASSERT(cache_size > 0, "block map cache size is specified");
-	if (result != UDS_SUCCESS)
+	result = VDO_ASSERT(cache_size > 0, "block map cache size is specified");
+	if (result != VDO_SUCCESS)
 		return result;
 
 	result = vdo_allocate_extended(struct block_map,
@@ -2937,7 +2937,7 @@ void vdo_initialize_block_map_from_journal(struct block_map *map,
	for (z = 0; z < map->zone_count; z++) {
		struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
 
-		ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
+		VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
 		dirty_lists->oldest_period = map->current_era_point;
 		dirty_lists->next_period = map->current_era_point + 1;
 		dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
@@ -2971,8 +2971,8 @@ static void initiate_drain(struct admin_state *state)
 {
	struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
 
-	ASSERT_LOG_ONLY((zone->active_lookups == 0),
-			"%s() called with no active lookups", __func__);
+	VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
+			    "%s() called with no active lookups", __func__);
 
 	if (!vdo_is_state_suspending(state)) {
 		while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
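
Every hunk above follows the same mechanical pattern: the UDS-era ASSERT()/ASSERT_LOG_ONLY() macros are renamed to VDO_ASSERT()/VDO_ASSERT_LOG_ONLY(), and call sites that check the returned status now compare against VDO_SUCCESS rather than UDS_SUCCESS. Below is a minimal userspace sketch of the macro shape those call sites rely on: an assertion that logs on failure and returns a status the caller can propagate. The names mirror the diff, but the bodies are illustrative assumptions, not the real permassert.h definitions.

/* Minimal userspace sketch (not the kernel code). VDO_ASSERT evaluates to
 * VDO_SUCCESS when the expression holds and logs otherwise;
 * VDO_ASSERT_LOG_ONLY is the fire-and-forget variant. The failure code and
 * logging here are assumptions for illustration.
 */
#include <stdio.h>

#define VDO_SUCCESS 0
#define VDO_ASSERTION_FAILED (-1)	/* assumed failure code */

static int vdo_assertion_failed(const char *expr, const char *file, int line)
{
	fprintf(stderr, "assertion \"%s\" failed at %s:%d\n", expr, file, line);
	return VDO_ASSERTION_FAILED;
}

/* Evaluates to VDO_SUCCESS when expr holds; otherwise logs and yields an error. */
#define VDO_ASSERT(expr) \
	((expr) ? VDO_SUCCESS : vdo_assertion_failed(#expr, __FILE__, __LINE__))

/* Same check when the caller does not propagate the result. */
#define VDO_ASSERT_LOG_ONLY(expr) ((void) VDO_ASSERT(expr))

int main(void)
{
	int busy = 0;
	int result = VDO_ASSERT(busy == 0);	/* caller checks against VDO_SUCCESS */

	VDO_ASSERT_LOG_ONLY(busy == 0);		/* result intentionally ignored */
	return (result == VDO_SUCCESS) ? 0 : 1;
}

With that shape, comparing the result against VDO_SUCCESS keeps the call sites consistent with what the renamed macros return; the UDS_SUCCESS checks were a holdover from the macros' UDS origins.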