@@ -86,7 +86,7 @@ void test_touch_anon_pages(void)
 	printk("Kernel handled %lu page faults\n", faults);
 }
 
-void test_z_mem_page_out(void)
+void test_k_mem_page_out(void)
 {
 	unsigned long faults;
 	int key, ret;
@@ -96,8 +96,8 @@ void test_z_mem_page_out(void)
 	 */
 	key = irq_lock();
 	faults = z_num_pagefaults_get();
-	ret = z_mem_page_out(arena, HALF_BYTES);
-	zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
+	ret = k_mem_page_out(arena, HALF_BYTES);
+	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
 
 	/* Write to the supposedly evicted region */
 	for (size_t i = 0; i < HALF_BYTES; i++) {
@@ -110,12 +110,12 @@ void test_z_mem_page_out(void)
 		      "unexpected num pagefaults expected %lu got %d",
 		      HALF_PAGES, faults);
 
-	ret = z_mem_page_out(arena, arena_size);
-	zassert_equal(ret, -ENOMEM, "z_mem_page_out should have failed");
+	ret = k_mem_page_out(arena, arena_size);
+	zassert_equal(ret, -ENOMEM, "k_mem_page_out should have failed");
 
 }
 
-void test_z_mem_page_in(void)
+void test_k_mem_page_in(void)
 {
 	unsigned long faults;
 	int key, ret;
@@ -125,10 +125,10 @@ void test_z_mem_page_in(void)
 	 */
 	key = irq_lock();
 
-	ret = z_mem_page_out(arena, HALF_BYTES);
-	zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
+	ret = k_mem_page_out(arena, HALF_BYTES);
+	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
 
-	z_mem_page_in(arena, HALF_BYTES);
+	k_mem_page_in(arena, HALF_BYTES);
 
 	faults = z_num_pagefaults_get();
 	/* Write to the supposedly evicted region */
@@ -142,12 +142,12 @@ void test_z_mem_page_in(void)
 		      faults);
 }
 
-void test_z_mem_pin(void)
+void test_k_mem_pin(void)
 {
 	unsigned long faults;
 	int key;
 
-	z_mem_pin(arena, HALF_BYTES);
+	k_mem_pin(arena, HALF_BYTES);
 
 	/* Write to the rest of the arena */
 	for (size_t i = HALF_BYTES; i < arena_size; i++) {
@@ -167,19 +167,19 @@ void test_z_mem_pin(void)
 		      faults);
 
 	/* Clean up */
-	z_mem_unpin(arena, HALF_BYTES);
+	k_mem_unpin(arena, HALF_BYTES);
 }
 
-void test_z_mem_unpin(void)
+void test_k_mem_unpin(void)
 {
 	/* Pin the memory (which we know works from prior test) */
-	z_mem_pin(arena, HALF_BYTES);
+	k_mem_pin(arena, HALF_BYTES);
 
 	/* Now un-pin it */
-	z_mem_unpin(arena, HALF_BYTES);
+	k_mem_unpin(arena, HALF_BYTES);
 
 	/* repeat the page_out scenario, which should work */
-	test_z_mem_page_out();
+	test_k_mem_page_out();
 }
 
 /* Show that even if we map enough anonymous memory to fill the backing
@@ -223,10 +223,10 @@ void test_main(void)
 	ztest_test_suite(test_demand_paging,
 			 ztest_unit_test(test_map_anon_pages),
 			 ztest_unit_test(test_touch_anon_pages),
-			 ztest_unit_test(test_z_mem_page_out),
-			 ztest_unit_test(test_z_mem_page_in),
-			 ztest_unit_test(test_z_mem_pin),
-			 ztest_unit_test(test_z_mem_unpin),
+			 ztest_unit_test(test_k_mem_page_out),
+			 ztest_unit_test(test_k_mem_page_in),
+			 ztest_unit_test(test_k_mem_pin),
+			 ztest_unit_test(test_k_mem_unpin),
 			 ztest_unit_test(test_backing_store_capacity));
 
 	ztest_run_test_suite(test_demand_paging);
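
For reference, here is a minimal sketch (not part of this patch) of how the renamed demand-paging calls fit together outside the test suite, assuming the signatures exercised above (int k_mem_page_out(), void k_mem_page_in(), k_mem_pin(), k_mem_unpin()) plus a k_mem_map()-backed buffer; the header locations and flag names used here are assumptions and may differ across Zephyr versions:

/* Hypothetical example, not taken from the patched test file. */
#include <zephyr.h>
#include <sys/mem_manage.h>	/* assumed header for k_mem_map() and the k_mem_page_* API */

static void demand_paging_demo(void)
{
	size_t len = 4 * CONFIG_MMU_PAGE_SIZE;
	/* Map anonymous, demand-paged, read-write memory */
	uint8_t *buf = k_mem_map(len, K_MEM_PERM_RW);

	if (buf == NULL) {
		return;
	}

	/* Evict the whole region to the backing store; returns 0 on success */
	if (k_mem_page_out(buf, len) == 0) {
		/* Bring the pages back in eagerly instead of faulting on access */
		k_mem_page_in(buf, len);
	}

	/* Pin the region so it cannot be paged out, then release the pin */
	k_mem_pin(buf, len);
	buf[0] = 0xff;		/* access while pinned should not fault */
	k_mem_unpin(buf, len);
}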