@@ -115,44 +115,64 @@ static bool should_dump_entire_vma(VmaEntry *vmae)
 }

 /*
- * should_dump_page returns vaddr if an addressed page has to be dumped.
- * Otherwise, it returns an address that has to be inspected next.
+ * should_dump_page writes vaddr to page_info->next if the addressed page has to be dumped.
+ * Otherwise, it writes the address that has to be inspected next.
  */
-u64 should_dump_page(pmc_t *pmc, VmaEntry *vmae, u64 vaddr, bool *softdirty)
+int should_dump_page(pmc_t *pmc, VmaEntry *vmae, u64 vaddr, struct page_info *page_info)
 {
+        if (!page_info)
+                goto err;
+
         if (vaddr >= pmc->end && pmc_fill(pmc, vaddr, vmae->end))
-                return -1;
+                goto err;

         if (pmc->regs) {
                 while (1) {
-                        if (pmc->regs_idx == pmc->regs_len)
-                                return pmc->end;
+                        if (pmc->regs_idx == pmc->regs_len) {
+                                page_info->next = pmc->end;
+                                return 0;
+                        }
+
                         if (vaddr < pmc->regs[pmc->regs_idx].end)
                                 break;
                         pmc->regs_idx++;
                 }
-                if (vaddr < pmc->regs[pmc->regs_idx].start)
-                        return pmc->regs[pmc->regs_idx].start;
-                if (softdirty)
-                        *softdirty = pmc->regs[pmc->regs_idx].categories & PAGE_IS_SOFT_DIRTY;
-                return vaddr;
+
+                if (vaddr < pmc->regs[pmc->regs_idx].start) {
+                        page_info->next = pmc->regs[pmc->regs_idx].start;
+                        return 0;
+                }
+
+                page_info->softdirty = pmc->regs[pmc->regs_idx].categories & PAGE_IS_SOFT_DIRTY;
+                page_info->next = vaddr;
+                return 0;
         } else {
                 u64 pme = pmc->map[PAGE_PFN(vaddr - pmc->start)];

                 /*
                  * Optimisation for private mapping pages, that haven't
                  * yet being COW-ed
                  */
-                if (vma_entry_is(vmae, VMA_FILE_PRIVATE) && (pme & PME_FILE))
-                        return vaddr + PAGE_SIZE;
+                if (vma_entry_is(vmae, VMA_FILE_PRIVATE) && (pme & PME_FILE)) {
+                        page_info->next = vaddr + PAGE_SIZE;
+                        return 0;
+                }
+
                 if ((pme & (PME_PRESENT | PME_SWAP)) && !__page_is_zero(pme)) {
-                        if (softdirty)
-                                *softdirty = pme & PME_SOFT_DIRTY;
-                        return vaddr;
+                        page_info->softdirty = pme & PME_SOFT_DIRTY;
+                        page_info->next = vaddr;
+                        return 0;
                 }

-                return vaddr + PAGE_SIZE;
+                page_info->next = vaddr + PAGE_SIZE;
+                return 0;
         }
+
+err:
+        pr_err("should_dump_page failed on vma "
+               "%#016" PRIx64 "-%#016" PRIx64 " vaddr=%#016" PRIx64 "\n",
+               vmae->start, vmae->end, vaddr);
+        return -1;
 }

 bool page_is_zero(u64 pme)
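
Note: the new signature stores its result through a struct page_info out-parameter whose declaration is not part of this excerpt. As a rough sketch of what the fields used above imply (hypothetical, inferred from usage, not copied from the commit; criu's u64 corresponds to uint64_t):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical declaration, inferred from the fields accessed in should_dump_page();
 * the actual definition elsewhere in the series may differ. */
struct page_info {
        uint64_t next;  /* address to inspect next; equals vaddr when the page must be dumped */
        bool softdirty; /* soft-dirty state of the page at vaddr (PAGE_IS_SOFT_DIRTY / PME_SOFT_DIRTY) */
};
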
@@ -202,14 +222,15 @@ static int generate_iovs(struct pstree_item *item, struct vma_area *vma, struct
         nr_scanned = 0;
         for (vaddr = *pvaddr; vaddr < vma->e->end; vaddr += PAGE_SIZE, nr_scanned++) {
                 unsigned int ppb_flags = 0;
-                bool softdirty = false;
-                u64 next;
+                struct page_info page_info = {};
                 int st;

                 /* If dump_all_pages is true, should_dump_page is called to get pme. */
-                next = should_dump_page(pmc, vma->e, vaddr, &softdirty);
-                if (!dump_all_pages && next != vaddr) {
-                        vaddr = next - PAGE_SIZE;
+                if (should_dump_page(pmc, vma->e, vaddr, &page_info))
+                        return -1;
+
+                if (!dump_all_pages && page_info.next != vaddr) {
+                        vaddr = page_info.next - PAGE_SIZE;
                         continue;
                 }

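Note: the vaddr = page_info.next - PAGE_SIZE; continue; step compensates for the for-loop increment. A small illustrative example (values made up, assuming PAGE_SIZE == 0x1000):

/*
 * vaddr          = 0x7f0000001000
 * page_info.next = 0x7f0000004000   (the pages in between need no dump)
 *
 * The loop body sets vaddr = 0x7f0000003000 and continues; the
 * for-increment (vaddr += PAGE_SIZE) then lands on 0x7f0000004000,
 * so the next should_dump_page() call starts exactly at page_info.next.
 */
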
@@ -223,7 +244,7 @@ static int generate_iovs(struct pstree_item *item, struct vma_area *vma, struct
                  * page. The latter would be checked in page-xfer.
                  */

-                if (has_parent && page_in_parent(softdirty)) {
+                if (has_parent && page_in_parent(page_info.softdirty)) {
                         ret = page_pipe_add_hole(pp, vaddr, PP_HOLE_PARENT);
                         st = 0;
                 } else {
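
Note: seen from the call site, the change splits the old overloaded u64 return value into a plain int status plus the page_info out-parameter. In the lines removed above, generate_iovs() treated the return value purely as a "next" address, so the u64 error marker -1 was indistinguishable from a legitimate skip target; with the new contract the caller checks the status and fails the dump explicitly, and the error path reports the offending VMA range and vaddr via pr_err() before returning.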