@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift)
 {
 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	struct page *shadow, *origin;
 	unsigned long off = 0;
-	int nr;
+	int nr, err = 0, clean = 0, mapped;
 
 	if (!kmsan_enabled || kmsan_in_runtime())
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	kmsan_enter_runtime();
-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
 		shadow = alloc_pages(gfp_mask, 1);
 		origin = alloc_pages(gfp_mask, 1);
-		__vmap_pages_range_noflush(
+		if (!shadow || !origin) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_shadow(start + off),
 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
 			PAGE_SHIFT);
-		__vmap_pages_range_noflush(
+		if (mapped) {
+			err = mapped;
+			goto ret;
+		}
+		shadow = NULL;
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_origin(start + off),
 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
 			PAGE_SHIFT);
+		if (mapped) {
+			__vunmap_range_noflush(
+				vmalloc_shadow(start + off),
+				vmalloc_shadow(start + off + PAGE_SIZE));
+			err = mapped;
+			goto ret;
+		}
+		origin = NULL;
+	}
+	/* Page mapping loop finished normally, nothing to clean up. */
+	clean = 0;
+
+ret:
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Clean up shadow/origin pages allocated
+		 * on the last loop iteration, then delete mappings created
+		 * during the previous iterations.
+		 */
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+		__vunmap_range_noflush(
+			vmalloc_shadow(start),
+			vmalloc_shadow(start + clean * PAGE_SIZE));
+		__vunmap_range_noflush(
+			vmalloc_origin(start),
+			vmalloc_origin(start + clean * PAGE_SIZE));
 	}
 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
 	kmsan_leave_runtime();
+	return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
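The notable part of this change, beyond the void-to-int signature change that lets callers propagate -ENOMEM or a __vmap_pages_range_noflush() failure, is the partial-unwind idiom: clean is advanced in the loop's increment expression, so it always holds the number of fully completed iterations, and the ret: path frees the pages left over from the failed iteration and unmaps only the ranges that were actually wired up. A fully successful pass resets clean to 0, so the same label falls through to the cache flushes with nothing to undo. Below is a minimal, self-contained user-space sketch of the same idiom; every name in it (map_one, map_range, the simulated failure at i == 3) is illustrative and not a kernel API.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical per-iteration resource: one allocation standing in for
 * one shadow/origin mapping. Fails outright on a chosen iteration.
 */
static int map_one(void **slot, int i)
{
	*slot = malloc(16);
	if (!*slot)
		return -1;
	if (i == 3) {		/* simulate a mapping failure */
		free(*slot);
		*slot = NULL;
		return -1;
	}
	return 0;
}

/* Map nr resources; on failure, unwind exactly the clean completed ones. */
static int map_range(void **slots, int nr)
{
	int err = 0, clean = 0;

	/*
	 * clean = i runs in the increment expression, i.e. only after
	 * iteration i's body has fully succeeded, so it always counts
	 * the number of completed iterations.
	 */
	for (int i = 0; i < nr; i++, clean = i) {
		if (map_one(&slots[i], i)) {
			err = -1;
			goto ret;
		}
	}
	/* Loop finished normally, nothing to clean up. */
	clean = 0;

ret:
	if (clean > 0) {
		/* Undo only the iterations that fully succeeded. */
		for (int i = 0; i < clean; i++) {
			free(slots[i]);
			slots[i] = NULL;
		}
	}
	return err;
}

int main(void)
{
	void *slots[8] = { 0 };

	/* Fails at i == 3; slots 0..2 are unwound before returning. */
	printf("map_range: %d\n", map_range(slots, 8));
	return 0;
}

In the hunk above, __vunmap_range_noflush() plays the role of the unwind loop, and setting shadow/origin to NULL once each mapping succeeds ensures the ret: path only frees pages that were never handed off to the vmalloc mappings.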