@@ -310,3 +310,111 @@ xfile_stat(
 	statbuf->bytes = ks.blocks << SECTOR_SHIFT;
 	return 0;
 }
+
+/*
+ * Grab the (locked) page for a memory object.  The object cannot span a page
+ * boundary.  Returns 0 (and a locked page) if successful, -ENOTBLK if we
+ * cannot grab the page, or the usual negative errno.
+ */
+int
+xfile_get_page(
+	struct xfile		*xf,
+	loff_t			pos,
+	unsigned int		len,
+	struct xfile_page	*xfpage)
+{
+	struct inode		*inode = file_inode(xf->file);
+	struct address_space	*mapping = inode->i_mapping;
+	const struct address_space_operations *aops = mapping->a_ops;
+	struct page		*page = NULL;
+	void			*fsdata = NULL;
+	loff_t			key = round_down(pos, PAGE_SIZE);
+	unsigned int		pflags;
+	int			error;
+
+	if (inode->i_sb->s_maxbytes - pos < len)
+		return -ENOMEM;
+	if (len > PAGE_SIZE - offset_in_page(pos))
+		return -ENOTBLK;
+
+	trace_xfile_get_page(xf, pos, len);
+
+	pflags = memalloc_nofs_save();
+
+	/*
+	 * We call write_begin directly here to avoid all the freezer
+	 * protection lock-taking that happens in the normal path.  shmem
+	 * doesn't support fs freeze, but lockdep doesn't know that and will
+	 * trip over that.
+	 */
+	error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
+			&fsdata);
+	if (error)
+		goto out_pflags;
+
+	/* We got the page, so make sure we push out EOF. */
+	if (i_size_read(inode) < pos + len)
+		i_size_write(inode, pos + len);
+
+	/*
+	 * If the page isn't up to date, fill it with zeroes before we hand it
+	 * to the caller and make sure the backing store will hold on to them.
+	 */
+	if (!PageUptodate(page)) {
+		void	*kaddr;
+
+		kaddr = kmap_local_page(page);
+		memset(kaddr, 0, PAGE_SIZE);
+		kunmap_local(kaddr);
+		SetPageUptodate(page);
+	}
+
+	/*
+	 * Mark each page dirty so that the contents are written to some
+	 * backing store when we drop this buffer, and take an extra reference
+	 * to prevent the xfile page from being swapped or removed from the
+	 * page cache by reclaim if the caller unlocks the page.
+	 */
+	set_page_dirty(page);
+	get_page(page);
+
+	xfpage->page = page;
+	xfpage->fsdata = fsdata;
+	xfpage->pos = key;
+out_pflags:
+	memalloc_nofs_restore(pflags);
+	return error;
+}
+
+/*
+ * Release the (locked) page for a memory object.  Returns 0 or a negative
+ * errno.
+ */
+int
+xfile_put_page(
+	struct xfile		*xf,
+	struct xfile_page	*xfpage)
+{
+	struct inode		*inode = file_inode(xf->file);
+	struct address_space	*mapping = inode->i_mapping;
+	const struct address_space_operations *aops = mapping->a_ops;
+	unsigned int		pflags;
+	int			ret;
+
+	trace_xfile_put_page(xf, xfpage->pos, PAGE_SIZE);
+
+	/* Give back the reference that we took in xfile_get_page. */
+	put_page(xfpage->page);
+
+	pflags = memalloc_nofs_save();
+	ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
+			xfpage->page, xfpage->fsdata);
+	memalloc_nofs_restore(pflags);
+	memset(xfpage, 0, sizeof(struct xfile_page));
+
+	if (ret < 0)
+		return ret;
+	if (ret != PAGE_SIZE)
+		return -EIO;
+	return 0;
+}
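For context, here is a minimal sketch of how a caller might use the new pair; the helper xfoo_store_rec and its arguments are invented for illustration and are not part of the patch:

/*
 * Hypothetical caller (not in the patch): copy a small record into an
 * xfile through the locked-page interface added above.  The record must
 * not cross a page boundary, or xfile_get_page returns -ENOTBLK.
 */
static int
xfoo_store_rec(
	struct xfile		*xf,
	loff_t			pos,
	const void		*rec,
	unsigned int		len)
{
	struct xfile_page	xfpage;
	void			*kaddr;
	int			error;

	error = xfile_get_page(xf, pos, len, &xfpage);
	if (error)
		return error;

	/* The page comes back locked, uptodate, and zero-filled if new. */
	kaddr = kmap_local_page(xfpage.page);
	memcpy(kaddr + offset_in_page(pos), rec, len);
	kunmap_local(kaddr);

	/* xfile_put_page hands the page back to ->write_end for unlock. */
	return xfile_put_page(xf, &xfpage);
}

Note the design choice in xfile_put_page: it passes PAGE_SIZE as both the requested and the copied length to ->write_end, so any short return is surfaced to the caller as -EIO.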