@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
  */
 static int v9fs_vfs_readpage(struct file *file, struct page *page)
 {
-	return netfs_readpage(file, page, &v9fs_req_ops, NULL);
+	struct folio *folio = page_folio(page);
+
+	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
 }
 
 /**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
 
 static int v9fs_release_page(struct page *page, gfp_t gfp)
 {
-	if (PagePrivate(page))
+	struct folio *folio = page_folio(page);
+
+	if (folio_test_private(folio))
 		return 0;
 #ifdef CONFIG_9P_FSCACHE
-	if (PageFsCache(page)) {
+	if (folio_test_fscache(folio)) {
 		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
 			return 0;
-		wait_on_page_fscache(page);
+		folio_wait_fscache(folio);
 	}
 #endif
 	return 1;
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 static void v9fs_invalidate_page(struct page *page, unsigned int offset,
 				 unsigned int length)
 {
-	wait_on_page_fscache(page);
+	struct folio *folio = page_folio(page);
+
+	folio_wait_fscache(folio);
 }
 
-static int v9fs_vfs_writepage_locked(struct page *page)
+static int v9fs_vfs_write_folio_locked(struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio_inode(folio);
 	struct v9fs_inode *v9inode = V9FS_I(inode);
-	loff_t start = page_offset(page);
-	loff_t size = i_size_read(inode);
+	loff_t start = folio_pos(folio);
+	loff_t i_size = i_size_read(inode);
 	struct iov_iter from;
-	int err, len;
+	size_t len = folio_size(folio);
+	int err;
+
+	if (start >= i_size)
+		return 0; /* Simultaneous truncation occurred */
 
-	if (page->index == size >> PAGE_SHIFT)
-		len = size & ~PAGE_MASK;
-	else
-		len = PAGE_SIZE;
+	len = min_t(loff_t, i_size - start, len);
 
-	iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
+	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
 
 	/* We should have writeback_fid always set */
 	BUG_ON(!v9inode->writeback_fid);
 
-	set_page_writeback(page);
+	folio_start_writeback(folio);
 
 	p9_client_write(v9inode->writeback_fid, start, &from, &err);
 
-	end_page_writeback(page);
+	folio_end_writeback(folio);
 	return err;
 }
 
 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct folio *folio = page_folio(page);
 	int retval;
 
-	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
 
-	retval = v9fs_vfs_writepage_locked(page);
+	retval = v9fs_vfs_write_folio_locked(folio);
 	if (retval < 0) {
 		if (retval == -EAGAIN) {
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			retval = 0;
 		} else {
-			SetPageError(page);
-			mapping_set_error(page->mapping, retval);
+			mapping_set_error(folio_mapping(folio), retval);
 		}
 	} else
 		retval = 0;
 
-	unlock_page(page);
+	folio_unlock(folio);
 	return retval;
 }
 
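For context on the hunk above: v9fs_vfs_write_folio_locked() drops the old last-page special case (size & ~PAGE_MASK) and instead clamps the folio's own size against the distance to EOF, returning early when the folio now lies entirely beyond i_size because of a concurrent truncation. The following is a minimal, stand-alone sketch of just that length calculation, not part of the patch; writeback_len() is a hypothetical helper name and the headers are assumed.

#include <linux/mm.h>		/* folio helpers */
#include <linux/pagemap.h>

/*
 * Hypothetical helper (illustration only): how many bytes of a folio
 * should be written back, given the current inode size.
 */
static size_t writeback_len(struct folio *folio, loff_t i_size)
{
	loff_t start = folio_pos(folio);	/* byte offset of the folio in the file */
	size_t len = folio_size(folio);		/* whole folio, may exceed PAGE_SIZE */

	if (start >= i_size)
		return 0;	/* folio sits past a concurrent truncation */

	/* Trim the final folio so the write never runs past EOF. */
	return min_t(loff_t, i_size - start, len);
}
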
@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int v9fs_launder_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	int retval;
 
-	if (clear_page_dirty_for_io(page)) {
-		retval = v9fs_vfs_writepage_locked(page);
+	if (folio_clear_dirty_for_io(folio)) {
+		retval = v9fs_vfs_write_folio_locked(folio);
 		if (retval)
 			return retval;
 	}
-	wait_on_page_fscache(page);
+	folio_wait_fscache(folio);
 	return 0;
 }
 
@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 			    loff_t pos, unsigned int len, unsigned int flags,
-			    struct page **pagep, void **fsdata)
+			    struct page **subpagep, void **fsdata)
 {
 	int retval;
-	struct page *page;
+	struct folio *folio;
 	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
 
 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -279,31 +287,32 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 	 * file.  We need to do this before we get a lock on the page in case
 	 * there's more than one writer competing for the same cache block.
 	 */
-	retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
+	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
 				   &v9fs_req_ops, NULL);
 	if (retval < 0)
 		return retval;
 
-	*pagep = find_subpage(page, pos / PAGE_SIZE);
+	*subpagep = &folio->page;
 	return retval;
 }
 
 static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 			  loff_t pos, unsigned int len, unsigned int copied,
-			  struct page *page, void *fsdata)
+			  struct page *subpage, void *fsdata)
 {
 	loff_t last_pos = pos + copied;
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(subpage);
+	struct inode *inode = mapping->host;
 
 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
 
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		if (unlikely(copied < len)) {
 			copied = 0;
 			goto out;
 		}
 
-		SetPageUptodate(page);
+		folio_mark_uptodate(folio);
 	}
 
 	/*
@@ -314,10 +323,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 		inode_add_bytes(inode, last_pos - inode->i_size);
 		i_size_write(inode, last_pos);
 	}
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 out:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 
 	return copied;
 }
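
The ->write_begin()/->write_end() hunks keep the page-based VFS interface only at the boundary: netfs_write_begin() hands back a locked folio, its head page goes out through *subpagep = &folio->page, and write_end() recovers the folio with page_folio() before using folio helpers throughout. Below is a minimal sketch of that round trip under stated assumptions: my_write_begin(), my_write_end() and my_get_locked_folio() are hypothetical names standing in for the real 9p/netfs calls, and the sketch omits the i_size update done by the real write_end.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical stand-in for netfs_write_begin(): assumed to return a
 * locked folio with an elevated reference count in *foliop.
 */
int my_get_locked_folio(struct address_space *mapping, loff_t pos,
			struct folio **foliop);

static int my_write_begin(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int flags,
			  struct page **subpagep, void **fsdata)
{
	struct folio *folio;
	int err = my_get_locked_folio(mapping, pos, &folio);

	if (err < 0)
		return err;

	*subpagep = &folio->page;	/* hand the head page back to the VFS */
	return 0;
}

static int my_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned int len, unsigned int copied,
			struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);	/* recover the folio */

	if (!folio_test_uptodate(folio)) {
		if (copied < len)
			copied = 0;	/* partial copy into a not-uptodate folio */
		else
			folio_mark_uptodate(folio);
	}
	if (copied)
		folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);	/* drop the reference taken at write_begin time */
	return copied;
}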