@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
108
108
*/
109
109
/*
 * v9fs_vfs_readpage - read a page via the netfs helper library
 * @file: the open file the read is on behalf of (may carry the fid)
 * @page: the locked page to fill
 *
 * Thin ->readpage wrapper: converts the page to its containing folio
 * and hands the work to netfs_readpage() with the 9p netfs ops table.
 * Returns whatever netfs_readpage() returns.
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}
113
115
114
116
/**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
130
132
131
133
/*
 * v9fs_release_page - attempt to release a page for reclaim
 * @page: the page the VM wants to free
 * @gfp: allocation context of the caller, used to decide whether we
 *       may block waiting for fscache
 *
 * Returns 1 if the page may be released, 0 if it must be kept.
 * A page with private data attached cannot be released.  If the folio
 * is under fscache I/O we only wait for it when the caller's gfp mask
 * permits direct reclaim and FS recursion; otherwise we refuse.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);

	/* Private data still attached - cannot release. */
	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		/* Waiting may sleep: only allowed if the caller can
		 * tolerate direct reclaim and filesystem re-entry.
		 */
		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	return 1;
}
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
152
156
/*
 * v9fs_invalidate_page - invalidate (part of) a page
 * @page: the page being invalidated
 * @offset: start of the invalidated range within the page
 * @length: length of the invalidated range
 *
 * Before the page contents are discarded, wait for any in-flight
 * fscache write on the containing folio to finish so the cache does
 * not read from a page that is being torn down.  @offset/@length are
 * not otherwise used here.
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

	folio_wait_fscache(folio);
}
157
163
158
- static int v9fs_vfs_writepage_locked (struct page * page )
164
+ static int v9fs_vfs_write_folio_locked (struct folio * folio )
159
165
{
160
- struct inode * inode = page -> mapping -> host ;
166
+ struct inode * inode = folio_inode ( folio ) ;
161
167
struct v9fs_inode * v9inode = V9FS_I (inode );
162
- loff_t start = page_offset ( page );
163
- loff_t size = i_size_read (inode );
168
+ loff_t start = folio_pos ( folio );
169
+ loff_t i_size = i_size_read (inode );
164
170
struct iov_iter from ;
165
- int err , len ;
171
+ size_t len = folio_size (folio );
172
+ int err ;
173
+
174
+ if (start >= i_size )
175
+ return 0 ; /* Simultaneous truncation occurred */
166
176
167
- if (page -> index == size >> PAGE_SHIFT )
168
- len = size & ~PAGE_MASK ;
169
- else
170
- len = PAGE_SIZE ;
177
+ len = min_t (loff_t , i_size - start , len );
171
178
172
- iov_iter_xarray (& from , WRITE , & page -> mapping -> i_pages , start , len );
179
+ iov_iter_xarray (& from , WRITE , & folio_mapping ( folio ) -> i_pages , start , len );
173
180
174
181
/* We should have writeback_fid always set */
175
182
BUG_ON (!v9inode -> writeback_fid );
176
183
177
- set_page_writeback ( page );
184
+ folio_start_writeback ( folio );
178
185
179
186
p9_client_write (v9inode -> writeback_fid , start , & from , & err );
180
187
181
- end_page_writeback ( page );
188
+ folio_end_writeback ( folio );
182
189
return err ;
183
190
}
184
191
185
192
/*
 * v9fs_vfs_writepage - address_space ->writepage implementation
 * @page: the locked page to write back
 * @wbc: writeback control from the VM
 *
 * Converts the page to its folio and delegates to
 * v9fs_vfs_write_folio_locked().  -EAGAIN is treated as transient: the
 * folio is redirtied for a later pass and success is reported.  Any
 * other error is recorded on the mapping.  The folio is unlocked in
 * all cases before returning.
 */
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			/* Transient failure: try again on a later writeback pass. */
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			/* Hard error: remember it on the mapping for fsync et al. */
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}
206
213
@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
213
220
214
221
/*
 * v9fs_launder_page - write back a dirty page before it is invalidated
 * @page: the locked page to launder
 *
 * If the folio is dirty, clear the dirty bit and write it to the
 * server synchronously; a write failure is returned to the caller.
 * Finally wait for any outstanding fscache I/O on the folio so the
 * caller may safely invalidate it.  Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}
226
234
@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
265
273
266
274
/*
 * v9fs_write_begin - prepare a pagecache folio for a buffered write
 * @filp: file being written
 * @mapping: pagecache mapping of the inode
 * @pos: byte position of the write
 * @len: length of the write
 * @flags: AOP flags
 * @subpagep: out - the head page of the folio to write into
 * @fsdata: out - opaque data passed through to write_end
 *
 * Uses netfs_write_begin() to locate/lock the folio and prefetch any
 * needed data from the cache or server.  On success, returns the
 * folio's head page through @subpagep (the caller releases it in
 * ->write_end).  Returns 0 on success or a negative error.
 *
 * NOTE(review): the lines between the diff hunks here (between the
 * p9_debug() and the prefetch comment) were reconstructed from the
 * upstream patch - verify against the applied tree.
 */
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	/* Hand the head page back to the generic write path. */
	*subpagep = &folio->page;
	return retval;
}
290
298
291
299
/*
 * v9fs_write_end - finish a buffered write started by v9fs_write_begin
 * @filp: file being written
 * @mapping: pagecache mapping of the inode
 * @pos: byte position of the write
 * @len: length requested in write_begin
 * @copied: bytes actually copied from userspace
 * @subpage: the page returned by write_begin
 * @fsdata: opaque data from write_begin (unused here)
 *
 * Marks the folio uptodate/dirty as appropriate and extends i_size if
 * the write went past EOF.  A short copy into a not-uptodate folio is
 * rejected (returns 0) so the caller will retry.  Always unlocks and
 * releases the folio.  Returns the number of bytes accepted.
 *
 * NOTE(review): the lines between the diff hunks (the i_size comment
 * and the "if (last_pos > inode->i_size)" guard) were reconstructed
 * from the upstream patch - verify against the applied tree.
 */
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			/* Partial copy into a folio that isn't fully valid:
			 * reject the write so the caller retries it.
			 */
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
0 commit comments