Skip to content

Commit 0f7ddea

Browse files
committed
Merge tag 'netfs-folio-20211111' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
Pull netfs, 9p, afs and ceph (partial) foliation from David Howells:

"This converts netfslib, 9p and afs to use folios. It also partially
converts ceph so that it uses folios on the boundaries with netfslib.

To help with this, a couple of folio helper functions are added in the
first two patches.

These patches don't touch fscache and cachefiles as I intend to remove
all the code that deals with pages directly from there. Only nfs and
cifs are using the old fscache I/O API now. The new API uses iov_iter
instead.

Thanks to Jeff Layton, Dominique Martinet and AuriStor for testing and
retesting the patches"

* tag 'netfs-folio-20211111' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  afs: Use folios in directory handling
  netfs, 9p, afs, ceph: Use folios
  folio: Add a function to get the host inode for a folio
  folio: Add a function to change the private data attached to a folio
2 parents a9b9669 + 255ed63 commit 0f7ddea

File tree

13 files changed

+636
-626
lines changed

13 files changed

+636
-626
lines changed

fs/9p/vfs_addr.c

Lines changed: 46 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
108108
*/
109109
static int v9fs_vfs_readpage(struct file *file, struct page *page)
110110
{
111-
return netfs_readpage(file, page, &v9fs_req_ops, NULL);
111+
struct folio *folio = page_folio(page);
112+
113+
return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
112114
}
113115

114116
/**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
130132

131133
static int v9fs_release_page(struct page *page, gfp_t gfp)
132134
{
133-
if (PagePrivate(page))
135+
struct folio *folio = page_folio(page);
136+
137+
if (folio_test_private(folio))
134138
return 0;
135139
#ifdef CONFIG_9P_FSCACHE
136-
if (PageFsCache(page)) {
140+
if (folio_test_fscache(folio)) {
137141
if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
138142
return 0;
139-
wait_on_page_fscache(page);
143+
folio_wait_fscache(folio);
140144
}
141145
#endif
142146
return 1;
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
152156
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
153157
unsigned int length)
154158
{
155-
wait_on_page_fscache(page);
159+
struct folio *folio = page_folio(page);
160+
161+
folio_wait_fscache(folio);
156162
}
157163

158-
static int v9fs_vfs_writepage_locked(struct page *page)
164+
static int v9fs_vfs_write_folio_locked(struct folio *folio)
159165
{
160-
struct inode *inode = page->mapping->host;
166+
struct inode *inode = folio_inode(folio);
161167
struct v9fs_inode *v9inode = V9FS_I(inode);
162-
loff_t start = page_offset(page);
163-
loff_t size = i_size_read(inode);
168+
loff_t start = folio_pos(folio);
169+
loff_t i_size = i_size_read(inode);
164170
struct iov_iter from;
165-
int err, len;
171+
size_t len = folio_size(folio);
172+
int err;
173+
174+
if (start >= i_size)
175+
return 0; /* Simultaneous truncation occurred */
166176

167-
if (page->index == size >> PAGE_SHIFT)
168-
len = size & ~PAGE_MASK;
169-
else
170-
len = PAGE_SIZE;
177+
len = min_t(loff_t, i_size - start, len);
171178

172-
iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
179+
iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
173180

174181
/* We should have writeback_fid always set */
175182
BUG_ON(!v9inode->writeback_fid);
176183

177-
set_page_writeback(page);
184+
folio_start_writeback(folio);
178185

179186
p9_client_write(v9inode->writeback_fid, start, &from, &err);
180187

181-
end_page_writeback(page);
188+
folio_end_writeback(folio);
182189
return err;
183190
}
184191

185192
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
186193
{
194+
struct folio *folio = page_folio(page);
187195
int retval;
188196

189-
p9_debug(P9_DEBUG_VFS, "page %p\n", page);
197+
p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
190198

191-
retval = v9fs_vfs_writepage_locked(page);
199+
retval = v9fs_vfs_write_folio_locked(folio);
192200
if (retval < 0) {
193201
if (retval == -EAGAIN) {
194-
redirty_page_for_writepage(wbc, page);
202+
folio_redirty_for_writepage(wbc, folio);
195203
retval = 0;
196204
} else {
197-
SetPageError(page);
198-
mapping_set_error(page->mapping, retval);
205+
mapping_set_error(folio_mapping(folio), retval);
199206
}
200207
} else
201208
retval = 0;
202209

203-
unlock_page(page);
210+
folio_unlock(folio);
204211
return retval;
205212
}
206213

@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
213220

214221
static int v9fs_launder_page(struct page *page)
215222
{
223+
struct folio *folio = page_folio(page);
216224
int retval;
217225

218-
if (clear_page_dirty_for_io(page)) {
219-
retval = v9fs_vfs_writepage_locked(page);
226+
if (folio_clear_dirty_for_io(folio)) {
227+
retval = v9fs_vfs_write_folio_locked(folio);
220228
if (retval)
221229
return retval;
222230
}
223-
wait_on_page_fscache(page);
231+
folio_wait_fscache(folio);
224232
return 0;
225233
}
226234

@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
265273

266274
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
267275
loff_t pos, unsigned int len, unsigned int flags,
268-
struct page **pagep, void **fsdata)
276+
struct page **subpagep, void **fsdata)
269277
{
270278
int retval;
271-
struct page *page;
279+
struct folio *folio;
272280
struct v9fs_inode *v9inode = V9FS_I(mapping->host);
273281

274282
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -279,31 +287,32 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
279287
* file. We need to do this before we get a lock on the page in case
280288
* there's more than one writer competing for the same cache block.
281289
*/
282-
retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
290+
retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
283291
&v9fs_req_ops, NULL);
284292
if (retval < 0)
285293
return retval;
286294

287-
*pagep = find_subpage(page, pos / PAGE_SIZE);
295+
*subpagep = &folio->page;
288296
return retval;
289297
}
290298

291299
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
292300
loff_t pos, unsigned int len, unsigned int copied,
293-
struct page *page, void *fsdata)
301+
struct page *subpage, void *fsdata)
294302
{
295303
loff_t last_pos = pos + copied;
296-
struct inode *inode = page->mapping->host;
304+
struct folio *folio = page_folio(subpage);
305+
struct inode *inode = mapping->host;
297306

298307
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
299308

300-
if (!PageUptodate(page)) {
309+
if (!folio_test_uptodate(folio)) {
301310
if (unlikely(copied < len)) {
302311
copied = 0;
303312
goto out;
304313
}
305314

306-
SetPageUptodate(page);
315+
folio_mark_uptodate(folio);
307316
}
308317

309318
/*
@@ -314,10 +323,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
314323
inode_add_bytes(inode, last_pos - inode->i_size);
315324
i_size_write(inode, last_pos);
316325
}
317-
set_page_dirty(page);
326+
folio_mark_dirty(folio);
318327
out:
319-
unlock_page(page);
320-
put_page(page);
328+
folio_unlock(folio);
329+
folio_put(folio);
321330

322331
return copied;
323332
}

fs/9p/vfs_file.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -528,38 +528,38 @@ static vm_fault_t
528528
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
529529
{
530530
struct v9fs_inode *v9inode;
531-
struct page *page = vmf->page;
531+
struct folio *folio = page_folio(vmf->page);
532532
struct file *filp = vmf->vma->vm_file;
533533
struct inode *inode = file_inode(filp);
534534

535535

536-
p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
537-
page, (unsigned long)filp->private_data);
536+
p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
537+
folio, (unsigned long)filp->private_data);
538538

539539
v9inode = V9FS_I(inode);
540540

541541
/* Wait for the page to be written to the cache before we allow it to
542542
* be modified. We then assume the entire page will need writing back.
543543
*/
544544
#ifdef CONFIG_9P_FSCACHE
545-
if (PageFsCache(page) &&
546-
wait_on_page_fscache_killable(page) < 0)
547-
return VM_FAULT_RETRY;
545+
if (folio_test_fscache(folio) &&
546+
folio_wait_fscache_killable(folio) < 0)
547+
return VM_FAULT_NOPAGE;
548548
#endif
549549

550550
/* Update file times before taking page lock */
551551
file_update_time(filp);
552552

553553
BUG_ON(!v9inode->writeback_fid);
554-
if (lock_page_killable(page) < 0)
554+
if (folio_lock_killable(folio) < 0)
555555
return VM_FAULT_RETRY;
556-
if (page->mapping != inode->i_mapping)
556+
if (folio_mapping(folio) != inode->i_mapping)
557557
goto out_unlock;
558-
wait_for_stable_page(page);
558+
folio_wait_stable(folio);
559559

560560
return VM_FAULT_LOCKED;
561561
out_unlock:
562-
unlock_page(page);
562+
folio_unlock(folio);
563563
return VM_FAULT_NOPAGE;
564564
}
565565

0 commit comments

Comments (0)