
Commit 8e5ced7

dhowells authored and brauner committed
netfs, ceph: Revert "netfs: Remove deprecated use of PG_private_2 as a second writeback flag"
This reverts commit ae67831.

Revert the patch that removes the deprecated use of PG_private_2 in
netfslib for the moment, as Ceph is actually still using this to track
data copied to the cache.

Fixes: ae67831 ("netfs: Remove deprecated use of PG_private_2 as a second writeback flag")
Reported-by: Max Kellermann <[email protected]>
Signed-off-by: David Howells <[email protected]>
cc: Ilya Dryomov <[email protected]>
cc: Xiubo Li <[email protected]>
cc: Jeff Layton <[email protected]>
cc: Matthew Wilcox <[email protected]>
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
https://lore.kernel.org/r/[email protected]
Signed-off-by: Christian Brauner <[email protected]>
1 parent 86509e3 commit 8e5ced7
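As background, here is a rough sketch of the deprecated PG_private_2 ("second writeback flag") protocol that this revert restores: the flag is raised on a folio while its contents are being copied to the local cache, and anyone about to modify or reclaim the folio must wait for it to clear. The helper names below are illustrative only, not from this commit; folio_start_private_2(), folio_end_private_2() and folio_wait_private_2() are the real kernel primitives that appear in the diffs that follow.

static void start_copy_to_cache(struct folio *folio)
{
	folio_start_private_2(folio);	/* mark: copy to cache in flight */
	/* ... kick off the asynchronous write to the cache here ... */
}

static void copy_to_cache_terminated(struct folio *folio)
{
	folio_end_private_2(folio);	/* clear the flag and wake waiters */
}

static void before_modifying_folio(struct folio *folio)
{
	folio_wait_private_2(folio);	/* wait out any in-flight copy */
}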

4 files changed: 170 additions & 2 deletions

fs/ceph/addr.c

Lines changed: 18 additions & 1 deletion
@@ -498,6 +498,11 @@ const struct netfs_request_ops ceph_netfs_ops = {
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
+{
+	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
+}
+
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
 	struct inode *inode = priv;
@@ -515,6 +520,10 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b
 			       ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
+
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
@@ -706,6 +715,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 		len = wlen;
 
 	set_page_writeback(page);
+	if (caching)
+		ceph_set_page_fscache(page);
 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	if (IS_ENCRYPTED(inode)) {
@@ -789,6 +800,8 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 		return AOP_WRITEPAGE_ACTIVATE;
 	}
 
+	folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
+
 	err = writepage_nounlock(page, wbc);
 	if (err == -ERESTARTSYS) {
 		/* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1062,14 +1075,16 @@ static int ceph_writepages_start(struct address_space *mapping,
 				unlock_page(page);
 				break;
 			}
-			if (PageWriteback(page)) {
+			if (PageWriteback(page) ||
+			    PagePrivate2(page) /* [DEPRECATED] */) {
 				if (wbc->sync_mode == WB_SYNC_NONE) {
 					doutc(cl, "%p under writeback\n", page);
 					unlock_page(page);
 					continue;
 				}
 				doutc(cl, "waiting on writeback %p\n", page);
 				wait_on_page_writeback(page);
+				folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
 			}
 
 			if (!clear_page_dirty_for_io(page)) {
@@ -1254,6 +1269,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 			}
 
 			set_page_writeback(page);
+			if (caching)
+				ceph_set_page_fscache(page);
 			len += thp_size(page);
 		}
 		ceph_fscache_write_to_cache(inode, offset, len, caching);
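With this change, ceph_writepages_start() treats a page as busy not only while PG_writeback is set but also while PG_private_2 is set. A minimal illustration of the combined check (the helper name is hypothetical; PageWriteback() and PagePrivate2() are the real accessors used above):

static bool page_busy_for_writeback(struct page *page)
{
	/* Busy if under writeback or still being copied to the cache. */
	return PageWriteback(page) || PagePrivate2(page) /* [DEPRECATED] */;
}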

fs/netfs/buffered_read.c

Lines changed: 7 additions & 1 deletion
@@ -466,7 +466,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 	if (!netfs_is_cache_enabled(ctx) &&
 	    netfs_skip_folio_read(folio, pos, len, false)) {
 		netfs_stat(&netfs_n_rh_write_zskip);
-		goto have_folio;
+		goto have_folio_no_wait;
 	}
 
 	rreq = netfs_alloc_request(mapping, file,
@@ -507,6 +507,12 @@ int netfs_write_begin(struct netfs_inode *ctx,
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
+	if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) {
+		ret = folio_wait_private_2_killable(folio);
+		if (ret < 0)
+			goto error;
+	}
+have_folio_no_wait:
 	*_folio = folio;
 	_leave(" = 0");
 	return 0;
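The wait added to netfs_write_begin() only triggers for inodes that opt in via NETFS_ICTX_USE_PGPRIV2. A hypothetical sketch of a filesystem setting that flag at netfs-inode setup (the function name is illustrative; the flag and __set_bit() are real):

static void my_fs_init_netfs_ctx(struct netfs_inode *ctx)
{
	/* Opt this inode in to the deprecated PG_private_2 tracking so
	 * that netfs_write_begin() waits for pending copies to the cache.
	 */
	__set_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags);
}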

fs/netfs/io.c

Lines changed: 144 additions & 0 deletions
@@ -98,6 +98,146 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
+/*
+ * [DEPRECATED] Deal with the completion of writing the data to the cache.  We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+					  bool was_async)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	pgoff_t unlocked = 0;
+	bool have_unlocked = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+			if (xas_retry(&xas, folio))
+				continue;
+
+			/* We might have multiple writes from the same huge
+			 * folio, but we mustn't unlock a folio more than once.
+			 */
+			if (have_unlocked && folio->index <= unlocked)
+				continue;
+			unlocked = folio_next_index(folio) - 1;
+			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+			folio_end_private_2(folio);
+			have_unlocked = true;
+		}
+	}
+
+	rcu_read_unlock();
+	netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+				       bool was_async) /* [DEPRECATED] */
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error)) {
+		netfs_stat(&netfs_n_rh_write_failed);
+		trace_netfs_failure(rreq, subreq, transferred_or_error,
+				    netfs_fail_copy_to_cache);
+	} else {
+		netfs_stat(&netfs_n_rh_write_done);
+	}
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, was_async);
+
+	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * [DEPRECATED] Perform any outstanding writes to the cache.  We inherit a ref
+ * from the caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct netfs_io_subrequest *subreq, *next, *p;
+	struct iov_iter iter;
+	int ret;
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+	/* We don't want terminating writes trying to wake us up whilst we're
+	 * still going through the list.
+	 */
+	atomic_inc(&rreq->nr_copy_ops);
+
+	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+			list_del_init(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, false,
+					     netfs_sreq_trace_put_no_copy);
+		}
+	}
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		/* Amalgamate adjacent writes */
+		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+			next = list_next_entry(subreq, rreq_link);
+			if (next->start != subreq->start + subreq->len)
+				break;
+			subreq->len += next->len;
+			list_del_init(&next->rreq_link);
+			netfs_put_subrequest(next, false,
+					     netfs_sreq_trace_put_merged);
+		}
+
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       subreq->len, rreq->i_size, true);
+		if (ret < 0) {
+			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
+		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+				subreq->start, subreq->len);
+
+		atomic_inc(&rreq->nr_copy_ops);
+		netfs_stat(&netfs_n_rh_write);
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+		cres->ops->write(cres, subreq->start, &iter,
+				 netfs_rreq_copy_terminated, subreq);
+	}
+
+	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
+{
+	struct netfs_io_request *rreq =
+		container_of(work, struct netfs_io_request, work);
+
+	netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
+{
+	rreq->work.func = netfs_rreq_write_to_cache_work;
+	if (!queue_work(system_unbound_wq, &rreq->work))
+		BUG();
+}
+
 /*
  * Handle a short read.
  */
@@ -275,6 +415,10 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
+	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
+		return netfs_rreq_write_to_cache(rreq);
+
 	netfs_rreq_completed(rreq, was_async);
 }

include/trace/events/netfs.h

Lines changed: 1 addition & 0 deletions
@@ -145,6 +145,7 @@
 	EM(netfs_folio_trace_clear_g,		"clear-g")	\
 	EM(netfs_folio_trace_clear_s,		"clear-s")	\
 	EM(netfs_folio_trace_copy_to_cache,	"mark-copy")	\
+	EM(netfs_folio_trace_end_copy,		"end-copy")	\
 	EM(netfs_folio_trace_filled_gaps,	"filled-gaps")	\
 	EM(netfs_folio_trace_kill,		"kill")		\
 	EM(netfs_folio_trace_kill_cc,		"kill-cc")	\
