@@ -98,6 +98,146 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
+/*
+ * [DEPRECATED] Deal with the completion of writing the data to the cache. We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+					  bool was_async)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	pgoff_t unlocked = 0;
+	bool have_unlocked = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+			if (xas_retry(&xas, folio))
+				continue;
+
+			/* We might have multiple writes from the same huge
+			 * folio, but we mustn't unlock a folio more than once.
+			 */
+			if (have_unlocked && folio->index <= unlocked)
+				continue;
+			unlocked = folio_next_index(folio) - 1;
+			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+			folio_end_private_2(folio);
+			have_unlocked = true;
+		}
+	}
+
+	rcu_read_unlock();
+	netfs_rreq_completed(rreq, was_async);
+}
+
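+/*
+ * [DEPRECATED] Handle the termination of a write to the cache for one
+ * subrequest.  When the last outstanding write completes (nr_copy_ops drops
+ * to zero), the PG_fscache marks are cleared and the request is completed.
+ */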
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+				       bool was_async) /* [DEPRECATED] */
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error)) {
+		netfs_stat(&netfs_n_rh_write_failed);
+		trace_netfs_failure(rreq, subreq, transferred_or_error,
+				    netfs_fail_copy_to_cache);
+	} else {
+		netfs_stat(&netfs_n_rh_write_done);
+	}
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, was_async);
+
+	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
+ * from the caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct netfs_io_subrequest *subreq, *next, *p;
+	struct iov_iter iter;
+	int ret;
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+	/* We don't want terminating writes trying to wake us up whilst we're
+	 * still going through the list.
+	 */
+	atomic_inc(&rreq->nr_copy_ops);
+
+	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+			list_del_init(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, false,
+					     netfs_sreq_trace_put_no_copy);
+		}
+	}
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		/* Amalgamate adjacent writes */
+		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+			next = list_next_entry(subreq, rreq_link);
+			if (next->start != subreq->start + subreq->len)
+				break;
+			subreq->len += next->len;
+			list_del_init(&next->rreq_link);
+			netfs_put_subrequest(next, false,
+					     netfs_sreq_trace_put_merged);
+		}
+
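+		/* Let the cache adjust or reject the proposed range (e.g. to
+		 * round it to its own block size); a negative return skips
+		 * this subrequest rather than failing the whole request.
+		 */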
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       subreq->len, rreq->i_size, true);
+		if (ret < 0) {
+			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
+		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+				subreq->start, subreq->len);
+
+		atomic_inc(&rreq->nr_copy_ops);
+		netfs_stat(&netfs_n_rh_write);
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+		cres->ops->write(cres, subreq->start, &iter,
+				 netfs_rreq_copy_terminated, subreq);
+	}
+
+	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, false);
+}
+
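+/*
+ * [DEPRECATED] Issue the copy-to-cache writes from a workqueue so that they
+ * run in process context rather than in the (possibly softirq) context in
+ * which the read completed.
+ */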
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
+{
+	struct netfs_io_request *rreq =
+		container_of(work, struct netfs_io_request, work);
+
+	netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
+{
+	rreq->work.func = netfs_rreq_write_to_cache_work;
+	if (!queue_work(system_unbound_wq, &rreq->work))
+		BUG();
+}
+
 /*
  * Handle a short read.
  */
@@ -275,6 +415,10 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
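+	/* [DEPRECATED] If the content is marked for copying to the cache and
+	 * the old PG_private_2 ("PG_fscache") marking scheme is in use, hand
+	 * the request over to the copy-to-cache machinery, which will
+	 * complete it once the writes have terminated.
+	 */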
+	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
+		return netfs_rreq_write_to_cache(rreq);
+
 	netfs_rreq_completed(rreq, was_async);
 }
 
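For context, here is a minimal sketch (not part of this patch) of the PG_private_2 ("PG_fscache") marking protocol that the deprecated path above completes. The wrapper function names are hypothetical; folio_start_private_2(), folio_wait_private_2() and folio_end_private_2() are the deprecated <linux/pagemap.h> primitives, the last of which is what netfs_rreq_unmark_after_write() calls.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/pagemap.h>

/* Read side: once data has been read into a folio that should also be copied
 * to the cache, mark it.  folio_start_private_2() takes a folio ref and sets
 * PG_private_2; netfs_rreq_unmark_after_write() drops the mark again with
 * folio_end_private_2() when the cache write terminates.
 */
static void example_mark_folio_for_copy(struct folio *folio)
{
	folio_start_private_2(folio);
}

/* Write/invalidate side: anything that wants to modify or release the folio
 * must first wait for any in-flight copy to the cache to finish.
 */
static void example_wait_for_copy(struct folio *folio)
{
	folio_wait_private_2(folio);
}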