@@ -194,38 +194,6 @@ static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
 	return req;
 }
 
-static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
-{
-	struct inode *inode = folio->mapping->host;
-	struct nfs_page *head;
-	int ret;
-
-retry:
-	head = nfs_folio_find_head_request(folio);
-	if (!head)
-		return NULL;
-
-	while (!nfs_lock_request(head)) {
-		ret = nfs_wait_on_request(head);
-		if (ret < 0)
-			return ERR_PTR(ret);
-	}
-
-	/* Ensure that nobody removed the request before we locked it */
-	if (head != folio->private) {
-		nfs_unlock_and_release_request(head);
-		goto retry;
-	}
-
-	ret = nfs_cancel_remove_inode(head, inode);
-	if (ret < 0) {
-		nfs_unlock_and_release_request(head);
-		return ERR_PTR(ret);
-	}
-
-	return head;
-}
-
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct folio *folio, unsigned int offset,
 		unsigned int count)
@@ -532,26 +500,44 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
 	struct nfs_commit_info cinfo;
 	int ret;
 
-	nfs_init_cinfo_from_inode(&cinfo, inode);
 	/*
 	 * A reference is taken only on the head request which acts as a
 	 * reference to the whole page group - the group will not be destroyed
 	 * until the head reference is released.
 	 */
-	head = nfs_folio_find_and_lock_request(folio);
-	if (IS_ERR_OR_NULL(head))
-		return head;
+retry:
+	head = nfs_folio_find_head_request(folio);
+	if (!head)
+		return NULL;
 
-	/* lock each request in the page group */
-	ret = nfs_page_group_lock_subrequests(head);
-	if (ret < 0) {
+	while (!nfs_lock_request(head)) {
+		ret = nfs_wait_on_request(head);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+
+	/* Ensure that nobody removed the request before we locked it */
+	if (head != folio->private) {
 		nfs_unlock_and_release_request(head);
-		return ERR_PTR(ret);
+		goto retry;
 	}
 
-	nfs_join_page_group(head, &cinfo, inode);
+	ret = nfs_cancel_remove_inode(head, inode);
+	if (ret < 0)
+		goto out_unlock;
 
+	/* lock each request in the page group */
+	ret = nfs_page_group_lock_subrequests(head);
+	if (ret < 0)
+		goto out_unlock;
+
+	nfs_init_cinfo_from_inode(&cinfo, inode);
+	nfs_join_page_group(head, &cinfo, inode);
 	return head;
+
+out_unlock:
+	nfs_unlock_and_release_request(head);
+	return ERR_PTR(ret);
 }
 
 static void nfs_write_error(struct nfs_page *req, int error)
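
For readers less familiar with the pattern being folded in here, below is a hypothetical, self-contained user-space sketch of the find/lock/re-validate retry loop that the reworked nfs_lock_and_join_requests() now performs inline. It uses pthreads rather than the kernel's request locking primitives, and every name in it (find_head, lock_and_validate, folio_like, and so on) is an illustrative stand-in, not part of the NFS code.

/*
 * Sketch of the retry pattern: look up the current head, take its lock,
 * then confirm the folio still points at that head before doing any work,
 * restarting from the top if it was removed or replaced in the meantime.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct request {
	pthread_mutex_t lock;
};

struct folio_like {
	struct request *private;	/* may be cleared or replaced concurrently */
};

/* Stand-in for nfs_folio_find_head_request(): fetch the current head. */
static struct request *find_head(struct folio_like *f)
{
	return f->private;
}

/* Stand-in for the retry loop in nfs_lock_and_join_requests(): return the
 * head request locked, or NULL if the folio no longer has one. */
static struct request *lock_and_validate(struct folio_like *f)
{
	struct request *head;

retry:
	head = find_head(f);
	if (!head)
		return NULL;

	pthread_mutex_lock(&head->lock);

	/* Ensure that nobody removed the request before we locked it. */
	if (head != f->private) {
		pthread_mutex_unlock(&head->lock);
		goto retry;
	}

	return head;	/* returned with head->lock held */
}

int main(void)
{
	struct request req = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct folio_like folio = { .private = &req };
	struct request *head = lock_and_validate(&folio);

	printf("locked head: %s\n", head ? "yes" : "no");
	if (head)
		pthread_mutex_unlock(&head->lock);
	return 0;
}

In the patch itself the same shape appears with nfs_lock_request()/nfs_wait_on_request() in place of the mutex, and the error paths funnel through the new out_unlock label instead of returning directly.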