@@ -93,12 +93,10 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
 		dreq->max_count = dreq_len;
 		if (dreq->count > dreq_len)
 			dreq->count = dreq_len;
-
-		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-			dreq->error = hdr->error;
-		else /* Clear outstanding error if this is EOF */
-			dreq->error = 0;
 	}
+
+	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
+		dreq->error = hdr->error;
 }
 
 static void
@@ -120,6 +118,18 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
 		dreq->count = dreq_len;
 }
 
+static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+					struct nfs_page *req)
+{
+	loff_t offs = req_offset(req);
+	size_t req_start = (size_t)(offs - dreq->io_start);
+
+	if (req_start < dreq->max_count)
+		dreq->max_count = req_start;
+	if (req_start < dreq->count)
+		dreq->count = req_start;
+}
+
 /**
  * nfs_swap_rw - NFS address space operation for swap I/O
  * @iocb: target I/O control block
@@ -488,7 +498,9 @@ static void nfs_direct_add_page_head(struct list_head *list,
 	kref_get(&head->wb_kref);
 }
 
-static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+static void nfs_direct_join_group(struct list_head *list,
+				  struct nfs_commit_info *cinfo,
+				  struct inode *inode)
 {
 	struct nfs_page *req, *subreq;
 
@@ -510,7 +522,7 @@ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
 				nfs_release_request(subreq);
 			}
 		} while ((subreq = subreq->wb_this_page) != req);
-		nfs_join_page_group(req, inode);
+		nfs_join_page_group(req, cinfo, inode);
 	}
 }
 
@@ -528,48 +540,56 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
 	struct nfs_pageio_descriptor desc;
-	struct nfs_page *req, *tmp;
+	struct nfs_page *req;
 	LIST_HEAD(reqs);
 	struct nfs_commit_info cinfo;
-	LIST_HEAD(failed);
 
 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
-	nfs_direct_join_group(&reqs, dreq->inode);
+	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
 
-	dreq->count = 0;
-	dreq->max_count = 0;
-	list_for_each_entry(req, &reqs, wb_list)
-		dreq->max_count += req->wb_bytes;
 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 	get_dreq(dreq);
 
 	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
 	desc.pg_dreq = dreq;
 
-	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+	while (!list_empty(&reqs)) {
+		req = nfs_list_entry(reqs.next);
 		/* Bump the transmission count */
 		req->wb_nio++;
 		if (!nfs_pageio_add_request(&desc, req)) {
-			nfs_list_move_request(req, &failed);
-			spin_lock(&cinfo.inode->i_lock);
-			dreq->flags = 0;
-			if (desc.pg_error < 0)
+			spin_lock(&dreq->lock);
+			if (dreq->error < 0) {
+				desc.pg_error = dreq->error;
+			} else if (desc.pg_error != -EAGAIN) {
+				dreq->flags = 0;
+				if (!desc.pg_error)
+					desc.pg_error = -EIO;
 				dreq->error = desc.pg_error;
-			else
-				dreq->error = -EIO;
-			spin_unlock(&cinfo.inode->i_lock);
+			} else
+				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+			spin_unlock(&dreq->lock);
+			break;
 		}
 		nfs_release_request(req);
 	}
 	nfs_pageio_complete(&desc);
 
-	while (!list_empty(&failed)) {
-		req = nfs_list_entry(failed.next);
+	while (!list_empty(&reqs)) {
+		req = nfs_list_entry(reqs.next);
 		nfs_list_remove_request(req);
 		nfs_unlock_and_release_request(req);
+		if (desc.pg_error == -EAGAIN) {
+			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+		} else {
+			spin_lock(&dreq->lock);
+			nfs_direct_truncate_request(dreq, req);
+			spin_unlock(&dreq->lock);
+			nfs_release_request(req);
+		}
 	}
 
 	if (put_dreq(dreq))
@@ -589,8 +609,6 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 	if (status < 0) {
 		/* Errors in commit are fatal */
 		dreq->error = status;
-		dreq->max_count = 0;
-		dreq->count = 0;
 		dreq->flags = NFS_ODIRECT_DONE;
 	} else {
 		status = dreq->error;
@@ -601,15 +619,20 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
-		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
+		if (status < 0) {
+			spin_lock(&dreq->lock);
+			nfs_direct_truncate_request(dreq, req);
+			spin_unlock(&dreq->lock);
+			nfs_release_request(req);
+		} else if (!nfs_write_match_verf(verf, req)) {
 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 			/*
 			 * Despite the reboot, the write was successful,
 			 * so reset wb_nio.
 			 */
 			req->wb_nio = 0;
 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
-		} else /* Error or match */
+		} else
 			nfs_release_request(req);
 		nfs_unlock_and_release_request(req);
 	}
@@ -662,6 +685,7 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
 	while (!list_empty(&reqs)) {
 		req = nfs_list_entry(reqs.next);
 		nfs_list_remove_request(req);
+		nfs_direct_truncate_request(dreq, req);
 		nfs_release_request(req);
 		nfs_unlock_and_release_request(req);
 	}
@@ -711,7 +735,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	}
 
 	nfs_direct_count_bytes(dreq, hdr);
-	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
+	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
+	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
 		if (!dreq->flags)
 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
 		flags = dreq->flags;
@@ -755,18 +780,23 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
 static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
 {
 	struct nfs_direct_req *dreq = hdr->dreq;
+	struct nfs_page *req;
+	struct nfs_commit_info cinfo;
 
 	trace_nfs_direct_write_reschedule_io(dreq);
 
+	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 	spin_lock(&dreq->lock);
-	if (dreq->error == 0) {
+	if (dreq->error == 0)
 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
-		/* fake unstable write to let common nfs resend pages */
-		hdr->verf.committed = NFS_UNSTABLE;
-		hdr->good_bytes = hdr->args.offset + hdr->args.count -
-			hdr->io_start;
-	}
+	set_bit(NFS_IOHDR_REDO, &hdr->flags);
 	spin_unlock(&dreq->lock);
+	while (!list_empty(&hdr->pages)) {
+		req = nfs_list_entry(hdr->pages.next);
+		nfs_list_remove_request(req);
+		nfs_unlock_request(req);
+		nfs_mark_request_commit(req, NULL, &cinfo, 0);
+	}
 }
 
 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
@@ -794,9 +824,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 {
 	struct nfs_pageio_descriptor desc;
 	struct inode *inode = dreq->inode;
+	struct nfs_commit_info cinfo;
 	ssize_t result = 0;
 	size_t requested_bytes = 0;
 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+	bool defer = false;
 
 	trace_nfs_direct_write_schedule_iovec(dreq);
 
@@ -837,17 +869,37 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 				break;
 			}
 
-			nfs_lock_request(req);
-			if (!nfs_pageio_add_request(&desc, req)) {
-				result = desc.pg_error;
-				nfs_unlock_and_release_request(req);
-				break;
-			}
 			pgbase = 0;
 			bytes -= req_len;
 			requested_bytes += req_len;
 			pos += req_len;
 			dreq->bytes_left -= req_len;
+
+			if (defer) {
+				nfs_mark_request_commit(req, NULL, &cinfo, 0);
+				continue;
+			}
+
+			nfs_lock_request(req);
+			if (nfs_pageio_add_request(&desc, req))
+				continue;
+
+			/* Exit on hard errors */
+			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
+				result = desc.pg_error;
+				nfs_unlock_and_release_request(req);
+				break;
+			}
+
+			/* If the error is soft, defer remaining requests */
+			nfs_init_cinfo_from_dreq(&cinfo, dreq);
+			spin_lock(&dreq->lock);
+			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+			spin_unlock(&dreq->lock);
+			nfs_unlock_request(req);
+			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+			desc.pg_error = 0;
+			defer = true;
 		}
 		nfs_direct_release_pages(pagevec, npages);
 		kvfree(pagevec);