@@ -165,6 +165,21 @@ static int zonefs_writepages(struct address_space *mapping,
 	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
 }
 
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+				struct file *swap_file, sector_t *span)
+{
+	struct inode *inode = file_inode(swap_file);
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+	if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+		zonefs_err(inode->i_sb,
+			   "swap file: not a conventional zone file\n");
+		return -EINVAL;
+	}
+
+	return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+}
+
 static const struct address_space_operations zonefs_file_aops = {
 	.readpage		= zonefs_readpage,
 	.readahead		= zonefs_readahead,
@@ -177,6 +192,7 @@ static const struct address_space_operations zonefs_file_aops = {
 	.is_partially_uptodate	= iomap_is_partially_uptodate,
 	.error_remove_page	= generic_error_remove_page,
 	.direct_IO		= noop_direct_IO,
+	.swap_activate		= zonefs_swap_activate,
 };
 
 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
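With the two hunks above, a conventional zone file can be activated as swap space through iomap_swapfile_activate(), while any other zone type is rejected with -EINVAL. A minimal userspace sketch of the resulting behavior, assuming a hypothetical mount point /mnt/zonefs and that mkswap(8) has already been run on the conventional zone file cnv/0:

#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	/* Hypothetical path: first conventional zone file of the mount. */
	const char *swapfile = "/mnt/zonefs/cnv/0";

	/*
	 * Succeeds for a conventional zone file prepared with mkswap(8);
	 * the same call on a seq/ file fails, since zonefs_swap_activate()
	 * returns -EINVAL for non-conventional zones.
	 */
	if (swapon(swapfile, 0) < 0) {
		perror("swapon");
		return 1;
	}

	printf("swap enabled on %s\n", swapfile);
	return 0;
}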
@@ -727,6 +743,68 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	return ret;
 }
 
+/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under the
+ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+					loff_t count)
+{
+	struct inode *inode = file_inode(file);
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+	loff_t limit = rlimit(RLIMIT_FSIZE);
+	loff_t max_size = zi->i_max_size;
+
+	if (limit != RLIM_INFINITY) {
+		if (pos >= limit) {
+			send_sig(SIGXFSZ, current, 0);
+			return -EFBIG;
+		}
+		count = min(count, limit - pos);
+	}
+
+	if (!(file->f_flags & O_LARGEFILE))
+		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+	if (unlikely(pos >= max_size))
+		return -EFBIG;
+
+	return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+	loff_t count;
+
+	if (IS_SWAPFILE(inode))
+		return -ETXTBSY;
+
+	if (!iov_iter_count(from))
+		return 0;
+
+	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+		return -EINVAL;
+
+	if (iocb->ki_flags & IOCB_APPEND) {
+		if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+			return -EINVAL;
+		mutex_lock(&zi->i_truncate_mutex);
+		iocb->ki_pos = zi->i_wpoffset;
+		mutex_unlock(&zi->i_truncate_mutex);
+	}
+
+	count = zonefs_write_check_limits(file, iocb->ki_pos,
+					  iov_iter_count(from));
+	if (count < 0)
+		return count;
+
+	iov_iter_truncate(from, count);
+	return iov_iter_count(from);
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
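The IOCB_APPEND branch in zonefs_write_checks() above is what lets userspace append to a sequential zone file without tracking the write pointer itself: the kernel sets ki_pos to zi->i_wpoffset before the write proceeds. A hedged userspace sketch, assuming a hypothetical mount at /mnt/zonefs and a 4096-byte logical block size (sequential zone files must be written with block-aligned direct I/O):

#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BLK_SIZE 4096	/* assumed logical block size */

int main(void)
{
	void *buf;
	int fd;

	/* O_APPEND: the kernel places the write at the zone write pointer. */
	fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT | O_APPEND);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Direct I/O buffers must be aligned to the block size. */
	if (posix_memalign(&buf, BLK_SIZE, BLK_SIZE)) {
		close(fd);
		return 1;
	}
	memset(buf, 0x5a, BLK_SIZE);

	if (write(fd, buf, BLK_SIZE) != BLK_SIZE)
		perror("write");

	free(buf);
	close(fd);
	return 0;
}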
@@ -744,8 +822,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	struct super_block *sb = inode->i_sb;
 	bool sync = is_sync_kiocb(iocb);
 	bool append = false;
-	size_t count;
-	ssize_t ret;
+	ssize_t ret, count;
 
 	/*
 	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
@@ -763,12 +840,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 		inode_lock(inode);
 	}
 
-	ret = generic_write_checks(iocb, from);
-	if (ret <= 0)
+	count = zonefs_write_checks(iocb, from);
+	if (count <= 0) {
+		ret = count;
 		goto inode_unlock;
-
-	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-	count = iov_iter_count(from);
+	}
 
 	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
 		ret = -EINVAL;
@@ -828,12 +904,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
 		inode_lock(inode);
 	}
 
-	ret = generic_write_checks(iocb, from);
+	ret = zonefs_write_checks(iocb, from);
 	if (ret <= 0)
 		goto inode_unlock;
 
-	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-
 	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
 	if (ret > 0)
 		iocb->ki_pos += ret;
@@ -966,9 +1040,7 @@ static int zonefs_open_zone(struct inode *inode)
 
 	mutex_lock(&zi->i_truncate_mutex);
 
-	zi->i_wr_refcnt++;
-	if (zi->i_wr_refcnt == 1) {
-
+	if (!zi->i_wr_refcnt) {
 		if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
 			atomic_dec(&sbi->s_open_zones);
 			ret = -EBUSY;
@@ -978,14 +1050,15 @@ static int zonefs_open_zone(struct inode *inode)
 		if (i_size_read(inode) < zi->i_max_size) {
 			ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
 			if (ret) {
-				zi->i_wr_refcnt--;
 				atomic_dec(&sbi->s_open_zones);
 				goto unlock;
 			}
 			zi->i_flags |= ZONEFS_ZONE_OPEN;
 		}
 	}
 
+	zi->i_wr_refcnt++;
+
 unlock:
 	mutex_unlock(&zi->i_truncate_mutex);
 
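The zonefs_open_zone() change above increments i_wr_refcnt only after the explicit zone open has succeeded, so a failed open no longer leaves a stale write reference behind. From userspace, with the explicit-open mount option, exceeding the device's open zone limit surfaces as EBUSY on open(). A small illustrative sketch (the path and error handling are assumptions, not part of the patch):

#define _GNU_SOURCE	/* for O_DIRECT */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical sequential zone file under an explicit-open mount. */
	int fd = open("/mnt/zonefs/seq/1", O_WRONLY | O_DIRECT);

	if (fd < 0) {
		if (errno == EBUSY)
			fprintf(stderr, "too many zones open for writing\n");
		else
			perror("open");
		return 1;
	}

	/*
	 * ... write at the zone write pointer ...
	 * Closing the file releases the open zone for reuse.
	 */
	close(fd);
	return 0;
}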