@@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
 	}
 }
 
-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
-			      unsigned int flags, struct iomap *iomap,
-			      struct iomap *srcmap)
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+				   loff_t length, unsigned int flags,
+				   struct iomap *iomap, struct iomap *srcmap)
 {
 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	loff_t isize;
 
-	/* All I/Os should always be within the file maximum size */
+	/*
+	 * All blocks are always mapped below EOF. If reading past EOF,
+	 * act as if there is a hole up to the file maximum size.
+	 */
+	mutex_lock(&zi->i_truncate_mutex);
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+	isize = i_size_read(inode);
+	if (iomap->offset >= isize) {
+		iomap->type = IOMAP_HOLE;
+		iomap->addr = IOMAP_NULL_ADDR;
+		iomap->length = length;
+	} else {
+		iomap->type = IOMAP_MAPPED;
+		iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+		iomap->length = isize - iomap->offset;
+	}
+	mutex_unlock(&zi->i_truncate_mutex);
+
+	trace_zonefs_iomap_begin(inode, iomap);
+
+	return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+	.iomap_begin	= zonefs_read_iomap_begin,
+};
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+				    loff_t length, unsigned int flags,
+				    struct iomap *iomap, struct iomap *srcmap)
+{
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+	struct super_block *sb = inode->i_sb;
+	loff_t isize;
+
+	/* All write I/Os should always be within the file maximum size */
 	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
 		return -EIO;
 
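Note: as a rough standalone model of the read-side mapping introduced above (illustrative only; `map_read_extent`, `struct extent` and `BLK_ALIGN_DOWN` are invented for this sketch and are not zonefs or iomap API), the decision amounts to: align the requested offset down to the block size, report a hole for anything at or beyond EOF, and otherwise report a mapped extent ending at EOF.

/* Illustrative model only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Round down to a power-of-two block boundary. */
#define BLK_ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

enum extent_type { EXTENT_HOLE, EXTENT_MAPPED };

struct extent {
	enum extent_type type;
	uint64_t offset;	/* block-aligned start */
	uint64_t length;	/* bytes covered */
};

/* Hole at or past EOF, mapped (and capped at EOF) below it. */
static struct extent map_read_extent(uint64_t offset, uint64_t length,
				     uint64_t isize, uint64_t blocksize)
{
	struct extent ext;

	ext.offset = BLK_ALIGN_DOWN(offset, blocksize);
	if (ext.offset >= isize) {
		ext.type = EXTENT_HOLE;
		ext.length = length;
	} else {
		ext.type = EXTENT_MAPPED;
		ext.length = isize - ext.offset;
	}
	return ext;
}

int main(void)
{
	/* 4 KiB blocks, 8 KiB of data written so far (isize = 8192). */
	struct extent below = map_read_extent(6000, 4096, 8192, 4096);
	struct extent past = map_read_extent(12288, 4096, 8192, 4096);

	printf("below EOF: %s offset=%llu length=%llu\n",
	       below.type == EXTENT_MAPPED ? "mapped" : "hole",
	       (unsigned long long)below.offset,
	       (unsigned long long)below.length);
	printf("past EOF:  %s offset=%llu length=%llu\n",
	       past.type == EXTENT_MAPPED ? "mapped" : "hole",
	       (unsigned long long)past.offset,
	       (unsigned long long)past.length);
	return 0;
}

With a 4 KiB block size and 8 KiB of data, the read at offset 6000 maps the range starting at 4096 up to EOF, while the read at 12288 comes back as a hole of the requested length.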
@@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	 * operation.
 	 */
 	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
-			 (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
+			 !(flags & IOMAP_DIRECT)))
 		return -EIO;
 
 	/*
@@ -137,47 +173,44 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	 * write pointer) and unwriten beyond.
 	 */
 	mutex_lock(&zi->i_truncate_mutex);
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
 	isize = i_size_read(inode);
-	if (offset >= isize)
+	if (iomap->offset >= isize) {
 		iomap->type = IOMAP_UNWRITTEN;
-	else
+		iomap->length = zi->i_max_size - iomap->offset;
+	} else {
 		iomap->type = IOMAP_MAPPED;
-	if (flags & IOMAP_WRITE)
-		length = zi->i_max_size - offset;
-	else
-		length = min(length, isize - offset);
+		iomap->length = isize - iomap->offset;
+	}
 	mutex_unlock(&zi->i_truncate_mutex);
 
-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-	iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
-	iomap->bdev = inode->i_sb->s_bdev;
-	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-
 	trace_zonefs_iomap_begin(inode, iomap);
 
 	return 0;
 }
 
-static const struct iomap_ops zonefs_iomap_ops = {
-	.iomap_begin	= zonefs_iomap_begin,
+static const struct iomap_ops zonefs_write_iomap_ops = {
+	.iomap_begin	= zonefs_write_iomap_begin,
 };
 
 static int zonefs_read_folio(struct file *unused, struct folio *folio)
 {
-	return iomap_read_folio(folio, &zonefs_iomap_ops);
+	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
 }
 
 static void zonefs_readahead(struct readahead_control *rac)
 {
-	iomap_readahead(rac, &zonefs_iomap_ops);
+	iomap_readahead(rac, &zonefs_read_iomap_ops);
 }
 
 /*
  * Map blocks for page writeback. This is used only on conventional zone files,
  * which implies that the page range can only be within the fixed inode size.
  */
-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
-			     struct inode *inode, loff_t offset)
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+				   struct inode *inode, loff_t offset)
 {
 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
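Note: the write path previously derived iomap->length from an ALIGN() round-up of offset + length; the rewrite above caps the length at EOF or the maximum file size directly and only needs the round-down for the extent start. For reference, a minimal standalone illustration of what the two rounding helpers do for power-of-two block sizes (the macros here are local stand-ins for the kernel's ALIGN()/ALIGN_DOWN(), and the numbers are arbitrary):

/* Standalone arithmetic illustration -- not kernel code. */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1UL))
#define ALIGN(x, a)		(((x) + (a) - 1UL) & ~((a) - 1UL))

int main(void)
{
	unsigned long blocksize = 4096;
	unsigned long start = ALIGN_DOWN(6000UL, blocksize);	/* 4096: round down to block start */
	unsigned long end = ALIGN(6000UL + 4096UL, blocksize);	/* 12288: round up to block end */

	printf("byte range [6000, 10096) covers blocks [%lu, %lu)\n", start, end);
	return 0;
}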
@@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
 	    offset < wpc->iomap.offset + wpc->iomap.length)
 		return 0;
 
-	return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
-				  IOMAP_WRITE, &wpc->iomap, NULL);
+	return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+					IOMAP_WRITE, &wpc->iomap, NULL);
 }
 
 static const struct iomap_writeback_ops zonefs_writeback_ops = {
-	.map_blocks	= zonefs_map_blocks,
+	.map_blocks	= zonefs_write_map_blocks,
 };
 
 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
@@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
 		return -EINVAL;
 	}
 
-	return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+	return iomap_swapfile_activate(sis, swap_file, span,
+				       &zonefs_read_iomap_ops);
 }
 
 static const struct address_space_operations zonefs_file_aops = {
@@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 
 	/* Serialize against truncates */
 	filemap_invalidate_lock_shared(inode->i_mapping);
-	ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
+	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
 	filemap_invalidate_unlock_shared(inode->i_mapping);
 
 	sb_end_pagefault(inode->i_sb);
@@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	if (append)
 		ret = zonefs_file_dio_append(iocb, from);
 	else
-		ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
 				   &zonefs_write_dio_ops, 0, NULL, 0);
 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
 	    (ret > 0 || ret == -EIOCBQUEUED)) {
@@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
 	if (ret <= 0)
 		goto inode_unlock;
 
-	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
 	if (ret > 0)
 		iocb->ki_pos += ret;
 	else if (ret == -EIO)
@@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			goto inode_unlock;
 		}
 		file_accessed(iocb->ki_filp);
-		ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
+		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
 				   &zonefs_read_dio_ops, 0, NULL, 0);
 	} else {
 		ret = generic_file_read_iter(iocb, to);
@@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode)
 
 		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
 
-			if (wro > sbi->s_max_wro_seq_files) {
+			if (sbi->s_max_wro_seq_files
+			    && wro > sbi->s_max_wro_seq_files) {
 				atomic_dec(&sbi->s_wro_seq_files);
 				ret = -EBUSY;
 				goto unlock;
@@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
 
 	atomic_set(&sbi->s_wro_seq_files, 0);
 	sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
-	if (!sbi->s_max_wro_seq_files &&
-	    sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
-		zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
-		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
-	}
-
 	atomic_set(&sbi->s_active_seq_files, 0);
 	sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
 
@@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
 	zonefs_info(sb, "Mounting %u zones",
 		    blkdev_nr_zones(sb->s_bdev->bd_disk));
 
+	if (!sbi->s_max_wro_seq_files &&
+	    !sbi->s_max_active_seq_files &&
+	    sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+		zonefs_info(sb,
+			"No open and active zone limits. Ignoring explicit_open mount option\n");
+		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+	}
+
 	/* Create root directory inode */
 	ret = -ENOMEM;
 	inode = new_inode(sb);