@@ -25,8 +25,7 @@ void erofs_put_metabuf(struct erofs_buf *buf)
 	buf->page = NULL;
 }
 
-void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
-		  enum erofs_kmap_type type)
+void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
 {
 	pgoff_t index = offset >> PAGE_SHIFT;
 	struct folio *folio = NULL;
@@ -43,10 +42,10 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
 		return folio;
 	}
 	buf->page = folio_file_page(folio, index);
-	if (!buf->base && type == EROFS_KMAP)
-		buf->base = kmap_local_page(buf->page);
-	if (type == EROFS_NO_KMAP)
+	if (!need_kmap)
 		return NULL;
+	if (!buf->base)
+		buf->base = kmap_local_page(buf->page);
 	return buf->base + (offset & ~PAGE_MASK);
 }
 
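For reference, a minimal caller sketch of the new boolean calling convention: need_kmap = true yields a kmapped pointer into the buffer, while need_kmap = false only pins the page (erofs_bread() then returns NULL on success). The helper name demo_read_le32 and the assumption that the field does not cross a page boundary are illustrative only, not part of this patch:

/*
 * Hypothetical caller (not in this patch): fetch a little-endian u32
 * at `off` from an already-initialized metadata buffer, relying on the
 * field staying within one page.
 */
static int demo_read_le32(struct erofs_buf *buf, erofs_off_t off, u32 *val)
{
	__le32 *p = erofs_bread(buf, off, true);

	if (IS_ERR(p))
		return PTR_ERR(p);
	*val = le32_to_cpu(*p);
	return 0;
}
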
@@ -65,64 +64,47 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
 }
 
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
-			 erofs_off_t offset, enum erofs_kmap_type type)
+			 erofs_off_t offset, bool need_kmap)
 {
 	erofs_init_metabuf(buf, sb);
-	return erofs_bread(buf, offset, type);
-}
-
-static int erofs_map_blocks_flatmode(struct inode *inode,
-				     struct erofs_map_blocks *map)
-{
-	struct erofs_inode *vi = EROFS_I(inode);
-	struct super_block *sb = inode->i_sb;
-	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
-	erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking;
-
-	map->m_flags = EROFS_MAP_MAPPED;	/* no hole in flat inodes */
-	if (map->m_la < erofs_pos(sb, lastblk)) {
-		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
-		map->m_plen = erofs_pos(sb, lastblk) - map->m_la;
-	} else {
-		DBG_BUGON(!tailendpacking);
-		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
-			vi->xattr_isize + erofs_blkoff(sb, map->m_la);
-		map->m_plen = inode->i_size - map->m_la;
-
-		/* inline data should be located in the same meta block */
-		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
-			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
-			DBG_BUGON(1);
-			return -EFSCORRUPTED;
-		}
-		map->m_flags |= EROFS_MAP_META;
-	}
-	return 0;
+	return erofs_bread(buf, offset, need_kmap);
 }
 
 int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 {
+	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 	struct super_block *sb = inode->i_sb;
+	unsigned int unit, blksz = sb->s_blocksize;
 	struct erofs_inode *vi = EROFS_I(inode);
 	struct erofs_inode_chunk_index *idx;
-	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
-	u64 chunknr;
-	unsigned int unit;
+	erofs_blk_t startblk, addrmask;
+	bool tailpacking;
 	erofs_off_t pos;
-	void *kaddr;
+	u64 chunknr;
 	int err = 0;
 
 	trace_erofs_map_blocks_enter(inode, map, 0);
 	map->m_deviceid = 0;
-	if (map->m_la >= inode->i_size) {
-		/* leave out-of-bound access unmapped */
-		map->m_flags = 0;
-		map->m_plen = map->m_llen;
+	map->m_flags = 0;
+	if (map->m_la >= inode->i_size)
 		goto out;
-	}
 
 	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
-		err = erofs_map_blocks_flatmode(inode, map);
+		tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+		if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
+			goto out;
+		pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);
+
+		map->m_flags = EROFS_MAP_MAPPED;
+		if (map->m_la < pos) {
+			map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
+			map->m_llen = pos - map->m_la;
+		} else {
+			map->m_pa = erofs_iloc(inode) + vi->inode_isize +
+				vi->xattr_isize + erofs_blkoff(sb, map->m_la);
+			map->m_llen = inode->i_size - map->m_la;
+			map->m_flags |= EROFS_MAP_META;
+		}
 		goto out;
 	}
 
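The flatmode helper is now folded into erofs_map_blocks() above. As a plain-C illustration of that mapping rule, here is a standalone sketch; every name in it (flat_map_pa, tail_start, blkszbits, and so on) is hypothetical, and only the arithmetic mirrors the kernel code:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the flat-inode rule: logical offsets below the start of the
 * tail map 1:1 from startblk; a tail-packed piece instead lives right
 * after the on-disk inode and xattrs within its metadata block.
 */
static uint64_t flat_map_pa(uint64_t la, uint64_t startblk, uint64_t iloc,
			    uint32_t inode_isize, uint32_t xattr_isize,
			    uint64_t tail_start, unsigned int blkszbits,
			    bool *is_meta)
{
	*is_meta = la >= tail_start;
	if (!*is_meta)		/* plain blocks: one linear shift */
		return (startblk << blkszbits) + la;
	/* tail: inode base + inode size + xattr size + offset in block */
	return iloc + inode_isize + xattr_isize +
	       (la & (((uint64_t)1 << blkszbits) - 1));
}
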
@@ -135,45 +117,44 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
 		    vi->xattr_isize, unit) + unit * chunknr;
 
-	kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
-	if (IS_ERR(kaddr)) {
-		err = PTR_ERR(kaddr);
+	idx = erofs_read_metabuf(&buf, sb, pos, true);
+	if (IS_ERR(idx)) {
+		err = PTR_ERR(idx);
 		goto out;
 	}
 	map->m_la = chunknr << vi->chunkbits;
-	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
-			    round_up(inode->i_size - map->m_la, sb->s_blocksize));
-
-	/* handle block map */
-	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
-		__le32 *blkaddr = kaddr;
-
-		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
-			map->m_flags = 0;
-		} else {
-			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
+	map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+			    round_up(inode->i_size - map->m_la, blksz));
+	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
+		addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
+			BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
+		startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
+			le32_to_cpu(idx->startblk_lo)) & addrmask;
+		if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
+			map->m_deviceid = le16_to_cpu(idx->device_id) &
+				EROFS_SB(sb)->device_id_mask;
+			map->m_pa = erofs_pos(sb, startblk);
+			map->m_flags = EROFS_MAP_MAPPED;
+		}
+	} else {
+		startblk = le32_to_cpu(*(__le32 *)idx);
+		if (startblk != (u32)EROFS_NULL_ADDR) {
+			map->m_pa = erofs_pos(sb, startblk);
 			map->m_flags = EROFS_MAP_MAPPED;
 		}
-		goto out_unlock;
-	}
-	/* parse chunk indexes */
-	idx = kaddr;
-	switch (le32_to_cpu(idx->blkaddr)) {
-	case EROFS_NULL_ADDR:
-		map->m_flags = 0;
-		break;
-	default:
-		map->m_deviceid = le16_to_cpu(idx->device_id) &
-			EROFS_SB(sb)->device_id_mask;
-		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
-		map->m_flags = EROFS_MAP_MAPPED;
-		break;
 	}
-out_unlock:
 	erofs_put_metabuf(&buf);
 out:
-	if (!err)
-		map->m_llen = map->m_plen;
+	if (!err) {
+		map->m_plen = map->m_llen;
+		/* inline data should be located in the same meta block */
+		if ((map->m_flags & EROFS_MAP_META) &&
+		    erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
+			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
+			DBG_BUGON(1);
+			return -EFSCORRUPTED;
+		}
+	}
 	trace_erofs_map_blocks_exit(inode, map, 0, err);
 	return err;
 }
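Since the chunk-index path above packs a 48-bit start block into 16-bit high and 32-bit low halves, here is a standalone sketch of that decode; the function and parameter names are made up for illustration, and only the bit manipulation follows the patch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Combine the high/low halves of a chunk-index start block, mask to the
 * format width (48-bit or legacy 32-bit), and treat an all-ones value
 * (EROFS_NULL_ADDR) under that mask as an unmapped hole.
 */
static bool decode_startblk(uint16_t startblk_hi, uint32_t startblk_lo,
			    bool is_48bit, uint64_t *startblk)
{
	uint64_t addrmask = is_48bit ? ((uint64_t)1 << 48) - 1
				     : ((uint64_t)1 << 32) - 1;

	*startblk = (((uint64_t)startblk_hi << 32) | startblk_lo) & addrmask;
	/* mapped iff it differs from the null marker within the mask */
	return ((*startblk ^ (uint64_t)-1) & addrmask) != 0;
}
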
@@ -192,7 +173,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
 {
 	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
 	struct erofs_device_info *dif;
-	erofs_off_t startoff, length;
+	erofs_off_t startoff;
 	int id;
 
 	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
@@ -205,7 +186,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
 		return -ENODEV;
 	}
 	if (devs->flatdev) {
-		map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
+		map->m_pa += erofs_pos(sb, dif->uniaddr);
 		up_read(&devs->rwsem);
 		return 0;
 	}
@@ -214,13 +195,12 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
 	} else if (devs->extra_devices && !devs->flatdev) {
 		down_read(&devs->rwsem);
 		idr_for_each_entry(&devs->tree, dif, id) {
-			if (!dif->mapped_blkaddr)
+			if (!dif->uniaddr)
 				continue;
 
-			startoff = erofs_pos(sb, dif->mapped_blkaddr);
-			length = erofs_pos(sb, dif->blocks);
+			startoff = erofs_pos(sb, dif->uniaddr);
 			if (map->m_pa >= startoff &&
-			    map->m_pa < startoff + length) {
+			    map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
 				map->m_pa -= startoff;
 				erofs_fill_from_devinfo(map, sb, dif);
 				break;
@@ -312,7 +292,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 
 		iomap->type = IOMAP_INLINE;
-		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
+		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, true);
 		if (IS_ERR(ptr))
 			return PTR_ERR(ptr);
 		iomap->inline_data = ptr;