@@ -80,23 +80,22 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
80
80
struct address_space * cache_mapping , u64 index , int length ,
81
81
u64 read_start , u64 read_end , int page_count )
82
82
{
83
- struct page * head_to_cache = NULL , * tail_to_cache = NULL ;
83
+ struct folio * head_to_cache = NULL , * tail_to_cache = NULL ;
84
84
struct block_device * bdev = fullbio -> bi_bdev ;
85
85
int start_idx = 0 , end_idx = 0 ;
86
- struct bvec_iter_all iter_all ;
86
+ struct folio_iter fi ;
87
87
struct bio * bio = NULL ;
88
- struct bio_vec * bv ;
89
88
int idx = 0 ;
90
89
int err = 0 ;
91
90
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
92
- struct page * * cache_pages = kmalloc_array (page_count ,
91
+ struct folio * * cache_folios = kmalloc_array (page_count ,
93
92
sizeof (void * ), GFP_KERNEL | __GFP_ZERO );
94
93
#endif
95
94
96
- bio_for_each_segment_all ( bv , fullbio , iter_all ) {
97
- struct page * page = bv -> bv_page ;
95
+ bio_for_each_folio_all ( fi , fullbio ) {
96
+ struct folio * folio = fi . folio ;
98
97
99
- if (page -> mapping == cache_mapping ) {
98
+ if (folio -> mapping == cache_mapping ) {
100
99
idx ++ ;
101
100
continue ;
102
101
}
@@ -111,13 +110,13 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
111
110
* adjacent blocks.
112
111
*/
113
112
if (idx == 0 && index != read_start )
114
- head_to_cache = page ;
113
+ head_to_cache = folio ;
115
114
else if (idx == page_count - 1 && index + length != read_end )
116
- tail_to_cache = page ;
115
+ tail_to_cache = folio ;
117
116
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
118
117
/* Cache all pages in the BIO for repeated reads */
119
- else if (cache_pages )
120
- cache_pages [idx ] = page ;
118
+ else if (cache_folios )
119
+ cache_folios [idx ] = folio ;
121
120
#endif
122
121
123
122
if (!bio || idx != end_idx ) {
@@ -150,45 +149,45 @@ static int squashfs_bio_read_cached(struct bio *fullbio,
150
149
return err ;
151
150
152
151
if (head_to_cache ) {
153
- int ret = add_to_page_cache_lru ( head_to_cache , cache_mapping ,
152
+ int ret = filemap_add_folio ( cache_mapping , head_to_cache ,
154
153
read_start >> PAGE_SHIFT ,
155
154
GFP_NOIO );
156
155
157
156
if (!ret ) {
158
- SetPageUptodate (head_to_cache );
159
- unlock_page (head_to_cache );
157
+ folio_mark_uptodate (head_to_cache );
158
+ folio_unlock (head_to_cache );
160
159
}
161
160
162
161
}
163
162
164
163
if (tail_to_cache ) {
165
- int ret = add_to_page_cache_lru ( tail_to_cache , cache_mapping ,
164
+ int ret = filemap_add_folio ( cache_mapping , tail_to_cache ,
166
165
(read_end >> PAGE_SHIFT ) - 1 ,
167
166
GFP_NOIO );
168
167
169
168
if (!ret ) {
170
- SetPageUptodate (tail_to_cache );
171
- unlock_page (tail_to_cache );
169
+ folio_mark_uptodate (tail_to_cache );
170
+ folio_unlock (tail_to_cache );
172
171
}
173
172
}
174
173
175
174
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
176
- if (!cache_pages )
175
+ if (!cache_folios )
177
176
goto out ;
178
177
179
178
for (idx = 0 ; idx < page_count ; idx ++ ) {
180
- if (!cache_pages [idx ])
179
+ if (!cache_folios [idx ])
181
180
continue ;
182
- int ret = add_to_page_cache_lru ( cache_pages [idx ], cache_mapping ,
181
+ int ret = filemap_add_folio ( cache_mapping , cache_folios [idx ],
183
182
(read_start >> PAGE_SHIFT ) + idx ,
184
183
GFP_NOIO );
185
184
186
185
if (!ret ) {
187
- SetPageUptodate ( cache_pages [idx ]);
188
- unlock_page ( cache_pages [idx ]);
186
+ folio_mark_uptodate ( cache_folios [idx ]);
187
+ folio_unlock ( cache_folios [idx ]);
189
188
}
190
189
}
191
- kfree (cache_pages );
190
+ kfree (cache_folios );
192
191
out :
193
192
#endif
194
193
return 0 ;
0 commit comments