@@ -89,15 +89,15 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 {
 	struct nilfs_dat_entry *entry;
-	void *kaddr;
+	size_t offset;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	nilfs_palloc_commit_alloc_entry(dat, req);
 	nilfs_dat_commit_entry(dat, req);
@@ -113,15 +113,15 @@ static void nilfs_dat_commit_free(struct inode *dat,
 				  struct nilfs_palloc_req *req)
 {
 	struct nilfs_dat_entry *entry;
-	void *kaddr;
+	size_t offset;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	nilfs_dat_commit_entry(dat, req);
 
@@ -143,14 +143,14 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
 			    sector_t blocknr)
 {
 	struct nilfs_dat_entry *entry;
-	void *kaddr;
+	size_t offset;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	nilfs_dat_commit_entry(dat, req);
 }
@@ -160,19 +160,19 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 	struct nilfs_dat_entry *entry;
 	__u64 start;
 	sector_t blocknr;
-	void *kaddr;
+	size_t offset;
 	int ret;
 
 	ret = nilfs_dat_prepare_entry(dat, req, 0);
 	if (ret < 0)
 		return ret;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	if (blocknr == 0) {
 		ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -200,19 +200,19 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	struct nilfs_dat_entry *entry;
 	__u64 start, end;
 	sector_t blocknr;
-	void *kaddr;
+	size_t offset;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	end = start = le64_to_cpu(entry->de_start);
 	if (!dead) {
 		end = nilfs_mdt_cno(dat);
 		WARN_ON(start > end);
 	}
 	entry->de_end = cpu_to_le64(end);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	if (blocknr == 0)
 		nilfs_dat_commit_free(dat, req);
@@ -225,14 +225,14 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
 	struct nilfs_dat_entry *entry;
 	__u64 start;
 	sector_t blocknr;
-	void *kaddr;
+	size_t offset;
 
-	kaddr = kmap_local_page(req->pr_entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
-					     req->pr_entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, req->pr_entry_nr,
+					   req->pr_entry_bh);
+	entry = kmap_local_folio(req->pr_entry_bh->b_folio, offset);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
 		nilfs_palloc_abort_free_entry(dat, req);
@@ -336,7 +336,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 {
 	struct buffer_head *entry_bh;
 	struct nilfs_dat_entry *entry;
-	void *kaddr;
+	size_t offset;
 	int ret;
 
 	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
@@ -359,21 +359,21 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 		}
 	}
 
-	kaddr = kmap_local_page(entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, vblocknr, entry_bh);
+	entry = kmap_local_folio(entry_bh->b_folio, offset);
 	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
 		nilfs_crit(dat->i_sb,
 			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
 			   __func__, (unsigned long long)vblocknr,
 			   (unsigned long long)le64_to_cpu(entry->de_start),
 			   (unsigned long long)le64_to_cpu(entry->de_end));
-		kunmap_local(kaddr);
+		kunmap_local(entry);
 		brelse(entry_bh);
 		return -EINVAL;
 	}
 	WARN_ON(blocknr == 0);
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 
 	mark_buffer_dirty(entry_bh);
 	nilfs_mdt_mark_dirty(dat);
@@ -407,7 +407,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 	struct buffer_head *entry_bh, *bh;
 	struct nilfs_dat_entry *entry;
 	sector_t blocknr;
-	void *kaddr;
+	size_t offset;
 	int ret;
 
 	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
@@ -423,8 +423,8 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 		}
 	}
 
-	kaddr = kmap_local_page(entry_bh->b_page);
-	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
+	offset = nilfs_palloc_entry_offset(dat, vblocknr, entry_bh);
+	entry = kmap_local_folio(entry_bh->b_folio, offset);
 	blocknr = le64_to_cpu(entry->de_blocknr);
 	if (blocknr == 0) {
 		ret = -ENOENT;
@@ -433,7 +433,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 	*blocknrp = blocknr;
 
  out:
-	kunmap_local(kaddr);
+	kunmap_local(entry);
 	brelse(entry_bh);
 	return ret;
 }
@@ -442,35 +442,41 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
 			    size_t nvi)
 {
 	struct buffer_head *entry_bh;
-	struct nilfs_dat_entry *entry;
+	struct nilfs_dat_entry *entry, *first_entry;
 	struct nilfs_vinfo *vinfo = buf;
 	__u64 first, last;
-	void *kaddr;
+	size_t offset;
 	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
+	unsigned int entry_size = NILFS_MDT(dat)->mi_entry_size;
 	int i, j, n, ret;
 
 	for (i = 0; i < nvi; i += n) {
 		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
 						   0, &entry_bh);
 		if (ret < 0)
 			return ret;
-		kaddr = kmap_local_page(entry_bh->b_page);
-		/* last virtual block number in this block */
+
 		first = vinfo->vi_vblocknr;
 		first = div64_ul(first, entries_per_block);
 		first *= entries_per_block;
+		/* first virtual block number in this block */
+
 		last = first + entries_per_block - 1;
+		/* last virtual block number in this block */
+
+		offset = nilfs_palloc_entry_offset(dat, first, entry_bh);
+		first_entry = kmap_local_folio(entry_bh->b_folio, offset);
 		for (j = i, n = 0;
 		     j < nvi && vinfo->vi_vblocknr >= first &&
 			     vinfo->vi_vblocknr <= last;
 		     j++, n++, vinfo = (void *)vinfo + visz) {
-			entry = nilfs_palloc_block_get_entry(
-				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
+			entry = (void *)first_entry +
+				(vinfo->vi_vblocknr - first) * entry_size;
 			vinfo->vi_start = le64_to_cpu(entry->de_start);
 			vinfo->vi_end = le64_to_cpu(entry->de_end);
 			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
 		}
-		kunmap_local(kaddr);
+		kunmap_local(first_entry);
 		brelse(entry_bh);
 	}
 
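
For reference, the sketch below mirrors the per-entry addressing that the reworked nilfs_dat_get_vinfo() loop above relies on: the first DAT entry of a block is mapped once at the offset computed for it, and every other entry in the same block is then reached by pointer arithmetic from that base. This is a minimal userspace approximation only; the entry layout, block size, and the entry_offset() helper are illustrative assumptions, not the kernel's definitions.

/*
 * Userspace sketch of the per-entry addressing used above.
 * Illustrative stand-ins, not the kernel's nilfs2 code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dat_entry {			/* simplified stand-in for nilfs_dat_entry */
	uint64_t de_blocknr;
	uint64_t de_start;
	uint64_t de_end;
	uint64_t de_rsv;
};

#define BLOCK_SIZE		4096UL	/* assumed metadata block size */
#define ENTRY_SIZE		sizeof(struct dat_entry)
#define ENTRIES_PER_BLOCK	(BLOCK_SIZE / ENTRY_SIZE)

/* byte offset of entry 'vblocknr' within its block (rough analogue of
 * what nilfs_palloc_entry_offset() is used for in the diff) */
static size_t entry_offset(uint64_t vblocknr)
{
	return (vblocknr % ENTRIES_PER_BLOCK) * ENTRY_SIZE;
}

int main(void)
{
	unsigned char *block = calloc(1, BLOCK_SIZE);	/* stands in for the mapped folio data */
	uint64_t vblocknr = 5, first, last;
	struct dat_entry *first_entry, *entry;

	if (!block)
		return 1;

	/* first and last virtual block numbers covered by this block */
	first = vblocknr / ENTRIES_PER_BLOCK * ENTRIES_PER_BLOCK;
	last = first + ENTRIES_PER_BLOCK - 1;

	/* "map" the first entry of the block once ... */
	first_entry = (struct dat_entry *)(block + entry_offset(first));

	/* ... then reach any entry in [first, last] by arithmetic, as in
	 * "entry = (void *)first_entry + (vblocknr - first) * entry_size"
	 * above (explicit byte-pointer cast used here for portable C) */
	entry = (struct dat_entry *)((unsigned char *)first_entry +
				     (vblocknr - first) * ENTRY_SIZE);
	entry->de_blocknr = 12345;

	printf("block covers vblocknr %llu..%llu, entry %llu at offset %zu\n",
	       (unsigned long long)first, (unsigned long long)last,
	       (unsigned long long)vblocknr,
	       (size_t)((unsigned char *)entry - block));
	free(block);
	return 0;
}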