@@ -231,7 +231,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 	struct ntfs_sb_info *sbi;
 	struct ATTRIB *attr_s;
 	struct MFT_REC *rec;
-	u32 used, asize, rsize, aoff, align;
+	u32 used, asize, rsize, aoff;
 	bool is_data;
 	CLST len, alen;
 	char *next;
@@ -252,10 +252,13 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 	rsize = le32_to_cpu(attr->res.data_size);
 	is_data = attr->type == ATTR_DATA && !attr->name_len;
 
-	align = sbi->cluster_size;
-	if (is_attr_compressed(attr))
-		align <<= COMPRESSION_UNIT;
-	len = (rsize + align - 1) >> sbi->cluster_bits;
+	/* len - how many clusters required to store 'rsize' bytes */
+	if (is_attr_compressed(attr)) {
+		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
+		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
+	} else {
+		len = bytes_to_cluster(sbi, rsize);
+	}
 
 	run_init(run);
 
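Worked example of the new length calculation, as a sketch with assumed values (4 KiB clusters, i.e. cluster_bits == 12, and NTFS_LZNT_CUNIT == 4, i.e. 16-cluster compression units); none of these numbers come from the patch itself:

	u32 rsize = 10000;				/* resident payload in bytes (assumed) */
	u8 shift = 12 + 4;				/* cluster_bits + NTFS_LZNT_CUNIT (assumed) */
	u32 units = (rsize + (1u << shift) - 1) >> shift;	/* rounds up to 1 compression unit */
	u32 len = units << 4;				/* 16 clusters reserved */

For the uncompressed case the patch simply rounds rsize up to whole clusters via bytes_to_cluster() instead of the old open-coded (rsize + align - 1) >> cluster_bits.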
@@ -285,22 +288,21 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
 			if (err)
 				goto out2;
 		} else if (!page) {
-			char *kaddr;
-
-			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
-			if (!page) {
-				err = -ENOMEM;
+			struct address_space *mapping = ni->vfs_inode.i_mapping;
+			struct folio *folio;
+
+			folio = __filemap_get_folio(
+				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+				mapping_gfp_mask(mapping));
+			if (IS_ERR(folio)) {
+				err = PTR_ERR(folio);
 				goto out2;
 			}
-			kaddr = kmap_atomic(page);
-			memcpy(kaddr, data, rsize);
-			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
-			kunmap_atomic(kaddr);
-			flush_dcache_page(page);
-			SetPageUptodate(page);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
+			folio_fill_tail(folio, 0, data, rsize);
+			folio_mark_uptodate(folio);
+			folio_mark_dirty(folio);
+			folio_unlock(folio);
+			folio_put(folio);
 		}
 	}
 
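folio_fill_tail() condenses the removed kmap_atomic()/memcpy()/memset()/flush_dcache_page() sequence into one helper that copies the resident payload and zero-fills the rest of the folio. A rough, non-authoritative equivalent for an order-0, non-highmem folio (the real helper in <linux/highmem.h> also handles highmem and large folios) might look like:

	char *kaddr = kmap_local_folio(folio, 0);

	memcpy(kaddr, data, rsize);				/* resident payload */
	memset(kaddr + rsize, 0, folio_size(folio) - rsize);	/* zero the tail */
	kunmap_local(kaddr);
	flush_dcache_folio(folio);

__filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT mirrors what grab_cache_page() did, but it reports failure through ERR_PTR() rather than NULL, hence the IS_ERR()/PTR_ERR() handling above.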
@@ -670,7 +672,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			goto undo_2;
 		}
 
-		if (!is_mft)
+		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
+		if (ni->mi.rno != MFT_REC_MFT)
 			run_truncate_head(run, evcn + 1);
 
 		svcn = le64_to_cpu(attr->nres.svcn);
@@ -972,6 +975,19 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 	if (err)
 		goto out;
 
+	/* Check for compressed frame. */
+	err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint);
+	if (err)
+		goto out;
+
+	if (hint) {
+		/* if frame is compressed - don't touch it. */
+		*lcn = COMPRESSED_LCN;
+		*len = hint;
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
 	if (!*len) {
 		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
 			if (*lcn != SPARSE_LCN || !new)
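The frame index passed to attr_is_frame_compressed() is just the virtual cluster number divided by the compression-unit size. Illustration only, assuming NTFS_LZNT_CUNIT == 4 (16-cluster frames):

	CLST vcn = 37;
	CLST frame = vcn >> NTFS_LZNT_CUNIT;	/* frame 2, covering clusters 32..47 */

A nonzero 'hint' coming back means the requested cluster lies inside a compressed frame, so the function reports COMPRESSED_LCN together with -EOPNOTSUPP instead of handing out an individual cluster from that frame.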
@@ -1223,11 +1239,12 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
 	goto out;
 }
 
-int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
+int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
 {
 	u64 vbo;
 	struct ATTRIB *attr;
 	u32 data_size;
+	size_t len;
 
 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
 	if (!attr)
@@ -1236,30 +1253,20 @@ int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
 	if (attr->non_res)
 		return E_NTFS_NONRESIDENT;
 
-	vbo = page->index << PAGE_SHIFT;
+	vbo = folio->index << PAGE_SHIFT;
 	data_size = le32_to_cpu(attr->res.data_size);
-	if (vbo < data_size) {
-		const char *data = resident_data(attr);
-		char *kaddr = kmap_atomic(page);
-		u32 use = data_size - vbo;
-
-		if (use > PAGE_SIZE)
-			use = PAGE_SIZE;
+	if (vbo > data_size)
+		len = 0;
+	else
+		len = min(data_size - vbo, folio_size(folio));
 
-		memcpy(kaddr, data + vbo, use);
-		memset(kaddr + use, 0, PAGE_SIZE - use);
-		kunmap_atomic(kaddr);
-		flush_dcache_page(page);
-		SetPageUptodate(page);
-	} else if (!PageUptodate(page)) {
-		zero_user_segment(page, 0, PAGE_SIZE);
-		SetPageUptodate(page);
-	}
+	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
+	folio_mark_uptodate(folio);
 
 	return 0;
 }
 
-int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
+int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
 {
 	u64 vbo;
 	struct mft_inode *mi;
@@ -1275,17 +1282,13 @@ int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
 		return E_NTFS_NONRESIDENT;
 	}
 
-	vbo = page->index << PAGE_SHIFT;
+	vbo = folio->index << PAGE_SHIFT;
 	data_size = le32_to_cpu(attr->res.data_size);
 	if (vbo < data_size) {
 		char *data = resident_data(attr);
-		char *kaddr = kmap_atomic(page);
-		u32 use = data_size - vbo;
+		size_t len = min(data_size - vbo, folio_size(folio));
 
-		if (use > PAGE_SIZE)
-			use = PAGE_SIZE;
-		memcpy(data + vbo, kaddr, use);
-		kunmap_atomic(kaddr);
+		memcpy_from_folio(data + vbo, folio, 0, len);
 		mi->dirty = true;
 	}
 	ni->i_valid = data_size;
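memcpy_from_folio() is the mirror image of folio_fill_tail(): it copies 'len' bytes out of the folio into the resident attribute buffer, replacing the kmap_atomic()/memcpy()/kunmap_atomic() triple. A rough equivalent for an order-0, non-highmem folio (illustration only; the real helper also copes with highmem and copies that span pages of a large folio):

	char *kaddr = kmap_local_folio(folio, 0);

	memcpy(data + vbo, kaddr, len);
	kunmap_local(kaddr);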
@@ -1378,7 +1381,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
 	u32 voff;
 	u8 bytes_per_off;
 	char *addr;
-	struct page *page;
+	struct folio *folio;
 	int i, err;
 	__le32 *off32;
 	__le64 *off64;
@@ -1423,18 +1426,18 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
 
 	wof_size = le64_to_cpu(attr->nres.data_size);
 	down_write(&ni->file.run_lock);
-	page = ni->file.offs_page;
-	if (!page) {
-		page = alloc_page(GFP_KERNEL);
-		if (!page) {
+	folio = ni->file.offs_folio;
+	if (!folio) {
+		folio = folio_alloc(GFP_KERNEL, 0);
+		if (!folio) {
 			err = -ENOMEM;
 			goto out;
 		}
-		page->index = -1;
-		ni->file.offs_page = page;
+		folio->index = -1;
+		ni->file.offs_folio = folio;
 	}
-	lock_page(page);
-	addr = page_address(page);
+	folio_lock(folio);
+	addr = folio_address(folio);
 
 	if (vbo[1]) {
 		voff = vbo[1] & (PAGE_SIZE - 1);
@@ -1450,7 +1453,8 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
 	do {
 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
 
-		if (index != page->index) {
+		if (index != folio->index) {
+			struct page *page = &folio->page;
 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
 			u64 to = min(from + PAGE_SIZE, wof_size);
 
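The temporary 'struct page *page = &folio->page;' above exists only because ntfs_bio_pages() still takes an array of struct page pointers. For an order-0 folio (and this one is allocated with folio_alloc(GFP_KERNEL, 0)) that is the single page backing the folio; illustration of the equivalent accessor:

	struct page *p1 = &folio->page;		/* as in the hunk above */
	struct page *p2 = folio_page(folio, 0);	/* same page via the accessor */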
@@ -1463,10 +1467,10 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
 					     to - from, REQ_OP_READ);
 			if (err) {
-				page->index = -1;
+				folio->index = -1;
 				goto out1;
 			}
-			page->index = index;
+			folio->index = index;
 		}
 
 		if (i) {
@@ -1504,7 +1508,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
 		*ondisk_size = off[1] - off[0];
 
 out1:
-	unlock_page(page);
+	folio_unlock(folio);
 out:
 	up_write(&ni->file.run_lock);
 	return err;
@@ -1722,6 +1726,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
 
 	attr_b->nres.total_size = cpu_to_le64(total_size);
 	inode_set_bytes(&ni->vfs_inode, total_size);
+	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
 
 	mi_b->dirty = true;
 	mark_inode_dirty(&ni->vfs_inode);
@@ -2356,8 +2361,13 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
 	}
 
-	if (vbo > data_size) {
-		/* Insert range after the file size is not allowed. */
+	if (vbo >= data_size) {
+		/*
+		 * Insert range after the file size is not allowed.
+		 * If the offset is equal to or greater than the end of
+		 * file, an error is returned. For such operations (i.e., inserting
+		 * a hole at the end of file), ftruncate(2) should be used.
+		 */
 		return -EINVAL;
 	}
 
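From user space, the tightened check means an insert-range request whose offset is at or beyond EOF now fails with EINVAL on ntfs3, matching the comment's advice to grow the file with ftruncate(2) instead. A hedged sketch with a hypothetical helper (offsets and lengths must still be block-aligned for FALLOC_FL_INSERT_RANGE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

/* Hypothetical helper: insert a hole of 'len' bytes at 'off', or simply
 * extend the file when the offset is at/after EOF. */
static int insert_or_grow(int fd, off_t off, off_t len)
{
	struct stat st;

	if (fstat(fd, &st))
		return -1;
	if (off >= st.st_size)
		return ftruncate(fd, st.st_size + len);
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
}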