@@ -176,7 +176,7 @@ static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
-			   CLST *new_lcn)
+			   CLST *new_lcn, CLST *new_len)
 {
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
@@ -196,20 +196,36 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
		if (err)
			goto out;

-		if (new_lcn && vcn == vcn0)
-			*new_lcn = lcn;
+		if (vcn == vcn0) {
+			/* Return the first fragment. */
+			if (new_lcn)
+				*new_lcn = lcn;
+			if (new_len)
+				*new_len = flen;
+		}

		/* Add new fragment into run storage. */
-		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
+		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

+		if (opt & ALLOCATE_ZERO) {
+			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
+
+			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
+						   (sector_t)lcn << shift,
+						   (sector_t)flen << shift,
+						   GFP_NOFS, 0);
+			if (err)
+				goto out;
+		}
+
		vcn += flen;

-		if (flen >= len || opt == ALLOCATE_MFT ||
+		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
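
The `==` to `&` changes above only make sense if ALLOCATE_OPT is now a set of bit flags rather than a plain enumeration. A minimal sketch of how the flags plausibly look after this patch; the three names are taken from the hunks in this diff, but the exact values and comments are assumptions, not part of the commit:

enum ALLOCATE_OPT {
	ALLOCATE_DEF = 0,	/* Plain allocation (no flag bits set). */
	ALLOCATE_MFT = 1,	/* Allocation for the $MFT. */
	ALLOCATE_ZERO = 2,	/* Zero out new clusters on disk. */
};

With such a layout, ALLOCATE_MFT and ALLOCATE_ZERO can be tested independently, which is what the new blkdev_issue_zeroout() branch relies on. The shift there converts clusters to 512-byte device sectors: with 4 KiB clusters, cluster_bits is 12 and SECTOR_SHIFT is 9, so shift is 3 and cluster lcn starts at sector lcn << 3 and spans flen << 3 sectors.
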
@@ -287,7 +303,8 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
-					     ALLOCATE_DEF, &alen, 0, NULL);
+					     ALLOCATE_DEF, &alen, 0, NULL,
+					     NULL);
		if (err)
			goto out1;

@@ -582,13 +599,13 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
-				is_mft ? ALLOCATE_MFT : 0, &alen,
+				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0
				       : (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
-				NULL);
+				NULL, NULL);
			if (err)
				goto out;
		}
@@ -886,8 +903,19 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
	return err;
 }

+/*
+ * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
+ *
+ * @new == NULL means just to get current mapping for 'vcn'
+ * @new != NULL means allocate real cluster if 'vcn' maps to hole
+ * @zero - zeroout new allocated clusters
+ *
+ * NOTE:
+ * - @new != NULL is called only for sparsed or compressed attributes.
+ * - new allocated clusters are zeroed via blkdev_issue_zeroout.
+ */
 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
-			CLST *len, bool *new)
+			CLST *len, bool *new, bool zero)
 {
	int err = 0;
	struct runs_tree *run = &ni->file.run;
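
The new comment pins down the contract, and the extra `bool zero` argument lets each caller decide whether freshly allocated clusters must hit the disk as zeroes. A hypothetical call site, illustrative only and not from this commit (`ni`, `vcn` and `clen` stand for a loaded ntfs_inode and a cluster range):

	CLST lcn, len;
	bool new = false;
	int err;

	/* Query-only mapping: @new == NULL, a hole comes back as SPARSE_LCN. */
	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, NULL, false);

	/* Allocate if 'vcn' maps to a hole; passing zero == false would only
	 * be safe if the caller immediately overwrites the whole range. */
	err = attr_data_get_block(ni, vcn, clen, &lcn, &len, &new, true);
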
@@ -896,29 +924,27 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
-	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
+	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
+	unsigned int fr;
	u64 total_size;
-	u32 clst_per_frame;
-	bool ok;

	if (new)
		*new = false;

+	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
-	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
+	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
+		*len = 0;
	up_read(&ni->file.run_lock);

-	if (ok && (*lcn != SPARSE_LCN || !new)) {
-		/* Normal way. */
-		return 0;
+	if (*len) {
+		if (*lcn != SPARSE_LCN || !new)
+			return 0; /* Fast normal way without allocation. */
+		else if (clen > *len)
+			clen = *len;
	}

-	if (!clen)
-		clen = 1;
-
-	if (ok && clen > *len)
-		clen = *len;
-
+	/* No cluster in cache or we need to allocate cluster in hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

@@ -944,12 +970,6 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
		goto out;
	}

-	clst_per_frame = 1u << attr_b->nres.c_unit;
-	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
-
-	if (vcn + to_alloc > asize)
-		to_alloc = asize - vcn;
-
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

@@ -968,36 +988,68 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

+	/* Load in cache actual information. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

-	if (!ok) {
-		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
-		if (ok && (*lcn != SPARSE_LCN || !new)) {
-			/* Normal way. */
-			err = 0;
-			goto ok;
-		}
+	if (!*len) {
+		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
+			if (*lcn != SPARSE_LCN || !new)
+				goto ok; /* Slow normal way without allocation. */

-		if (!ok && !new) {
-			*len = 0;
-			err = 0;
+			if (clen > *len)
+				clen = *len;
+		} else if (!new) {
+			/* Here we may return -ENOENT.
+			 * In any case caller gets zero length. */
			goto ok;
		}
-
-		if (ok && clen > *len) {
-			clen = *len;
-			to_alloc = (clen + clst_per_frame - 1) &
-				   ~(clst_per_frame - 1);
-		}
	}

	if (!is_attr_ext(attr_b)) {
+		/* The code below only for sparsed or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

+	vcn0 = vcn;
+	to_alloc = clen;
+	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
+	/* Allocate frame aligned clusters.
+	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
+	 * ntfs3 uses 1 cluster per frame for new created sparsed files. */
+	if (attr_b->nres.c_unit) {
+		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
+		CLST cmask = ~(clst_per_frame - 1);
+
+		/* Get frame aligned vcn and to_alloc. */
+		vcn = vcn0 & cmask;
+		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
+		if (fr < clst_per_frame)
+			fr = clst_per_frame;
+		zero = true;
+
+		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
+		if (vcn < svcn || evcn1 <= vcn) {
+			/* Load attribute for truncated vcn. */
+			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
+					    &vcn, &mi);
+			if (!attr) {
+				err = -EINVAL;
+				goto out;
+			}
+			svcn = le64_to_cpu(attr->nres.svcn);
+			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+			err = attr_load_runs(attr, ni, run, NULL);
+			if (err)
+				goto out;
+		}
+	}
+
+	if (vcn + to_alloc > asize)
+		to_alloc = asize - vcn;
+
	/* Get the last LCN to allocate from. */
	hint = 0;

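
The frame-alignment math added above rounds the requested range out to whole compression-unit frames. It can be sanity-checked in isolation; a self-contained userspace sketch with illustrative values (CLST is a 32-bit cluster number in ntfs3):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t c_unit = 4;			/* 16 clusters per frame, as ntfs.sys uses */
	uint32_t clst_per_frame = 1u << c_unit;
	uint32_t cmask = ~(clst_per_frame - 1);
	uint32_t vcn0 = 37, clen = 5;		/* caller asked for clusters 37..41 */

	uint32_t vcn = vcn0 & cmask;		/* round down to frame start: 32 */
	uint32_t to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;

	printf("vcn=%u to_alloc=%u\n", vcn, to_alloc);	/* vcn=32 to_alloc=16 */
	return 0;
}

Because allocation can now start below the requested vcn0, the next hunk re-resolves vcn0 after allocating and clips *len so it never reaches past the newly allocated end.
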
@@ -1011,18 +1063,33 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			hint = -1;
	}

-	err = attr_allocate_clusters(
-		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
-		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
-		lcn);
+	/* Allocate and zeroout new clusters. */
+	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
+				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
+				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;

-	end = vcn + *len;
-
+	end = vcn + alen;
	total_size = le64_to_cpu(attr_b->nres.total_size) +
-		     ((u64)*len << cluster_bits);
+		     ((u64)alen << cluster_bits);
+
+	if (vcn != vcn0) {
+		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
+			err = -EINVAL;
+			goto out;
+		}
+		if (*lcn == SPARSE_LCN) {
+			/* Internal error. Should not happened. */
+			WARN_ON(1);
+			err = -EINVAL;
+			goto out;
+		}
+		/* Check case when vcn0 + len overlaps new allocated clusters. */
+		if (vcn0 + *len > end)
+			*len = end - vcn0;
+	}

 repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
@@ -1547,7 +1614,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
-	CLST svcn, evcn1, next_svcn, lcn, len;
+	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

@@ -1623,8 +1690,9 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
	}

	err = attr_allocate_clusters(sbi, run, vcn + clst_data,
-				     hint + 1, len - clst_data, NULL, 0,
-				     &alen, 0, &lcn);
+				     hint + 1, len - clst_data, NULL,
+				     ALLOCATE_DEF, &alen, 0, NULL,
+				     NULL);
	if (err)
		goto out;
