@@ -161,7 +161,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
 	return stream.total_out;
 }
 
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
 					   const struct object_id *oid)
 {
 	git_zstream stream;
@@ -185,7 +185,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
 			stream.next_out = obuf;
 			stream.avail_out = sizeof(obuf);
 			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
-			sha1write(f, obuf, stream.next_out - obuf);
+			hashwrite(f, obuf, stream.next_out - obuf);
 			olen += stream.next_out - obuf;
 		}
 		if (stream.avail_in)
@@ -230,7 +230,7 @@ static int check_pack_inflate(struct packed_git *p,
 		stream.total_in == len) ? 0 : -1;
 }
 
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
 		struct packed_git *p,
 		struct pack_window **w_curs,
 		off_t offset,
@@ -243,14 +243,14 @@ static void copy_pack_data(struct sha1file *f,
 		in = use_pack(p, w_curs, offset, &avail);
 		if (avail > len)
 			avail = (unsigned long)len;
-		sha1write(f, in, avail);
+		hashwrite(f, in, avail);
 		offset += avail;
 		len -= avail;
 	}
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
 					   unsigned long limit, int usable_delta)
 {
 	unsigned long size, datalen;
@@ -323,8 +323,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 			free(buf);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
-		sha1write(f, dheader + pos, sizeof(dheader) - pos);
+		hashwrite(f, header, hdrlen);
+		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
 		hdrlen += sizeof(dheader) - pos;
 	} else if (type == OBJ_REF_DELTA) {
 		/*
@@ -337,8 +337,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 			free(buf);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
-		sha1write(f, entry->delta->idx.oid.hash, 20);
+		hashwrite(f, header, hdrlen);
+		hashwrite(f, entry->delta->idx.oid.hash, 20);
 		hdrlen += 20;
 	} else {
 		if (limit && hdrlen + datalen + 20 >= limit) {
@@ -347,21 +347,21 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 			free(buf);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
+		hashwrite(f, header, hdrlen);
 	}
 	if (st) {
 		datalen = write_large_blob_data(st, f, &entry->idx.oid);
 		close_istream(st);
 	} else {
-		sha1write(f, buf, datalen);
+		hashwrite(f, buf, datalen);
 		free(buf);
 	}
 
 	return hdrlen + datalen;
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
 				unsigned long limit, int usable_delta)
 {
 	struct packed_git *p = entry->in_pack;
@@ -412,25 +412,25 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 			unuse_pack(&w_curs);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
-		sha1write(f, dheader + pos, sizeof(dheader) - pos);
+		hashwrite(f, header, hdrlen);
+		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
 		hdrlen += sizeof(dheader) - pos;
 		reused_delta++;
 	} else if (type == OBJ_REF_DELTA) {
 		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 			unuse_pack(&w_curs);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
-		sha1write(f, entry->delta->idx.oid.hash, 20);
+		hashwrite(f, header, hdrlen);
+		hashwrite(f, entry->delta->idx.oid.hash, 20);
 		hdrlen += 20;
 		reused_delta++;
 	} else {
 		if (limit && hdrlen + datalen + 20 >= limit) {
 			unuse_pack(&w_curs);
 			return 0;
 		}
-		sha1write(f, header, hdrlen);
+		hashwrite(f, header, hdrlen);
 	}
 	copy_pack_data(f, p, &w_curs, offset, datalen);
 	unuse_pack(&w_curs);
@@ -439,7 +439,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
 			  struct object_entry *entry,
 			  off_t write_offset)
 {
@@ -512,7 +512,7 @@ enum write_one_status {
 	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
 };
 
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
 				       struct object_entry *e,
 				       off_t *offset)
 {
@@ -731,7 +731,7 @@ static struct object_entry **compute_write_order(void)
 	return wo;
 }
 
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
 {
 	unsigned char buffer[8192];
 	off_t to_write, total;
@@ -762,7 +762,7 @@ static off_t write_reused_pack(struct sha1file *f)
 		if (read_pack > to_write)
 			read_pack = to_write;
 
-		sha1write(f, buffer, read_pack);
+		hashwrite(f, buffer, read_pack);
 		to_write -= read_pack;
 
 		/*
@@ -791,7 +791,7 @@ static const char no_split_warning[] = N_(
 static void write_pack_file(void)
 {
 	uint32_t i = 0, j;
-	struct sha1file *f;
+	struct hashfile *f;
 	off_t offset;
 	uint32_t nr_remaining = nr_result;
 	time_t last_mtime = 0;
@@ -807,7 +807,7 @@ static void write_pack_file(void)
 		char *pack_tmp_name = NULL;
 
 		if (pack_to_stdout)
-			f = sha1fd_throughput(1, "<stdout>", progress_state);
+			f = hashfd_throughput(1, "<stdout>", progress_state);
 		else
 			f = create_tmp_packfile(&pack_tmp_name);
 
@@ -834,11 +834,11 @@ static void write_pack_file(void)
 		 * If so, rewrite it like in fast-import
 		 */
 		if (pack_to_stdout) {
-			sha1close(f, oid.hash, CSUM_CLOSE);
+			hashclose(f, oid.hash, CSUM_CLOSE);
 		} else if (nr_written == nr_remaining) {
-			sha1close(f, oid.hash, CSUM_FSYNC);
+			hashclose(f, oid.hash, CSUM_FSYNC);
 		} else {
-			int fd = sha1close(f, oid.hash, 0);
+			int fd = hashclose(f, oid.hash, 0);
 			fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
 						 nr_written, oid.hash, offset);
 			close(fd);