@@ -409,33 +409,64 @@ static unsigned long write_object(struct sha1file *f,
 	return hdrlen + datalen;
 }
 
-static int write_one(struct sha1file *f,
-		     struct object_entry *e,
-		     off_t *offset)
+enum write_one_status {
+	WRITE_ONE_SKIP = -1, /* already written */
+	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+	WRITE_ONE_WRITTEN = 1, /* normal */
+	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+				       struct object_entry *e,
+				       off_t *offset)
 {
 	unsigned long size;
+	int recursing;
 
-	/* offset is non zero if object is written already. */
-	if (e->idx.offset || e->preferred_base)
-		return -1;
+	/*
+	 * we set offset to 1 (which is an impossible value) to mark
+	 * the fact that this object is involved in "write its base
+	 * first before writing a deltified object" recursion.
+	 */
+	recursing = (e->idx.offset == 1);
+	if (recursing) {
+		warning("recursive delta detected for object %s",
+			sha1_to_hex(e->idx.sha1));
+		return WRITE_ONE_RECURSIVE;
+	} else if (e->idx.offset || e->preferred_base) {
+		/* offset is non zero if object is written already. */
+		return WRITE_ONE_SKIP;
+	}
 
 	/* if we are deltified, write out base object first. */
-	if (e->delta && !write_one(f, e->delta, offset))
-		return 0;
+	if (e->delta) {
+		e->idx.offset = 1; /* now recurse */
+		switch (write_one(f, e->delta, offset)) {
+		case WRITE_ONE_RECURSIVE:
+			/* we cannot depend on this one */
+			e->delta = NULL;
+			break;
+		default:
+			break;
+		case WRITE_ONE_BREAK:
+			e->idx.offset = recursing;
+			return WRITE_ONE_BREAK;
+		}
+	}
 
 	e->idx.offset = *offset;
 	size = write_object(f, e, *offset);
 	if (!size) {
-		e->idx.offset = 0;
-		return 0;
+		e->idx.offset = recursing;
+		return WRITE_ONE_BREAK;
 	}
 	written_list[nr_written++] = &e->idx;
 
 	/* make sure off_t is sufficiently large not to wrap */
 	if (signed_add_overflows(*offset, size))
 		die("pack too large for current definition of off_t");
 	*offset += size;
-	return 1;
+	return WRITE_ONE_WRITTEN;
 }
 
 static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
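The key trick in this hunk: before descending into a delta base, write_one() temporarily sets the entry's idx.offset to 1. Since the 12-byte pack header precedes every object, no real object can start at offset 1, so re-entering an entry whose offset is already 1 means the delta chain loops back on itself. Below is a minimal, self-contained C sketch of that sentinel pattern; struct entry, write_entry, and the fake 10-byte object size are illustrative stand-ins, not git's types.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for git's object_entry: each entry may depend on a base. */
struct entry {
	const char *name;
	struct entry *base;   /* like e->delta: must be written first */
	size_t offset;        /* 0 = unwritten, 1 = sentinel "write in progress" */
};

enum status { SKIP = -1, BREAK = 0, WRITTEN = 1, RECURSIVE = 2 };

static enum status write_entry(struct entry *e, size_t *offset)
{
	if (e->offset == 1) {
		/* We re-entered an entry whose write is still in progress:
		 * the base chain loops back on itself. */
		fprintf(stderr, "cycle detected at %s\n", e->name);
		return RECURSIVE;
	}
	if (e->offset)
		return SKIP; /* already written */

	if (e->base) {
		e->offset = 1; /* plant the sentinel before recursing */
		switch (write_entry(e->base, offset)) {
		case RECURSIVE:
			e->base = NULL; /* break the cycle: write this one undeltified */
			break;
		case BREAK:
			e->offset = 0; /* roll the sentinel back; nothing was written */
			return BREAK;
		default:
			break;
		}
	}

	e->offset = *offset;
	*offset += 10; /* pretend every entry occupies 10 bytes */
	printf("wrote %s at %zu\n", e->name, e->offset);
	return WRITTEN;
}

int main(void)
{
	/* a -> b -> a forms a delta cycle */
	struct entry a = { "a", NULL, 0 }, b = { "b", &a, 0 };
	size_t offset = 2; /* any value that is neither 0 nor the sentinel 1 */
	a.base = &b;
	write_entry(&a, &offset); /* detects the cycle, writes b undeltified, then a */
	write_entry(&b, &offset); /* returns SKIP: b is already written */
	return 0;
}

Running the sketch prints the cycle warning once, then writes b (with its base pointer dropped) followed by a, mirroring what the real patch does: the member of the loop that gets caught is demoted to a non-delta object so the rest of the chain can still be written.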
@@ -640,7 +671,7 @@ static void write_pack_file(void)
 	nr_written = 0;
 	for (; i < nr_objects; i++) {
 		struct object_entry *e = write_order[i];
-		if (!write_one(f, e, &offset))
+		if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 			break;
 		display_progress(progress_state, written);
 	}
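A note on the caller change: the enum deliberately keeps the old numeric values (WRITE_ONE_SKIP = -1, WRITE_ONE_BREAK = 0, WRITE_ONE_WRITTEN = 1), so the old truth test `!write_one(...)` would still behave the same. Comparing against WRITE_ONE_BREAK explicitly instead documents the single state that should end the loop (the next object would bust the pack size limit) and keeps the new truthy WRITE_ONE_RECURSIVE state from ever being mistaken for a reason to stop.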