@@ -409,33 +409,64 @@ static unsigned long write_object(struct sha1file *f,
 	return hdrlen + datalen;
 }
 
-static int write_one(struct sha1file *f,
-		     struct object_entry *e,
-		     off_t *offset)
+enum write_one_status {
+	WRITE_ONE_SKIP = -1, /* already written */
+	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+	WRITE_ONE_WRITTEN = 1, /* normal */
+	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+				       struct object_entry *e,
+				       off_t *offset)
 {
 	unsigned long size;
+	int recursing;
 
-	/* offset is non zero if object is written already. */
-	if (e->idx.offset || e->preferred_base)
-		return -1;
+	/*
+	 * we set offset to 1 (which is an impossible value) to mark
+	 * the fact that this object is involved in "write its base
+	 * first before writing a deltified object" recursion.
+	 */
+	recursing = (e->idx.offset == 1);
+	if (recursing) {
+		warning("recursive delta detected for object %s",
+			sha1_to_hex(e->idx.sha1));
+		return WRITE_ONE_RECURSIVE;
+	} else if (e->idx.offset || e->preferred_base) {
+		/* offset is non zero if object is written already. */
+		return WRITE_ONE_SKIP;
+	}
 
 	/* if we are deltified, write out base object first. */
-	if (e->delta && !write_one(f, e->delta, offset))
-		return 0;
+	if (e->delta) {
+		e->idx.offset = 1; /* now recurse */
+		switch (write_one(f, e->delta, offset)) {
+		case WRITE_ONE_RECURSIVE:
+			/* we cannot depend on this one */
+			e->delta = NULL;
+			break;
+		default:
+			break;
+		case WRITE_ONE_BREAK:
+			e->idx.offset = recursing;
+			return WRITE_ONE_BREAK;
+		}
+	}
 
 	e->idx.offset = *offset;
 	size = write_object(f, e, *offset);
 	if (!size) {
-		e->idx.offset = 0;
-		return 0;
+		e->idx.offset = recursing;
+		return WRITE_ONE_BREAK;
 	}
 	written_list[nr_written++] = &e->idx;
 
 	/* make sure off_t is sufficiently large not to wrap */
 	if (signed_add_overflows(*offset, size))
 		die("pack too large for current definition of off_t");
 	*offset += size;
-	return 1;
+	return WRITE_ONE_WRITTEN;
 }
 
 static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
@@ -640,7 +671,7 @@ static void write_pack_file(void)
 		nr_written = 0;
 		for (; i < nr_objects; i++) {
 			struct object_entry *e = write_order[i];
-			if (!write_one(f, e, &offset))
+			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 				break;
 			display_progress(progress_state, written);
 		}