@@ -1286,7 +1286,17 @@ enum peel_status {
 	PEEL_INVALID = -1,
 
 	/* object cannot be peeled because it is not a tag: */
-	PEEL_NON_TAG = -2
+	PEEL_NON_TAG = -2,
+
+	/* ref_entry contains no peeled value because it is a symref: */
+	PEEL_IS_SYMREF = -3,
+
+	/*
+	 * ref_entry cannot be peeled because it is broken (i.e., the
+	 * symbolic reference cannot even be resolved to an object
+	 * name):
+	 */
+	PEEL_BROKEN = -4
 };
 
 /*
@@ -1318,31 +1328,56 @@ static enum peel_status peel_object(const unsigned char *name, unsigned char *sha1)
 	return PEEL_PEELED;
 }
 
+/*
+ * Peel the entry (if possible) and return its new peel_status.
+ */
+static enum peel_status peel_entry(struct ref_entry *entry)
+{
+	enum peel_status status;
+
+	if (entry->flag & REF_KNOWS_PEELED)
+		return is_null_sha1(entry->u.value.peeled) ?
+			PEEL_NON_TAG : PEEL_PEELED;
+	if (entry->flag & REF_ISBROKEN)
+		return PEEL_BROKEN;
+	if (entry->flag & REF_ISSYMREF)
+		return PEEL_IS_SYMREF;
+
+	status = peel_object(entry->u.value.sha1, entry->u.value.peeled);
+	if (status == PEEL_PEELED || status == PEEL_NON_TAG)
+		entry->flag |= REF_KNOWS_PEELED;
+	return status;
+}
+
 int peel_ref(const char *refname, unsigned char *sha1)
 {
 	int flag;
 	unsigned char base[20];
 
 	if (current_ref && (current_ref->name == refname
-			    || !strcmp(current_ref->name, refname))) {
-		if (current_ref->flag & REF_KNOWS_PEELED) {
-			if (is_null_sha1(current_ref->u.value.peeled))
-				return -1;
-			hashcpy(sha1, current_ref->u.value.peeled);
-			return 0;
-		}
-		return peel_object(current_ref->u.value.sha1, sha1);
+			    || !strcmp(current_ref->name, refname))) {
+		if (peel_entry(current_ref))
+			return -1;
+		hashcpy(sha1, current_ref->u.value.peeled);
+		return 0;
 	}
 
 	if (read_ref_full(refname, base, 1, &flag))
 		return -1;
 
-	if ((flag & REF_ISPACKED)) {
+	/*
+	 * If the reference is packed, read its ref_entry from the
+	 * cache in the hope that we already know its peeled value.
+	 * We only try this optimization on packed references because
+	 * (a) forcing the filling of the loose reference cache could
+	 * be expensive and (b) loose references anyway usually do not
+	 * have REF_KNOWS_PEELED.
+	 */
+	if (flag & REF_ISPACKED) {
 		struct ref_entry *r = get_packed_ref(refname);
-
-		if (r && (r->flag & REF_KNOWS_PEELED)) {
-			if (is_null_sha1(r->u.value.peeled))
-				return -1;
+		if (r) {
+			if (peel_entry(r))
+				return -1;
 			hashcpy(sha1, r->u.value.peeled);
 			return 0;
 		}
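
For reference, the sketch below shows how code elsewhere in refs.c could map the richer peel_status values back onto the 0/-1 convention that peel_ref() exposes to its callers. It is only an illustration, not part of this commit: the helper name peel_entry_to_sha1() is made up, and it assumes it is compiled inside refs.c where peel_entry(), struct ref_entry, hashcpy() and the peel_status constants are visible.

/*
 * Hypothetical helper (not part of this change): peel an entry and
 * copy the peeled object name into sha1, reporting the result with
 * peel_ref()'s 0 (success) / -1 (failure) convention.
 */
static int peel_entry_to_sha1(struct ref_entry *entry, unsigned char *sha1)
{
	switch (peel_entry(entry)) {
	case PEEL_PEELED:
		/* entry->u.value.peeled has been filled in and cached. */
		hashcpy(sha1, entry->u.value.peeled);
		return 0;
	case PEEL_NON_TAG:
	case PEEL_IS_SYMREF:
	case PEEL_BROKEN:
	case PEEL_INVALID:
	default:
		/* Nothing to peel, or the entry could not be resolved. */
		return -1;
	}
}

peel_ref() itself collapses the statuses the same way with "if (peel_entry(...)) return -1;", which works because PEEL_PEELED is the only zero-valued status.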
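
On the caller side, the interface of peel_ref() is unchanged: given a reference name it reports the object the reference ultimately peels to (e.g., the commit behind an annotated tag), or -1 if there is nothing to peel. A minimal caller sketch follows, assuming it is built inside the git tree so that cache.h/refs.h provide peel_ref(), sha1_to_hex() and the 20-byte object-name buffers used here; print_peeled() is a hypothetical name.

#include "cache.h"
#include "refs.h"

/* Hypothetical example: print what a reference peels to, if anything. */
static void print_peeled(const char *refname)
{
	unsigned char peeled[20];

	if (peel_ref(refname, peeled))
		fprintf(stderr, "%s does not peel to anything\n", refname);
	else
		printf("%s peels to %s\n", refname, sha1_to_hex(peeled));
}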