@@ -159,10 +159,6 @@ static pthread_mutex_t deepest_delta_mutex;
 #define deepest_delta_lock()	lock_mutex(&deepest_delta_mutex)
 #define deepest_delta_unlock()	unlock_mutex(&deepest_delta_mutex)
 
-static pthread_mutex_t type_cas_mutex;
-#define type_cas_lock()		lock_mutex(&type_cas_mutex)
-#define type_cas_unlock()	unlock_mutex(&type_cas_mutex)
-
 static pthread_key_t key;
 
 static inline void lock_mutex(pthread_mutex_t *mutex)
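
For context, the lock_mutex()/unlock_mutex() helpers that these macros expand to follow a conditional-locking pattern: they only touch the pthread mutex once worker threads are actually running. A minimal sketch of that pattern (the threads_active short-circuit is an assumption about the surrounding code, not quoted from it):

#include <pthread.h>

static int threads_active;

static inline void lock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_lock(mutex);
}

static inline void unlock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_unlock(mutex);
}

This keeps the single-threaded path free of locking overhead while letting the same macros be used unconditionally.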
@@ -186,7 +182,6 @@ static void init_thread(void)
 	init_recursive_mutex(&read_mutex);
 	pthread_mutex_init(&counter_mutex, NULL);
 	pthread_mutex_init(&work_mutex, NULL);
-	pthread_mutex_init(&type_cas_mutex, NULL);
 	if (show_stat)
 		pthread_mutex_init(&deepest_delta_mutex, NULL);
 	pthread_key_create(&key, NULL);
@@ -209,7 +204,6 @@ static void cleanup_thread(void)
 	pthread_mutex_destroy(&read_mutex);
 	pthread_mutex_destroy(&counter_mutex);
 	pthread_mutex_destroy(&work_mutex);
-	pthread_mutex_destroy(&type_cas_mutex);
 	if (show_stat)
 		pthread_mutex_destroy(&deepest_delta_mutex);
 	for (i = 0; i < nr_threads; i++)
@@ -894,18 +888,15 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
 }
 
 /*
- * Walk from current node up
- * to top parent if necessary to deflate the node. In normal
- * situation, its parent node would be already deflated, so it just
- * needs to apply delta.
- *
- * In the worst case scenario, parent node is no longer deflated because
- * we're running out of delta_base_cache_limit; we need to re-deflate
- * parents, possibly up to the top base.
+ * Ensure that this node has been reconstructed and return its contents.
  *
- * All deflated objects here are subject to be freed if we exceed
- * delta_base_cache_limit, just like in find_unresolved_deltas(), we
- * just need to make sure the last node is not freed.
+ * In the typical and best case, this node would already be reconstructed
+ * (through the invocation to resolve_delta() in threaded_second_pass()) and it
+ * would not be pruned. However, if pruning of this node was necessary due to
+ * reaching delta_base_cache_limit, this function will find the closest
+ * ancestor with reconstructed data that has not been pruned (or if there is
+ * none, the ultimate base object), and reconstruct each node in the delta
+ * chain in order to generate the reconstructed data for this node.
  */
 static void *get_base_data(struct base_data *c)
 {
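
The walk described in the rewritten comment is a two-phase loop: climb the parent chain until reaching a node whose reconstructed data survived pruning (or the ultimate base), then re-apply deltas back down the chain. A toy sketch of that shape, with a stand-in apply_delta() and the simplifying assumption that the ultimate base always retains its data (in the real code it can be re-read from the pack); all names here are illustrative, not index-pack.c's:

#include <stdlib.h>
#include <string.h>

struct node {
	struct node *parent;	/* NULL for the ultimate base object */
	char *data;		/* NULL if pruned */
};

/* Stand-in for real delta application against the parent's data. */
static char *apply_delta(const char *base)
{
	char *out = malloc(strlen(base) + 2);
	strcpy(out, base);
	strcat(out, "+");
	return out;
}

static char *get_data(struct node *c)
{
	struct node **chain = NULL;
	size_t n = 0;
	struct node *p;

	/* Phase 1: record pruned nodes while walking up to the
	   closest ancestor that still has data. */
	for (p = c; !p->data; p = p->parent) {
		chain = realloc(chain, (n + 1) * sizeof(*chain));
		chain[n++] = p;
	}

	/* Phase 2: reconstruct each recorded node, starting nearest
	   the surviving ancestor, so each delta is applied against
	   freshly reconstructed parent data. */
	while (n--)
		chain[n]->data = apply_delta(chain[n]->parent->data);

	free(chain);
	return c->data;
}

If the node was never pruned, phase 1 records nothing and the function returns its data immediately, which matches the "typical and best case" the comment describes.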
@@ -1028,6 +1019,10 @@ static void *threaded_second_pass(void *data)
 	struct object_entry *child_obj;
 	struct base_data *child;
 
+	counter_lock();
+	display_progress(progress, nr_resolved_deltas);
+	counter_unlock();
+
 	work_lock();
 	if (list_empty(&work_head)) {
 		/*
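
The lines added at the top of the loop read the shared nr_resolved_deltas counter under counter_mutex before reporting it, so the display never races with threads that increment the counter. A standalone sketch of that pattern (illustrative names, with a plain printf standing in for display_progress()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_resolved;

static void report_progress(void)
{
	pthread_mutex_lock(&counter_mutex);
	printf("resolved %d deltas\n", nr_resolved);
	pthread_mutex_unlock(&counter_mutex);
}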