diff --git a/Documentation/config/http.adoc b/Documentation/config/http.adoc index 9da5c298cc1d5e..9e3c888df47867 100644 --- a/Documentation/config/http.adoc +++ b/Documentation/config/http.adoc @@ -315,6 +315,30 @@ http.keepAliveCount:: unset, curl's default value is used. Can be overridden by the `GIT_HTTP_KEEPALIVE_COUNT` environment variable. +http.retryAfter:: + Default wait time in seconds before retrying when a server returns + HTTP 429 (Too Many Requests) without a Retry-After header. If set + to -1 (the default), Git will fail immediately when encountering + a 429 response without a Retry-After header. When a Retry-After + header is present, its value takes precedence over this setting. + Can be overridden by the `GIT_HTTP_RETRY_AFTER` environment variable. + See also `http.maxRetries` and `http.maxRetryTime`. + +http.maxRetries:: + Maximum number of times to retry after receiving HTTP 429 (Too Many + Requests) responses. Set to 0 (the default) to disable retries. + Can be overridden by the `GIT_HTTP_MAX_RETRIES` environment variable. + See also `http.retryAfter` and `http.maxRetryTime`. + +http.maxRetryTime:: + Maximum time in seconds to wait for a single retry attempt when + handling HTTP 429 (Too Many Requests) responses. If the server + requests a delay (via Retry-After header) or if `http.retryAfter` + is configured with a value that exceeds this maximum, Git will fail + immediately rather than waiting. Default is 300 seconds (5 minutes). + Can be overridden by the `GIT_HTTP_MAX_RETRY_TIME` environment + variable. See also `http.retryAfter` and `http.maxRetries`. + http.noEPSV:: A boolean which disables using of EPSV ftp command by curl. 
This can be helpful with some "poor" ftp servers which don't diff --git a/http-push.c b/http-push.c index d86ce771198206..a602a302ec79f1 100644 --- a/http-push.c +++ b/http-push.c @@ -716,6 +716,10 @@ static int fetch_indices(void) case HTTP_MISSING_TARGET: ret = 0; break; + case HTTP_RATE_LIMITED: + error("rate limited by '%s', please try again later", repo->url); + ret = -1; + break; default: ret = -1; } @@ -1548,6 +1552,10 @@ static int remote_exists(const char *path) case HTTP_MISSING_TARGET: ret = 0; break; + case HTTP_RATE_LIMITED: + error("rate limited by '%s', please try again later", url); + ret = -1; + break; case HTTP_ERROR: error("unable to access '%s': %s", url, curl_errorstr); /* fallthrough */ diff --git a/http-walker.c b/http-walker.c index e886e6486646d1..9f06f47de1c5c9 100644 --- a/http-walker.c +++ b/http-walker.c @@ -414,6 +414,11 @@ static int fetch_indices(struct walker *walker, struct alt_base *repo) repo->got_indices = 1; ret = 0; break; + case HTTP_RATE_LIMITED: + error("rate limited by '%s', please try again later", repo->base); + repo->got_indices = 0; + ret = -1; + break; default: repo->got_indices = 0; ret = -1; diff --git a/http.c b/http.c index 41f850db16d19f..5ef128cfde6693 100644 --- a/http.c +++ b/http.c @@ -22,6 +22,8 @@ #include "object-file.h" #include "odb.h" #include "tempfile.h" +#include "date.h" +#include "trace2.h" static struct trace_key trace_curl = TRACE_KEY_INIT(CURL); static int trace_curl_data = 1; @@ -133,9 +135,7 @@ static unsigned long http_auth_methods = CURLAUTH_ANY; static int http_auth_methods_restricted; /* Modes for which empty_auth cannot actually help us. 
*/ static unsigned long empty_auth_useless = - CURLAUTH_BASIC - | CURLAUTH_DIGEST_IE - | CURLAUTH_DIGEST; + CURLAUTH_BASIC | CURLAUTH_DIGEST_IE | CURLAUTH_DIGEST; static struct curl_slist *pragma_header; static struct string_list extra_http_headers = STRING_LIST_INIT_DUP; @@ -149,6 +149,14 @@ static char *cached_accept_language; static char *http_ssl_backend; static int http_schannel_check_revoke = 1; + +/* Retry configuration */ +static long http_retry_after = -1; /* Default retry-after in seconds when header is missing (-1 means not set, exit with 128) */ +static long http_max_retries = 0; /* Maximum number of retry attempts (0 means retries are disabled) */ +static long http_max_retry_time = 300; /* Maximum time to wait for a single retry (default 5 minutes) */ + +/* Store retry_after value from 429 responses for retry logic (-1 = not set, 0 = retry immediately, >0 = delay in seconds) */ +static long last_retry_after = -1; /* * With the backend being set to `schannel`, setting sslCAinfo would override * the Certificate Store in cURL v7.60.0 and later, which is not what we want @@ -209,13 +217,14 @@ static inline int is_hdr_continuation(const char *ptr, const size_t size) return size && (*ptr == ' ' || *ptr == '\t'); } -static size_t fwrite_wwwauth(char *ptr, size_t eltsize, size_t nmemb, void *p UNUSED) +static size_t fwrite_wwwauth(char *ptr, size_t eltsize, size_t nmemb, void *p) { size_t size = eltsize * nmemb; struct strvec *values = &http_auth.wwwauth_headers; struct strbuf buf = STRBUF_INIT; const char *val; size_t val_len; + struct active_request_slot *slot = (struct active_request_slot *)p; /* * Header lines may not come NULL-terminated from libcurl so we must @@ -257,6 +266,47 @@ static size_t fwrite_wwwauth(char *ptr, size_t eltsize, size_t nmemb, void *p UN goto exit; } + /* Parse Retry-After header for rate limiting */ + if (skip_iprefix_mem(ptr, size, "retry-after:", &val, &val_len)) { + strbuf_add(&buf, val, val_len); + strbuf_trim(&buf); + + if 
(slot && slot->results) { + /* Parse the retry-after value (delay-seconds or HTTP-date) */ + char *endptr; + long retry_after; + + errno = 0; + retry_after = strtol(buf.buf, &endptr, 10); + + /* Check if it's a valid integer (delay-seconds format) */ + if (endptr != buf.buf && *endptr == '\0' && + errno != ERANGE && retry_after > 0) { + slot->results->retry_after = retry_after; + } else { + /* Try parsing as HTTP-date format */ + timestamp_t timestamp; + int offset; + if (!parse_date_basic(buf.buf, ×tamp, &offset)) { + /* Successfully parsed as date, calculate delay from now */ + timestamp_t now = time(NULL); + if (timestamp > now) { + slot->results->retry_after = (long)(timestamp - now); + } else { + /* Past date means retry immediately */ + slot->results->retry_after = 0; + } + } else { + /* Failed to parse as either delay-seconds or HTTP-date */ + warning(_("unable to parse Retry-After header value: '%s'"), buf.buf); + } + } + } + + http_auth.header_is_last_match = 1; + goto exit; + } + /* * This line could be a continuation of the previously matched header * field. 
If this is the case then we should append this value to the @@ -341,7 +391,7 @@ static void finish_active_slot(struct active_request_slot *slot) &slot->results->auth_avail); curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CONNECTCODE, - &slot->results->http_connectcode); + &slot->results->http_connectcode); } /* Run callback if appropriate */ @@ -385,9 +435,8 @@ static void process_curl_messages(void) static int http_options(const char *var, const char *value, const struct config_context *ctx, void *data) { - if (!strcmp("http.version", var)) { + if (!strcmp("http.version", var)) return git_config_string(&curl_http_version, var, value); - } if (!strcmp("http.sslverify", var)) { curl_ssl_verify = git_config_bool(var, value); return 0; @@ -511,18 +560,16 @@ static int http_options(const char *var, const char *value, #endif } - if (!strcmp("http.pinnedpubkey", var)) { + if (!strcmp("http.pinnedpubkey", var)) return git_config_pathname(&ssl_pinnedkey, var, value); - } if (!strcmp("http.extraheader", var)) { - if (!value) { + if (!value) return config_error_nonbool(var); - } else if (!*value) { + else if (!*value) string_list_clear(&extra_http_headers, 0); - } else { + else string_list_append(&extra_http_headers, value); - } return 0; } @@ -575,6 +622,21 @@ static int http_options(const char *var, const char *value, return 0; } + if (!strcmp("http.retryafter", var)) { + http_retry_after = git_config_int(var, value, ctx->kvi); + return 0; + } + + if (!strcmp("http.maxretries", var)) { + http_max_retries = git_config_int(var, value, ctx->kvi); + return 0; + } + + if (!strcmp("http.maxretrytime", var)) { + http_max_retry_time = git_config_int(var, value, ctx->kvi); + return 0; + } + /* Fall back on the default ones */ return git_default_config(var, value, ctx, data); } @@ -656,9 +718,9 @@ static void set_proxyauth_name_password(CURL *result) { if (proxy_auth.password) { curl_easy_setopt(result, CURLOPT_PROXYUSERNAME, - proxy_auth.username); + proxy_auth.username); 
curl_easy_setopt(result, CURLOPT_PROXYPASSWORD, - proxy_auth.password); + proxy_auth.password); } else if (proxy_auth.authtype && proxy_auth.credential) { curl_easy_setopt(result, CURLOPT_PROXYHEADER, http_append_auth_header(&proxy_auth, NULL)); @@ -680,17 +742,16 @@ static void init_curl_proxy_auth(CURL *result) for (i = 0; i < ARRAY_SIZE(proxy_authmethods); i++) { if (!strcmp(http_proxy_authmethod, proxy_authmethods[i].name)) { curl_easy_setopt(result, CURLOPT_PROXYAUTH, - proxy_authmethods[i].curlauth_param); + proxy_authmethods[i].curlauth_param); break; } } if (i == ARRAY_SIZE(proxy_authmethods)) { warning("unsupported proxy authentication method %s: using anyauth", - http_proxy_authmethod); + http_proxy_authmethod); curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY); } - } - else + } else curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY); } @@ -737,7 +798,7 @@ static int redact_sensitive_header(struct strbuf *header, size_t offset) while (*sensitive_header && !isspace(*sensitive_header)) sensitive_header++; /* Everything else is opaque and possibly sensitive */ - strbuf_setlen(header, sensitive_header - header->buf); + strbuf_setlen(header, sensitive_header - header->buf); strbuf_addstr(header, " "); ret = 1; } else if (trace_curl_redact && @@ -833,7 +894,7 @@ static void curl_dump_header(const char *text, unsigned char *ptr, size_t size, struct strbuf **headers, **header; strbuf_addf(&out, "%s, %10.10ld bytes (0x%8.8lx)\n", - text, (long)size, (long)size); + text, (long)size, (long)size); trace_strbuf(&trace_curl, &out); strbuf_reset(&out); strbuf_add(&out, ptr, size); @@ -859,7 +920,7 @@ static void curl_dump_data(const char *text, unsigned char *ptr, size_t size) unsigned int width = 60; strbuf_addf(&out, "%s, %10.10ld bytes (0x%8.8lx)\n", - text, (long)size, (long)size); + text, (long)size, (long)size); trace_strbuf(&trace_curl, &out); for (i = 0; i < size; i += width) { @@ -871,8 +932,7 @@ static void curl_dump_data(const char *text, 
unsigned char *ptr, size_t size) unsigned char ch = ptr[i + w]; strbuf_addch(&out, - (ch >= 0x20) && (ch < 0x80) - ? ch : '.'); + (ch >= 0x20) && (ch < 0x80) ? ch : '.'); } strbuf_addch(&out, '\n'); trace_strbuf(&trace_curl, &out); @@ -897,7 +957,8 @@ static int curl_trace(CURL *handle UNUSED, curl_infotype type, void *userp UNUSED) { const char *text; - enum { NO_FILTER = 0, DO_FILTER = 1 }; + enum { NO_FILTER = 0, + DO_FILTER = 1 }; switch (type) { case CURLINFO_TEXT: @@ -936,7 +997,7 @@ static int curl_trace(CURL *handle UNUSED, curl_infotype type, } break; - default: /* we ignore unknown types by default */ + default: /* we ignore unknown types by default */ return 0; } return 0; @@ -1029,13 +1090,13 @@ static CURL *get_curl_handle(void) curl_easy_setopt(result, CURLOPT_SSL_VERIFYHOST, 2L); } - if (curl_http_version) { + if (curl_http_version) { long opt; if (!get_curl_http_version_opt(curl_http_version, &opt)) { /* Set request use http version */ curl_easy_setopt(result, CURLOPT_HTTP_VERSION, opt); } - } + } curl_easy_setopt(result, CURLOPT_NETRC, CURL_NETRC_OPTIONAL); curl_easy_setopt(result, CURLOPT_HTTPAUTH, CURLAUTH_ANY); @@ -1046,7 +1107,7 @@ static CURL *get_curl_handle(void) for (i = 0; i < ARRAY_SIZE(curl_deleg_levels); i++) { if (!strcmp(curl_deleg, curl_deleg_levels[i].name)) { curl_easy_setopt(result, CURLOPT_GSSAPI_DELEGATION, - curl_deleg_levels[i].curl_deleg_param); + curl_deleg_levels[i].curl_deleg_param); break; } } @@ -1057,9 +1118,8 @@ static CURL *get_curl_handle(void) #endif if (http_ssl_backend && !strcmp("schannel", http_ssl_backend) && - !http_schannel_check_revoke) { + !http_schannel_check_revoke) curl_easy_setopt(result, CURLOPT_SSL_OPTIONS, (long)CURLSSLOPT_NO_REVOKE); - } if (http_proactive_auth != PROACTIVE_AUTH_NONE) init_curl_http_auth(result); @@ -1084,7 +1144,7 @@ static CURL *get_curl_handle(void) ssl_cipherlist = getenv("GIT_SSL_CIPHER_LIST"); if (ssl_cipherlist != NULL && *ssl_cipherlist) curl_easy_setopt(result, 
CURLOPT_SSL_CIPHER_LIST, - ssl_cipherlist); + ssl_cipherlist); if (ssl_cert) curl_easy_setopt(result, CURLOPT_SSLCERT, ssl_cert); @@ -1149,7 +1209,7 @@ static CURL *get_curl_handle(void) trace_curl_redact = 0; curl_easy_setopt(result, CURLOPT_USERAGENT, - user_agent ? user_agent : git_user_agent()); + user_agent ? user_agent : git_user_agent()); if (curl_ftp_no_epsv) curl_easy_setopt(result, CURLOPT_FTP_USE_EPSV, 0L); @@ -1194,16 +1254,16 @@ static CURL *get_curl_handle(void) if (starts_with(curl_http_proxy, "socks5h")) curl_easy_setopt(result, - CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS5_HOSTNAME); + CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS5_HOSTNAME); else if (starts_with(curl_http_proxy, "socks5")) curl_easy_setopt(result, - CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS5); + CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS5); else if (starts_with(curl_http_proxy, "socks4a")) curl_easy_setopt(result, - CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS4A); + CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS4A); else if (starts_with(curl_http_proxy, "socks")) curl_easy_setopt(result, - CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS4); + CURLOPT_PROXYTYPE, (long)CURLPROXY_SOCKS4); else if (starts_with(curl_http_proxy, "https")) { curl_easy_setopt(result, CURLOPT_PROXYTYPE, (long)CURLPROXY_HTTPS); @@ -1329,7 +1389,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) case CURLSSLSET_UNKNOWN_BACKEND: strbuf_addf(&buf, _("Unsupported SSL backend '%s'. 
" "Supported SSL backends:"), - http_ssl_backend); + http_ssl_backend); for (i = 0; backends[i]; i++) strbuf_addf(&buf, "\n\t%s", backends[i]->name); die("%s", buf.buf); @@ -1366,7 +1426,7 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) var_override(&http_proxy_authmethod, remote->http_proxy_authmethod); pragma_header = curl_slist_append(http_copy_default_headers(), - "Pragma: no-cache"); + "Pragma: no-cache"); { char *http_max_requests = getenv("GIT_HTTP_MAX_REQUESTS"); @@ -1422,6 +1482,10 @@ void http_init(struct remote *remote, const char *url, int proactive_auth) set_long_from_env(&curl_tcp_keepintvl, "GIT_TCP_KEEPINTVL"); set_long_from_env(&curl_tcp_keepcnt, "GIT_TCP_KEEPCNT"); + set_long_from_env(&http_retry_after, "GIT_HTTP_RETRY_AFTER"); + set_long_from_env(&http_max_retries, "GIT_HTTP_MAX_RETRIES"); + set_long_from_env(&http_max_retry_time, "GIT_HTTP_MAX_RETRY_TIME"); + curl_default = get_curl_handle(); } @@ -1629,8 +1693,7 @@ void fill_active_slots(void) } while (slot != NULL) { - if (!slot->in_use && slot->curl != NULL - && curl_session_count > min_curl_sessions) { + if (!slot->in_use && slot->curl != NULL && curl_session_count > min_curl_sessions) { curl_easy_cleanup(slot->curl); slot->curl = NULL; curl_session_count--; @@ -1672,10 +1735,10 @@ void run_active_slot(struct active_request_slot *slot) if (curl_timeout == 0) { continue; } else if (curl_timeout == -1) { - select_timeout.tv_sec = 0; + select_timeout.tv_sec = 0; select_timeout.tv_usec = 50000; } else { - select_timeout.tv_sec = curl_timeout / 1000; + select_timeout.tv_sec = curl_timeout / 1000; select_timeout.tv_usec = (curl_timeout % 1000) * 1000; } @@ -1693,11 +1756,11 @@ void run_active_slot(struct active_request_slot *slot) if (max_fd < 0 && (select_timeout.tv_sec > 0 || select_timeout.tv_usec > 50000)) { - select_timeout.tv_sec = 0; + select_timeout.tv_sec = 0; select_timeout.tv_usec = 50000; } - select(max_fd+1, &readfds, &writefds, &excfds, &select_timeout); 
+ select(max_fd + 1, &readfds, &writefds, &excfds, &select_timeout); } } @@ -1758,12 +1821,7 @@ void finish_all_active_slots(void) /* Helpers for modifying and creating URLs */ static inline int needs_quote(int ch) { - if (((ch >= 'A') && (ch <= 'Z')) - || ((ch >= 'a') && (ch <= 'z')) - || ((ch >= '0') && (ch <= '9')) - || (ch == '/') - || (ch == '-') - || (ch == '.')) + if (((ch >= 'A') && (ch <= 'Z')) || ((ch >= 'a') && (ch <= 'z')) || ((ch >= '0') && (ch <= '9')) || (ch == '/') || (ch == '-') || (ch == '.')) return 0; return 1; } @@ -1853,7 +1911,7 @@ static int handle_curl_result(struct slot_results *results) } else if (missing_target(results)) return HTTP_MISSING_TARGET; else if (results->http_code == 401) { - if ((http_auth.username && http_auth.password) ||\ + if ((http_auth.username && http_auth.password) || (http_auth.authtype && http_auth.credential)) { if (http_auth.multistage) { credential_clear_secrets(&http_auth); @@ -1871,6 +1929,12 @@ static int handle_curl_result(struct slot_results *results) } return HTTP_REAUTH; } + } else if (results->http_code == 429) { + /* Store the retry_after value for use in retry logic */ + last_retry_after = results->retry_after; + trace2_data_intmax("http", the_repository, "http/429-retry-after", + last_retry_after); + return HTTP_RATE_LIMITED; } else { if (results->http_connectcode == 407) credential_reject(the_repository, &proxy_auth); @@ -1886,6 +1950,8 @@ int run_one_slot(struct active_request_slot *slot, struct slot_results *results) { slot->results = results; + /* Initialize retry_after to -1 (not set) */ + results->retry_after = -1; if (!start_active_slot(slot)) { xsnprintf(curl_errorstr, sizeof(curl_errorstr), "failed to start HTTP request"); @@ -2109,13 +2175,13 @@ const char *http_get_accept_language_header(void) static void http_opt_request_remainder(CURL *curl, off_t pos) { char buf[128]; - xsnprintf(buf, sizeof(buf), "%"PRIuMAX"-", (uintmax_t)pos); + xsnprintf(buf, sizeof(buf), "%" PRIuMAX "-", 
(uintmax_t)pos); curl_easy_setopt(curl, CURLOPT_RANGE, buf); } /* http_request() targets */ -#define HTTP_REQUEST_STRBUF 0 -#define HTTP_REQUEST_FILE 1 +#define HTTP_REQUEST_STRBUF 0 +#define HTTP_REQUEST_FILE 1 static int http_request(const char *url, void *result, int target, @@ -2149,6 +2215,7 @@ static int http_request(const char *url, } curl_easy_setopt(slot->curl, CURLOPT_HEADERFUNCTION, fwrite_wwwauth); + curl_easy_setopt(slot->curl, CURLOPT_HEADERDATA, slot); accept_language = http_get_accept_language_header(); @@ -2253,19 +2320,40 @@ static int update_url_from_redirect(struct strbuf *base, return 1; } +/* + * Sleep for the specified number of seconds before retrying. + */ +static void sleep_for_retry(long retry_after) +{ + if (retry_after > 0) { + unsigned int remaining; + warning(_("rate limited, waiting %ld seconds before retry"), retry_after); + trace2_region_enter("http", "retry-sleep", the_repository); + trace2_data_intmax("http", the_repository, "http/retry-sleep-seconds", + retry_after); + remaining = sleep(retry_after); + while (remaining > 0) { + /* Sleep was interrupted, continue sleeping */ + remaining = sleep(remaining); + } + trace2_region_leave("http", "retry-sleep", the_repository); + } +} + static int http_request_reauth(const char *url, void *result, int target, struct http_get_options *options) { int i = 3; int ret; + int rate_limit_retries = http_max_retries; if (always_auth_proactively()) credential_fill(the_repository, &http_auth, 1); ret = http_request(url, result, target, options); - if (ret != HTTP_OK && ret != HTTP_REAUTH) + if (ret != HTTP_OK && ret != HTTP_REAUTH && ret != HTTP_RATE_LIMITED) return ret; if (options && options->effective_url && options->base_url) { @@ -2276,7 +2364,7 @@ static int http_request_reauth(const char *url, } } - while (ret == HTTP_REAUTH && --i) { + while ((ret == HTTP_REAUTH || ret == HTTP_RATE_LIMITED) && --i) { /* * The previous request may have put cruft into our output stream; we * should clear it 
out before making our next request. @@ -2302,7 +2390,69 @@ static int http_request_reauth(const char *url, BUG("Unknown http_request target"); } - credential_fill(the_repository, &http_auth, 1); + if (ret == HTTP_RATE_LIMITED) { + /* Handle rate limiting with retry logic */ + int retry_attempt = http_max_retries - rate_limit_retries + 1; + + trace2_data_intmax("http", the_repository, "http/429-retry-attempt", + retry_attempt); + + if (rate_limit_retries <= 0) { + /* Retries are disabled or exhausted */ + if (http_max_retries > 0) { + error(_("too many rate limit retries, giving up")); + trace2_data_string("http", the_repository, + "http/429-error", "retries-exhausted"); + } + return HTTP_ERROR; + } + + /* Decrement retries counter */ + rate_limit_retries--; + + /* Use the stored retry_after value or configured default */ + if (last_retry_after >= 0) { + /* Check if retry delay exceeds maximum allowed */ + if (last_retry_after > http_max_retry_time) { + error(_("rate limited (HTTP 429) requested %ld second delay, " + "exceeds http.maxRetryTime of %ld seconds"), + last_retry_after, http_max_retry_time); + trace2_data_string("http", the_repository, + "http/429-error", "exceeds-max-retry-time"); + trace2_data_intmax("http", the_repository, + "http/429-requested-delay", last_retry_after); + last_retry_after = -1; /* Reset after use */ + return HTTP_ERROR; + } + sleep_for_retry(last_retry_after); + last_retry_after = -1; /* Reset after use */ + } else { + /* No Retry-After header provided */ + if (http_retry_after < 0) { + /* Not configured - exit with error */ + error(_("rate limited (HTTP 429) and no Retry-After header provided. 
" + "Configure http.retryAfter or set GIT_HTTP_RETRY_AFTER.")); + trace2_data_string("http", the_repository, + "http/429-error", "no-retry-after-config"); + return HTTP_ERROR; + } + /* Check if configured default exceeds maximum allowed */ + if (http_retry_after > http_max_retry_time) { + error(_("configured http.retryAfter (%ld seconds) exceeds " + "http.maxRetryTime (%ld seconds)"), + http_retry_after, http_max_retry_time); + trace2_data_string("http", the_repository, + "http/429-error", "config-exceeds-max-retry-time"); + return HTTP_ERROR; + } + /* Use configured default retry-after value */ + trace2_data_string("http", the_repository, + "http/429-retry-source", "config-default"); + sleep_for_retry(http_retry_after); + } + } else if (ret == HTTP_REAUTH) { + credential_fill(the_repository, &http_auth, 1); + } ret = http_request(url, result, target, options); } @@ -2349,7 +2499,7 @@ int http_get_file(const char *url, const char *filename, int http_fetch_ref(const char *base, struct ref *ref) { - struct http_get_options options = {0}; + struct http_get_options options = { 0 }; char *url; struct strbuf buffer = STRBUF_INIT; int ret = -1; @@ -2425,7 +2575,8 @@ static int fetch_and_setup_pack_index(struct packfile_list *packs, * If we already have the pack locally, no need to fetch its index or * even add it to list; we already have all of its objects. 
*/ - repo_for_each_pack(the_repository, p) { + repo_for_each_pack(the_repository, p) + { if (hasheq(p->hash, sha1, the_repository->hash_algo)) return 0; } @@ -2455,7 +2606,7 @@ static int fetch_and_setup_pack_index(struct packfile_list *packs, int http_get_info_packs(const char *base_url, struct packfile_list *packs) { - struct http_get_options options = {0}; + struct http_get_options options = { 0 }; int ret = 0; char *url; const char *data; @@ -2476,11 +2627,10 @@ int http_get_info_packs(const char *base_url, struct packfile_list *packs) if (skip_prefix(data, "P pack-", &data) && !parse_oid_hex(data, &oid, &data) && skip_prefix(data, ".pack", &data) && - (*data == '\n' || *data == '\0')) { + (*data == '\n' || *data == '\0')) fetch_and_setup_pack_index(packs, oid.hash, base_url); - } else { + else data = strchrnul(data, '\n'); - } if (*data) data++; /* skip past newline */ } @@ -2504,8 +2654,7 @@ void release_http_pack_request(struct http_pack_request *preq) free(preq); } -static const char *default_index_pack_args[] = - {"index-pack", "--stdin", NULL}; +static const char *default_index_pack_args[] = { "index-pack", "--stdin", NULL }; int finish_http_pack_request(struct http_pack_request *preq) { @@ -2521,8 +2670,8 @@ int finish_http_pack_request(struct http_pack_request *preq) ip.git_cmd = 1; ip.in = tmpfile_fd; strvec_pushv(&ip.args, preq->index_pack_args ? 
- preq->index_pack_args : - default_index_pack_args); + preq->index_pack_args : + default_index_pack_args); if (preq->preserve_index_pack_stdout) ip.out = 0; @@ -2548,13 +2697,13 @@ void http_install_packfile(struct packed_git *p, } struct http_pack_request *new_http_pack_request( - const unsigned char *packed_git_hash, const char *base_url) { - + const unsigned char *packed_git_hash, const char *base_url) +{ struct strbuf buf = STRBUF_INIT; end_url_with_slash(&buf, base_url); strbuf_addf(&buf, "objects/pack/pack-%s.pack", - hash_to_hex(packed_git_hash)); + hash_to_hex(packed_git_hash)); return new_direct_http_pack_request(packed_git_hash, strbuf_detach(&buf, NULL)); } @@ -2591,10 +2740,10 @@ struct http_pack_request *new_direct_http_pack_request( * resume where it left off */ prev_posn = ftello(preq->packfile); - if (prev_posn>0) { + if (prev_posn > 0) { if (http_is_verbose) fprintf(stderr, - "Resuming fetch of pack %s at byte %"PRIuMAX"\n", + "Resuming fetch of pack %s at byte %" PRIuMAX "\n", hash_to_hex(packed_git_hash), (uintmax_t)prev_posn); http_opt_request_remainder(preq->slot->curl, prev_posn); @@ -2621,17 +2770,17 @@ static size_t fwrite_sha1_file(char *ptr, size_t eltsize, size_t nmemb, if (slot) { CURLcode c = curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CODE, - &slot->http_code); + &slot->http_code); if (c != CURLE_OK) BUG("curl_easy_getinfo for HTTP code failed: %s", - curl_easy_strerror(c)); + curl_easy_strerror(c)); if (slot->http_code >= 300) return nmemb; } do { ssize_t retval = xwrite(freq->localfile, - (char *) ptr + posn, size - posn); + (char *)ptr + posn, size - posn); if (retval < 0) return posn / eltsize; posn += retval; @@ -2714,15 +2863,14 @@ struct http_object_request *new_http_object_request(const char *base_url, if (prevlocal != -1) { do { prev_read = xread(prevlocal, prev_buf, PREV_BUF_SIZE); - if (prev_read>0) { + if (prev_read > 0) { if (fwrite_sha1_file(prev_buf, 1, prev_read, - freq) == prev_read) { + freq) == prev_read) prev_posn 
+= prev_read; - } else { + else prev_read = -1; - } } } while (prev_read > 0); close(prevlocal); @@ -2739,7 +2887,7 @@ struct http_object_request *new_http_object_request(const char *base_url, memset(&freq->stream, 0, sizeof(freq->stream)); git_inflate_init(&freq->stream); the_hash_algo->init_fn(&freq->c); - if (prev_posn>0) { + if (prev_posn > 0) { prev_posn = 0; lseek(freq->localfile, 0, SEEK_SET); if (ftruncate(freq->localfile, 0) < 0) { @@ -2764,10 +2912,10 @@ struct http_object_request *new_http_object_request(const char *base_url, * If we have successfully processed data from a previous fetch * attempt, only fetch the data we don't already have. */ - if (prev_posn>0) { + if (prev_posn > 0) { if (http_is_verbose) fprintf(stderr, - "Resuming fetch of object %s at byte %"PRIuMAX"\n", + "Resuming fetch of object %s at byte %" PRIuMAX "\n", hex, (uintmax_t)prev_posn); http_opt_request_remainder(freq->slot->curl, prev_posn); } diff --git a/http.h b/http.h index f9d459340476e4..eb404564502165 100644 --- a/http.h +++ b/http.h @@ -20,6 +20,7 @@ struct slot_results { long http_code; long auth_avail; long http_connectcode; + long retry_after; }; struct active_request_slot { @@ -167,6 +168,7 @@ struct http_get_options { #define HTTP_REAUTH 4 #define HTTP_NOAUTH 5 #define HTTP_NOMATCHPUBLICKEY 6 +#define HTTP_RATE_LIMITED 7 /* * Requests a URL and stores the result in a strbuf. 
diff --git a/remote-curl.c b/remote-curl.c index 69f919454a4565..dd0680e5ae2461 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -371,6 +371,7 @@ static int show_http_message(struct strbuf *type, struct strbuf *charset, struct strbuf *msg) { const char *p, *eol; + struct strbuf msgbuf = STRBUF_INIT; /* * We only show text/plain parts, as other types are likely @@ -378,19 +379,24 @@ static int show_http_message(struct strbuf *type, struct strbuf *charset, */ if (strcmp(type->buf, "text/plain")) return -1; + + strbuf_addbuf(&msgbuf, msg); if (charset->len) - strbuf_reencode(msg, charset->buf, get_log_output_encoding()); + strbuf_reencode(&msgbuf, charset->buf, get_log_output_encoding()); - strbuf_trim(msg); - if (!msg->len) + strbuf_trim(&msgbuf); + if (!msgbuf.len) { + strbuf_release(&msgbuf); return -1; + } - p = msg->buf; + p = msgbuf.buf; do { eol = strchrnul(p, '\n'); fprintf(stderr, "remote: %.*s\n", (int)(eol - p), p); p = eol + 1; } while(*eol); + strbuf_release(&msgbuf); return 0; } @@ -529,6 +535,10 @@ static struct discovery *discover_refs(const char *service, int for_push) show_http_message(&type, &charset, &buffer); die(_("unable to access '%s' with http.pinnedPubkey configuration: %s"), transport_anonymize_url(url.buf), curl_errorstr); + case HTTP_RATE_LIMITED: + show_http_message(&type, &charset, &buffer); + die(_("rate limited by '%s', please try again later"), + transport_anonymize_url(url.buf)); default: show_http_message(&type, &charset, &buffer); die(_("unable to access '%s': %s"), diff --git a/t/meson.build b/t/meson.build index dc43d69636d15c..98bd6949e60269 100644 --- a/t/meson.build +++ b/t/meson.build @@ -698,6 +698,7 @@ integration_tests = [ 't5581-http-curl-verbose.sh', 't5582-fetch-negative-refspec.sh', 't5583-push-branches.sh', + 't5584-http-429-retry.sh', 't5600-clone-fail-cleanup.sh', 't5601-clone.sh', 't5602-clone-remote-exec.sh', diff --git a/t/t5584-http-429-retry.sh b/t/t5584-http-429-retry.sh new file mode 100755 index 
00000000000000..8bcc382763037b --- /dev/null +++ b/t/t5584-http-429-retry.sh @@ -0,0 +1,429 @@ +#!/bin/sh + +test_description='test HTTP 429 Too Many Requests retry logic' + +. ./test-lib.sh + +. "$TEST_DIRECTORY"/lib-httpd.sh + +start_httpd + +test_expect_success 'setup test repository' ' + test_commit initial && + git clone --bare . "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" && + git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" config http.receivepack true +' + +test_expect_success 'HTTP 429 with retries disabled (maxRetries=0) fails immediately' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 1\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited\n" + cat "$1" >/dev/null + EOF + + # Set maxRetries to 0 (disabled) + test_config http.maxRetries 0 && + test_config http.retryAfter 1 && + + # Should fail immediately without any retry attempt + test_must_fail git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + + # Verify no retry happened (no "waiting" message in stderr) + ! 
grep -i "waiting.*retry" err && + + # The one-time script will be consumed on first request (not a retry) + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 permanent should fail after max retries' ' + # Install a permanent error script to prove retries are limited + write_script "$HTTPD_ROOT_PATH/http-429-permanent.sh" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 1\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Permanently rate limited\n" + EOF + + # Enable retries with a limit + test_config http.maxRetries 2 && + + # Git should retry but eventually fail when 429 persists + test_must_fail git ls-remote "$HTTPD_URL/error/http-429-permanent.sh/repo.git" 2>err +' + +test_expect_success 'HTTP 429 with Retry-After is retried and succeeds' ' + # Create a one-time script that returns 429 with Retry-After header + # on the first request. Subsequent requests will succeed. + # This contrasts with the permanent 429 above - proving retry works + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + # Return HTTP 429 response instead of git response + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 1\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited - please retry after 1 second\n" + # Output something different from input so the script gets removed + cat "$1" >/dev/null + EOF + + # Enable retries + test_config http.maxRetries 3 && + + # Git should retry after receiving 429 and eventually succeed + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + test_grep "refs/heads/" output && + + # The one-time script should have been consumed (proving retry happened) + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 without Retry-After uses configured default' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" 
+ printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited - no retry info\n" + cat "$1" >/dev/null + EOF + + # Enable retries and configure default delay + test_config http.maxRetries 3 && + test_config http.retryAfter 1 && + + # Git should retry using configured default and succeed + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + test_grep "refs/heads/" output && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 retry delays are respected' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 2\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited\n" + cat "$1" >/dev/null + EOF + + # Enable retries + test_config http.maxRetries 3 && + + # Time the operation - it should take at least 2 seconds due to retry delay + start=$(date +%s) && + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Verify the 2-second retry delay was honored (>= 1s, allowing timing tolerance) + test "$duration" -ge 1 && + test_grep "refs/heads/" output && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 fails immediately if Retry-After exceeds http.maxRetryTime' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 100\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited with long delay\n" + cat "$1" >/dev/null + EOF + + # Configure max retry time to 3 seconds (much less than requested 100) + test_config http.maxRetries 3 && + test_config http.maxRetryTime 3 && + + # Should fail immediately without waiting + start=$(date +%s) && + test_must_fail git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should fail quickly (less than 2 
seconds, no 100 second wait) + test "$duration" -lt 2 && + test_grep "exceeds http.maxRetryTime" err && + + # The one-time script will be consumed on first request + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 fails if configured http.retryAfter exceeds http.maxRetryTime' ' + # Test misconfiguration: retryAfter > maxRetryTime + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited without header\n" + cat "$1" >/dev/null + EOF + + # Configure retryAfter larger than maxRetryTime + test_config http.maxRetries 3 && + test_config http.retryAfter 100 && + test_config http.maxRetryTime 5 && + + # Should fail immediately with configuration error + start=$(date +%s) && + test_must_fail git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should fail quickly + test "$duration" -lt 2 && + test_grep "configured http.retryAfter.*exceeds.*http.maxRetryTime" err +' + +test_expect_success 'HTTP 429 with Retry-After HTTP-date format' ' + # Test HTTP-date format (RFC 1123 IMF-fixdate, per RFC 9110) in Retry-After header + # Generate a date 2 seconds in the future + future_date=$(TZ=GMT date -d "+2 seconds" "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + TZ=GMT date -v+2S "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + echo "skip") && + + if test "$future_date" = "skip" + then + skip_all="date command does not support required format" && + test_done + fi && + + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-EOF && + printf "Status: 429 Too Many Requests\\r\\n" + printf "Retry-After: $future_date\\r\\n" + printf "Content-Type: text/plain\\r\\n" + printf "\\r\\n" + printf "Rate limited with HTTP-date\\n" + cat "\$1" >/dev/null + EOF + + # Enable retries + test_config http.maxRetries 3 && + + # Git should parse the HTTP-date and retry after the delay + start=$(date +%s) 
&& + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should take at least 1 second (allowing tolerance for processing time) + test "$duration" -ge 1 && + test_grep "refs/heads/" output && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 with HTTP-date exceeding maxRetryTime fails immediately' ' + # Generate a date 200 seconds in the future + future_date=$(TZ=GMT date -d "+200 seconds" "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + TZ=GMT date -v+200S "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + echo "skip") && + + if test "$future_date" = "skip" + then + skip_all="date command does not support required format" && + test_done + fi && + + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-EOF && + printf "Status: 429 Too Many Requests\\r\\n" + printf "Retry-After: $future_date\\r\\n" + printf "Content-Type: text/plain\\r\\n" + printf "\\r\\n" + printf "Rate limited with long HTTP-date\\n" + cat "\$1" >/dev/null + EOF + + # Configure max retry time much less than the 200 second delay + test_config http.maxRetries 3 && + test_config http.maxRetryTime 10 && + + # Should fail immediately without waiting 200 seconds + start=$(date +%s) && + test_must_fail git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should fail quickly (not wait 200 seconds) + test "$duration" -lt 2 && + test_grep "exceeds http.maxRetryTime" err && + + # The one-time script will be consumed on first request + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 with past HTTP-date should not wait' ' + past_date=$(TZ=GMT date -d "-10 seconds" "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + TZ=GMT date -v-10S "+%a, %d %b %Y %H:%M:%S GMT" 2>/dev/null || \ + echo "skip") && + + if test "$past_date" = "skip" + then + skip_all="date command does not support required format" 
&& + test_done + fi && + + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-EOF && + printf "Status: 429 Too Many Requests\\r\\n" + printf "Retry-After: $past_date\\r\\n" + printf "Content-Type: text/plain\\r\\n" + printf "\\r\\n" + printf "Rate limited with past date\\n" + cat "\$1" >/dev/null + EOF + + # Enable retries + test_config http.maxRetries 3 && + + # Git should retry immediately without waiting + start=$(date +%s) && + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should complete quickly (less than 2 seconds) + test "$duration" -lt 2 && + test_grep "refs/heads/" output && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 with invalid Retry-After format uses configured default' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: invalid-format-123abc\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited with malformed header\n" + cat "$1" >/dev/null + EOF + + # Configure default retry-after + test_config http.maxRetries 3 && + test_config http.retryAfter 1 && + + # Should use configured default (1 second) since header is invalid + start=$(date +%s) && + git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should take at least 1 second (the configured default) + test "$duration" -ge 1 && + test_grep "refs/heads/" output && + test_grep "waiting.*retry" err && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'HTTP 429 will not be retried without config' ' + # Default config means http.maxRetries=0 (retries disabled) + # When 429 is received, it should fail immediately without retry + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 1\r\n" + 
printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited\n" + cat "$1" >/dev/null + EOF + + # Do NOT configure anything - use defaults (http.maxRetries defaults to 0) + + # Should fail immediately without retry + test_must_fail git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + + # Verify no retry happened (no "waiting" message) + ! grep -i "waiting.*retry" err && + + # Should get 429 error + test_grep "429" err && + + # The one-time script should be consumed on first request + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'GIT_HTTP_RETRY_AFTER overrides http.retryAfter config' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited - no Retry-After header\n" + cat "$1" >/dev/null + EOF + + # Configure retryAfter to 10 seconds + test_config http.maxRetries 3 && + test_config http.retryAfter 10 && + + # Override with environment variable to 1 second + start=$(date +%s) && + GIT_HTTP_RETRY_AFTER=1 git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should use env var (1 second), not config (10 seconds) + test "$duration" -ge 1 && + test "$duration" -lt 5 && + test_grep "refs/heads/" output && + test_grep "waiting.*retry" err && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'GIT_HTTP_MAX_RETRIES overrides http.maxRetries config' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 1\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited\n" + cat "$1" >/dev/null + EOF + + # Configure maxRetries to 0 (disabled) + test_config http.maxRetries 0 && + test_config http.retryAfter 1 && + + # Override with environment variable to enable retries + 
GIT_HTTP_MAX_RETRIES=3 git ls-remote "$HTTPD_URL/one_time_script/repo.git" >output 2>err && + + # Should retry (env var enables it despite config saying disabled) + test_grep "refs/heads/" output && + test_grep "waiting.*retry" err && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'GIT_HTTP_MAX_RETRY_TIME overrides http.maxRetryTime config' ' + write_script "$HTTPD_ROOT_PATH/one-time-script" <<-\EOF && + printf "Status: 429 Too Many Requests\r\n" + printf "Retry-After: 50\r\n" + printf "Content-Type: text/plain\r\n" + printf "\r\n" + printf "Rate limited with long delay\n" + cat "$1" >/dev/null + EOF + + # Configure maxRetryTime to 100 seconds (would accept 50 second delay) + test_config http.maxRetries 3 && + test_config http.maxRetryTime 100 && + + # Override with environment variable to 10 seconds (should reject 50 second delay) + start=$(date +%s) && + test_must_fail env GIT_HTTP_MAX_RETRY_TIME=10 \ + git ls-remote "$HTTPD_URL/one_time_script/repo.git" 2>err && + end=$(date +%s) && + duration=$((end - start)) && + + # Should fail quickly (not wait 50 seconds) because env var limits to 10 + test "$duration" -lt 5 && + test_grep "exceeds http.maxRetryTime" err && + test_path_is_missing "$HTTPD_ROOT_PATH/one-time-script" +' + +test_expect_success 'verify normal repository access still works' ' + git ls-remote "$HTTPD_URL/smart/repo.git" >output && + test_grep "refs/heads/" output +' + +test_done