Commit 0560d0f

Add 504 to the list of responses we retry on
This is "gateway timeout", which we do see from Docker Hub from time to time (probably due to some HTTP middleware, redeploys, etc.).
1 parent e7b1446 commit 0560d0f

File tree

1 file changed (+3 −2 lines)
registry/rate-limits.go

Lines changed: 3 additions & 2 deletions
@@ -2,6 +2,7 @@ package registry
 
 import (
 	"net/http"
+	"slices"
 	"time"
 
 	"golang.org/x/time/rate"
@@ -24,7 +25,7 @@ func (d *rateLimitedRetryingRoundTripper) RoundTrip(req *http.Request) (*http.Re
 	// cap request retries at once per second
 	requestRetryLimiter = rate.NewLimiter(rate.Every(time.Second), 1)
 
-	// if we see 3x (503 or 502 or 500) during retry, we should bail
+	// if we see 50x three times during retry, we should bail
 	maxTry50X = 3
 
 	ctx = req.Context()
@@ -54,7 +55,7 @@ func (d *rateLimitedRetryingRoundTripper) RoundTrip(req *http.Request) (*http.Re
 	}
 
 	// certain status codes should result in a few auto-retries (especially with the automatic retry delay this injects), but up to a limit so we don't contribute to the "thundering herd" too much in a serious outage
-	if (res.StatusCode == 503 || res.StatusCode == 502 || res.StatusCode == 500) && maxTry50X > 1 {
+	if maxTry50X > 1 && slices.Contains([]int{500, 502, 503, 504}, res.StatusCode) {
 		maxTry50X--
 		doRetry = true
 		// no need to eat up the rate limiter tokens as we do for 429 because this is not a rate limiting error (and we have the "requestRetryLimiter" that separately limits our retries of *this* request)
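Since the diff only shows the changed lines, here is a minimal sketch of how a retrying transport along these lines could look in full context. The type name, the retry loop, and the body-closing details are illustrative assumptions for this page, not the actual contents of registry/rate-limits.go:

    package registry

    import (
        "net/http"
        "slices"
        "time"

        "golang.org/x/time/rate"
    )

    // retry50xRoundTripper is a hypothetical, simplified transport that retries
    // transient upstream failures (500, 502, 503, 504) a bounded number of times,
    // waiting between attempts so retries happen at most once per second.
    type retry50xRoundTripper struct {
        next http.RoundTripper
    }

    func (d *retry50xRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
        var (
            // cap request retries at once per second
            requestRetryLimiter = rate.NewLimiter(rate.Every(time.Second), 1)

            // if we see 50x three times during retry, we should bail
            maxTry50X = 3

            ctx = req.Context()
        )

        for {
            // wait for a retry token (also returns promptly on context cancellation)
            if err := requestRetryLimiter.Wait(ctx); err != nil {
                return nil, err
            }

            res, err := d.next.RoundTrip(req)
            if err != nil {
                return nil, err
            }

            // retry 50x responses (now including 504 "gateway timeout"),
            // but only a few times so we don't pile onto a serious outage;
            // requests with bodies would need req.GetBody handling before
            // re-sending, which is omitted in this sketch
            if maxTry50X > 1 && slices.Contains([]int{500, 502, 503, 504}, res.StatusCode) {
                maxTry50X--
                res.Body.Close()
                continue
            }

            return res, nil
        }
    }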
