diff --git a/docs/proposals/nap-waf.md b/docs/proposals/nap-waf.md
index c0fd553b0a..dc2f39c04b 100644
--- a/docs/proposals/nap-waf.md
+++ b/docs/proposals/nap-waf.md
@@ -296,6 +296,9 @@ sha256sum compiled-policy.tgz > compiled-policy.tgz.sha256
 aws s3 cp compiled-policy.tgz s3://company-policies/prod-policy.tgz
 aws s3 cp compiled-policy.tgz.sha256 s3://company-policies/prod-policy.tgz.sha256
 
+# Note: In WAFPolicy, reference S3 objects using HTTPS URLs:
+# fileLocation: "https://company-policies.s3.amazonaws.com/prod-policy.tgz"
+
 # No Kubernetes resource changes needed - NGF automatically detects the update
 echo "Policy updated. NGF will detect changes within polling interval."
 ```
@@ -427,6 +430,8 @@
 
 ### WAFPolicy Custom Resource with Policy Attachment
 
+**Note**: When referencing S3 objects, use HTTPS URLs (e.g., `https://bucket.s3.amazonaws.com/path/file.tgz`) rather than S3 protocol URLs (`s3://bucket/path/file.tgz`).
+
 ```yaml
 apiVersion: gateway.nginx.org/v1alpha1
 kind: WAFPolicy
@@ -442,7 +447,7 @@ spec:
     namespace: applications
 
   policySource:
-    fileLocation: "s3://ngf-waf-policies/production/gateway-policy-v1.2.3.tgz"
+    fileLocation: "https://ngf-waf-policies.s3.amazonaws.com/production/gateway-policy-v1.2.3.tgz"
     authSecret:
       name: "policy-store-credentials"
     validation:
@@ -456,7 +461,7 @@
       interval: "5m"  # Check every 5 minutes
       # Optional: explicit checksum location
       # If not specified, defaults to .sha256
-      checksumLocation: "s3://ngf-waf-policies/production/gateway-policy-v1.2.3.tgz"
+      checksumLocation: "https://ngf-waf-policies.s3.amazonaws.com/production/gateway-policy-v1.2.3.tgz.sha256"
 
       # Retry configuration for policy fetch failures
       retryPolicy:
@@ -480,7 +485,7 @@
       # Custom logging profile bundle (similar to policy bundle)
       # logProfile and logProfileBundle are mutually exclusive per security log configuration entry
       logProfileBundle:
-        fileLocation: "s3://ngf-waf-policies/logging/custom-log-profile.tgz"
+        fileLocation: "https://ngf-waf-policies.s3.amazonaws.com/logging/custom-log-profile.tgz"
         authSecret:
           name: "policy-store-credentials"
         validation:
@@ -527,7 +532,7 @@
 
   # Stricter policy for admin endpoints
   policySource:
-    fileLocation: "s3://ngf-waf-policies/production/admin-strict-policy-v1.0.0.tgz"
+    fileLocation: "https://ngf-waf-policies.s3.amazonaws.com/production/admin-strict-policy-v1.0.0.tgz"
     authSecret:
       name: "policy-store-credentials"
     polling:
@@ -664,7 +669,7 @@
 metadata:
   # NGF service account in nginx-gateway namespace provides IRSA authentication
 spec:
   policySource:
-    fileLocation: "s3://company-waf-policies/policy.tgz"
+    fileLocation: "https://company-waf-policies.s3.amazonaws.com/policy.tgz"
     # No authSecret needed - uses IRSA automatically
 ```
@@ -1009,7 +1014,7 @@ spec:
     namespace: applications
 
   policySource:
-    fileLocation: "s3://company-waf-policies/production/base-policy.tgz"
+    fileLocation: "https://company-waf-policies.s3.amazonaws.com/production/base-policy.tgz"
     # Secret referenced for fallback - NGF will use IRSA if available, secret if not
     authSecret:
       name: "policy-store-credentials"
@@ -1020,7 +1025,7 @@
       interval: "5m"
       # Optional explicit checksum location
      # If not specified, defaults to base-policy.tgz.sha256
-      checksumLocation: "s3://company-waf-policies/production/base-policy.tgz.sha256"
+      checksumLocation: "https://company-waf-policies.s3.amazonaws.com/production/base-policy.tgz.sha256"
 
   securityLogs:
     - name: "gateway-logging"
@@ -1044,7 +1049,7 @@ spec:
     namespace: applications
 
   policySource:
-    fileLocation: "s3://company-waf-policies/production/admin-strict-policy.tgz"
+    fileLocation: "https://company-waf-policies.s3.amazonaws.com/production/admin-strict-policy.tgz"
 
   polling:
     enabled: true
diff --git a/internal/framework/fetch/errors.go b/internal/framework/fetch/errors.go
new file mode 100644
index 0000000000..08890af216
--- /dev/null
+++ b/internal/framework/fetch/errors.go
@@ -0,0 +1,23 @@
+package fetch
+
+import "fmt"
+
+// ChecksumMismatchError represents an error when the calculated checksum doesn't match the expected checksum.
+// This type of error should not trigger retries as it indicates data corruption or tampering.
+type ChecksumMismatchError struct {
+	Expected string
+	Actual   string
+}
+
+func (e *ChecksumMismatchError) Error() string {
+	return fmt.Sprintf("checksum mismatch: expected %s, got %s", e.Expected, e.Actual)
+}
+
+// HTTPStatusError represents an HTTP status code error for retry logic.
+type HTTPStatusError struct {
+	StatusCode int
+}
+
+func (e *HTTPStatusError) Error() string {
+	return fmt.Sprintf("unexpected status code: %d", e.StatusCode)
+}
diff --git a/internal/framework/fetch/fetch.go b/internal/framework/fetch/fetch.go
new file mode 100644
index 0000000000..6ad33f3aa7
--- /dev/null
+++ b/internal/framework/fetch/fetch.go
@@ -0,0 +1,291 @@
+package fetch
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+//go:generate go tool counterfeiter -generate
+
+const (
+	// Default configuration values.
+	defaultTimeout              = 30 * time.Second
+	defaultRetryAttempts        = 3
+	defaultRetryMaxDelay        = 5 * time.Minute
+	defaultRetryInitialDuration = 200 * time.Millisecond
+	defaultRetryJitter          = 0.1
+	defaultRetryLinearFactor    = 1.0
+	exponentialBackoffFactor    = 2.0
+
+	// HTTP configuration.
+	userAgent = "nginx-gateway-fabric"
+
+	// Checksum configuration.
+	checksumFileSuffix = ".sha256"
+)
+
+// RetryBackoffType defines supported backoff strategies.
+type RetryBackoffType string
+
+const (
+	RetryBackoffExponential RetryBackoffType = "exponential"
+	RetryBackoffLinear      RetryBackoffType = "linear"
+)
+
+// Option defines a function that modifies fetch options.
+type Option func(*DefaultFetcher)
+
+// WithTimeout sets the HTTP request timeout.
+func WithTimeout(timeout time.Duration) Option {
+	return func(f *DefaultFetcher) {
+		f.timeout = timeout
+	}
+}
+
+// WithRetryAttempts sets the number of retry attempts (total attempts = 1 + retries).
+func WithRetryAttempts(attempts int32) Option {
+	return func(f *DefaultFetcher) {
+		f.retryAttempts = attempts
+	}
+}
+
+// WithRetryBackoff sets the retry backoff strategy.
+func WithRetryBackoff(backoff RetryBackoffType) Option {
+	return func(f *DefaultFetcher) {
+		f.retryBackoff = backoff
+	}
+}
+
+// WithMaxRetryDelay sets the maximum delay between retries.
+func WithMaxRetryDelay(delay time.Duration) Option {
+	return func(f *DefaultFetcher) {
+		f.retryMaxDelay = delay
+	}
+}
+
+// WithChecksum enables checksum validation with an optional custom checksum location.
+// If no location is provided, it defaults to .sha256.
+func WithChecksum(checksumLocation ...string) Option {
+	return func(f *DefaultFetcher) {
+		f.checksumEnabled = true
+		if len(checksumLocation) > 0 {
+			f.checksumLocation = checksumLocation[0]
+		}
+	}
+}
+
+// Fetcher defines the interface for fetching remote files.
+//
+//counterfeiter:generate . Fetcher
+type Fetcher interface {
+	GetRemoteFile(targetURL string) ([]byte, error)
+}
+
+// DefaultFetcher is the default implementation of Fetcher.
+type DefaultFetcher struct {
+	httpClient       *http.Client
+	checksumLocation string
+	retryBackoff     RetryBackoffType
+	timeout          time.Duration
+	retryMaxDelay    time.Duration
+	retryAttempts    int32
+	checksumEnabled  bool
+}
+
+// NewDefaultFetcher creates a new DefaultFetcher.
+func NewDefaultFetcher(opts ...Option) *DefaultFetcher {
+	fetcher := &DefaultFetcher{
+		httpClient: &http.Client{
+			Timeout: defaultTimeout,
+		},
+		timeout:       defaultTimeout,
+		retryAttempts: defaultRetryAttempts,
+		retryMaxDelay: defaultRetryMaxDelay,
+		retryBackoff:  RetryBackoffExponential,
+	}
+
+	for _, opt := range opts {
+		opt(fetcher)
+	}
+
+	return fetcher
+}
+
+// GetRemoteFile fetches a remote file with retry logic and optional validation.
+func (f *DefaultFetcher) GetRemoteFile(targetURL string) ([]byte, error) {
+	ctx := context.Background()
+
+	if !strings.HasPrefix(targetURL, "http://") && !strings.HasPrefix(targetURL, "https://") {
+		return nil, fmt.Errorf("unsupported URL scheme (supported: http://, https://)")
+	}
+
+	backoff := createBackoffConfig(f.retryBackoff, f.retryAttempts, f.retryMaxDelay)
+	var lastErr error
+	var result []byte
+
+	err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
+		data, err := f.getFileContent(ctx, targetURL, f.timeout)
+		if err != nil {
+			lastErr = fmt.Errorf("HTTP error for %s: %w", targetURL, err)
+
+			shouldRetry, retryErr := f.shouldRetryHTTPError(err)
+			if !shouldRetry {
+				return false, retryErr
+			}
+			return false, nil
+		}
+
+		if f.checksumEnabled {
+			if err := f.validateChecksum(ctx, data, targetURL); err != nil {
+				lastErr = fmt.Errorf("checksum validation failed: %w", err)
+
+				var checksumErr *ChecksumMismatchError
+				if errors.As(err, &checksumErr) {
+					return false, lastErr // Stop retrying on checksum mismatches
+				}
+
+				if strings.Contains(err.Error(), "failed to fetch checksum from") {
+					shouldRetry, retryErr := f.shouldRetryHTTPError(err)
+					if !shouldRetry {
+						return false, retryErr
+					}
+					return false, nil
+				}
+
+				return false, nil // Retry on other checksum errors
+			}
+		}
+
+		result = data
+		return true, nil
+	})
+	if err != nil {
+		// If the backoff timed out or was aborted by a non-retryable error,
+		// return the last recorded error for better context.
+		if lastErr != nil {
+			return nil, lastErr
+		}
+		return nil, fmt.Errorf("failed to fetch HTTP file after retries: %w", err)
+	}
+
+	return result, nil
+}
+
+// getFileContent fetches content via HTTP(S).
+func (f *DefaultFetcher) getFileContent(
+	ctx context.Context,
+	targetURL string,
+	timeout time.Duration,
+) ([]byte, error) {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+	req.Header.Set("User-Agent", userAgent)
+
+	resp, err := f.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("request failed: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return nil, &HTTPStatusError{StatusCode: resp.StatusCode}
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read HTTP response body: %w", err)
+	}
+
+	return body, nil
+}
+
+// validateChecksum validates file content against a SHA256 checksum.
+func (f *DefaultFetcher) validateChecksum(
+	ctx context.Context,
+	data []byte,
+	targetURL string,
+) error {
+	// Determine checksum URL
+	checksumURL := f.checksumLocation
+	if checksumURL == "" {
+		checksumURL = targetURL + checksumFileSuffix
+	}
+
+	// Fetch checksum file
+	checksumData, err := f.getFileContent(ctx, checksumURL, f.timeout)
+	if err != nil {
+		return fmt.Errorf("failed to fetch checksum from %s: %w", checksumURL, err)
+	}
+
+	// Parse checksum (format: "hash filename" or just "hash")
+	checksumStr := strings.TrimSpace(string(checksumData))
+	checksumFields := strings.Fields(checksumStr)
+
+	if len(checksumFields) == 0 {
+		return fmt.Errorf("checksum file is empty or contains only whitespace")
+	}
+
+	expectedChecksum := checksumFields[0]
+
+	// Calculate actual checksum
+	hasher := sha256.New()
+	hasher.Write(data)
+	actualChecksum := hex.EncodeToString(hasher.Sum(nil))
+
+	if actualChecksum != expectedChecksum {
+		return &ChecksumMismatchError{Expected: expectedChecksum, Actual: actualChecksum}
+	}
+
+	return nil
+}
+
+// shouldRetryHTTPError determines if an HTTP error should trigger a retry.
+// It returns true if the error is retryable. If the error is not retryable,
+// it also returns the error that should be propagated to the caller.
+func (f *DefaultFetcher) shouldRetryHTTPError(err error) (bool, error) {
+	var statusErr *HTTPStatusError
+	if errors.As(err, &statusErr) {
+		switch statusErr.StatusCode {
+		case http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout:
+			return true, nil // Retry on retryable status codes
+		default:
+			return false, err // Stop retrying on non-retryable status codes
+		}
+	}
+	return true, nil // Retry on other HTTP errors
+}
+
+// createBackoffConfig creates a backoff configuration for retries.
+func createBackoffConfig(
+	backoffType RetryBackoffType,
+	attempts int32,
+	maxDelay time.Duration,
+) wait.Backoff {
+	backoff := wait.Backoff{
+		Duration: defaultRetryInitialDuration,
+		Factor:   defaultRetryLinearFactor,
+		Jitter:   defaultRetryJitter,
+		Steps:    int(attempts + 1),
+		Cap:      maxDelay,
+	}
+
+	if backoffType == RetryBackoffExponential {
+		backoff.Factor = exponentialBackoffFactor
+	}
+
+	return backoff
+}
diff --git a/internal/framework/fetch/fetch_test.go b/internal/framework/fetch/fetch_test.go
new file mode 100644
index 0000000000..7194a08a84
--- /dev/null
+++ b/internal/framework/fetch/fetch_test.go
@@ -0,0 +1,289 @@
+package fetch
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	. "github.com/onsi/gomega"
+)
+
+func TestGetRemoteFile(t *testing.T) {
+	t.Parallel()
+
+	fileContent := "test file content"
+	hasher := sha256.New()
+	hasher.Write([]byte(fileContent))
+	expectedChecksum := hex.EncodeToString(hasher.Sum(nil))
+
+	tests := []struct {
+		setupServer  func() *httptest.Server
+		validateFunc func(g *WithT, data []byte, err error)
+		name         string
+		url          string
+		options      []Option
+	}{
+		{
+			name: "valid checksum with filename",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					if strings.HasSuffix(r.URL.Path, ".sha256") {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(expectedChecksum + "  filename.txt"))
+					} else {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(fileContent))
+					}
+				}))
+			},
+			url:     "/file.txt",
+			options: []Option{WithChecksum()},
+			validateFunc: func(g *WithT, data []byte, err error) {
+				g.Expect(err).ToNot(HaveOccurred())
+				g.Expect(data).To(Equal([]byte(fileContent)))
+			},
+		},
+		{
+			name: "checksum mismatch",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					if strings.HasSuffix(r.URL.Path, ".sha256") {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte("0000000000000000000000000000000000000000000000000000000000000000"))
+					} else {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(fileContent))
+					}
+				}))
+			},
+			url:     "/file.txt",
+			options: []Option{WithChecksum(), WithRetryAttempts(3)},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				var checksumErr *ChecksumMismatchError
+				g.Expect(errors.As(err, &checksumErr)).To(BeTrue())
+			},
+		},
+		{
+			name: "empty checksum file",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					if strings.HasSuffix(r.URL.Path, ".sha256") {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte("   \n\t  "))
+					} else {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(fileContent))
+					}
+				}))
+			},
+			url:     "/file.txt",
+			options: []Option{WithChecksum()},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("checksum file is empty"))
+			},
+		},
+		{
+			name: "unsupported URL scheme",
+			url:  "ftp://example.com/file.txt",
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("unsupported URL scheme"))
+			},
+		},
+		{
+			name: "HTTP error response",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+					w.WriteHeader(http.StatusNotFound)
+				}))
+			},
+			url:     "/",
+			options: []Option{WithRetryAttempts(2)},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("HTTP error for"))
+				var statusErr *HTTPStatusError
+				g.Expect(errors.As(err, &statusErr)).To(BeTrue())
+			},
+		},
+		{
+			name:    "network connection error",
+			url:     "http://127.0.0.1:1",
+			options: []Option{WithRetryAttempts(0), WithTimeout(10 * time.Millisecond)},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("HTTP error for"))
+			},
+		},
+		{
+			name: "timeout during request",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+					time.Sleep(100 * time.Millisecond)
+					w.WriteHeader(http.StatusOK)
+					_, _ = w.Write([]byte("delayed response"))
+				}))
+			},
+			options: []Option{WithTimeout(10 * time.Millisecond), WithRetryAttempts(0)},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+			},
+		},
+		{
+			name: "retry success",
+			setupServer: func() *httptest.Server {
+				var attemptCount atomic.Int32
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+					count := attemptCount.Add(1)
+					if count < 3 {
+						w.WriteHeader(http.StatusServiceUnavailable)
+						return
+					}
+					w.WriteHeader(http.StatusOK)
+					_, _ = w.Write([]byte("success"))
+				}))
+			},
+			url: "/",
+			options: []Option{
+				WithRetryAttempts(2),
+			},
+			validateFunc: func(g *WithT, data []byte, err error) {
+				g.Expect(err).ToNot(HaveOccurred())
+				g.Expect(data).To(Equal([]byte("success")))
+			},
+		},
+		{
+			name: "retry attempts exhausted",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+					w.WriteHeader(http.StatusInternalServerError)
+				}))
+			},
+			url: "/",
+			options: []Option{
+				WithRetryAttempts(2),
+				WithRetryBackoff(RetryBackoffLinear),
+			},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("HTTP error for"))
+				var statusErr *HTTPStatusError
+				g.Expect(errors.As(err, &statusErr)).To(BeTrue())
+			},
+		},
+		{
+			name: "checksum fetch returns non-retryable error",
+			setupServer: func() *httptest.Server {
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					if strings.HasSuffix(r.URL.Path, ".sha256") {
+						w.WriteHeader(http.StatusNotFound)
+						return
+					}
+					w.WriteHeader(http.StatusOK)
+					_, _ = w.Write([]byte(fileContent))
+				}))
+			},
+			url:     "/file.txt",
+			options: []Option{WithChecksum(), WithRetryAttempts(2)},
+			validateFunc: func(g *WithT, _ []byte, err error) {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err.Error()).To(ContainSubstring("checksum validation failed"))
+			},
+		},
+		{
+			name: "checksum fetch succeeds after retry",
+			setupServer: func() *httptest.Server {
+				var attemptCount atomic.Int32
+				return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					if strings.HasSuffix(r.URL.Path, ".sha256") {
+						count := attemptCount.Add(1)
+						if count < 3 {
+							w.WriteHeader(http.StatusServiceUnavailable)
+							return
+						}
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(expectedChecksum))
+					} else {
+						w.WriteHeader(http.StatusOK)
+						_, _ = w.Write([]byte(fileContent))
+					}
+				}))
+			},
+			url:     "/file.txt",
+			options: []Option{WithChecksum(), WithRetryAttempts(2)},
+			validateFunc: func(g *WithT, data []byte, err error) {
+				g.Expect(err).ToNot(HaveOccurred())
+				g.Expect(data).To(Equal([]byte(fileContent)))
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			g := NewWithT(t)
+
+			var serverURL string
+			if tc.setupServer != nil {
+				server := tc.setupServer()
+				defer server.Close()
+				serverURL = server.URL
+			}
+
+			fetcher := NewDefaultFetcher(tc.options...)
+
+			url := tc.url
+			if strings.HasPrefix(url, "/") {
+				url = serverURL + url
+			}
+
+			data, err := fetcher.GetRemoteFile(url)
+			tc.validateFunc(g, data, err)
+		})
+	}
+}
+
+func TestErrorTypes(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		err      error
+		unwraps  error
+		name     string
+		expected string
+	}{
+		{
+			name: "ChecksumMismatchError",
+			err: &ChecksumMismatchError{
+				Expected: "abc123",
+				Actual:   "def456",
+			},
+			expected: "checksum mismatch: expected abc123, got def456",
+		},
+		{
+			name: "HTTPStatusError",
+			err: &HTTPStatusError{
+				StatusCode: 404,
+			},
+			expected: "unexpected status code: 404",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			g := NewWithT(t)
+			t.Parallel()
+			g.Expect(tc.err.Error()).To(Equal(tc.expected))
+
+			if tc.unwraps != nil {
+				g.Expect(errors.Unwrap(tc.err)).To(Equal(tc.unwraps))
+			}
+		})
+	}
+}
diff --git a/internal/framework/fetch/fetchfakes/fake_fetcher.go b/internal/framework/fetch/fetchfakes/fake_fetcher.go
new file mode 100644
index 0000000000..84198ebdb0
--- /dev/null
+++ b/internal/framework/fetch/fetchfakes/fake_fetcher.go
@@ -0,0 +1,116 @@
+// Code generated by counterfeiter. DO NOT EDIT.
+package fetchfakes
+
+import (
+	"sync"
+
+	"github.com/nginx/nginx-gateway-fabric/internal/framework/fetch"
+)
+
+type FakeFetcher struct {
+	GetRemoteFileStub        func(string) ([]byte, error)
+	getRemoteFileMutex       sync.RWMutex
+	getRemoteFileArgsForCall []struct {
+		arg1 string
+	}
+	getRemoteFileReturns struct {
+		result1 []byte
+		result2 error
+	}
+	getRemoteFileReturnsOnCall map[int]struct {
+		result1 []byte
+		result2 error
+	}
+	invocations      map[string][][]interface{}
+	invocationsMutex sync.RWMutex
+}
+
+func (fake *FakeFetcher) GetRemoteFile(arg1 string) ([]byte, error) {
+	fake.getRemoteFileMutex.Lock()
+	ret, specificReturn := fake.getRemoteFileReturnsOnCall[len(fake.getRemoteFileArgsForCall)]
+	fake.getRemoteFileArgsForCall = append(fake.getRemoteFileArgsForCall, struct {
+		arg1 string
+	}{arg1})
+	stub := fake.GetRemoteFileStub
+	fakeReturns := fake.getRemoteFileReturns
+	fake.recordInvocation("GetRemoteFile", []interface{}{arg1})
+	fake.getRemoteFileMutex.Unlock()
+	if stub != nil {
+		return stub(arg1)
+	}
+	if specificReturn {
+		return ret.result1, ret.result2
+	}
+	return fakeReturns.result1, fakeReturns.result2
+}
+
+func (fake *FakeFetcher) GetRemoteFileCallCount() int {
+	fake.getRemoteFileMutex.RLock()
+	defer fake.getRemoteFileMutex.RUnlock()
+	return len(fake.getRemoteFileArgsForCall)
+}
+
+func (fake *FakeFetcher) GetRemoteFileCalls(stub func(string) ([]byte, error)) {
+	fake.getRemoteFileMutex.Lock()
+	defer fake.getRemoteFileMutex.Unlock()
+	fake.GetRemoteFileStub = stub
+}
+
+func (fake *FakeFetcher) GetRemoteFileArgsForCall(i int) string {
+	fake.getRemoteFileMutex.RLock()
+	defer fake.getRemoteFileMutex.RUnlock()
+	argsForCall := fake.getRemoteFileArgsForCall[i]
+	return argsForCall.arg1
+}
+
+func (fake *FakeFetcher) GetRemoteFileReturns(result1 []byte, result2 error) {
+	fake.getRemoteFileMutex.Lock()
+	defer fake.getRemoteFileMutex.Unlock()
+	fake.GetRemoteFileStub = nil
+	fake.getRemoteFileReturns = struct {
+		result1 []byte
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakeFetcher) GetRemoteFileReturnsOnCall(i int, result1 []byte, result2 error) {
+	fake.getRemoteFileMutex.Lock()
+	defer fake.getRemoteFileMutex.Unlock()
+	fake.GetRemoteFileStub = nil
+	if fake.getRemoteFileReturnsOnCall == nil {
+		fake.getRemoteFileReturnsOnCall = make(map[int]struct {
+			result1 []byte
+			result2 error
+		})
+	}
+	fake.getRemoteFileReturnsOnCall[i] = struct {
+		result1 []byte
+		result2 error
+	}{result1, result2}
+}
+
+func (fake *FakeFetcher) Invocations() map[string][][]interface{} {
+	fake.invocationsMutex.RLock()
+	defer fake.invocationsMutex.RUnlock()
+	fake.getRemoteFileMutex.RLock()
+	defer fake.getRemoteFileMutex.RUnlock()
+	copiedInvocations := map[string][][]interface{}{}
+	for key, value := range fake.invocations {
+		copiedInvocations[key] = value
+	}
+	return copiedInvocations
+}
+
+func (fake *FakeFetcher) recordInvocation(key string, args []interface{}) {
+	fake.invocationsMutex.Lock()
+	defer fake.invocationsMutex.Unlock()
+	if fake.invocations == nil {
+		fake.invocations = map[string][][]interface{}{}
+	}
+	if fake.invocations[key] == nil {
+		fake.invocations[key] = [][]interface{}{}
+	}
+	fake.invocations[key] = append(fake.invocations[key], args)
+}
+
+var _ fetch.Fetcher = new(FakeFetcher)