diff --git a/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml b/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml
new file mode 100644
index 0000000000..c76273d5cd
--- /dev/null
+++ b/changelog/fragments/1757423719-Make-file-storage-size-configurable.yaml
@@ -0,0 +1,32 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: enhancement
+
+# Change summary; a 80ish characters long description of the change.
+summary: Makes file storage size configurable
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodate a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
+#description:
+
+# Affected component; a word indicating the component this changeset affects.
+component: fleet-server
+
+# PR URL; optional; the PR number that added the changeset.
+# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+#pr: https://github.com/owner/repo/1234
+
+# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present is automatically filled by the tooling with the issue linked to the PR number.
+#issue: https://github.com/owner/repo/1234
diff --git a/internal/pkg/api/handleFileDelivery.go b/internal/pkg/api/handleFileDelivery.go
index 22ce1a8693..b58337807e 100644
--- a/internal/pkg/api/handleFileDelivery.go
+++ b/internal/pkg/api/handleFileDelivery.go
@@ -30,7 +30,7 @@ func NewFileDeliveryT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elastic
 	return &FileDeliveryT{
 		bulker:    bulker,
 		cache:     cache,
-		deliverer: delivery.New(chunkClient, bulker, maxFileSize),
+		deliverer: delivery.New(chunkClient, bulker, cfg.Limits.MaxFileStorageByteSize),
 		authAgent: authAgent,
 	}
 }
diff --git a/internal/pkg/api/handleFileDelivery_test.go b/internal/pkg/api/handleFileDelivery_test.go
index b036f0a052..e21b20046b 100644
--- a/internal/pkg/api/handleFileDelivery_test.go
+++ b/internal/pkg/api/handleFileDelivery_test.go
@@ -405,7 +405,7 @@ func prepareFileDeliveryMock(t *testing.T) (http.Handler, apiServer, *MockTransp
 		ft: &FileDeliveryT{
 			bulker:    fakebulk,
 			cache:     c,
-			deliverer: delivery.New(mockES, fakebulk, maxFileSize),
+			deliverer: delivery.New(mockES, fakebulk, 0),
 			authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
 				return &model.Agent{
 					ESDocument: model.ESDocument{
diff --git a/internal/pkg/api/handleUpload.go b/internal/pkg/api/handleUpload.go
index 3f567b5cc0..d13548957a 100644
--- a/internal/pkg/api/handleUpload.go
+++ b/internal/pkg/api/handleUpload.go
@@ -31,8 +31,6 @@ import (
 )
 
 const (
-	// TODO: move to a config
-	maxFileSize    = 104857600 // 100 MiB
 	maxUploadTimer = 24 * time.Hour
 )
 
@@ -58,7 +56,7 @@ func NewUploadT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elasticsearch
 		chunkClient: chunkClient,
 		bulker:      bulker,
 		cache:       cache,
-		uploader:    uploader.New(chunkClient, bulker, cache, maxFileSize, maxUploadTimer),
+		uploader:    uploader.New(chunkClient, bulker, cache, cfg.Limits.MaxFileStorageByteSize, maxUploadTimer),
 		authAgent:   authAgent,
 		authAPIKey:  authAPIKey,
 	}
diff --git a/internal/pkg/api/handleUpload_test.go b/internal/pkg/api/handleUpload_test.go
index f3c08cb338..1b53232000 100644
--- a/internal/pkg/api/handleUpload_test.go
+++ b/internal/pkg/api/handleUpload_test.go
@@ -11,6 +11,7 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -71,18 +72,6 @@ func TestUploadBeginValidation(t *testing.T) {
 			"src": "agent"
 		}`,
 		},
-		{"Oversized file should be rejected", http.StatusBadRequest, "size",
-			`{
-			"file": {
-				"size": ` + strconv.Itoa(maxFileSize+1024) + `,
-				"name": "foo.png",
-				"mime_type": "image/png"
-			},
-			"agent_id": "foo",
-			"action_id": "123",
-			"src": "agent"
-		}`,
-		},
 		{"zero size file should be rejected", http.StatusBadRequest, "size",
 			`{
 			"file": {
@@ -346,6 +335,48 @@ func TestUploadBeginBadRequest(t *testing.T) {
 	assert.Equal(t, http.StatusBadRequest, rec.Code)
 }
 
+func TestUploadBeginFileSize(t *testing.T) {
+
+	mockFile := func(size int64) string {
+		return fmt.Sprintf(`{
+		"file": {
+			"size": %d,
+			"name": "foo.png",
+			"mime_type": "image/png"
+		},
+		"agent_id": "foo",
+		"action_id": "123",
+		"src": "agent"
+	}`, size)
+	}
+
+	// exercise the configured size limit against various request sizes
+	tests := []struct {
+		Name         string
+		MaxSize      int64
+		ExpectStatus int
+		InputSize    int64
+	}{
+		{"MaxSize 0 allows uploads", 0, http.StatusOK, 1000},
+		{"MaxSize 0 allows large uploads", 0, http.StatusOK, 1024 * 1024 * 1024 * 2},
+		{"MaxSize 0 does not allow 0-length files", 0, http.StatusBadRequest, 0},
+		{"Sizes larger than MaxSize are denied", 1024, http.StatusBadRequest, 2048},
+		{"Sizes smaller than MaxSize are allowed", 1024, http.StatusOK, 900},
allowed", 1024, http.StatusOK, 900}, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + + hr, _, _, _ := configureUploaderMock(t, tc.MaxSize) + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, RouteUploadBegin, strings.NewReader(mockFile(tc.InputSize))) + hr.ServeHTTP(rec, req) + assert.Equal(t, tc.ExpectStatus, rec.Code) + }) + } + +} + /* Chunk data upload route */ @@ -377,7 +408,7 @@ func TestChunkUploadRouteParams(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: file.MaxChunkSize + 1, Count: 2, // this is a 2-chunk "file" based on size above Start: time.Now(), @@ -410,7 +441,7 @@ func TestChunkUploadRequiresChunkHashHeader(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -458,7 +489,7 @@ func TestChunkUploadStatus(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -509,7 +540,7 @@ func TestChunkUploadExpiry(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: tc.StartTime, @@ -547,7 +578,7 @@ func TestChunkUploadWritesTimestamp(t *testing.T) { mockUploadInfoResult(fakebulk, file.Info{ DocID: "bar.foo", ID: mockUploadID, - ChunkSize: maxFileSize, + ChunkSize: file.MaxChunkSize, Total: 10, Count: 1, Start: time.Now(), @@ -597,7 +628,7 @@ func TestUploadCompleteRequiresMatchingAuth(t *testing.T) { mockInfo := file.Info{ DocID: "bar." 
diff --git a/internal/pkg/config/env_defaults.go b/internal/pkg/config/env_defaults.go
index a0bf3df9dd..ad1fb11e61 100644
--- a/internal/pkg/config/env_defaults.go
+++ b/internal/pkg/config/env_defaults.go
@@ -57,6 +57,8 @@ const (
 	defaultStatusMax     = 50
 	defaultStatusMaxBody = 0
 
+	defaultFileStorageByteSize = 0 // no limit
+
 	defaultUploadStartInterval = time.Second * 2
 	defaultUploadStartBurst    = 5
 	defaultUploadStartMax      = 10
@@ -70,7 +72,7 @@ const (
 	defaultUploadChunkInterval = time.Millisecond * 3
 	defaultUploadChunkBurst    = 5
 	defaultUploadChunkMax      = 10
-	defaultUploadChunkMaxBody  = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MAY NOT be larger than 4 MiB
+	defaultUploadChunkMaxBody  = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MUST NOT be larger than 4 MiB
 
 	defaultFileDelivInterval = time.Millisecond * 100
 	defaultFileDelivBurst    = 5
@@ -141,8 +143,9 @@ type limit struct {
 }
 
 type serverLimitDefaults struct {
-	PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
-	MaxConnections int           `config:"max_connections"`
+	PolicyThrottle         time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
+	MaxConnections         int           `config:"max_connections"`
+	MaxFileStorageByteSize int64         `config:"max_file_storage_size"`
 
 	ActionLimit limit `config:"action_limit"`
 	PolicyLimit limit `config:"policy_limit"`
@@ -161,7 +164,8 @@ type serverLimitDefaults struct {
 
 func defaultserverLimitDefaults() *serverLimitDefaults {
 	return &serverLimitDefaults{
-		MaxConnections: defaultMaxConnections,
+		MaxConnections:         defaultMaxConnections,
+		MaxFileStorageByteSize: defaultFileStorageByteSize,
 		ActionLimit: limit{
 			Interval: defaultActionInterval,
 			Burst:    defaultActionBurst,
diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go
index 8e6dbf391c..0a8e11bbf9 100644
--- a/internal/pkg/config/limits.go
+++ b/internal/pkg/config/limits.go
@@ -16,9 +16,10 @@ type Limit struct {
 }
 
 type ServerLimits struct {
-	MaxAgents         int `config:"max_agents"`
-	MaxHeaderByteSize int `config:"max_header_byte_size"`
-	MaxConnections    int `config:"max_connections"`
+	MaxAgents              int   `config:"max_agents"`
+	MaxHeaderByteSize      int   `config:"max_header_byte_size"`
+	MaxConnections         int   `config:"max_connections"`
+	MaxFileStorageByteSize int64 `config:"max_file_storage_size"`
 
 	ActionLimit Limit `config:"action_limit"`
 	PolicyLimit Limit `config:"policy_limit"`
@@ -47,6 +48,7 @@ func (c *ServerLimits) LoadLimits(limits *envLimits) {
 	if c.MaxConnections == 0 {
 		c.MaxConnections = l.MaxConnections
 	}
+	c.MaxFileStorageByteSize = l.MaxFileStorageByteSize
 
 	c.ActionLimit = mergeEnvLimit(c.ActionLimit, l.ActionLimit)
 	c.PolicyLimit = mergeEnvLimit(c.PolicyLimit, l.PolicyLimit)
diff --git a/internal/pkg/file/uploader/upload.go b/internal/pkg/file/uploader/upload.go
index 3ce35a4c96..11537b54d8 100644
--- a/internal/pkg/file/uploader/upload.go
+++ b/internal/pkg/file/uploader/upload.go
@@ -73,7 +73,7 @@ func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict)
 	}
 
 	size, _ := data.Int64("file", "size")
-	if size > u.sizeLimit {
+	if u.sizeLimit != 0 && size > u.sizeLimit {
 		vSpan.End()
 		return file.Info{}, ErrFileSizeTooLarge
 	}
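Reviewer note: the one-line change in Begin above is the core behavior change — a sizeLimit of zero now means "no limit" rather than "reject every upload". As a standalone sketch (not fleet-server source; acceptSize is a hypothetical helper), the accept/reject rule is:

	// acceptSize reports whether a declared upload size passes the configured cap.
	// A zero sizeLimit disables the cap entirely; zero-sized files are still
	// rejected by Begin's other validation, so they are out of scope here.
	func acceptSize(size, sizeLimit int64) bool {
		return sizeLimit == 0 || size <= sizeLimit
	}

This matches the TestUploadBeginFileSize table earlier in the diff: with MaxSize 0 a 2 GiB upload is accepted, while with MaxSize 1024 a 2048-byte request is denied.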
diff --git a/internal/pkg/file/uploader/upload_test.go b/internal/pkg/file/uploader/upload_test.go
index 8f9483ba27..88a95c95b5 100644
--- a/internal/pkg/file/uploader/upload_test.go
+++ b/internal/pkg/file/uploader/upload_test.go
@@ -83,7 +83,7 @@ func TestUploadBeginReturnsCorrectInfo(t *testing.T) {
 	c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
 	require.NoError(t, err)
 	u := New(nil, fakeBulk, c, int64(size), time.Hour)
-	info, err := u.Begin(context.Background(), []string{}, data)
+	info, err := u.Begin(t.Context(), []string{}, data)
 	assert.NoError(t, err)
 
 	assert.Equal(t, int64(size), info.Total)
@@ -127,7 +127,7 @@ func TestUploadBeginWritesDocumentFromInputs(t *testing.T) {
 	c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
 	require.NoError(t, err)
 	u := New(nil, fakeBulk, c, int64(size), time.Hour)
-	_, err = u.Begin(context.Background(), []string{}, data)
+	_, err = u.Begin(t.Context(), []string{}, data)
 	assert.NoError(t, err)
 
 	payload, ok := fakeBulk.Calls[0].Arguments[3].([]byte)
@@ -171,7 +171,7 @@ func TestUploadBeginCalculatesCorrectChunkCount(t *testing.T) {
 			data := makeUploadRequestDict(map[string]interface{}{
 				"file.size": tc.FileSize,
 			})
-			info, err := u.Begin(context.Background(), []string{}, data)
+			info, err := u.Begin(t.Context(), []string{}, data)
 			assert.NoError(t, err)
 			assert.Equal(t, tc.ExpectedCount, info.Count)
 		})
@@ -185,6 +185,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
 		ShouldError bool
 		Name        string
 	}{
+		{0, 1024 * 1024 * 300, false, "0 limit is unlimited"},
 		{500, 800, true, "800 is too large"},
 		{800, 500, false, "file within limits"},
 		{1024, 1023, false, "1-less than limit"},
@@ -210,7 +211,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
 			data := makeUploadRequestDict(map[string]interface{}{
 				"file.size": tc.FileSize,
 			})
-			_, err := u.Begin(context.Background(), []string{}, data)
+			_, err := u.Begin(t.Context(), []string{}, data)
 			if tc.ShouldError {
 				assert.ErrorIs(t, err, ErrFileSizeTooLarge)
 			} else {
@@ -264,7 +265,7 @@ func TestUploadRejectsMissingRequiredFields(t *testing.T) {
 				}
 			}
 
-			_, err = u.Begin(context.Background(), []string{}, data)
+			_, err = u.Begin(t.Context(), []string{}, data)
 			assert.Errorf(t, err, "%s is a required field and should error if not provided", field)
 		})
 
@@ -342,13 +343,13 @@ func TestChunkMarksFinal(t *testing.T) {
 				"file.size": tc.FileSize,
 			})
 
-			info, err := u.Begin(context.Background(), []string{}, data)
+			info, err := u.Begin(t.Context(), []string{}, data)
 			assert.NoError(t, err)
 
 			// for anything larger than 1-chunk, check for off-by-ones
 			if tc.FinalChunk > 0 {
 				mockUploadInfoResult(fakeBulk, info)
-				_, prev, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk-1, "")
+				_, prev, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk-1, "")
 				assert.NoError(t, err)
 				assert.Falsef(t, prev.Last, "penultimate chunk number (%d) should not be marked final", tc.FinalChunk-1)
 			}
@@ -356,7 +357,7 @@ func TestChunkMarksFinal(t *testing.T) {
 			mockUploadInfoResult(fakeBulk, info)
 
 			// make sure the final chunk is marked as such
-			_, chunk, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk, "")
+			_, chunk, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk, "")
 			assert.NoError(t, err)
 			assert.Truef(t, chunk.Last, "chunk number %d should be marked as Last", tc.FinalChunk)
 		})
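Operator note: since the new default is 0 (unlimited), deployments that depended on the old hard-coded 100 MiB cap must now opt back in. A minimal configuration sketch, assuming the usual fleet-server input layout for server limits (the key name comes from the max_file_storage_size config tag above; the value restores the removed default):

	inputs:
	  - type: fleet-server
	    server:
	      limits:
	        # bytes per file; 0 (the default) disables the cap
	        max_file_storage_size: 104857600 # 100 MiB, the previous hard-coded limit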