@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user's deployment
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: enhancement

# Change summary; an 80-ish character description of the change.
summary: Makes file storage size configurable

# Long description; in case the summary is not enough to describe the change
# this field accommodates a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; a word indicating the component this changeset affects.
component: fleet-server

# PR URL; optional; the PR number that added the changeset.
# If not present, it is automatically filled by the tooling by finding the PR where this changelog fragment was added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
#pr: https://github.com/owner/repo/1234
Contributor:

Suggested change
#pr: https://github.com/owner/repo/1234
pr: https://github.com/elastic/fleet-server/pull/5478


# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
#issue: https://github.com/owner/repo/1234
Contributor:

Small nit: if there is a parent issue, it would be nice to have it here.

2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery.go
@@ -30,7 +30,7 @@ func NewFileDeliveryT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elastic
return &FileDeliveryT{
bulker: bulker,
cache: cache,
deliverer: delivery.New(chunkClient, bulker, maxFileSize),
deliverer: delivery.New(chunkClient, bulker, cfg.Limits.MaxFileStorageSize),
authAgent: authAgent,
}
}
2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery_test.go
@@ -405,7 +405,7 @@ func prepareFileDeliveryMock(t *testing.T) (http.Handler, apiServer, *MockTransp
ft: &FileDeliveryT{
bulker: fakebulk,
cache: c,
deliverer: delivery.New(mockES, fakebulk, maxFileSize),
deliverer: delivery.New(mockES, fakebulk, 0),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
4 changes: 1 addition & 3 deletions internal/pkg/api/handleUpload.go
@@ -31,8 +31,6 @@ import (
)

const (
// TODO: move to a config
maxFileSize = 104857600 // 100 MiB
maxUploadTimer = 24 * time.Hour
)

@@ -58,7 +56,7 @@ func NewUploadT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elasticsearch
chunkClient: chunkClient,
bulker: bulker,
cache: cache,
uploader: uploader.New(chunkClient, bulker, cache, maxFileSize, maxUploadTimer),
uploader: uploader.New(chunkClient, bulker, cache, cfg.Limits.MaxFileStorageSize, maxUploadTimer),
authAgent: authAgent,
authAPIKey: authAPIKey,
}
73 changes: 54 additions & 19 deletions internal/pkg/api/handleUpload_test.go
@@ -11,6 +11,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
@@ -71,18 +72,6 @@ func TestUploadBeginValidation(t *testing.T) {
"src": "agent"
}`,
},
{"Oversized file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
"size": ` + strconv.Itoa(maxFileSize+1024) + `,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent"
}`,
},
{"zero size file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
@@ -346,6 +335,48 @@ func TestUploadBeginBadRequest(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, rec.Code)
}

func TestUploadBeginFileSize(t *testing.T) {

mockFile := func(size int64) string {
return fmt.Sprintf(`{
"file": {
"size": %d,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent"
}`, size)
}

// now test various body contents
tests := []struct {
Name string
MaxSize int64
ExpectStatus int
InputSize int64
}{
{"MaxSize 0 allows uploads", 0, http.StatusOK, 1000},
{"MaxSize 0 allows large uploads", 0, http.StatusOK, 1024 * 1024 * 1024 * 2},
{"MaxSize 0 does not allow 0-length files", 0, http.StatusBadRequest, 0},
Member:

This seems a little odd? If I set MaxSize 1, would I allow 0-length files?

Member Author:

Nope! This test just makes sure a size limit of "0" is not interpreted as a literal value, which would make 0-length files suddenly succeed when the limit is 0.

0-length files should always fail, but this makes sure we don't trip on an edge case when the limit is also 0.

{"Sizes larger than MaxSize are denied", 1024, http.StatusBadRequest, 2048},
{"Sizes smaller than MaxSize are allowed", 1024, http.StatusOK, 900},
}

for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {

hr, _, _, _ := configureUploaderMock(t, tc.MaxSize)
rec := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodPost, RouteUploadBegin, strings.NewReader(mockFile(tc.InputSize)))
hr.ServeHTTP(rec, req)
assert.Equal(t, tc.ExpectStatus, rec.Code)
})
}

}

/*
Chunk data upload route
*/
@@ -377,7 +408,7 @@ func TestChunkUploadRouteParams(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: file.MaxChunkSize + 1,
Count: 2, // this is a 2-chunk "file" based on size above
Start: time.Now(),
@@ -410,7 +441,7 @@ func TestChunkUploadRequiresChunkHashHeader(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -458,7 +489,7 @@ func TestChunkUploadStatus(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -509,7 +540,7 @@ func TestChunkUploadExpiry(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: tc.StartTime,
@@ -547,7 +578,7 @@ func TestChunkUploadWritesTimestamp(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
@@ -597,7 +628,7 @@ func TestUploadCompleteRequiresMatchingAuth(t *testing.T) {
mockInfo := file.Info{
DocID: "bar." + tc.AgentInFileRecord,
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now().Add(-time.Minute),
@@ -998,6 +1029,10 @@ func TestUploadCompleteBadRequests(t *testing.T) {

// prepareUploaderMock sets up common dependencies and registers upload routes to a returned router
func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
return configureUploaderMock(t, 0)
}

func configureUploaderMock(t *testing.T, fileSize int64) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
// chunk index operations skip the bulker in order to send binary docs directly
// so a mock *elasticsearch.Client needs to be prepared
es, tx := mockESClient(t)
@@ -1034,7 +1069,7 @@ func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockB
bulker: fakebulk,
chunkClient: es,
cache: c,
uploader: uploader.New(es, fakebulk, c, maxFileSize, maxUploadTimer),
uploader: uploader.New(es, fakebulk, c, fileSize, maxUploadTimer),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
12 changes: 8 additions & 4 deletions internal/pkg/config/env_defaults.go
@@ -57,6 +57,8 @@ const (
defaultStatusMax = 50
defaultStatusMaxBody = 0

defaultFileStorageSize = 0 // no limit
Contributor:

Should we change this to match the previous default, 104857600?

Member Author:

The specific aim of this PR is removing the file size limit. It was put there as a precautionary measure during initial development of the feature, but upcoming feature work in the stack will see much larger file sizes in default use cases.

Rather than deleting this control altogether, it seemed sensible to keep it as an optional limit (either as cost control for the user, or to prevent "oopsies" if they don't intend to use the upcoming larger-file features). If we'd rather eliminate the control as unneeded complexity, that seems fine too.

Member:

@pzl, has any benchmark been performed to check the behaviour of fleet server when uploading large(r) files? Do we have an idea of the size of files in the upcoming feature?

Member Author (@pzl, Sep 15, 2025):

Yes - a modified horde was used to have 8k drones send >2 GiB files (the randomized range was 1.6 GiB to 16 GiB) through fleet server with the file size limit disabled, without issue.

The primary controls here that maintain stability (in the face of all that traffic) are the request rate limits (the file ops have their own limit settings) and the architecture of file chunking. An entire 16 GiB file is not continuously streamed in one request, but rather sent as individual 4 MiB chunks. Those chunk requests have several rate controls on them, as well as limits on how many "upload jobs" may be ongoing (i.e. there will not be 8k agents all trying to upload chunks; fleet server controls how many in-progress file jobs there are). So memory will not be exhausted by too many parallel uploads, nor by buffering large files themselves (the 4 MiB chunk data, too, is immediately streamed to elasticsearch with minimal processing, so even that is effectively io.Copy'd from network-in to network-out).

Fleet server is then able to handle its "max parallel upload jobs" amount essentially continually and indefinitely. A chunk request comes in, is copied to elasticsearch, and closes. There is a (still very conservative!) limit on how many chunk requests can be handled at once. So large files do not increase any of that parallel handling, only how many chunk calls happen in sequence until the whole "upload job" has concluded. And then of course, at rest, fleet server does nothing with these files; they just sit as documents in elasticsearch. No load or impact to fleet server there.
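
For illustration, a minimal sketch of that pass-through behavior. The names here (streamChunk, maxChunkBody) are hypothetical, not fleet-server's actual API; the 4 MiB constant mirrors defaultUploadChunkMaxBody in this PR:

package uploader

import "io"

// maxChunkBody mirrors the 4 MiB chunk ceiling enforced by the handler.
const maxChunkBody = 1024 * 1024 * 4

// streamChunk copies a single chunk from the incoming request body to the
// Elasticsearch client without buffering the whole chunk in memory.
func streamChunk(es io.Writer, body io.Reader) (int64, error) {
	// LimitReader bounds the copy so a misbehaving client cannot push more
	// than one chunk's worth of data through a single request.
	return io.Copy(es, io.LimitReader(body, maxChunkBody))
}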

Member:

Would users / customers be accustomed to this limiting / preventing certain files from being sent? I'm asking to try to get a handle on what the "principle of least surprise" would be for users: whether that is "very large files are sent (even if multiple TBs)" or "I need to configure a max if above a threshold", and whether our previous limit of 100 MiB would have set an expectation.

Member Author:

This feature is only in use in two places:

  • agent diagnostics bundle (very small zips, not going to change here)
  • defend/endpoint file retrieval or distribution.

The defend feature is very much a manual user interaction to perform. Users have run into size limits when attempting to inspect larger binaries and have had to resort to other means to retrieve those files; defend needs to unblock a lot of those common workflows.

We're not aware of anyone relying on the size limit as a feature, but we do know of cases of users being hampered by it.
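
For reference, a hypothetical configuration snippet using the new setting; the exact nesting under the server limits block is an assumption here, not taken from this PR (the default of 0 means no limit):

server:
  limits:
    # cap uploaded file size at 200 MiB; 0 (the default) means unlimited
    max_file_storage_size: 209715200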


defaultUploadStartInterval = time.Second * 2
defaultUploadStartBurst = 5
defaultUploadStartMax = 10
@@ -70,7 +72,7 @@ const (
defaultUploadChunkInterval = time.Millisecond * 3
defaultUploadChunkBurst = 5
defaultUploadChunkMax = 10
defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MAY NOT be larger than 4 MiB
defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MUST NOT be larger than 4 MiB

defaultFileDelivInterval = time.Millisecond * 100
defaultFileDelivBurst = 5
@@ -141,8 +143,9 @@ type limit struct {
}

type serverLimitDefaults struct {
PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
MaxConnections int `config:"max_connections"`
PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
MaxConnections int `config:"max_connections"`
MaxFileStorageSize int64 `config:"max_file_storage_size"`

ActionLimit limit `config:"action_limit"`
PolicyLimit limit `config:"policy_limit"`
@@ -161,7 +164,8 @@ type serverLimitDefaults struct {

func defaultserverLimitDefaults() *serverLimitDefaults {
return &serverLimitDefaults{
MaxConnections: defaultMaxConnections,
MaxConnections: defaultMaxConnections,
MaxFileStorageSize: defaultFileStorageSize,
ActionLimit: limit{
Interval: defaultActionInterval,
Burst: defaultActionBurst,
8 changes: 5 additions & 3 deletions internal/pkg/config/limits.go
@@ -16,9 +16,10 @@ type Limit struct {
}

type ServerLimits struct {
MaxAgents int `config:"max_agents"`
MaxHeaderByteSize int `config:"max_header_byte_size"`
MaxConnections int `config:"max_connections"`
MaxAgents int `config:"max_agents"`
MaxHeaderByteSize int `config:"max_header_byte_size"`
MaxConnections int `config:"max_connections"`
MaxFileStorageSize int64 `config:"max_file_storage_size"`

ActionLimit Limit `config:"action_limit"`
PolicyLimit Limit `config:"policy_limit"`
@@ -47,6 +48,7 @@ func (c *ServerLimits) LoadLimits(limits *envLimits) {
if c.MaxConnections == 0 {
c.MaxConnections = l.MaxConnections
}
c.MaxFileStorageSize = l.MaxFileStorageSize

c.ActionLimit = mergeEnvLimit(c.ActionLimit, l.ActionLimit)
c.PolicyLimit = mergeEnvLimit(c.PolicyLimit, l.PolicyLimit)
2 changes: 1 addition & 1 deletion internal/pkg/file/uploader/upload.go
@@ -73,7 +73,7 @@ func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict)
}

size, _ := data.Int64("file", "size")
if size > u.sizeLimit {
if u.sizeLimit != 0 && size > u.sizeLimit {
Contributor:

I understand the u.sizeLimit != 0 clause here is for allowing uploads of any size. So 0 is the sentinel value for signaling to the code that uploads of any size are allowed.

I'm wondering if we should use 0 to signal that uploads are not allowed and -1 (or any other value < 0) to signal that uploads of any size are allowed?

Member Author:

Disallowing in just this manner would lead to some wacky and hard-to-debug errors in kibana features that rely on the file transfer. Kibana features (especially in Defend) would still appear enabled and allow user actions to take place that would fail when the agent (or endpoint) receives an action and then proceeds to upload.

I do see the arbitrary nature of 0s and negatives as not particularly meaningful for either the disabled or unlimited mode; they happen to be flipped here: -1 (or any negative value) will fail every upload, as a file size will always be larger than that, so negatives are effectively the disabler (not by design, just by happenstance). 0 seemed to be a common unset/default/unlimited value elsewhere in these configs (and in Go), and I went with that by convention. I think the language's unset/default value probably shouldn't lead to the feature being disabled.

Member:

Did we consider making an explicit [something]Enabled: true/false flag (and requiring the max*Size to be unsigned) to differentiate and avoid magic numbers (at least in the user-facing interface)?

Member Author:

I'm not sure I understand the suggestion here.

change the config's type to uint64, and then:

// with other the consts
var fileSizeLimitDisabled = 0

...

// when setting defaults
defaultFileSizeLimit = fileSizeLimitDisabled

?

That's good with me, I'm happy with that.


Just to restate the general case here: the end goal is to remove the file size cap; that is the specific need. I'd like to leave it configurable in case some user has strange edge cases I cannot predict and wants to bound it for their own reasons, but it is not essential to keep this a config; we can simplify the PR if we just want to remove the cap. The actual protections in place are the rate limits. If having a sentinel value here at 0 is the blocker, should we go the other way?

Member (@pchila, Sep 16, 2025):

I was about to add a comment asking for the config type to be changed to uint64, but then I saw this comment, so I'll try to keep the number of open threads to a minimum.

I don't want to hijack @nkvoll's comment and will leave it to him to get back here, but I would love to see the uint64 type (since negative values are not allowed) and a constant that reads something like what you wrote in your comment.

About the sentinel value vs. an explicit enabled/disabled flag, I am not sure: if we had such a flag we could leave the default limit at 100 MB as today (still configurable via MaxFileStorageByteSize) and just turn off the limit using FileStorageLimitEnabled: false (name still open to change 😉), but I am not 100% sure that this is what @nkvoll envisioned.

Member:

Thanks for the general-case explanation @pzl, much appreciated!

What I'm thinking is pretty much what @pchila is saying, but maybe with a slight variant to avoid discussing the naming, by having it be a pointer. If the pointer is nil, there is no max.

MaxFileStorageByteSize *int64 `config:"max_file_storage_size"`

If I understand correctly, we want to default to "no maximum configured"; a pointer usually defaults to nil, which is distinct from every other legal value, and any non-nil value would intuitively be compared to the file size on disk?

Member Author:

Re: negative numbers being nonsensical values here -- do we want this to be *uint64?

Also what should happen if it is configured and set to 0? Feature disabled?
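
A minimal sketch of the pointer variant discussed in this thread, with assumed names and semantics (nil meaning "no maximum" and an explicit 0 meaning uploads are disabled is one possible reading, not a settled decision):

package config

// MaxFileStorageByteSize: nil means no maximum; an explicit 0 would disable
// uploads entirely (assumed semantics, not settled in this thread).
type ServerLimits struct {
	MaxFileStorageByteSize *uint64 `config:"max_file_storage_size"`
}

// allowUpload sketches how the Begin handler might apply the pointer form.
func allowUpload(limits ServerLimits, size uint64) bool {
	if size == 0 {
		return false // 0-length files are always rejected
	}
	if limits.MaxFileStorageByteSize == nil {
		return true // no maximum configured
	}
	return size <= *limits.MaxFileStorageByteSize
}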

vSpan.End()
return file.Info{}, ErrFileSizeTooLarge
}
17 changes: 9 additions & 8 deletions internal/pkg/file/uploader/upload_test.go
@@ -83,7 +83,7 @@ func TestUploadBeginReturnsCorrectInfo(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

assert.Equal(t, int64(size), info.Total)
@@ -127,7 +127,7 @@ func TestUploadBeginWritesDocumentFromInputs(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
_, err = u.Begin(context.Background(), []string{}, data)
_, err = u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

payload, ok := fakeBulk.Calls[0].Arguments[3].([]byte)
@@ -171,7 +171,7 @@ func TestUploadBeginCalculatesCorrectChunkCount(t *testing.T) {
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)
assert.Equal(t, tc.ExpectedCount, info.Count)
})
@@ -185,6 +185,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
ShouldError bool
Name string
}{
{0, 1024 * 1024 * 300, false, "0 limit is unlimited"},
{500, 800, true, "800 is too large"},
{800, 500, false, "file within limits"},
{1024, 1023, false, "1-less than limit"},
@@ -210,7 +211,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
_, err := u.Begin(context.Background(), []string{}, data)
_, err := u.Begin(t.Context(), []string{}, data)
if tc.ShouldError {
assert.ErrorIs(t, err, ErrFileSizeTooLarge)
} else {
@@ -264,7 +265,7 @@ func TestUploadRejectsMissingRequiredFields(t *testing.T) {
}
}

_, err = u.Begin(context.Background(), []string{}, data)
_, err = u.Begin(t.Context(), []string{}, data)
assert.Errorf(t, err, "%s is a required field and should error if not provided", field)
})

@@ -342,21 +343,21 @@ func TestChunkMarksFinal(t *testing.T) {
"file.size": tc.FileSize,
})

info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

// for anything larger than 1-chunk, check for off-by-ones
if tc.FinalChunk > 0 {
mockUploadInfoResult(fakeBulk, info)
_, prev, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk-1, "")
_, prev, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk-1, "")
assert.NoError(t, err)
assert.Falsef(t, prev.Last, "penultimate chunk number (%d) should not be marked final", tc.FinalChunk-1)
}

mockUploadInfoResult(fakeBulk, info)

// make sure the final chunk is marked as such
_, chunk, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk, "")
_, chunk, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk, "")
assert.NoError(t, err)
assert.Truef(t, chunk.Last, "chunk number %d should be marked as Last", tc.FinalChunk)
})