- 
          
 - 
                Notifications
    
You must be signed in to change notification settings  - Fork 6.2k
 
Fix artifact v4 upload above 8MB #31664
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 3 commits
895e8b2
              a6fb8a5
              1c641fe
              4cde6db
              5a2a395
              d9f55bc
              02479bf
              add8925
              06f3e93
              a96be7c
              a7ef238
              6a1383d
              5ee6ddc
              f036ed8
              4bab1d4
              69be0c8
              010825a
              33b3e04
              File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
| 
          
            
          
           | 
    @@ -24,8 +24,15 @@ package actions | |
| // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block | ||
| // 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded | ||
| // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock | ||
| // 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now | ||
| // 1.4. BlockList xml payload to Blobstorage (unauthenticated request) | ||
| // Files of about 800MB are uploaded in parallel and/or out of order; this file is needed to ensure the correct order | ||
| // PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList | ||
| // Request | ||
| // <?xml version="1.0" encoding="UTF-8" standalone="yes"?> | ||
| // <BlockList> | ||
| // <Latest>blockId1</Latest> | ||
| // <Latest>blockId2</Latest> | ||
| // </BlockList> | ||
| // 1.5. FinalizeArtifact | ||
| // Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact | ||
| // Request | ||
| 
          
            
          
           | 
    @@ -82,6 +89,7 @@ import ( | |
| "crypto/hmac" | ||
| "crypto/sha256" | ||
| "encoding/base64" | ||
| "encoding/xml" | ||
| "fmt" | ||
| "io" | ||
| "net/http" | ||
| 
          
            
          
           | 
    @@ -271,6 +279,8 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) { | |
| return | ||
| } | ||
| artifact.ContentEncoding = ArtifactV4ContentEncoding | ||
| artifact.FileSize = 0 | ||
| artifact.FileCompressedSize = 0 | ||
| if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil { | ||
| log.Error("Error UpdateArtifactByID: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID") | ||
| 
        
          
        
         | 
    @@ -293,38 +303,57 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) { | |
| comp := ctx.Req.URL.Query().Get("comp") | ||
| switch comp { | ||
| case "block", "appendBlock": | ||
| // get artifact by name | ||
| artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName) | ||
| if err != nil { | ||
| log.Error("Error artifact not found: %v", err) | ||
| ctx.Error(http.StatusNotFound, "Error artifact not found") | ||
| return | ||
| } | ||
| 
     | 
||
| if comp == "block" { | ||
| artifact.FileSize = 0 | ||
| artifact.FileCompressedSize = 0 | ||
| blockid := ctx.Req.URL.Query().Get("blockid") | ||
| if blockid == "" { | ||
| // get artifact by name | ||
| artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName) | ||
| if err != nil { | ||
| log.Error("Error artifact not found: %v", err) | ||
| ctx.Error(http.StatusNotFound, "Error artifact not found") | ||
| return | ||
| } | ||
| 
     | 
||
| _, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID) | ||
| if err != nil { | ||
| log.Error("Error runner api getting task: task is not running") | ||
| ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running") | ||
| return | ||
| } | ||
| artifact.FileCompressedSize += ctx.Req.ContentLength | ||
| artifact.FileSize += ctx.Req.ContentLength | ||
| if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil { | ||
| log.Error("Error UpdateArtifactByID: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID") | ||
| return | ||
| } | ||
| } else { | ||
| _, err := r.fs.Save(fmt.Sprintf("tmp%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, blockid), ctx.Req.Body, -1) | ||
| if err != nil { | ||
| log.Error("Error runner api getting task: task is not running") | ||
| ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running") | ||
| return | ||
                
       | 
||
| } | ||
| } | ||
| 
     | 
||
| _, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID) | ||
| ctx.JSON(http.StatusCreated, "appended") | ||
| case "blocklist": | ||
| _, err := r.fs.Save(fmt.Sprintf("tmp%d/%d-blocklist", task.Job.RunID, task.Job.RunID), ctx.Req.Body, -1) | ||
                
       | 
||
| if err != nil { | ||
| log.Error("Error runner api getting task: task is not running") | ||
| ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running") | ||
| return | ||
| } | ||
| artifact.FileCompressedSize += ctx.Req.ContentLength | ||
| artifact.FileSize += ctx.Req.ContentLength | ||
| if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil { | ||
| log.Error("Error UpdateArtifactByID: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID") | ||
| return | ||
| } | ||
| ctx.JSON(http.StatusCreated, "appended") | ||
| case "blocklist": | ||
| ctx.JSON(http.StatusCreated, "created") | ||
| } | ||
| } | ||
| 
     | 
||
| type BlockList struct { | ||
| Latest []string `xml:"Latest"` | ||
| } | ||
| 
     | 
||
| type Latest struct { | ||
| Value string `xml:",chardata"` | ||
| } | ||
| 
     | 
||
| func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) { | ||
| var req FinalizeArtifactRequest | ||
| 
     | 
||
| 
        
          
        
         | 
    @@ -343,18 +372,48 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) { | |
| ctx.Error(http.StatusNotFound, "Error artifact not found") | ||
| return | ||
| } | ||
| chunkMap, err := listChunksByRunID(r.fs, runID) | ||
| 
     | 
||
| blockListName := fmt.Sprintf("tmp%d/%d-blocklist", runID, runID) | ||
| var chunks []*chunkFileItem | ||
| s, err := r.fs.Open(blockListName) | ||
| if err != nil { | ||
| log.Error("Error merge chunks: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| } | ||
| chunks, ok := chunkMap[artifact.ID] | ||
| if !ok { | ||
| log.Error("Error merge chunks") | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| log.Warn("Error merge chunks: %v", err) | ||
| chunkMap, err := listChunksByRunID(r.fs, runID) | ||
| if err != nil { | ||
| log.Error("Error merge chunks: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| } | ||
| chunks, ok = chunkMap[artifact.ID] | ||
| if !ok { | ||
| log.Error("Error merge chunks") | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| } | ||
                
       | 
||
| } else { | ||
| err = r.fs.Delete(blockListName) | ||
| if err != nil { | ||
| log.Warn("Failed to delete blockList %s: %v", blockListName, err) | ||
| } | ||
| 
     | 
||
| xdec := xml.NewDecoder(s) | ||
| blockList := &BlockList{} | ||
| err = xdec.Decode(blockList) | ||
| if err != nil { | ||
| log.Error("Error merge chunks: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| } | ||
| chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList) | ||
| if err != nil { | ||
| log.Error("Error merge chunks: %v", err) | ||
| ctx.Error(http.StatusInternalServerError, "Error merge chunks") | ||
| return | ||
| } | ||
| artifact.FileSize = chunks[len(chunks)-1].End + 1 | ||
| artifact.FileCompressedSize = chunks[len(chunks)-1].End + 1 | ||
| } | ||
| 
     | 
||
| checksum := "" | ||
| if req.Hash != nil { | ||
| checksum = req.Hash.Value | ||
| 
          
            
          
           | 
    ||
Uh oh!
There was an error while loading. Please reload this page.