Skip to content

Commit a53c8b1

Browse files
committed
fix: read each upload chunk fully into a reusable buffer (io.ReadFull) instead of io.LimitReader, so short reads are detected and retried chunks can be re-sent via bytes.Reader.Seek
1 parent 64e967f commit a53c8b1

File tree

3 files changed

+30
-19
lines changed

3 files changed

+30
-19
lines changed

drivers/123/upload.go

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package _123
22

33
import (
4+
"bytes"
45
"context"
56
"fmt"
67
"io"
@@ -69,25 +70,23 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
6970
}
7071

7172
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
72-
chunkSize := int64(16 * utils.MB)
7373
// fetch s3 pre signed urls
7474
size := file.GetSize()
75+
chunkSize := min(size, 16*utils.MB)
7576
chunkCount := int(size / chunkSize)
7677
lastChunkSize := size % chunkSize
7778
if lastChunkSize > 0 {
7879
chunkCount++
79-
} else {
80-
lastChunkSize = chunkSize
8180
}
8281
// only 1 batch is allowed
83-
isMultipart := chunkCount > 1
8482
batchSize := 1
8583
getS3UploadUrl := d.getS3Auth
86-
if isMultipart {
84+
if chunkCount > 1 {
8785
batchSize = 10
8886
getS3UploadUrl = d.getS3PreSignedUrls
8987
}
9088
limited := driver.NewLimitedUploadStream(ctx, file)
89+
buf := make([]byte, chunkSize)
9190
for i := 1; i <= chunkCount; i += batchSize {
9291
if utils.IsCanceled(ctx) {
9392
return ctx.Err()
@@ -103,10 +102,17 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
103102
if utils.IsCanceled(ctx) {
104103
return ctx.Err()
105104
}
106-
if j == chunkCount {
107-
chunkSize = lastChunkSize
105+
if j == chunkCount && lastChunkSize > 0 {
106+
buf = buf[:lastChunkSize]
108107
}
109-
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), chunkSize, false, getS3UploadUrl)
108+
n, err := io.ReadFull(limited, buf)
109+
if err == io.ErrUnexpectedEOF {
110+
return fmt.Errorf("upload s3 chunk %d failed, can't read data, expected=%d, got=%d", j, len(buf), n)
111+
}
112+
if err != nil {
113+
return err
114+
}
115+
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, bytes.NewReader(buf), int64(n), false, getS3UploadUrl)
110116
if err != nil {
111117
return err
112118
}
@@ -117,7 +123,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
117123
return d.completeS3(ctx, upReq, file, chunkCount > 1)
118124
}
119125

120-
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
126+
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *bytes.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
121127
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
122128
if uploadUrl == "" {
123129
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
@@ -145,6 +151,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
145151
}
146152
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
147153
// retry
154+
reader.Seek(0, io.SeekStart)
148155
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
149156
}
150157
if res.StatusCode != http.StatusOK {

drivers/aliyundrive_open/upload.go

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package aliyundrive_open
22

33
import (
4-
"bytes"
54
"context"
65
"encoding/base64"
76
"fmt"
@@ -132,16 +131,19 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
132131
return "", err
133132
}
134133
length := proofRange.End - proofRange.Start
135-
buf := bytes.NewBuffer(make([]byte, 0, length))
136134
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
137135
if err != nil {
138136
return "", err
139137
}
140-
_, err = utils.CopyWithBufferN(buf, reader, length)
138+
buf := make([]byte, length)
139+
n, err := io.ReadFull(reader, buf)
140+
if err == io.ErrUnexpectedEOF {
141+
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
142+
}
141143
if err != nil {
142144
return "", err
143145
}
144-
return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
146+
return base64.StdEncoding.EncodeToString(buf), nil
145147
}
146148

147149
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

internal/stream/stream.go

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -128,15 +128,17 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
128128
if f.tmpFile == nil {
129129
if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
130130
bufSize := utils.Min(httpRange.Length, f.GetSize())
131-
newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
132-
n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize)
131+
// Previously bytes.Buffer was the write target of io.CopyBuffer; CopyBuffer calls Buffer.ReadFrom,
132+
// which grows the buffer even when the amount of data written exactly equals Buffer.Cap.
133+
buf := make([]byte, bufSize)
134+
n, err := io.ReadFull(f.Reader, buf)
135+
if err == io.ErrUnexpectedEOF {
136+
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
137+
}
133138
if err != nil {
134139
return nil, err
135140
}
136-
if n != bufSize {
137-
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
138-
}
139-
f.peekBuff = bytes.NewReader(newBuf.Bytes())
141+
f.peekBuff = bytes.NewReader(buf)
140142
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
141143
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
142144
} else {

0 commit comments

Comments
 (0)