Skip to content

Commit 672d797

Browse files
committed
no test
1 parent 7999f62 commit 672d797

13 files changed

Lines changed: 235 additions & 191 deletions

File tree

drivers/115/driver.go

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
99
"github.com/alist-org/alist/v3/internal/driver"
1010
"github.com/alist-org/alist/v3/internal/model"
11+
"github.com/alist-org/alist/v3/internal/stream"
1112
"github.com/alist-org/alist/v3/pkg/http_range"
1213
"github.com/alist-org/alist/v3/pkg/utils"
1314
"github.com/pkg/errors"
@@ -149,7 +150,7 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
149150
return d.client.Delete(obj.GetID())
150151
}
151152

152-
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
153+
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
153154
if err := d.WaitLimit(ctx); err != nil {
154155
return nil, err
155156
}
@@ -162,7 +163,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
162163
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
163164
return nil, err
164165
}
165-
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
166+
if s.GetSize() > d.client.UploadMetaInfo.SizeLimit {
166167
return nil, driver115.ErrUploadTooLarge
167168
}
168169
//if digest, err = d.client.GetDigestResult(stream); err != nil {
@@ -171,10 +172,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
171172

172173
const PreHashSize int64 = 128 * utils.KB
173174
hashSize := PreHashSize
174-
if stream.GetSize() < PreHashSize {
175-
hashSize = stream.GetSize()
175+
if s.GetSize() < PreHashSize {
176+
hashSize = s.GetSize()
176177
}
177-
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
178+
reader, err := s.RangeRead(http_range.Range{Start: 0, Length: hashSize})
178179
if err != nil {
179180
return nil, err
180181
}
@@ -183,13 +184,9 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
183184
return nil, err
184185
}
185186
preHash = strings.ToUpper(preHash)
186-
fullHash := stream.GetHash().GetHash(utils.SHA1)
187-
if len(fullHash) <= 0 {
188-
tmpF, err := stream.CacheFullInTempFile()
189-
if err != nil {
190-
return nil, err
191-
}
192-
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
187+
fullHash := s.GetHash().GetHash(utils.SHA1)
188+
if len(fullHash) != utils.SHA1.Width {
189+
_, fullHash, err = stream.CacheFullInTempFileAndHash(s, utils.SHA1)
193190
if err != nil {
194191
return nil, err
195192
}
@@ -199,7 +196,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
199196
// rapid-upload
200197
// note that 115 add timeout for rapid-upload,
201198
// and "sig invalid" err is thrown even when the hash is correct after timeout.
202-
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
199+
if fastInfo, err = d.rapidUpload(s.GetSize(), s.GetName(), dirID, preHash, fullHash, s); err != nil {
203200
return nil, err
204201
}
205202
if matched, err := fastInfo.Ok(); err != nil {
@@ -214,13 +211,13 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
214211

215212
var uploadResult *UploadResult
216213
// 闪传失败,上传
217-
if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传
218-
if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
214+
if s.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传
215+
if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, s, dirID, up); err != nil {
219216
return nil, err
220217
}
221218
} else {
222219
// 分片上传
223-
if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
220+
if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, s.GetSize(), s, dirID, up); err != nil {
224221
return nil, err
225222
}
226223
}

drivers/115_open/driver.go

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ import (
1515
"github.com/alist-org/alist/v3/internal/driver"
1616
"github.com/alist-org/alist/v3/internal/model"
1717
"github.com/alist-org/alist/v3/internal/op"
18+
"github.com/alist-org/alist/v3/internal/stream"
19+
"github.com/alist-org/alist/v3/pkg/http_range"
1820
"github.com/alist-org/alist/v3/pkg/utils"
1921
"github.com/aliyun/aliyun-oss-go-sdk/oss"
2022
sdk "github.com/xhofe/115-sdk-go"
@@ -183,33 +185,37 @@ func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
183185
return nil
184186
}
185187

186-
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
187-
tempF, err := file.CacheFullInTempFile()
188-
if err != nil {
189-
return err
190-
}
188+
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
191189
// cal full sha1
192-
sha1, err := utils.HashReader(utils.SHA1, tempF)
193-
if err != nil {
194-
return err
195-
}
196-
_, err = tempF.Seek(0, io.SeekStart)
197-
if err != nil {
198-
return err
190+
sha1 := s.GetHash().GetHash(utils.SHA1)
191+
var tempF model.File
192+
var err error
193+
if len(sha1) != utils.SHA1.Width {
194+
tempF, sha1, err = stream.CacheFullInTempFileAndHash(s, utils.SHA1)
195+
if err != nil {
196+
return err
197+
}
198+
} else {
199+
tempF, err = s.CacheFullInTempFile()
200+
if err != nil {
201+
return err
202+
}
199203
}
200204
// pre 128k sha1
201-
sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
205+
const PreHashSize int64 = 128 * utils.KB
206+
hashSize := min(s.GetSize(), PreHashSize)
207+
reader, err := s.RangeRead(http_range.Range{Start: 0, Length: hashSize})
202208
if err != nil {
203209
return err
204210
}
205-
_, err = tempF.Seek(0, io.SeekStart)
211+
sha1128k, err := utils.HashReader(utils.SHA1, reader)
206212
if err != nil {
207213
return err
208214
}
209215
// 1. Init
210216
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
211-
FileName: file.GetName(),
212-
FileSize: file.GetSize(),
217+
FileName: s.GetName(),
218+
FileSize: s.GetSize(),
213219
Target: dstDir.GetID(),
214220
FileID: strings.ToUpper(sha1),
215221
PreID: strings.ToUpper(sha1128k),
@@ -244,8 +250,8 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
244250
return err
245251
}
246252
resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
247-
FileName: file.GetName(),
248-
FileSize: file.GetSize(),
253+
FileName: s.GetName(),
254+
FileSize: s.GetSize(),
249255
Target: dstDir.GetID(),
250256
FileID: strings.ToUpper(sha1),
251257
PreID: strings.ToUpper(sha1128k),

drivers/baidu_netdisk/driver.go

Lines changed: 39 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,15 @@ import (
88
"io"
99
"math"
1010
"net/url"
11+
"os"
1112
stdpath "path"
1213
"strconv"
1314
"time"
1415

1516
"golang.org/x/sync/semaphore"
1617

1718
"github.com/alist-org/alist/v3/drivers/base"
19+
"github.com/alist-org/alist/v3/internal/conf"
1820
"github.com/alist-org/alist/v3/internal/driver"
1921
"github.com/alist-org/alist/v3/internal/errs"
2022
"github.com/alist-org/alist/v3/internal/model"
@@ -176,18 +178,28 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
176178
//
177179
// **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
178180
// 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
179-
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
181+
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
180182
// rapid upload
181-
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
183+
if newObj, err := d.PutRapid(ctx, dstDir, file); err == nil {
182184
return newObj, nil
183185
}
184186

185-
tempFile, err := stream.CacheFullInTempFile()
186-
if err != nil {
187-
return nil, err
187+
var readerAt = file.GetCache()
188+
var (
189+
tmpF *os.File
190+
err error
191+
)
192+
writers := make([]io.Writer, 0, 4)
193+
if _, ok := readerAt.(io.ReaderAt); !ok {
194+
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
195+
if err != nil {
196+
return nil, err
197+
}
198+
writers = append(writers, tmpF)
199+
readerAt = tmpF
188200
}
189201

190-
streamSize := stream.GetSize()
202+
streamSize := file.GetSize()
191203
sliceSize := d.getSliceSize(streamSize)
192204
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
193205
lastBlockSize := streamSize % sliceSize
@@ -204,6 +216,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
204216
sliceMd5H := md5.New()
205217
sliceMd5H2 := md5.New()
206218
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
219+
writers = append(writers, fileMd5H, sliceMd5H, slicemd5H2Write)
220+
written := int64(0)
207221

208222
for i := 1; i <= count; i++ {
209223
if utils.IsCanceled(ctx) {
@@ -212,19 +226,32 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
212226
if i == count {
213227
byteSize = lastBlockSize
214228
}
215-
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
229+
n, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), file, byteSize)
230+
written += n
216231
if err != nil && err != io.EOF {
217232
return nil, err
218233
}
219234
blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
220235
sliceMd5H.Reset()
221236
}
237+
if tmpF != nil {
238+
if written != streamSize {
239+
_ = os.Remove(tmpF.Name())
240+
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
241+
}
242+
_, err = tmpF.Seek(0, io.SeekStart)
243+
if err != nil {
244+
_ = os.Remove(tmpF.Name())
245+
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
246+
}
247+
file.SetTmpFile(tmpF)
248+
}
222249
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
223250
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
224251
blockListStr, _ := utils.Json.MarshalToString(blockList)
225-
path := stdpath.Join(dstDir.GetPath(), stream.GetName())
226-
mtime := stream.ModTime().Unix()
227-
ctime := stream.CreateTime().Unix()
252+
path := stdpath.Join(dstDir.GetPath(), file.GetName())
253+
mtime := file.ModTime().Unix()
254+
ctime := file.CreateTime().Unix()
228255

229256
// step.1 预上传
230257
// 尝试获取之前的进度
@@ -284,8 +311,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
284311
"uploadid": precreateResp.Uploadid,
285312
"partseq": strconv.Itoa(partseq),
286313
}
287-
err := d.uploadSlice(ctx, params, stream.GetName(),
288-
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
314+
err := d.uploadSlice(ctx, params, file.GetName(),
315+
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(readerAt, offset, byteSize)))
289316
if err != nil {
290317
return err
291318
}

drivers/baidu_photo/driver.go

Lines changed: 41 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"fmt"
99
"io"
1010
"math"
11+
"os"
1112
"regexp"
1213
"strconv"
1314
"strings"
@@ -16,6 +17,7 @@ import (
1617
"golang.org/x/sync/semaphore"
1718

1819
"github.com/alist-org/alist/v3/drivers/base"
20+
"github.com/alist-org/alist/v3/internal/conf"
1921
"github.com/alist-org/alist/v3/internal/driver"
2022
"github.com/alist-org/alist/v3/internal/errs"
2123
"github.com/alist-org/alist/v3/internal/model"
@@ -233,26 +235,35 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
233235
return errs.NotSupport
234236
}
235237

236-
func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
238+
func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
237239
// 不支持大小为0的文件
238-
if stream.GetSize() == 0 {
240+
if file.GetSize() == 0 {
239241
return nil, fmt.Errorf("file size cannot be zero")
240242
}
241243

242244
// TODO:
243245
// 暂时没有找到妙传方式
244246

245-
// 需要获取完整文件md5,必须支持 io.Seek
246-
tempFile, err := stream.CacheFullInTempFile()
247-
if err != nil {
248-
return nil, err
247+
var readerAt = file.GetCache()
248+
var (
249+
tmpF *os.File
250+
err error
251+
)
252+
writers := make([]io.Writer, 0, 4)
253+
if _, ok := readerAt.(io.ReaderAt); !ok {
254+
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
255+
if err != nil {
256+
return nil, err
257+
}
258+
writers = append(writers, tmpF)
259+
readerAt = tmpF
249260
}
250261

251262
const DEFAULT int64 = 1 << 22
252263
const SliceSize int64 = 1 << 18
253264

254265
// 计算需要的数据
255-
streamSize := stream.GetSize()
266+
streamSize := file.GetSize()
256267
count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
257268
lastBlockSize := streamSize % DEFAULT
258269
if lastBlockSize == 0 {
@@ -266,20 +277,38 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
266277
sliceMd5H := md5.New()
267278
sliceMd5H2 := md5.New()
268279
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
280+
writers = append(writers, fileMd5H, sliceMd5H, slicemd5H2Write)
281+
written := int64(0)
269282
for i := 1; i <= count; i++ {
270283
if utils.IsCanceled(ctx) {
271284
return nil, ctx.Err()
272285
}
273286
if i == count {
274287
byteSize = lastBlockSize
275288
}
276-
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
289+
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize)
290+
written += n
277291
if err != nil && err != io.EOF {
292+
if tmpF != nil {
293+
_ = os.Remove(tmpF.Name())
294+
}
278295
return nil, err
279296
}
280297
sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
281298
sliceMd5H.Reset()
282299
}
300+
if tmpF != nil {
301+
if written != streamSize {
302+
_ = os.Remove(tmpF.Name())
303+
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
304+
}
305+
_, err = tmpF.Seek(0, io.SeekStart)
306+
if err != nil {
307+
_ = os.Remove(tmpF.Name())
308+
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
309+
}
310+
file.SetTmpFile(tmpF)
311+
}
283312
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
284313
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
285314
blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
@@ -290,8 +319,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
290319
"isdir": "0",
291320
"rtype": "1",
292321
"ctype": "11",
293-
"path": fmt.Sprintf("/%s", stream.GetName()),
294-
"size": fmt.Sprint(stream.GetSize()),
322+
"path": fmt.Sprintf("/%s", file.GetName()),
323+
"size": fmt.Sprint(streamSize),
295324
"slice-md5": sliceMd5,
296325
"content-md5": contentMd5,
297326
"block_list": blockListStr,
@@ -342,8 +371,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
342371
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
343372
r.SetContext(ctx)
344373
r.SetQueryParams(uploadParams)
345-
r.SetFileReader("file", stream.GetName(),
346-
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
374+
r.SetFileReader("file", file.GetName(),
375+
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(readerAt, offset, byteSize)))
347376
}, nil)
348377
if err != nil {
349378
return err

0 commit comments

Comments (0)