
Commit 9fa7c61

multipart
1 parent 14076d5 commit 9fa7c61

2 files changed: 119 additions, 91 deletions


upyun/io.go

Lines changed: 45 additions & 60 deletions
@@ -1,78 +1,63 @@
 package upyun
 
 import (
+    "bytes"
+    "crypto/md5"
     "fmt"
     "io"
-    "os"
 )
 
 type UpYunPutReader interface {
     Len() (n int)
     MD5() (ret string)
     Read([]byte) (n int, err error)
-    Copyed() (n int)
 }
 
-type fragmentFile struct {
-    realFile *os.File
-    offset   int64
-    limit    int64
-    cursor   int64
+type Chunk struct {
+    buf  io.Reader
+    buf2 *bytes.Buffer
+    id   int
+    n    int
 }
 
-func (f *fragmentFile) Seek(offset int64, whence int) (ret int64, err error) {
-    switch whence {
-    case 0:
-        f.cursor = offset
-        ret, err = f.realFile.Seek(f.offset+f.cursor, 0)
-        return ret - f.offset, err
-    default:
-        return 0, fmt.Errorf("whence must be 0")
+func (c *Chunk) Read(b []byte) (n int, err error) {
+    if c.buf2 != nil {
+        return c.buf2.Read(b)
     }
-}
-
-func (f *fragmentFile) Read(b []byte) (n int, err error) {
-    if f.cursor >= f.limit {
-        return 0, io.EOF
-    }
-    n, err = f.realFile.Read(b)
-    if f.cursor+int64(n) > f.limit {
-        n = int(f.limit - f.cursor)
-    }
-    f.cursor += int64(n)
-    return n, err
-}
-
-func (f *fragmentFile) Stat() (fInfo os.FileInfo, err error) {
-    return fInfo, fmt.Errorf("fragmentFile not implement Stat()")
-}
-
-func (f *fragmentFile) Close() error {
-    return nil
-}
-
-func (f *fragmentFile) Copyed() int {
-    return int(f.cursor - f.offset)
-}
-
-func (f *fragmentFile) Len() int {
-    return int(f.limit - f.offset)
-}
-
-func (f *fragmentFile) MD5() string {
-    s, _ := md5File(f)
-    return s
-}
-
-func newFragmentFile(file *os.File, offset, limit int64) (*fragmentFile, error) {
-    f := &fragmentFile{
-        realFile: file,
-        offset:   offset,
-        limit:    limit,
-    }
-
-    if _, err := f.Seek(0, 0); err != nil {
-        return nil, err
+    return c.buf.Read(b)
+}
+
+func (c *Chunk) Len() int {
+    return c.n
+}
+func (c *Chunk) ID() int {
+    return c.id
+}
+
+func (c *Chunk) MD5() string {
+    c.buf2 = bytes.NewBuffer(nil)
+    reader := io.TeeReader(c.buf, c.buf2)
+    hash := md5.New()
+    _, _ = io.Copy(hash, reader)
+    return fmt.Sprintf("%x", hash.Sum(nil))
+}
+
+func GetReadChunk(input io.Reader, size, partSize int64, ch chan *Chunk) {
+    id := 0
+    bytesLeft := size
+    for bytesLeft > 0 {
+        n := partSize
+        if bytesLeft <= partSize {
+            n = bytesLeft
+        }
+        reader := io.LimitReader(input, n)
+        ch <- &Chunk{
+            buf: reader,
+            id:  id,
+            n:   int(n),
+        }
+        id++
+        bytesLeft -= n
     }
-    return f, nil
+    close(ch)
 }
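
The new Chunk/GetReadChunk pair replaces fragmentFile: instead of seeking around an *os.File, the producer slices a shared reader with io.LimitReader and streams the parts over a channel, and Chunk.MD5 buffers the bytes it hashes so a later Read still returns them. A minimal sketch of driving it; the chunkDemo helper is hypothetical and assumed to sit in the same upyun package:

package upyun

import (
    "fmt"
    "strings"
)

// chunkDemo splits a 10-byte reader into 4-byte parts (4, 4, 2) and prints
// each part's ID, length and MD5. Parts must be drained in order because all
// LimitReaders share the same underlying reader; MD5 drains the part fully.
func chunkDemo() {
    src := strings.NewReader("0123456789")
    ch := make(chan *Chunk, 1)
    go GetReadChunk(src, 10, 4, ch)
    for c := range ch {
        fmt.Printf("part %d: %d bytes, md5=%s\n", c.ID(), c.Len(), c.MD5())
    }
}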

upyun/rest.go

Lines changed: 74 additions & 31 deletions
@@ -79,6 +79,8 @@ type GetRequestConfig struct {
     Headers map[string]string
 }
 
+type ProxyReader func(size, offset int64, r io.Reader) io.Reader
+
 // PutObjectConfig provides a configuration to Put method.
 type PutObjectConfig struct {
     Path string
@@ -91,6 +93,7 @@ type PutObjectConfig struct {
     // AppendContent bool
     ResumePartSize    int64
     MaxResumePutTries int
+    ProxyReader       ProxyReader
 }
 
 type MoveObjectConfig struct {
@@ -278,6 +281,34 @@ func getPartInfo(partSize, fsize int64) (int64, int64, error) {
     return partSize, partNum, nil
 }
 
+func (up *UpYun) getMultipartUploadProcess(config *PutObjectConfig, fsize int64) (*ResumeProcessResult, error) {
+    resumeProcessResult, _ := up.GetResumeProcess(config.Path)
+    if resumeProcessResult != nil {
+        if resumeProcessResult.Order {
+            return resumeProcessResult, nil
+        }
+    }
+
+    initMultipartUploadConfig := &InitMultipartUploadConfig{
+        Path:          config.Path,
+        ContentLength: fsize,
+        PartSize:      config.ResumePartSize,
+        ContentType:   config.Headers["Content-Type"],
+        OrderUpload:   true,
+    }
+    initMultipartUploadResult, err := up.InitMultipartUpload(initMultipartUploadConfig)
+    if err != nil {
+        return nil, err
+    }
+    return &ResumeProcessResult{
+        UploadID:     initMultipartUploadResult.UploadID,
+        Path:         initMultipartUploadConfig.Path,
+        NextPartID:   0,
+        NextPartSize: config.ResumePartSize,
+        Parts:        make([]*DisorderPart, 0),
+    }, nil
+}
+
 func (up *UpYun) resumePut(config *PutObjectConfig) error {
     f, ok := config.Reader.(*os.File)
     if !ok {
@@ -301,7 +332,6 @@ func (up *UpYun) resumePut(config *PutObjectConfig) error {
     if config.Headers == nil {
         config.Headers = make(map[string]string)
     }
-    headers := config.Headers
 
     var breakpoint *BreakPointConfig
     if up.Recorder != nil {
@@ -310,23 +340,16 @@ func (up *UpYun) resumePut(config *PutObjectConfig) error {
     // first upload or file has expired
     maxPartID := int((fsize+config.ResumePartSize-1)/config.ResumePartSize - 1)
 
-    var uploadInfo *InitMultipartUploadResult
     if breakpoint == nil || isRecordExpired(fileinfo, breakpoint) {
-        uploadInfo, err = up.InitMultipartUpload(&InitMultipartUploadConfig{
-            Path:          config.Path,
-            PartSize:      config.ResumePartSize,
-            ContentType:   headers["Content-Type"],
-            ContentLength: fsize,
-            OrderUpload:   true,
-        })
+        uploadProcess, err := up.getMultipartUploadProcess(config, fsize)
         if err != nil {
             return err
         }
 
         breakpoint = &BreakPointConfig{
-            UploadID: uploadInfo.UploadID,
-            PartSize: uploadInfo.PartSize,
-            PartID:   0,
+            UploadID: uploadProcess.UploadID,
+            PartSize: uploadProcess.NextPartSize,
+            PartID:   int(uploadProcess.NextPartID),
         }
 
         if up.Recorder != nil {
@@ -590,6 +613,21 @@ func (up *UpYun) GetRequest(config *GetRequestConfig) (*http.Response, error) {
     return resp, nil
 }
 
+func (up *UpYun) GetInfoWithHeaders(path string, headers map[string]string) (*FileInfo, error) {
+    resp, err := up.doRESTRequest(&restReqConfig{
+        method:    "HEAD",
+        uri:       path,
+        headers:   headers,
+        closeBody: true,
+    })
+    if err != nil {
+        return nil, errorOperation("get info", err)
+    }
+    fInfo := parseHeaderToFileInfo(resp.Header, true)
+    fInfo.Name = path
+    return fInfo, nil
+}
+
 func (up *UpYun) GetInfo(path string) (*FileInfo, error) {
     resp, err := up.doRESTRequest(&restReqConfig{
         method: "HEAD",
@@ -876,21 +914,21 @@ type BreakPointConfig struct {
     LastTime time.Time
 }
 
-func (up *UpYun) resumeUploadPart(config *PutObjectConfig, breakpoint *BreakPointConfig, f *os.File, fileInfo fs.FileInfo) (*BreakPointConfig, error) {
+func (up *UpYun) resumeUploadPart(config *PutObjectConfig, breakpoint *BreakPointConfig, f io.Reader, fileInfo fs.FileInfo) (*BreakPointConfig, error) {
     fsize := fileInfo.Size()
-    maxPartID := int((fsize+config.ResumePartSize-1)/config.ResumePartSize - 1)
     partID := breakpoint.PartID
     curSize, partSize := int64(partID)*breakpoint.PartSize, breakpoint.PartSize
-
-    for id := partID; id <= maxPartID; id++ {
-        if curSize+partSize > fsize {
-            partSize = fsize - curSize
-        }
-        fragFile, err := newFragmentFile(f, curSize, partSize)
-        if err != nil {
-            return breakpoint, errorOperation("new fragment file", err)
-        }
-
+    bytesLeft := fsize - curSize
+    ch := make(chan *Chunk, 1)
+    var err error
+    var reader io.Reader
+    if config.ProxyReader != nil {
+        reader = config.ProxyReader(fsize, curSize, f)
+    } else {
+        reader = f
+    }
+    go GetReadChunk(reader, bytesLeft, partSize, ch)
+    for chunk := range ch {
         try := 0
         for ; config.MaxResumePutTries == 0 || try < config.MaxResumePutTries; try++ {
             err = up.UploadPart(
@@ -900,28 +938,26 @@ func (up *UpYun) resumeUploadPart(config *PutObjectConfig, breakpoint *BreakPoin
                     PartSize: breakpoint.PartSize,
                 },
                 &UploadPartConfig{
-                    PartID:   id,
-                    PartSize: partSize,
-                    Reader:   fragFile,
+                    PartID:   partID + chunk.ID(),
+                    PartSize: int64(chunk.Len()),
+                    Reader:   chunk,
                 })
             if err == nil {
                 break
             }
         }
 
         if config.MaxResumePutTries > 0 && try == config.MaxResumePutTries {
-            breakpoint.PartID = id
+            breakpoint.PartID = partID + chunk.ID()
            breakpoint.FileSize = fsize
            breakpoint.LastTime = time.Now()
            breakpoint.FileModTime = fileInfo.ModTime()
-
            if up.Recorder != nil {
                up.Recorder.Set(config.Path, breakpoint)
            }
            return breakpoint, err
         }
-        curSize += partSize
-        breakpoint.PartID = id + 1
+        breakpoint.PartID = partID + chunk.ID() + 1
     }
     return breakpoint, nil
 }
@@ -940,6 +976,7 @@ type ResumeDisorderResult struct {
 type ResumeProcessResult struct {
     UploadID     string
     Path         string
+    Order        bool
     NextPartSize int64
     NextPartID   int64
     Parts        []*DisorderPart
@@ -965,6 +1002,11 @@ func (up *UpYun) GetResumeProcess(path string) (*ResumeProcessResult, error) {
     partSizeStr := resp.Header.Get("X-Upyun-Next-Part-Size")
     partIDStr := resp.Header.Get("X-Upyun-Next-Part-Id")
     uploadID := resp.Header.Get("X-Upyun-Multi-Uuid")
+    order := resp.Header.Get("X-Upyun-Meta-Order")
+    o := true
+    if order == "false" {
+        o = false
+    }
 
     if partSizeStr != "" {
         partSize, err = strconv.ParseInt(partSizeStr, 10, 64)
@@ -995,6 +1037,7 @@ func (up *UpYun) GetResumeProcess(path string) (*ResumeProcessResult, error) {
         NextPartSize: partSize,
         NextPartID:   partID,
         Path:         path,
+        Order:        o,
         Parts:        disorderRes.Parts,
     }, nil
 }
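
A rough usage sketch of the new ProxyReader hook: wrap the stream in a progress counter while the resumable upload drains the file. Only Path, Reader, ResumePartSize and ProxyReader appear in this diff; the import path, the placeholder credentials and the UseResumeUpload switch are assumptions about the surrounding SDK.

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/upyun/go-sdk/v3/upyun" // import path assumed
)

// progressReader counts bytes as the SDK reads the remaining part of the file.
type progressReader struct {
    r           io.Reader
    total, done int64
}

func (p *progressReader) Read(b []byte) (int, error) {
    n, err := p.r.Read(b)
    p.done += int64(n)
    fmt.Printf("\ruploaded %d/%d bytes", p.done, p.total)
    return n, err
}

func main() {
    up := upyun.NewUpYun(&upyun.UpYunConfig{
        Bucket:   "demo-bucket", // placeholder credentials
        Operator: "demo-operator",
        Password: "demo-password",
    })

    f, err := os.Open("big.bin")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    err = up.Put(&upyun.PutObjectConfig{
        Path:            "/remote/big.bin",
        Reader:          f,
        UseResumeUpload: true, // assumed flag that routes Put through resumePut
        ResumePartSize:  10 * 1024 * 1024,
        // ProxyReader observes or transforms the stream: size is the whole
        // file, offset is where the resumed upload restarts.
        ProxyReader: func(size, offset int64, r io.Reader) io.Reader {
            return &progressReader{r: r, total: size, done: offset}
        },
    })
    if err != nil {
        panic(err)
    }
}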
