schedulers.go
package uploader

import (
	"context"
	"sort"
	"sync"

	"github.com/qiniu/go-sdk/v7/storagev2/uploader/source"
	"golang.org/x/sync/errgroup"
)

type (
	// serialMultiPartsUploaderScheduler uploads parts one at a time, in order.
	serialMultiPartsUploaderScheduler struct {
		uploader MultiPartsUploader
		partSize uint64
	}

	// Options for the serial multi-parts uploader scheduler
	serialMultiPartsUploaderSchedulerOptions struct {
		PartSize uint64 // Part size in bytes
	}

	// concurrentMultiPartsUploaderScheduler uploads several parts concurrently.
	concurrentMultiPartsUploaderScheduler struct {
		uploader    MultiPartsUploader
		partSize    uint64
		concurrency int
	}

	// Options for the concurrent multi-parts uploader scheduler
	concurrentMultiPartsUploaderSchedulerOptions struct {
		PartSize    uint64 // Part size in bytes
		Concurrency int    // Number of parts uploaded at the same time
	}
)

// newSerialMultiPartsUploaderScheduler creates a serial multi-parts uploader scheduler.
func newSerialMultiPartsUploaderScheduler(uploader MultiPartsUploader, options *serialMultiPartsUploaderSchedulerOptions) multiPartsUploaderScheduler {
	if options == nil {
		options = &serialMultiPartsUploaderSchedulerOptions{}
	}
	// Default to 4 MiB parts and clamp any requested size to the [1 MiB, 1 GiB] range.
	partSize := options.PartSize
	if partSize == 0 {
		partSize = 1 << 22
	} else if partSize < (1 << 20) {
		partSize = 1 << 20
	} else if partSize > (1 << 30) {
		partSize = 1 << 30
	}
	return serialMultiPartsUploaderScheduler{uploader, partSize}
}

// newConcurrentMultiPartsUploaderScheduler creates a concurrent multi-parts uploader scheduler.
func newConcurrentMultiPartsUploaderScheduler(uploader MultiPartsUploader, options *concurrentMultiPartsUploaderSchedulerOptions) multiPartsUploaderScheduler {
	if options == nil {
		options = &concurrentMultiPartsUploaderSchedulerOptions{}
	}
	// Default to 4 MiB parts and clamp any requested size to the [1 MiB, 1 GiB] range.
	partSize := options.PartSize
	if partSize == 0 {
		partSize = 1 << 22
	} else if partSize < (1 << 20) {
		partSize = 1 << 20
	} else if partSize > (1 << 30) {
		partSize = 1 << 30
	}
	// Upload at most 4 parts at a time unless a positive concurrency is given.
	concurrency := options.Concurrency
	if concurrency <= 0 {
		concurrency = 4
	}
	return concurrentMultiPartsUploaderScheduler{uploader, partSize, concurrency}
}

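// A minimal sketch (not part of the original file) of the defaulting behaviour above: both
// constructors accept nil options, in which case a 4 MiB part size applies, and the concurrent
// scheduler additionally falls back to a concurrency of 4. The `uploader` value is assumed to
// be any MultiPartsUploader implementation from this package.
//
//	serial := newSerialMultiPartsUploaderScheduler(uploader, nil) // serial.PartSize() == 1<<22
//	concurrent := newConcurrentMultiPartsUploaderScheduler(uploader, &concurrentMultiPartsUploaderSchedulerOptions{
//		PartSize: 1 << 10, // 1 KiB requested, clamped up to 1 MiB; concurrency defaults to 4
//	})
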
// UploadParts slices the source and uploads the parts one by one, stopping at the first error.
func (scheduler serialMultiPartsUploaderScheduler) UploadParts(ctx context.Context, initialized InitializedParts, src source.Source, options *UploadPartsOptions) ([]UploadedPart, error) {
	parts := make([]UploadedPart, 0)
	for {
		part, err := src.Slice(scheduler.partSize)
		if err != nil {
			return nil, err
		}
		// A nil part means the source is exhausted.
		if part == nil {
			break
		}
		var uploadPartParam UploadPartOptions
		if options != nil && options.OnUploadingProgress != nil {
			// Re-scope the per-part progress callback so it reports this part's number and size.
			uploadPartParam.OnUploadingProgress = func(progress *UploadingPartProgress) {
				options.OnUploadingProgress(part.PartNumber(), &UploadingPartProgress{Uploaded: progress.Uploaded, PartSize: part.Size()})
			}
		}
		uploadedPart, err := scheduler.uploader.UploadPart(ctx, initialized, part, &uploadPartParam)
		if err != nil {
			return nil, err
		}
		if options != nil && options.OnPartUploaded != nil {
			if err = options.OnPartUploaded(uploadedPart); err != nil {
				return nil, err
			}
		}
		parts = append(parts, uploadedPart)
	}
	return parts, nil
}

func (scheduler serialMultiPartsUploaderScheduler) MultiPartsUploader() MultiPartsUploader {
	return scheduler.uploader
}

func (scheduler serialMultiPartsUploaderScheduler) PartSize() uint64 {
	return scheduler.partSize
}

// UploadParts slices the source and uploads the parts concurrently, bounded by the scheduler's
// concurrency. The uploaded parts are returned sorted by offset.
func (scheduler concurrentMultiPartsUploaderScheduler) UploadParts(ctx context.Context, initialized InitializedParts, src source.Source, options *UploadPartsOptions) ([]UploadedPart, error) {
	var (
		parts     []UploadedPart
		partsLock sync.Mutex
	)
	// If the total size is known, preallocate the slice using a ceiling division by the part size.
	if ss, ok := src.(source.SizedSource); ok {
		totalSize, err := ss.TotalSize()
		if err != nil {
			return nil, err
		}
		partsCount := (totalSize + scheduler.partSize - 1) / scheduler.partSize
		parts = make([]UploadedPart, 0, partsCount)
	}
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(scheduler.concurrency)
	var onUploadingProgressMutex sync.Mutex
	for {
		part, err := src.Slice(scheduler.partSize)
		if err != nil {
			return nil, err
		}
		// A nil part means the source is exhausted.
		if part == nil {
			break
		}
		g.Go(func() error {
			var uploadPartParam UploadPartOptions
			if options != nil && options.OnUploadingProgress != nil {
				// Serialize progress callbacks so concurrently uploaded parts do not interleave reports.
				uploadPartParam.OnUploadingProgress = func(progress *UploadingPartProgress) {
					onUploadingProgressMutex.Lock()
					defer onUploadingProgressMutex.Unlock()
					options.OnUploadingProgress(part.PartNumber(), progress)
				}
			}
			uploadedPart, err := scheduler.uploader.UploadPart(ctx, initialized, part, &uploadPartParam)
			if err != nil {
				return err
			}
			if options != nil && options.OnPartUploaded != nil {
				if err = options.OnPartUploaded(uploadedPart); err != nil {
					return err
				}
			}
			partsLock.Lock()
			defer partsLock.Unlock()
			parts = append(parts, uploadedPart)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	// Parts may finish out of order; restore the original ordering by offset.
	sort.Slice(parts, func(i, j int) bool {
		return parts[i].Offset() < parts[j].Offset()
	})
	return parts, nil
}

func (scheduler concurrentMultiPartsUploaderScheduler) MultiPartsUploader() MultiPartsUploader {
	return scheduler.uploader
}

func (scheduler concurrentMultiPartsUploaderScheduler) PartSize() uint64 {
	return scheduler.partSize
}

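// exampleUploadAllParts is a minimal sketch, not part of the original file, showing how a
// scheduler built by newConcurrentMultiPartsUploaderScheduler might be driven. The uploader,
// the initialized multi-parts upload and the data source are assumed to be obtained through
// the rest of this package; only the scheduler wiring is illustrated here.
func exampleUploadAllParts(ctx context.Context, uploader MultiPartsUploader, initialized InitializedParts, src source.Source) ([]UploadedPart, error) {
	scheduler := newConcurrentMultiPartsUploaderScheduler(uploader, &concurrentMultiPartsUploaderSchedulerOptions{
		PartSize:    1 << 23, // request 8 MiB parts; the constructor clamps to [1 MiB, 1 GiB]
		Concurrency: 8,       // upload at most 8 parts at a time
	})
	// A nil *UploadPartsOptions is accepted by UploadParts: no progress or per-part callbacks fire.
	return scheduler.UploadParts(ctx, initialized, src, nil)
}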