forked from bcongdon/corral
/
s3_io.go
122 lines (105 loc) 路 2.57 KB
/
s3_io.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
package corfs
import (
"fmt"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/mattetti/filebuffer"
)
// s3Writer streams data to S3 via the multipart-upload API, buffering
// writes in memory and flushing a part whenever the buffer exceeds
// uploadChunkSize. NOTE(review): the field name "complatedParts" is a
// typo for "completedParts"; kept as-is to avoid touching other methods.
type s3Writer struct {
	client *s3.S3 // S3 API client used for all upload calls
	bucket string // destination bucket
	key    string // destination object key
	// In-memory buffer holding data not yet uploaded as a part.
	buf *filebuffer.Buffer
	// Threshold (bytes) above which the buffer is flushed as a part.
	// NOTE(review): S3 requires non-final parts to be >= 5 MiB — confirm
	// callers configure this accordingly.
	uploadChunkSize int64
	// Multipart upload ID assigned by CreateMultipartUpload in Init.
	uploadID string
	// Parts uploaded so far, in order; needed to complete the upload.
	complatedParts []*s3.CompletedPart
}
// Init starts a multipart upload for the writer's bucket/key and records
// the upload ID used by all subsequent part uploads. It must be called
// before Write or Close.
func (s *s3Writer) Init() error {
	params := &s3.CreateMultipartUploadInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.key),
	}
	result, err := s.client.CreateMultipartUpload(params)
	if err != nil {
		// Bug fix: the original gated on result != nil rather than the
		// error, so a failed call with a non-nil result would dereference
		// a possibly-nil UploadId and panic.
		return err
	}
	s.uploadID = *result.UploadId
	return nil
}
// uploadChunk uploads the current buffer contents as the next part of the
// multipart upload, records the resulting ETag, and resets the buffer.
// On failure the buffer is left intact so the data is not silently lost.
func (s *s3Writer) uploadChunk() error {
	// UploadPart reads the body from its current position; rewind first.
	if _, err := s.buf.Seek(0, io.SeekStart); err != nil {
		return err
	}
	// S3 part numbers are 1-based and must be monotonically increasing.
	partNumber := int64(len(s.complatedParts) + 1)
	uploadParams := &s3.UploadPartInput{
		Bucket:     aws.String(s.bucket),
		Key:        aws.String(s.key),
		UploadId:   aws.String(s.uploadID),
		Body:       s.buf,
		PartNumber: aws.Int64(partNumber),
	}
	result, err := s.client.UploadPart(uploadParams)
	if err != nil {
		// Bug fix: the original reset the buffer even on failure,
		// discarding the unsent data. Keep it so a caller could retry.
		return err
	}
	s.complatedParts = append(s.complatedParts, &s3.CompletedPart{
		ETag:       result.ETag,
		PartNumber: aws.Int64(partNumber),
	})
	// Start a fresh buffer for the next part.
	s.buf = filebuffer.New(nil)
	return nil
}
// Write buffers p in memory and, once the buffered data exceeds the
// configured chunk size, flushes it to S3 as one multipart-upload part.
// The returned count always reflects the buffer write; the returned error
// is the flush error when a flush was triggered.
func (s *s3Writer) Write(p []byte) (int, error) {
	written, err := s.buf.Write(p)
	buffered := int64(len(s.buf.Bytes()))
	if buffered > s.uploadChunkSize {
		err = s.uploadChunk()
	}
	return written, err
}
// Close flushes any remaining buffered data as the final part and completes
// the multipart upload. If either step fails, the upload is aborted so S3
// does not retain (and bill for) the orphaned parts indefinitely.
func (s *s3Writer) Close() error {
	if err := s.uploadChunk(); err != nil {
		s.abortUpload()
		return err
	}
	completeParams := &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(s.bucket),
		Key:      aws.String(s.key),
		UploadId: aws.String(s.uploadID),
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: s.complatedParts,
		},
	}
	if _, err := s.client.CompleteMultipartUpload(completeParams); err != nil {
		s.abortUpload()
		return err
	}
	return nil
}

// abortUpload makes a best-effort attempt to abort the in-progress
// multipart upload so S3 frees the stored parts. Its error is deliberately
// ignored: the original failure from Close is the one worth surfacing.
func (s *s3Writer) abortUpload() {
	s.client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ //nolint:errcheck // best effort
		Bucket:   aws.String(s.bucket),
		Key:      aws.String(s.key),
		UploadId: aws.String(s.uploadID),
	})
}
// s3Reader reads an S3 object incrementally using ranged GET requests of
// chunkSize bytes, exposing the result as an io.ReadCloser.
type s3Reader struct {
	client *s3.S3 // S3 API client used for ranged GETs
	bucket string // source bucket
	key    string // source object key
	// Byte offset of the next chunk to fetch; advanced by loadNextChunk.
	offset int64
	// Maximum number of bytes requested per ranged GET.
	chunkSize int64
	// Body of the most recently fetched chunk.
	// NOTE(review): presumably initialized by a constructor or a first
	// loadNextChunk call elsewhere; Read panics if it is nil.
	chunk io.ReadCloser
	// Total size of the object in bytes; used to detect the final chunk.
	totalSize int64
}
// loadNextChunk issues a ranged GET for the next chunk of the object,
// replacing s.chunk with the new body and advancing s.offset on success.
func (s *s3Reader) loadNextChunk() error {
	// The final chunk may be shorter than chunkSize.
	size := min64(s.chunkSize, s.totalSize-s.offset)
	params := &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.key),
		// HTTP Range is inclusive on both ends, hence the -1.
		Range: aws.String(fmt.Sprintf("bytes=%d-%d", s.offset, s.offset+size-1)),
	}
	output, err := s.client.GetObject(params)
	if err != nil {
		// Bug fix: the original read output.Body before checking err
		// (nil-pointer panic on a failed request) and advanced s.offset
		// unconditionally, which would skip a chunk on retry.
		return err
	}
	s.offset += size
	s.chunk = output.Body
	return nil
}
// Read implements io.Reader over the chunked object, transparently
// fetching the next chunk when the current one is exhausted. io.EOF is
// only surfaced once the final chunk has been fully consumed.
func (s *s3Reader) Read(b []byte) (int, error) {
	n, err := s.chunk.Read(b)
	if err != io.EOF {
		return n, err
	}
	if s.offset == s.totalSize {
		// Last chunk drained: propagate EOF to the caller.
		return n, io.EOF
	}
	// More of the object remains; swap in the next chunk. Any bytes read
	// so far are still returned alongside the fetch result.
	s.chunk.Close()
	return n, s.loadNextChunk()
}
// Close releases the body of the currently held chunk, returning any
// error from the underlying closer.
func (s *s3Reader) Close() error {
	err := s.chunk.Close()
	return err
}