-
Notifications
You must be signed in to change notification settings - Fork 154
/
untar.go
142 lines (134 loc) · 3.75 KB
/
untar.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
package content
import (
"archive/tar"
"fmt"
"io"
"github.com/containerd/containerd/content"
)
// NewUntarWriter wraps a writer with an untar, so that the stream is untarred.
//
// By default, it calculates the hash when writing. If the option `skipHash` is true,
// it will skip doing the hash. Skipping the hash is intended to be used only
// if you are confident about the validity of the data being passed to the writer,
// and wish to save on the hashing time.
func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}

	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		var err error
		for {
			// Assign with `=`, not `:=`: a short variable declaration here
			// would shadow the outer err, so every error wrapped below would
			// be lost and `done <- err` would always report nil.
			_, err = tr.Next()
			if err == io.EOF {
				// clear the error, since we do not pass an io.EOF
				err = nil
				break // End of archive
			}
			if err != nil {
				// pass the error on
				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				break
			}
			// write out the untarred data for the current entry;
			// io.EOF just means "end of this file, go to the next header",
			// any other error stops processing and is reported.
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// The io.Reader contract guarantees n <= len(b), so b[:n] is
				// always valid; no clamping needed.
				if _, err2 := w.Write(b[:n]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}
// NewUntarWriterByName wraps multiple writers with an untar, so that the stream is untarred and passed
// to the appropriate writer, based on the filename. If a filename is not found, it is up to the passed
// func to determine how to process it.
func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer {
	// process opts for default
	wOpts := DefaultWriterOpts()
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil
		}
	}

	// need a PassthroughMultiWriter here
	return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) {
		tr := tar.NewReader(r)
		var err error
		for {
			// Declare header separately and assign with `=`: a `:=` here
			// would shadow the outer err, so every error wrapped below would
			// be lost and `done <- err` would always report nil.
			var header *tar.Header
			header, err = tr.Next()
			if err == io.EOF {
				// clear the error, since we do not pass an io.EOF
				err = nil
				break // End of archive
			}
			if err != nil {
				// pass the error on
				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
				break
			}
			// get the writer for this entry's filename; a nil writer means
			// "skip this file"
			filename := header.Name
			w := getwriter(filename)
			if w == nil {
				continue
			}
			// write out the untarred data for the current entry;
			// io.EOF just means "end of this file, go to the next header",
			// any other error stops processing and is reported.
			b := make([]byte, wOpts.Blocksize)
			for {
				var n int
				n, err = tr.Read(b)
				if err != nil && err != io.EOF {
					err = fmt.Errorf("UntarWriter file data read error: %v", err)
					break
				}
				// The io.Reader contract guarantees n <= len(b), so b[:n] is
				// always valid; no clamping needed.
				if _, err2 := w.Write(b[:n]); err2 != nil {
					err = fmt.Errorf("UntarWriter error writing to underlying writer for name '%s': %v", filename, err2)
					break
				}
				if err == io.EOF {
					// go to the next file
					break
				}
			}
			// did we break with a non-nil and non-EOF error?
			if err != nil && err != io.EOF {
				break
			}
		}
		done <- err
	}, opts...)
}