forked from sahib/brig
/
std.go
449 lines (377 loc) · 9.89 KB
/
std.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
// Package util implements small helper function that
// should be included in the stdlib in our opinion.
package util
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"time"
log "github.com/Sirupsen/logrus"
)
// Empty is just an empty struct.
// Empty{} reads nicer than struct{}{}, e.g. as the value type
// of set-like maps (map[T]Empty) or for signaling channels.
type Empty struct{}
// Min returns the smaller of the two ints a and b.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// Max returns the larger of the two ints a and b.
func Max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// Min64 returns the smaller of the two int64 values a and b.
func Min64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// Max64 returns the larger of the two int64 values a and b.
func Max64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}
// Clamp limits x to the range [lo, hi]
func Clamp(x, lo, hi int) int {
	// Equivalent to Max(lo, Min(x, hi)): cap at hi first,
	// then raise to at least lo.
	v := x
	if hi < v {
		v = hi
	}
	if v < lo {
		v = lo
	}
	return v
}
// UMin returns the unsigned minimum of a and b
func UMin(a, b uint) uint {
	if b < a {
		return b
	}
	return a
}
// UMax returns the unsigned maximum of a and b.
// (The comment previously claimed "minimum", which was wrong;
// the implementation always returned the maximum.)
func UMax(a, b uint) uint {
	if a < b {
		return b
	}
	return a
}
// UClamp limits x to the range [lo, hi]
func UClamp(x, lo, hi uint) uint {
	// Equivalent to UMax(lo, UMin(x, hi)): cap at hi first,
	// then raise to at least lo.
	v := x
	if hi < v {
		v = hi
	}
	if v < lo {
		v = lo
	}
	return v
}
// Closer closes c and logs a warning if closing failed.
// It is intended for convenient defer calls only, where the
// error would otherwise be silently dropped. It gives only
// little knowledge about where the error happened, but it's
// slightly better than a bare `defer xyz.Close()`.
func Closer(c io.Closer) {
	err := c.Close()
	if err == nil {
		return
	}
	log.Warningf("Error on close `%v`: %v", c, err)
}
// Touch works like the unix touch(1)
func Touch(path string) error {
fd, err := os.Create(path)
if err != nil {
return err
}
return fd.Close()
}
// SizeAccumulator is an io.Writer that discards all data written to
// it and only keeps track of the total number of bytes it has seen.
// Combine it with an io.TeeReader to count how many bytes were
// pulled out of a reader:
//
//   s := &SizeAccumulator{}
//   teeR := io.TeeReader(r, s)
//   io.Copy(os.Stdout, teeR)
//   fmt.Printf("Wrote %d bytes to stdout\n", s.Size())
//
type SizeAccumulator struct {
	// count is only ever accessed through sync/atomic.
	count uint64
}

// Write adds len(buf) to the byte count; no data is stored or
// forwarded. It can be safely called from any goroutine.
func (s *SizeAccumulator) Write(buf []byte) (int, error) {
	n := len(buf)
	atomic.AddUint64(&s.count, uint64(n))
	return n, nil
}

// Size returns the total number of bytes written so far.
// It can be safely called from any goroutine.
func (s *SizeAccumulator) Size() uint64 {
	return atomic.LoadUint64(&s.count)
}

// Reset sets the byte count back to zero.
func (s *SizeAccumulator) Reset() {
	atomic.StoreUint64(&s.count, 0)
}
// NopWriteCloser returns a WriteCloser with a no-op Close method wrapping the
// provided Writer w.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return nopCloser{w}
}
type nopCloser struct {
io.Writer
}
func (nopCloser) Close() error { return nil }
type syncReadWriter struct {
io.ReadWriter
sync.Mutex
}
func (s *syncReadWriter) Write(buf []byte) (int, error) {
s.Lock()
defer s.Unlock()
return s.ReadWriter.Write(buf)
}
func (s *syncReadWriter) Read(buf []byte) (int, error) {
s.Lock()
defer s.Unlock()
return s.ReadWriter.Read(buf)
}
// SyncedReadWriter returns a io.ReadWriter that protects each call
// to Read() and Write() with a sync.Mutex.
func SyncedReadWriter(w io.ReadWriter) io.ReadWriter {
return &syncReadWriter{ReadWriter: w}
}
// SyncBuffer is a bytes.Buffer that protects each call
// to Read() and Write() with a lock, so one buffer can be
// shared between goroutines.
//
// NOTE(review): despite embedding a sync.RWMutex, both Read()
// and Write() take the exclusive lock below -- Read() on a
// bytes.Buffer consumes data, i.e. it mutates the buffer, so a
// shared read-lock would not be safe. Reads therefore do NOT
// actually run in parallel; the RWMutex is kept only so the
// exported Lock/RLock method set stays unchanged.
type SyncBuffer struct {
	sync.RWMutex
	buf bytes.Buffer
}

// Read consumes up to len(p) bytes from the buffer under the
// exclusive lock.
func (b *SyncBuffer) Read(p []byte) (int, error) {
	b.Lock()
	defer b.Unlock()
	return b.buf.Read(p)
}

// Write appends p to the buffer under the exclusive lock.
func (b *SyncBuffer) Write(p []byte) (int, error) {
	b.Lock()
	defer b.Unlock()
	return b.buf.Write(p)
}
// TimeoutReadWriter is an io.ReadWriter capable of returning ErrTimeout
// if there was no result in a certain timeout period.
type TimeoutReadWriter struct {
	io.Writer
	io.Reader
	// Relative read/write timeouts; used while useDeadline is false.
	rtimeout time.Duration
	wtimeout time.Duration
	// Once useDeadline is set (via SetDeadline and friends), the
	// absolute deadlines below take precedence over the timeouts.
	useDeadline bool
	rdeadline time.Time
	wdeadline time.Time
}

// ErrTimeout will be returned by Read/Write in case of a timeout.
var ErrTimeout = errors.New("I/O Timeout: Operation timed out")
// io performs a single Read (doRead == true) or Write (doRead == false)
// on behalf of the exported Read/Write methods. The actual I/O runs in
// a separate goroutine; if it does not finish before the configured
// timeout (or absolute deadline), ErrTimeout is returned instead.
//
// NOTE(review): on timeout the spawned goroutine keeps running and may
// still touch `p` (and overwrite the named results n/err) after this
// method has returned -- callers should not immediately reuse the
// buffer after an ErrTimeout.
func (rw *TimeoutReadWriter) io(p []byte, doRead bool) (n int, err error) {
	var deadline <-chan time.Time
	// Figure out when it's too late: either relative to now
	// (timeout mode) or at an absolute point (deadline mode):
	switch {
	case doRead && rw.useDeadline:
		deadline = time.After(rw.rdeadline.Sub(time.Now()))
	case doRead && !rw.useDeadline:
		deadline = time.After(rw.rtimeout)
	case !doRead && rw.useDeadline:
		deadline = time.After(rw.wdeadline.Sub(time.Now()))
	case !doRead && !rw.useDeadline:
		deadline = time.After(rw.wtimeout)
	}
	// Reserve one element, so the goroutine can always deliver its
	// result and exit, even when the timeout already expired and
	// nobody will ever receive from `done`.
	done := make(chan bool, 1)
	go func() {
		if doRead {
			n, err = rw.Reader.Read(p)
		} else {
			n, err = rw.Writer.Write(p)
		}
		done <- true
	}()
	// Wait for whichever happens first: completed I/O or timeout.
	select {
	case <-done:
		return
	case <-deadline:
		return 0, ErrTimeout
	}
}
// Read behaves like the wrapped Reader's Read, but gives up with
// ErrTimeout once the configured read timeout/deadline expires.
func (rw *TimeoutReadWriter) Read(p []byte) (int, error) {
	const doRead = true
	return rw.io(p, doRead)
}
// Write behaves like the wrapped Writer's Write, but gives up with
// ErrTimeout once the configured write timeout/deadline expires.
func (rw *TimeoutReadWriter) Write(p []byte) (int, error) {
	const doRead = false
	return rw.io(p, doRead)
}
// SetReadDeadline sets an absolute point in time at which a read
// operation should be canceled with ErrTimeout. It always succeeds.
func (rw *TimeoutReadWriter) SetReadDeadline(d time.Time) error {
	rw.rdeadline = d
	rw.useDeadline = true
	return nil
}
// SetWriteDeadline sets an absolute point in time at which a write
// operation should be canceled with ErrTimeout. It always succeeds.
func (rw *TimeoutReadWriter) SetWriteDeadline(d time.Time) error {
	rw.wdeadline = d
	rw.useDeadline = true
	return nil
}
// SetDeadline sets an absolute point in time at which any I/O
// operation (read or write) should be canceled with ErrTimeout.
func (rw *TimeoutReadWriter) SetDeadline(d time.Time) error {
	rw.SetReadDeadline(d)
	rw.SetWriteDeadline(d)
	return nil
}
// SetWriteTimeout sets a separate relative timeout for writing.
// It always succeeds.
func (rw *TimeoutReadWriter) SetWriteTimeout(d time.Duration) error {
	rw.wtimeout = d
	return nil
}
// SetReadTimeout sets a separate relative timeout for reading.
// It always succeeds.
func (rw *TimeoutReadWriter) SetReadTimeout(d time.Duration) error {
	rw.rtimeout = d
	return nil
}
// SetTimeout sets both the read and write timeout to `d`.
func (rw *TimeoutReadWriter) SetTimeout(d time.Duration) error {
	rw.SetReadTimeout(d)
	rw.SetWriteTimeout(d)
	return nil
}
// NewTimeoutWriter wraps `w` and returns an io.Writer that fails a
// Write with ErrTimeout if `w` didn't succeed within `d`.
func NewTimeoutWriter(w io.Writer, d time.Duration) io.Writer {
	tw := &TimeoutReadWriter{Writer: w}
	tw.wtimeout = d
	return tw
}
// NewTimeoutReader wraps `r` and returns an io.Reader that fails a
// Read with ErrTimeout if `r` didn't succeed within `d`.
func NewTimeoutReader(r io.Reader, d time.Duration) io.Reader {
	tr := &TimeoutReadWriter{Reader: r}
	tr.rtimeout = d
	return tr
}
// NewTimeoutReadWriter wraps `rw` and returns an io.ReadWriter whose
// Read and Write each fail with ErrTimeout if `rw` didn't succeed
// within `d`.
func NewTimeoutReadWriter(rw io.ReadWriter, d time.Duration) *TimeoutReadWriter {
	trw := &TimeoutReadWriter{Reader: rw, Writer: rw}
	trw.rtimeout = d
	trw.wtimeout = d
	return trw
}
// Errors is a list of errors that render to one single message
type Errors []error
func (es Errors) Error() string {
switch len(es) {
case 0:
return ""
case 1:
return es[0].Error()
default:
base := "More than one error happened:\n"
for _, err := range es {
base += "\t" + err.Error() + "\n"
}
return base
}
}
// ToErr combines all errors in the list to a single error.
// If there were no errors, it returns nil.
func (es Errors) ToErr() error {
if len(es) > 0 {
return es
}
return nil
}
// OmitBytes converts a byte slice into a string representation that
// omits data in the middle if necessary. It is useful for testing
// and printing user information. `lim` is the number of bytes that
// are kept at the front and at the back of the output.
//
// Example:
//
//   OmitBytes([]byte{1,2,3,4,5,6}, 2)
//   -> [1 2] ... [5 6]
//   OmitBytes([]byte{1,2,3,4}, 4)
//   -> [1 2 3 4]
//
func OmitBytes(data []byte, lim int) string {
	if lim <= 0 {
		// Keeps the old lim == 0 behavior (everything omitted)
		// and additionally makes negative lim safe (the old code
		// paniced on data[:lim] with lim < 0).
		return fmt.Sprintf("%v", data[:0])
	}
	// Bug fix: the old front/back index computation produced
	// overlapping (duplicated) output whenever len(data) < 2*lim;
	// e.g. OmitBytes([]byte{1,2,3,4}, 4) printed every byte twice.
	// If front and back would overlap, nothing is omitted.
	if len(data) < 2*lim {
		return fmt.Sprintf("%v", data)
	}
	return fmt.Sprintf("%v ... %v", data[:lim], data[len(data)-lim:])
}
type limitWriter struct {
wr io.Writer
sz int64
pos int64
}
// LimitWriter is like io.LimitReader but for an io.Writer
func LimitWriter(w io.Writer, sz int64) io.Writer {
return &limitWriter{
wr: w,
sz: sz,
}
}
func (lw *limitWriter) Write(buf []byte) (int, error) {
if lw.pos >= lw.sz {
return len(buf), nil
}
n := Min64(lw.sz-lw.pos, int64(len(buf)))
lw.pos += n
_, err := lw.wr.Write(buf[:n])
if err != nil {
return -1, err
}
// many go std functions require that all of `buf` was written,
// or else they return with errShortWrite. Let's act like we
// used all of it.
return len(buf), nil
}
type prefixReader struct {
data []byte
curs int
r io.Reader
}
func (pr *prefixReader) Read(buf []byte) (n int, err error) {
nread := 0
if pr.curs < len(pr.data) {
n := copy(buf, pr.data[pr.curs:])
buf = buf[n:]
pr.curs += n
nread += n
}
if len(buf) == 0 {
return nread, nil
}
n, err = pr.r.Read(buf)
nread += n
return nread, err
}
// PrefixReader returns an io.Reader that outputs `data` before the rest of `r`.
func PrefixReader(data []byte, r io.Reader) io.Reader {
return &prefixReader{data: data, r: r}
}
// PeekHeader reads up to `size` bytes from `r` and returns them,
// together with a new reader that will yield the very same data as
// `r` (header included) by prefixing the stream with the header again
// via PrefixReader.
//
// If the stream ends before `size` bytes could be read, the returned
// header is simply shorter; that is not reported as an error.
func PeekHeader(r io.Reader, size int64) ([]byte, io.Reader, error) {
	headerBuf := make([]byte, size)
	// Bug fix: a single r.Read() may legally return fewer than
	// `size` bytes with a nil error even when more data is
	// available, silently truncating the header. io.ReadFull keeps
	// reading until the buffer is full or the stream ends.
	n, err := io.ReadFull(r, headerBuf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, nil, err
	}
	headerBuf = headerBuf[:n]
	return headerBuf, PrefixReader(headerBuf, r), nil
}