diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 07ff086..2f37bc5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -100,7 +100,7 @@ }, { "ImportPath": "github.com/moul/connect-four", - "Rev": "6629297bee497e0e39b9cbaab3205665ac2ebe7d" + "Rev": "0706016612d58c70352a53bdd07bfd752e94a934" }, { "ImportPath": "github.com/moul/einstein-riddle-generator", @@ -207,11 +207,30 @@ "ImportPath": "golang.org/x/sys/unix", "Rev": "f64b50fbea64174967a8882830d621a18ee1548e" }, + { + "ImportPath": "gopkg.in/bsm/ratelimit.v1", + "Rev": "bda20d5067a03094fc6762f7ead53027afac5f28" + }, + { + "ImportPath": "gopkg.in/bufio.v1", + "Comment": "v1", + "Rev": "567b2bfa514e796916c4747494d6ff5132a1dfce" + }, { "ImportPath": "gopkg.in/go-playground/validator.v8", "Comment": "v8.17.1", "Rev": "014792cf3e266caff1e916876be12282b33059e0" }, + { + "ImportPath": "gopkg.in/redis.v3", + "Comment": "v3.2.5", + "Rev": "73e1e9f501e946d35387e30f49ef58adda82b4ee" + }, + { + "ImportPath": "gopkg.in/redis.v3/internal/consistenthash", + "Comment": "v3.2.5", + "Rev": "73e1e9f501e946d35387e30f49ef58adda82b4ee" + }, { "ImportPath": "gopkg.in/vmihailenco/msgpack.v2", "Comment": "v2.4.2", diff --git a/vendor/github.com/moul/connect-four/connectfour.go b/vendor/github.com/moul/connect-four/connectfour.go index 977d1b8..3dd1c2f 100644 --- a/vendor/github.com/moul/connect-four/connectfour.go +++ b/vendor/github.com/moul/connect-four/connectfour.go @@ -4,8 +4,13 @@ import ( "fmt" "math" "math/rand" + "os" + "strconv" + "strings" "time" + "gopkg.in/redis.v3" + "github.com/Sirupsen/logrus" "github.com/moul/bolosseum/bots" "github.com/robfig/go-cache" @@ -15,10 +20,23 @@ var Rows = 6 var Cols = 7 var MaxDeepness = 6 +var rc *redis.Client var c *cache.Cache func init() { + // initialize cache c = cache.New(5*time.Minute, 30*time.Second) + + // initialize redis + if os.Getenv("REDIS_HOSTNAME") != "" { + rc = redis.NewClient(&redis.Options{ + Addr: os.Getenv("REDIS_HOSTNAME"), + Password: os.Getenv("REDIS_PASSWORD"), + DB: 0, + }) + pong, err := rc.Ping().Result() + logrus.Warnf("Redis ping: %v, %v", pong, err) + } } func NewConnectfourBot() *ConnectfourBot { @@ -182,6 +200,24 @@ func (b *ConnectFour) BestMovements() []Movement { return cachedMoves.([]Movement) } + if rc != nil { + cachedMoves, err := rc.Get(hash).Result() + if err == nil { + moves := []Movement{} + for _, playStr := range strings.Split(cachedMoves, ",") { + play, _ := strconv.Atoi(playStr) + moves = append(moves, Movement{ + Play: play, + }) + } + c.Set(hash, moves, -1) + return moves + } + if err != redis.Nil { + logrus.Errorf("Redis: failed to get value for hash=%q: %v", hash, err) + } + } + logrus.Warnf("bot: %v", b) moves := b.ScoreMovements(b.Player, 1) logrus.Warnf("score-moves: %v", moves) @@ -206,6 +242,18 @@ func (b *ConnectFour) BestMovements() []Movement { } c.Set(hash, bestMoves, -1) + if rc != nil { + bestMovesStr := "" + if len(bestMoves) > 0 { + bestMovesStr = fmt.Sprintf("%d", bestMoves[0].Play) + for _, move := range bestMoves[1:] { + bestMovesStr += fmt.Sprintf(",%d", move.Play) + } + } + if err := rc.Set(hash, bestMovesStr, 0).Err(); err != nil { + logrus.Errorf("Redis: failed to write value for hash=%q", hash) + } + } return bestMoves } diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/.travis.yml b/vendor/gopkg.in/bsm/ratelimit.v1/.travis.yml new file mode 100644 index 0000000..14543fd --- /dev/null +++ b/vendor/gopkg.in/bsm/ratelimit.v1/.travis.yml @@ -0,0 +1,7 @@ +language: go +script: make testall +go: + - 1.4 + - 1.3 + - 1.2 + - tip diff 
--git a/vendor/gopkg.in/bsm/ratelimit.v1/Makefile b/vendor/gopkg.in/bsm/ratelimit.v1/Makefile new file mode 100644 index 0000000..fb960c6 --- /dev/null +++ b/vendor/gopkg.in/bsm/ratelimit.v1/Makefile @@ -0,0 +1,13 @@ +default: test + +testdeps: + @go get github.com/onsi/ginkgo + @go get github.com/onsi/gomega + +test: testdeps + @go test ./... + +testrace: testdeps + @go test ./... -race + +testall: test testrace diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/README.md b/vendor/gopkg.in/bsm/ratelimit.v1/README.md new file mode 100644 index 0000000..538eec8 --- /dev/null +++ b/vendor/gopkg.in/bsm/ratelimit.v1/README.md @@ -0,0 +1,54 @@ +# RateLimit [![Build Status](https://travis-ci.org/bsm/ratelimit.png?branch=master)](https://travis-ci.org/bsm/ratelimit) + +Simple, thread-safe Go rate-limiter. +Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 + +### Example + +```go +package main + +import ( + "github.com/bsm/redeo" + "log" +) + +func main() { + // Create a new rate-limiter, allowing up-to 10 calls + // per second + rl := ratelimit.New(10, time.Second) + + for i:=0; i<20; i++ { + if rl.Limit() { + fmt.Println("DOH! Over limit!") + } else { + fmt.Println("OK") + } + } +} +``` + +### Licence + +``` +Copyright (c) 2015 Black Square Media + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +``` diff --git a/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go b/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go new file mode 100644 index 0000000..5808cdb --- /dev/null +++ b/vendor/gopkg.in/bsm/ratelimit.v1/ratelimit.go @@ -0,0 +1,83 @@ +/* +Simple, thread-safe Go rate-limiter. +Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 + +Example: + + // Create a new rate-limiter, allowing up-to 10 calls + // per second + rl := ratelimit.New(10, time.Second) + + for i:=0; i<20; i++ { + if rl.Limit() { + fmt.Println("DOH! Over limit!") + } else { + fmt.Println("OK") + } + } +*/ +package ratelimit + +import ( + "sync/atomic" + "time" +) + +// RateLimit instances are thread-safe. 
+type RateLimiter struct { + allowance, max, unit, lastCheck uint64 +} + +// New creates a new rate limiter instance +func New(rate int, per time.Duration) *RateLimiter { + nano := uint64(per) + if nano < 1 { + nano = uint64(time.Second) + } + if rate < 1 { + rate = 1 + } + + return &RateLimiter{ + allowance: uint64(rate) * nano, // store our allowance, in ns units + max: uint64(rate) * nano, // remember our maximum allowance + unit: nano, // remember our unit size + + lastCheck: uint64(time.Now().UnixNano()), + } +} + +// Limit returns true if rate was exceeded +func (rl *RateLimiter) Limit() bool { + // Calculate the number of ns that have passed since our last call + now := uint64(time.Now().UnixNano()) + passed := now - atomic.SwapUint64(&rl.lastCheck, now) + + // Add them to our allowance + current := atomic.AddUint64(&rl.allowance, passed) + + // Ensure our allowance is not over maximum + if current > rl.max { + atomic.AddUint64(&rl.allowance, rl.max-current) + current = rl.max + } + + // If our allowance is less than one unit, rate-limit! + if current < rl.unit { + return true + } + + // Not limited, subtract a unit + atomic.AddUint64(&rl.allowance, -rl.unit) + return false +} + +// Undo reverts the last Limit() call, returning consumed allowance +func (rl *RateLimiter) Undo() { + current := atomic.AddUint64(&rl.allowance, rl.unit) + + // Ensure our allowance is not over maximum + if current > rl.max { + atomic.AddUint64(&rl.allowance, rl.max-current) + } +} diff --git a/vendor/gopkg.in/bufio.v1/.travis.yml b/vendor/gopkg.in/bufio.v1/.travis.yml new file mode 100644 index 0000000..ccca6bb --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip + +install: + - go get launchpad.net/gocheck + - go get gopkg.in/bufio.v1 diff --git a/vendor/gopkg.in/bufio.v1/LICENSE b/vendor/gopkg.in/bufio.v1/LICENSE new file mode 100644 index 0000000..07a316c --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The bufio Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
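Note on the vendored `ratelimit.v1` package above: a minimal, self-contained sketch of how its `New`/`Limit`/`Undo` API is typically driven, using only the signatures shown in `ratelimit.go`; the `main` scaffolding here is illustrative and is not part of the vendored files.

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/bsm/ratelimit.v1"
)

func main() {
	// Allow up to 10 calls per second.
	rl := ratelimit.New(10, time.Second)

	for i := 0; i < 20; i++ {
		if rl.Limit() {
			// Allowance for this unit of time is exhausted.
			fmt.Println("rate limited")
			continue
		}
		// Limit() consumed one unit of allowance; rl.Undo() would
		// hand it back if the guarded work ended up not running.
		fmt.Println("ok")
	}
}
```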
diff --git a/vendor/gopkg.in/bufio.v1/Makefile b/vendor/gopkg.in/bufio.v1/Makefile new file mode 100644 index 0000000..038ed47 --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/Makefile @@ -0,0 +1,2 @@ +all: + go test gopkg.in/bufio.v1 diff --git a/vendor/gopkg.in/bufio.v1/README.md b/vendor/gopkg.in/bufio.v1/README.md new file mode 100644 index 0000000..bfb85ee --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/README.md @@ -0,0 +1,4 @@ +bufio +===== + +This is a fork of the http://golang.org/pkg/bufio/ package. It adds `ReadN` method that allows reading next `n` bytes from the internal buffer without allocating intermediate buffer. This method works just like the [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next) method, but has slightly different signature. diff --git a/vendor/gopkg.in/bufio.v1/buffer.go b/vendor/gopkg.in/bufio.v1/buffer.go new file mode 100644 index 0000000..8b91560 --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/buffer.go @@ -0,0 +1,413 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio + +// Simple byte buffer for marshaling data. + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. + lastRead readOp // last read operation, so that Unread* can work correctly. +} + +// The readOp constants describe the last action performed on +// the buffer, so that UnreadRune and UnreadByte can +// check for invalid usage. +type readOp int + +const ( + opInvalid readOp = iota // Non-read operation. + opReadRune // Read rune. + opRead // Any other read operation. +) + +// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. +var ErrTooLarge = errors.New("bytes.Buffer: too large") + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + b.lastRead = opInvalid + switch { + case n < 0 || n > b.Len(): + panic("bytes.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). 
+func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("bytes.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// WriteString appends the contents of s to the buffer, growing the buffer as +// needed. The return value n is the length of s; err is always nil. If the +// buffer becomes too large, WriteString will panic with ErrTooLarge. +func (b *Buffer) WriteString(s string) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(s)) + return copy(b.buf[m:], s), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + b.lastRead = opInvalid + // If buffer is empty, reset to recover space. + if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with ErrTooLarge. 
+func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + b.lastRead = opInvalid + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("bytes.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + b.lastRead = opInvalid + m := b.grow(1) + b.buf[m] = c + return nil +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to the +// buffer, returning its length and an error, which is always nil but is +// included to match bufio.Writer's WriteRune. The buffer is grown as needed; +// if it becomes too large, WriteRune will panic with ErrTooLarge. +func (b *Buffer) WriteRune(r rune) (n int, err error) { + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + return 1, nil + } + n = utf8.EncodeRune(b.runeBytes[0:], r) + b.Write(b.runeBytes[0:n]) + return n, nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + if n > 0 { + b.lastRead = opRead + } + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + b.lastRead = opInvalid + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + if n > 0 { + b.lastRead = opRead + } + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + b.lastRead = opRead + return c, nil +} + +// ReadRune reads and returns the next UTF-8-encoded +// Unicode code point from the buffer. +// If no bytes are available, the error returned is io.EOF. +// If the bytes are an erroneous UTF-8 encoding, it +// consumes one byte and returns U+FFFD, 1. 
+func (b *Buffer) ReadRune() (r rune, size int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, 0, io.EOF + } + b.lastRead = opReadRune + c := b.buf[b.off] + if c < utf8.RuneSelf { + b.off++ + return rune(c), 1, nil + } + r, n := utf8.DecodeRune(b.buf[b.off:]) + b.off += n + return r, n, nil +} + +// UnreadRune unreads the last rune returned by ReadRune. +// If the most recent read or write operation on the buffer was +// not a ReadRune, UnreadRune returns an error. (In this regard +// it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Buffer) UnreadRune() error { + if b.lastRead != opReadRune { + return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune") + } + b.lastRead = opInvalid + if b.off > 0 { + _, n := utf8.DecodeLastRune(b.buf[0:b.off]) + b.off -= n + } + return nil +} + +// UnreadByte unreads the last byte returned by the most recent +// read operation. If write has happened since the last read, UnreadByte +// returns an error. +func (b *Buffer) UnreadByte() error { + if b.lastRead != opReadRune && b.lastRead != opRead { + return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read") + } + b.lastRead = opInvalid + if b.off > 0 { + b.off-- + } + return nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + b.lastRead = opRead + return line, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. +func (b *Buffer) ReadString(delim byte) (line string, err error) { + slice, err := b.readSlice(delim) + return string(slice), err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } + +// NewBufferString creates and initializes a new Buffer using string s as its +// initial contents. It is intended to prepare a buffer to read an existing +// string. 
+// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBufferString(s string) *Buffer { + return &Buffer{buf: []byte(s)} +} diff --git a/vendor/gopkg.in/bufio.v1/bufio.go b/vendor/gopkg.in/bufio.v1/bufio.go new file mode 100644 index 0000000..8f5cdc0 --- /dev/null +++ b/vendor/gopkg.in/bufio.v1/bufio.go @@ -0,0 +1,728 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer +// object, creating another object (Reader or Writer) that also implements +// the interface but provides buffering and some help for textual I/O. +package bufio + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +const ( + defaultBufSize = 4096 +) + +var ( + ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte") + ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune") + ErrBufferFull = errors.New("bufio: buffer full") + ErrNegativeCount = errors.New("bufio: negative count") +) + +// Buffered input. + +// Reader implements buffering for an io.Reader object. +type Reader struct { + buf []byte + rd io.Reader + r, w int + err error + lastByte int + lastRuneSize int +} + +const minReadBufferSize = 16 +const maxConsecutiveEmptyReads = 100 + +// NewReaderSize returns a new Reader whose buffer has at least the specified +// size. If the argument io.Reader is already a Reader with large enough +// size, it returns the underlying Reader. +func NewReaderSize(rd io.Reader, size int) *Reader { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok && len(b.buf) >= size { + return b + } + if size < minReadBufferSize { + size = minReadBufferSize + } + r := new(Reader) + r.reset(make([]byte, size), rd) + return r +} + +// NewReader returns a new Reader whose buffer has the default size. +func NewReader(rd io.Reader) *Reader { + return NewReaderSize(rd, defaultBufSize) +} + +// Reset discards any buffered data, resets all state, and switches +// the buffered reader to read from r. +func (b *Reader) Reset(r io.Reader) { + b.reset(b.buf, r) +} + +func (b *Reader) reset(buf []byte, r io.Reader) { + *b = Reader{ + buf: buf, + rd: r, + lastByte: -1, + lastRuneSize: -1, + } +} + +var errNegativeRead = errors.New("bufio: reader returned negative count from Read") + +// fill reads a new chunk into the buffer. +func (b *Reader) fill() { + // Slide existing data to beginning. + if b.r > 0 { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + if b.w >= len(b.buf) { + panic("bufio: tried to fill full buffer") + } + + // Read new data: try a limited number of times. + for i := maxConsecutiveEmptyReads; i > 0; i-- { + n, err := b.rd.Read(b.buf[b.w:]) + if n < 0 { + panic(errNegativeRead) + } + b.w += n + if err != nil { + b.err = err + return + } + if n > 0 { + return + } + } + b.err = io.ErrNoProgress +} + +func (b *Reader) readErr() error { + err := b.err + b.err = nil + return err +} + +// Peek returns the next n bytes without advancing the reader. The bytes stop +// being valid at the next read call. If Peek returns fewer than n bytes, it +// also returns an error explaining why the read is short. The error is +// ErrBufferFull if n is larger than b's buffer size. 
+func (b *Reader) Peek(n int) ([]byte, error) { + if n < 0 { + return nil, ErrNegativeCount + } + if n > len(b.buf) { + return nil, ErrBufferFull + } + // 0 <= n <= len(b.buf) + for b.w-b.r < n && b.err == nil { + b.fill() // b.w-b.r < len(b.buf) => buffer is not full + } + m := b.w - b.r + if m > n { + m = n + } + var err error + if m < n { + err = b.readErr() + if err == nil { + err = ErrBufferFull + } + } + return b.buf[b.r : b.r+m], err +} + +// Read reads data into p. +// It returns the number of bytes read into p. +// It calls Read at most once on the underlying Reader, +// hence n may be less than len(p). +// At EOF, the count will be zero and err will be io.EOF. +func (b *Reader) Read(p []byte) (n int, err error) { + n = len(p) + if n == 0 { + return 0, b.readErr() + } + if b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + if len(p) >= len(b.buf) { + // Large read, empty buffer. + // Read directly into p to avoid copy. + n, b.err = b.rd.Read(p) + if n < 0 { + panic(errNegativeRead) + } + if n > 0 { + b.lastByte = int(p[n-1]) + b.lastRuneSize = -1 + } + return n, b.readErr() + } + b.fill() // buffer is empty + if b.w == b.r { + return 0, b.readErr() + } + } + + if n > b.w-b.r { + n = b.w - b.r + } + copy(p[0:n], b.buf[b.r:]) + b.r += n + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = -1 + return n, nil +} + +// ReadByte reads and returns a single byte. +// If no byte is available, returns an error. +func (b *Reader) ReadByte() (c byte, err error) { + b.lastRuneSize = -1 + for b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + b.fill() // buffer is empty + } + c = b.buf[b.r] + b.r++ + b.lastByte = int(c) + return c, nil +} + +// UnreadByte unreads the last byte. Only the most recently read byte can be unread. +func (b *Reader) UnreadByte() error { + if b.lastByte < 0 || b.r == 0 && b.w > 0 { + return ErrInvalidUnreadByte + } + // b.r > 0 || b.w == 0 + if b.r > 0 { + b.r-- + } else { + // b.r == 0 && b.w == 0 + b.w = 1 + } + b.buf[b.r] = byte(b.lastByte) + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// ReadRune reads a single UTF-8 encoded Unicode character and returns the +// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte +// and returns unicode.ReplacementChar (U+FFFD) with a size of 1. +func (b *Reader) ReadRune() (r rune, size int, err error) { + for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) { + b.fill() // b.w-b.r < len(buf) => buffer is not full + } + b.lastRuneSize = -1 + if b.r == b.w { + return 0, 0, b.readErr() + } + r, size = rune(b.buf[b.r]), 1 + if r >= 0x80 { + r, size = utf8.DecodeRune(b.buf[b.r:b.w]) + } + b.r += size + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = size + return r, size, nil +} + +// UnreadRune unreads the last rune. If the most recent read operation on +// the buffer was not a ReadRune, UnreadRune returns an error. (In this +// regard it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Reader) UnreadRune() error { + if b.lastRuneSize < 0 || b.r < b.lastRuneSize { + return ErrInvalidUnreadRune + } + b.r -= b.lastRuneSize + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// Buffered returns the number of bytes that can be read from the current buffer. +func (b *Reader) Buffered() int { return b.w - b.r } + +// ReadSlice reads until the first occurrence of delim in the input, +// returning a slice pointing at the bytes in the buffer. 
+// The bytes stop being valid at the next read. +// If ReadSlice encounters an error before finding a delimiter, +// it returns all the data in the buffer and the error itself (often io.EOF). +// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim. +// Because the data returned from ReadSlice will be overwritten +// by the next I/O operation, most clients should use +// ReadBytes or ReadString instead. +// ReadSlice returns err != nil if and only if line does not end in delim. +func (b *Reader) ReadSlice(delim byte) (line []byte, err error) { + for { + // Search buffer. + if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 { + line = b.buf[b.r : b.r+i+1] + b.r += i + 1 + break + } + + // Pending error? + if b.err != nil { + line = b.buf[b.r:b.w] + b.r = b.w + err = b.readErr() + break + } + + // Buffer full? + if n := b.Buffered(); n >= len(b.buf) { + b.r = b.w + line = b.buf + err = ErrBufferFull + break + } + + b.fill() // buffer is not full + } + + // Handle last byte, if any. + if i := len(line) - 1; i >= 0 { + b.lastByte = int(line[i]) + } + + return +} + +// ReadN tries to read exactly n bytes. +// The bytes stop being valid at the next read call. +// If ReadN encounters an error before reading n bytes, +// it returns all the data in the buffer and the error itself (often io.EOF). +// ReadN fails with error ErrBufferFull if the buffer fills +// without reading N bytes. +// Because the data returned from ReadN will be overwritten +// by the next I/O operation, most clients should use +// ReadBytes or ReadString instead. +func (b *Reader) ReadN(n int) ([]byte, error) { + for b.Buffered() < n { + if b.err != nil { + buf := b.buf[b.r:b.w] + b.r = b.w + return buf, b.readErr() + } + + // Buffer is full? + if b.Buffered() >= len(b.buf) { + b.r = b.w + return b.buf, ErrBufferFull + } + + b.fill() + } + buf := b.buf[b.r : b.r+n] + b.r += n + return buf, nil +} + +// ReadLine is a low-level line-reading primitive. Most callers should use +// ReadBytes('\n') or ReadString('\n') instead or use a Scanner. +// +// ReadLine tries to return a single line, not including the end-of-line bytes. +// If the line was too long for the buffer then isPrefix is set and the +// beginning of the line is returned. The rest of the line will be returned +// from future calls. isPrefix will be false when returning the last fragment +// of the line. The returned buffer is only valid until the next call to +// ReadLine. ReadLine either returns a non-nil line or it returns an error, +// never both. +// +// The text returned from ReadLine does not include the line end ("\r\n" or "\n"). +// No indication or error is given if the input ends without a final line end. +// Calling UnreadByte after ReadLine will always unread the last byte read +// (possibly a character belonging to the line end) even if that byte is not +// part of the line returned by ReadLine. +func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) { + line, err = b.ReadSlice('\n') + if err == ErrBufferFull { + // Handle the case where "\r\n" straddles the buffer. + if len(line) > 0 && line[len(line)-1] == '\r' { + // Put the '\r' back on buf and drop it from line. + // Let the next call to ReadLine check for "\r\n". 
+ if b.r == 0 { + // should be unreachable + panic("bufio: tried to rewind past start of buffer") + } + b.r-- + line = line[:len(line)-1] + } + return line, true, nil + } + + if len(line) == 0 { + if err != nil { + line = nil + } + return + } + err = nil + + if line[len(line)-1] == '\n' { + drop := 1 + if len(line) > 1 && line[len(line)-2] == '\r' { + drop = 2 + } + line = line[:len(line)-drop] + } + return +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +// For simple uses, a Scanner may be more convenient. +func (b *Reader) ReadBytes(delim byte) (line []byte, err error) { + // Use ReadSlice to look for array, + // accumulating full buffers. + var frag []byte + var full [][]byte + err = nil + + for { + var e error + frag, e = b.ReadSlice(delim) + if e == nil { // got final fragment + break + } + if e != ErrBufferFull { // unexpected error + err = e + break + } + + // Make a copy of the buffer. + buf := make([]byte, len(frag)) + copy(buf, frag) + full = append(full, buf) + } + + // Allocate new buffer to hold the full pieces and the fragment. + n := 0 + for i := range full { + n += len(full[i]) + } + n += len(frag) + + // Copy full pieces and fragment in. + buf := make([]byte, n) + n = 0 + for i := range full { + n += copy(buf[n:], full[i]) + } + copy(buf[n:], frag) + return buf, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end in +// delim. +// For simple uses, a Scanner may be more convenient. +func (b *Reader) ReadString(delim byte) (line string, err error) { + bytes, err := b.ReadBytes(delim) + line = string(bytes) + return line, err +} + +// WriteTo implements io.WriterTo. +func (b *Reader) WriteTo(w io.Writer) (n int64, err error) { + n, err = b.writeBuf(w) + if err != nil { + return + } + + if r, ok := b.rd.(io.WriterTo); ok { + m, err := r.WriteTo(w) + n += m + return n, err + } + + if w, ok := w.(io.ReaderFrom); ok { + m, err := w.ReadFrom(b.rd) + n += m + return n, err + } + + if b.w-b.r < len(b.buf) { + b.fill() // buffer not full + } + + for b.r < b.w { + // b.r < b.w => buffer is not empty + m, err := b.writeBuf(w) + n += m + if err != nil { + return n, err + } + b.fill() // buffer is empty + } + + if b.err == io.EOF { + b.err = nil + } + + return n, b.readErr() +} + +// writeBuf writes the Reader's buffer to the writer. +func (b *Reader) writeBuf(w io.Writer) (int64, error) { + n, err := w.Write(b.buf[b.r:b.w]) + if n < b.r-b.w { + panic(errors.New("bufio: writer did not write all data")) + } + b.r += n + return int64(n), err +} + +// buffered output + +// Writer implements buffering for an io.Writer object. +// If an error occurs writing to a Writer, no more data will be +// accepted and all subsequent writes will return the error. +// After all data has been written, the client should call the +// Flush method to guarantee all data has been forwarded to +// the underlying io.Writer. 
+type Writer struct { + err error + buf []byte + n int + wr io.Writer +} + +// NewWriterSize returns a new Writer whose buffer has at least the specified +// size. If the argument io.Writer is already a Writer with large enough +// size, it returns the underlying Writer. +func NewWriterSize(w io.Writer, size int) *Writer { + // Is it already a Writer? + b, ok := w.(*Writer) + if ok && len(b.buf) >= size { + return b + } + if size <= 0 { + size = defaultBufSize + } + return &Writer{ + buf: make([]byte, size), + wr: w, + } +} + +// NewWriter returns a new Writer whose buffer has the default size. +func NewWriter(w io.Writer) *Writer { + return NewWriterSize(w, defaultBufSize) +} + +// Reset discards any unflushed buffered data, clears any error, and +// resets b to write its output to w. +func (b *Writer) Reset(w io.Writer) { + b.err = nil + b.n = 0 + b.wr = w +} + +// Flush writes any buffered data to the underlying io.Writer. +func (b *Writer) Flush() error { + err := b.flush() + return err +} + +func (b *Writer) flush() error { + if b.err != nil { + return b.err + } + if b.n == 0 { + return nil + } + n, err := b.wr.Write(b.buf[0:b.n]) + if n < b.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < b.n { + copy(b.buf[0:b.n-n], b.buf[n:b.n]) + } + b.n -= n + b.err = err + return err + } + b.n = 0 + return nil +} + +// Available returns how many bytes are unused in the buffer. +func (b *Writer) Available() int { return len(b.buf) - b.n } + +// Buffered returns the number of bytes that have been written into the current buffer. +func (b *Writer) Buffered() int { return b.n } + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (b *Writer) Write(p []byte) (nn int, err error) { + for len(p) > b.Available() && b.err == nil { + var n int + if b.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, b.err = b.wr.Write(p) + } else { + n = copy(b.buf[b.n:], p) + b.n += n + b.flush() + } + nn += n + p = p[n:] + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], p) + b.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. +func (b *Writer) WriteByte(c byte) error { + if b.err != nil { + return b.err + } + if b.Available() <= 0 && b.flush() != nil { + return b.err + } + b.buf[b.n] = c + b.n++ + return nil +} + +// WriteRune writes a single Unicode code point, returning +// the number of bytes written and any error. +func (b *Writer) WriteRune(r rune) (size int, err error) { + if r < utf8.RuneSelf { + err = b.WriteByte(byte(r)) + if err != nil { + return 0, err + } + return 1, nil + } + if b.err != nil { + return 0, b.err + } + n := b.Available() + if n < utf8.UTFMax { + if b.flush(); b.err != nil { + return 0, b.err + } + n = b.Available() + if n < utf8.UTFMax { + // Can only happen if buffer is silly small. + return b.WriteString(string(r)) + } + } + size = utf8.EncodeRune(b.buf[b.n:], r) + b.n += size + return size, nil +} + +// WriteString writes a string. +// It returns the number of bytes written. +// If the count is less than len(s), it also returns an error explaining +// why the write is short. 
+func (b *Writer) WriteString(s string) (int, error) { + nn := 0 + for len(s) > b.Available() && b.err == nil { + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + s = s[n:] + b.flush() + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + return nn, nil +} + +// ReadFrom implements io.ReaderFrom. +func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if b.Buffered() == 0 { + if w, ok := b.wr.(io.ReaderFrom); ok { + return w.ReadFrom(r) + } + } + var m int + for { + if b.Available() == 0 { + if err1 := b.flush(); err1 != nil { + return n, err1 + } + } + nr := 0 + for nr < maxConsecutiveEmptyReads { + m, err = r.Read(b.buf[b.n:]) + if m != 0 || err != nil { + break + } + nr++ + } + if nr == maxConsecutiveEmptyReads { + return n, io.ErrNoProgress + } + b.n += m + n += int64(m) + if err != nil { + break + } + } + if err == io.EOF { + // If we filled the buffer exactly, flush pre-emptively. + if b.Available() == 0 { + err = b.flush() + } else { + err = nil + } + } + return n, err +} + +// buffered input and output + +// ReadWriter stores pointers to a Reader and a Writer. +// It implements io.ReadWriter. +type ReadWriter struct { + *Reader + *Writer +} + +// NewReadWriter allocates a new ReadWriter that dispatches to r and w. +func NewReadWriter(r *Reader, w *Writer) *ReadWriter { + return &ReadWriter{r, w} +} diff --git a/vendor/gopkg.in/redis.v3/.gitignore b/vendor/gopkg.in/redis.v3/.gitignore new file mode 100644 index 0000000..5959942 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/.gitignore @@ -0,0 +1,2 @@ +*.rdb +.test/ diff --git a/vendor/gopkg.in/redis.v3/.travis.yml b/vendor/gopkg.in/redis.v3/.travis.yml new file mode 100644 index 0000000..8a951ff --- /dev/null +++ b/vendor/gopkg.in/redis.v3/.travis.yml @@ -0,0 +1,18 @@ +language: go + +services: +- redis-server + +go: + - 1.3 + - 1.4 + - tip + +install: + - go get gopkg.in/bufio.v1 + - go get gopkg.in/bsm/ratelimit.v1 + - go get github.com/onsi/ginkgo + - go get github.com/onsi/gomega + - mkdir -p $HOME/gopath/src/gopkg.in + - mv $HOME/gopath/src/github.com/go-redis/redis $HOME/gopath/src/gopkg.in/redis.v3 + - cd $HOME/gopath/src/gopkg.in/redis.v3 diff --git a/vendor/gopkg.in/redis.v3/LICENSE b/vendor/gopkg.in/redis.v3/LICENSE new file mode 100644 index 0000000..6855a95 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Redis Go Client Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/redis.v3/Makefile b/vendor/gopkg.in/redis.v3/Makefile new file mode 100644 index 0000000..1107e5f --- /dev/null +++ b/vendor/gopkg.in/redis.v3/Makefile @@ -0,0 +1,17 @@ +all: testdeps + go test ./... -v=1 -cpu=1,2,4 + go test ./... -short -race + +test: testdeps + go test ./... -v=1 + +testdeps: .test/redis/src/redis-server + +.PHONY: all test testdeps + +.test/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/3.0.3.tar.gz | tar xvz --strip-components=1 -C $@ + +.test/redis/src/redis-server: .test/redis + cd $< && make all diff --git a/vendor/gopkg.in/redis.v3/README.md b/vendor/gopkg.in/redis.v3/README.md new file mode 100644 index 0000000..42e0686 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/README.md @@ -0,0 +1,96 @@ +Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis) +======================= + +Supports: + +- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC. +- [Pub/Sub](http://godoc.org/gopkg.in/redis.v3#PubSub). +- [Transactions](http://godoc.org/gopkg.in/redis.v3#Multi). +- [Pipelining](http://godoc.org/gopkg.in/redis.v3#Client.Pipeline). +- [Scripting](http://godoc.org/gopkg.in/redis.v3#Script). +- [Timeouts](http://godoc.org/gopkg.in/redis.v3#Options). +- [Redis Sentinel](http://godoc.org/gopkg.in/redis.v3#NewFailoverClient). +- [Redis Cluster](http://godoc.org/gopkg.in/redis.v3#NewClusterClient). +- [Ring](http://godoc.org/gopkg.in/redis.v3#NewRing). +- [Cache friendly](https://github.com/go-redis/cache). + +API docs: http://godoc.org/gopkg.in/redis.v3. +Examples: http://godoc.org/gopkg.in/redis.v3#pkg-examples. + +Installation +------------ + +Install: + + go get gopkg.in/redis.v3 + +Quickstart +---------- + +```go +func ExampleNewClient() { + client := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + pong, err := client.Ping().Result() + fmt.Println(pong, err) + // Output: PONG +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exists") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exists +} +``` + +Howto +----- + +Please go through [examples](http://godoc.org/gopkg.in/redis.v3#pkg-examples) to get an idea how to use this package. 
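For context within this diff: the `connectfour.go` change above wires this client in behind the existing in-memory cache, keyed by a board hash and configured from environment variables. A condensed, self-contained sketch of that pattern follows, assuming only the `NewClient`, `Get`, and `redis.Nil` behaviour documented here; the `newClientFromEnv` and `lookup` helpers and the `"some-board-hash"` key are hypothetical names used for illustration.

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
	"gopkg.in/redis.v3"
)

// newClientFromEnv mirrors the optional REDIS_HOSTNAME/REDIS_PASSWORD
// wiring added to connectfour.go; it returns nil when Redis is not configured.
func newClientFromEnv() *redis.Client {
	if os.Getenv("REDIS_HOSTNAME") == "" {
		return nil
	}
	return redis.NewClient(&redis.Options{
		Addr:     os.Getenv("REDIS_HOSTNAME"),
		Password: os.Getenv("REDIS_PASSWORD"),
		DB:       0,
	})
}

// lookup distinguishes "key absent" (redis.Nil) from a real error,
// as the BestMovements cache read in connectfour.go does.
func lookup(rc *redis.Client, hash string) (string, bool) {
	val, err := rc.Get(hash).Result()
	if err == redis.Nil {
		return "", false
	}
	if err != nil {
		logrus.Errorf("redis: failed to get value for hash=%q: %v", hash, err)
		return "", false
	}
	return val, true
}

func main() {
	if rc := newClientFromEnv(); rc != nil {
		if v, ok := lookup(rc, "some-board-hash"); ok {
			logrus.Infof("cache hit: %s", v)
		}
	}
}
```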
+ +Look and feel +------------- + +Some corner cases: + + SET key value EX 10 NX + set, err := client.SetNX("key", "value", 10*time.Second).Result() + + SORT list LIMIT 0 2 ASC + vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + + ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 + vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, + }).Result() + + ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM + vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + + EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" + vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result() diff --git a/vendor/gopkg.in/redis.v3/cluster.go b/vendor/gopkg.in/redis.v3/cluster.go new file mode 100644 index 0000000..cbf00b2 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/cluster.go @@ -0,0 +1,343 @@ +package redis + +import ( + "log" + "math/rand" + "strings" + "sync" + "sync/atomic" + "time" +) + +type ClusterClient struct { + commandable + + addrs []string + slots [][]string + slotsMx sync.RWMutex // Protects slots and addrs. + + clients map[string]*Client + closed bool + clientsMx sync.RWMutex // Protects clients and closed. + + opt *ClusterOptions + + // Reports where slots reloading is in progress. + reloading uint32 +} + +// NewClusterClient returns a new Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + client := &ClusterClient{ + addrs: opt.Addrs, + slots: make([][]string, hashSlots), + clients: make(map[string]*Client), + opt: opt, + } + client.commandable.process = client.process + client.reloadSlots() + go client.reaper() + return client +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a Client, as the Client is meant to be +// long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + defer c.clientsMx.Unlock() + c.clientsMx.Lock() + + if c.closed { + return nil + } + c.closed = true + c.resetClients() + c.setSlots(nil) + return nil +} + +// getClient returns a Client for a given address. +func (c *ClusterClient) getClient(addr string) (*Client, error) { + if addr == "" { + return c.randomClient() + } + + c.clientsMx.RLock() + client, ok := c.clients[addr] + if ok { + c.clientsMx.RUnlock() + return client, nil + } + c.clientsMx.RUnlock() + + c.clientsMx.Lock() + if c.closed { + c.clientsMx.Unlock() + return nil, errClosed + } + + client, ok = c.clients[addr] + if !ok { + opt := c.opt.clientOptions() + opt.Addr = addr + client = NewClient(opt) + c.clients[addr] = client + } + c.clientsMx.Unlock() + + return client, nil +} + +func (c *ClusterClient) slotAddrs(slot int) []string { + c.slotsMx.RLock() + addrs := c.slots[slot] + c.slotsMx.RUnlock() + return addrs +} + +func (c *ClusterClient) slotMasterAddr(slot int) string { + addrs := c.slotAddrs(slot) + if len(addrs) > 0 { + return addrs[0] + } + return "" +} + +// randomClient returns a Client for the first live node. 
+func (c *ClusterClient) randomClient() (client *Client, err error) { + for i := 0; i < 10; i++ { + n := rand.Intn(len(c.addrs)) + client, err = c.getClient(c.addrs[n]) + if err != nil { + continue + } + err = client.ClusterInfo().Err() + if err == nil { + return client, nil + } + } + return nil, err +} + +func (c *ClusterClient) process(cmd Cmder) { + var ask bool + + slot := hashSlot(cmd.clusterKey()) + + addr := c.slotMasterAddr(slot) + client, err := c.getClient(addr) + if err != nil { + cmd.setErr(err) + return + } + + for attempt := 0; attempt <= c.opt.getMaxRedirects(); attempt++ { + if attempt > 0 { + cmd.reset() + } + + if ask { + pipe := client.Pipeline() + pipe.Process(NewCmd("ASKING")) + pipe.Process(cmd) + _, _ = pipe.Exec() + ask = false + } else { + client.Process(cmd) + } + + // If there is no (real) error, we are done! + err := cmd.Err() + if err == nil || err == Nil || err == TxFailedErr { + return + } + + // On network errors try random node. + if isNetworkError(err) { + client, err = c.randomClient() + if err != nil { + return + } + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(err) + if moved || ask { + if moved && c.slotMasterAddr(slot) != addr { + c.lazyReloadSlots() + } + client, err = c.getClient(addr) + if err != nil { + return + } + continue + } + + break + } +} + +// Closes all clients and returns last error if there are any. +func (c *ClusterClient) resetClients() (err error) { + for addr, client := range c.clients { + if e := client.Close(); e != nil { + err = e + } + delete(c.clients, addr) + } + return err +} + +func (c *ClusterClient) setSlots(slots []ClusterSlotInfo) { + c.slotsMx.Lock() + + seen := make(map[string]struct{}) + for _, addr := range c.addrs { + seen[addr] = struct{}{} + } + + for i := 0; i < hashSlots; i++ { + c.slots[i] = c.slots[i][:0] + } + for _, info := range slots { + for slot := info.Start; slot <= info.End; slot++ { + c.slots[slot] = info.Addrs + } + + for _, addr := range info.Addrs { + if _, ok := seen[addr]; !ok { + c.addrs = append(c.addrs, addr) + seen[addr] = struct{}{} + } + } + } + + c.slotsMx.Unlock() +} + +func (c *ClusterClient) reloadSlots() { + defer atomic.StoreUint32(&c.reloading, 0) + + client, err := c.randomClient() + if err != nil { + log.Printf("redis: randomClient failed: %s", err) + return + } + + slots, err := client.ClusterSlots().Result() + if err != nil { + log.Printf("redis: ClusterSlots failed: %s", err) + return + } + c.setSlots(slots) +} + +func (c *ClusterClient) lazyReloadSlots() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go c.reloadSlots() +} + +// reaper closes idle connections to the cluster. +func (c *ClusterClient) reaper() { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + for _ = range ticker.C { + c.clientsMx.RLock() + + if c.closed { + c.clientsMx.RUnlock() + break + } + + for _, client := range c.clients { + pool := client.connPool + // pool.First removes idle connections from the pool and + // returns first non-idle connection. So just put returned + // connection back. + if cn := pool.First(); cn != nil { + pool.Put(cn) + } + } + + c.clientsMx.RUnlock() + } +} + +//------------------------------------------------------------------------------ + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. 
+ Addrs []string + + // The maximum number of MOVED/ASK redirects to follow before + // giving up. + // Default is 16 + MaxRedirects int + + // Following options are copied from Options struct. + + Password string + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration +} + +func (opt *ClusterOptions) getMaxRedirects() int { + if opt.MaxRedirects == -1 { + return 0 + } + if opt.MaxRedirects == 0 { + return 16 + } + return opt.MaxRedirects +} + +func (opt *ClusterOptions) clientOptions() *Options { + return &Options{ + Password: opt.Password, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + } +} + +//------------------------------------------------------------------------------ + +const hashSlots = 16384 + +func hashKey(key string) string { + if s := strings.IndexByte(key, '{'); s > -1 { + if e := strings.IndexByte(key[s+1:], '}'); e > 0 { + return key[s+1 : s+e+1] + } + } + return key +} + +// hashSlot returns a consistent slot number between 0 and 16383 +// for any given string key. +func hashSlot(key string) int { + key = hashKey(key) + if key == "" { + return rand.Intn(hashSlots) + } + return int(crc16sum(key)) % hashSlots +} diff --git a/vendor/gopkg.in/redis.v3/cluster_pipeline.go b/vendor/gopkg.in/redis.v3/cluster_pipeline.go new file mode 100644 index 0000000..2e11940 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/cluster_pipeline.go @@ -0,0 +1,123 @@ +package redis + +// ClusterPipeline is not thread-safe. +type ClusterPipeline struct { + commandable + + cmds []Cmder + cluster *ClusterClient + closed bool +} + +// Pipeline creates a new pipeline which is able to execute commands +// against multiple shards. +func (c *ClusterClient) Pipeline() *ClusterPipeline { + pipe := &ClusterPipeline{ + cluster: c, + cmds: make([]Cmder, 0, 10), + } + pipe.commandable.process = pipe.process + return pipe +} + +func (pipe *ClusterPipeline) process(cmd Cmder) { + pipe.cmds = append(pipe.cmds, cmd) +} + +// Discard resets the pipeline and discards queued commands. 
+func (pipe *ClusterPipeline) Discard() error { + if pipe.closed { + return errClosed + } + pipe.cmds = pipe.cmds[:0] + return nil +} + +func (pipe *ClusterPipeline) Exec() (cmds []Cmder, retErr error) { + if pipe.closed { + return nil, errClosed + } + if len(pipe.cmds) == 0 { + return []Cmder{}, nil + } + + cmds = pipe.cmds + pipe.cmds = make([]Cmder, 0, 10) + + cmdsMap := make(map[string][]Cmder) + for _, cmd := range cmds { + slot := hashSlot(cmd.clusterKey()) + addr := pipe.cluster.slotMasterAddr(slot) + cmdsMap[addr] = append(cmdsMap[addr], cmd) + } + + for attempt := 0; attempt <= pipe.cluster.opt.getMaxRedirects(); attempt++ { + failedCmds := make(map[string][]Cmder) + + for addr, cmds := range cmdsMap { + client, err := pipe.cluster.getClient(addr) + if err != nil { + setCmdsErr(cmds, err) + retErr = err + continue + } + + cn, err := client.conn() + if err != nil { + setCmdsErr(cmds, err) + retErr = err + continue + } + + failedCmds, err = pipe.execClusterCmds(cn, cmds, failedCmds) + if err != nil { + retErr = err + } + client.putConn(cn, err) + } + + cmdsMap = failedCmds + } + + return cmds, retErr +} + +// Close marks the pipeline as closed +func (pipe *ClusterPipeline) Close() error { + pipe.Discard() + pipe.closed = true + return nil +} + +func (pipe *ClusterPipeline) execClusterCmds( + cn *conn, cmds []Cmder, failedCmds map[string][]Cmder, +) (map[string][]Cmder, error) { + if err := cn.writeCmds(cmds...); err != nil { + setCmdsErr(cmds, err) + return failedCmds, err + } + + var firstCmdErr error + for i, cmd := range cmds { + err := cmd.parseReply(cn.rd) + if err == nil { + continue + } + if isNetworkError(err) { + cmd.reset() + failedCmds[""] = append(failedCmds[""], cmds[i:]...) + break + } else if moved, ask, addr := isMovedError(err); moved { + pipe.cluster.lazyReloadSlots() + cmd.reset() + failedCmds[addr] = append(failedCmds[addr], cmd) + } else if ask { + cmd.reset() + failedCmds[addr] = append(failedCmds[addr], NewCmd("ASKING"), cmd) + } else if firstCmdErr == nil { + firstCmdErr = err + } + } + + return failedCmds, firstCmdErr +} diff --git a/vendor/gopkg.in/redis.v3/command.go b/vendor/gopkg.in/redis.v3/command.go new file mode 100644 index 0000000..dab9fc3 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/command.go @@ -0,0 +1,783 @@ +package redis + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + _ Cmder = (*Cmd)(nil) + _ Cmder = (*SliceCmd)(nil) + _ Cmder = (*StatusCmd)(nil) + _ Cmder = (*IntCmd)(nil) + _ Cmder = (*DurationCmd)(nil) + _ Cmder = (*BoolCmd)(nil) + _ Cmder = (*StringCmd)(nil) + _ Cmder = (*FloatCmd)(nil) + _ Cmder = (*StringSliceCmd)(nil) + _ Cmder = (*BoolSliceCmd)(nil) + _ Cmder = (*StringStringMapCmd)(nil) + _ Cmder = (*StringIntMapCmd)(nil) + _ Cmder = (*ZSliceCmd)(nil) + _ Cmder = (*ScanCmd)(nil) + _ Cmder = (*ClusterSlotCmd)(nil) +) + +type Cmder interface { + args() []interface{} + parseReply(*bufio.Reader) error + setErr(error) + reset() + + writeTimeout() *time.Duration + readTimeout() *time.Duration + clusterKey() string + + Err() error + fmt.Stringer +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + cmd.setErr(e) + } +} + +func resetCmds(cmds []Cmder) { + for _, cmd := range cmds { + cmd.reset() + } +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + 
switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + + err error + + _clusterKeyPos int + + _writeTimeout, _readTimeout *time.Duration +} + +func (cmd *baseCmd) Err() error { + if cmd.err != nil { + return cmd.err + } + return nil +} + +func (cmd *baseCmd) args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) writeTimeout() *time.Duration { + return cmd._writeTimeout +} + +func (cmd *baseCmd) clusterKey() string { + if cmd._clusterKeyPos > 0 && cmd._clusterKeyPos < len(cmd._args) { + return fmt.Sprint(cmd._args[cmd._clusterKeyPos]) + } + return "" +} + +func (cmd *baseCmd) setWriteTimeout(d time.Duration) { + cmd._writeTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{baseCmd: baseCmd{_args: args}} +} + +func (cmd *Cmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) parseReply(rd *bufio.Reader) error { + cmd.val, cmd.err = parseReply(rd, parseSlice) + // Convert to string to preserve old behaviour. + // TODO: remove in v4 + if v, ok := cmd.val.([]byte); ok { + cmd.val = string(v) + } + return cmd.err +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *SliceCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func newKeylessStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{baseCmd: baseCmd{_args: args}} +} + +func (cmd *StatusCmd) reset() { + cmd.val = "" + cmd.err = nil +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = string(v.([]byte)) + return nil +} + 
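Every command type in command.go follows the shape shown above for Cmd, SliceCmd, and StatusCmd: a constructor captures the argument list, parseReply fills in the value or the error, and callers read the outcome through Val, Result, and Err. A minimal usage sketch of that pattern from the client side (the address is an assumption for illustration only; this block is not part of the vendored files):

```go
package main

import (
	"fmt"

	"gopkg.in/redis.v3"
)

func main() {
	// Assumed address; any reachable Redis instance would do.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Set returns a *StatusCmd; Err reports any network or protocol error.
	if err := client.Set("greeting", "hello", 0).Err(); err != nil {
		panic(err)
	}

	// Get returns a *StringCmd; Result unpacks Val and Err together.
	val, err := client.Get("greeting").Result()
	switch {
	case err == redis.Nil:
		fmt.Println("key does not exist")
	case err != nil:
		panic(err)
	default:
		fmt.Println("greeting =", val)
	}
}
```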
+//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *IntCmd) reset() { + cmd.val = 0 + cmd.err = nil +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(int64) + return nil +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + precision: precision, + baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}, + } +} + +func (cmd *DurationCmd) reset() { + cmd.val = 0 + cmd.err = nil +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = time.Duration(v.(int64)) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *BoolCmd) reset() { + cmd.val = false + cmd.err = nil +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +var ok = []byte("OK") + +func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + // `SET key value NX` returns nil when key already exists. 
+ if err == Nil {
+ cmd.val = false
+ return nil
+ }
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ switch vv := v.(type) {
+ case int64:
+ cmd.val = vv == 1
+ return nil
+ case []byte:
+ cmd.val = bytes.Equal(vv, ok)
+ return nil
+ default:
+ return fmt.Errorf("got %T, wanted int64 or string", v)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val []byte
+}
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+ return &StringCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}}
+}
+
+func (cmd *StringCmd) reset() {
+ cmd.val = nil
+ cmd.err = nil
+}
+
+func (cmd *StringCmd) Val() string {
+ return bytesToString(cmd.val)
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return scan(cmd.val, val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ b := v.([]byte)
+ cmd.val = make([]byte, len(b))
+ copy(cmd.val, b)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+ return &FloatCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}}
+}
+
+func (cmd *FloatCmd) reset() {
+ cmd.val = 0
+ cmd.err = nil
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ b := v.([]byte)
+ cmd.val, cmd.err = strconv.ParseFloat(bytesToString(b), 64)
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}}
+}
+
+func (cmd *StringSliceCmd) reset() {
+ cmd.val = nil
+ cmd.err = nil
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseStringSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{baseCmd: baseCmd{_args: args, 
_clusterKeyPos: 1}} +} + +func (cmd *BoolSliceCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseBoolSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]bool) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *StringStringMapCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringStringMap) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(map[string]string) + return nil +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *StringIntMapCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringIntMap) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(map[string]int64) + return nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *ZSliceCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseZSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]Z) + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + cursor int64 + keys []string +} + +func NewScanCmd(args ...interface{}) *ScanCmd { + return &ScanCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *ScanCmd) reset() { + cmd.cursor = 0 + cmd.keys = nil + cmd.err = nil +} + +func (cmd *ScanCmd) Val() (int64, []string) { + return cmd.cursor, cmd.keys +} + +func (cmd *ScanCmd) Result() (int64, []string, error) { + return cmd.cursor, cmd.keys, cmd.err +} + +func (cmd 
*ScanCmd) String() string { + return cmdString(cmd, cmd.keys) +} + +func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error { + vi, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return cmd.err + } + v := vi.([]interface{}) + + cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64) + if cmd.err != nil { + return cmd.err + } + + keys := v[1].([]interface{}) + for _, keyi := range keys { + cmd.keys = append(cmd.keys, keyi.(string)) + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ClusterSlotInfo struct { + Start, End int + Addrs []string +} + +type ClusterSlotCmd struct { + baseCmd + + val []ClusterSlotInfo +} + +func NewClusterSlotCmd(args ...interface{}) *ClusterSlotCmd { + return &ClusterSlotCmd{baseCmd: baseCmd{_args: args, _clusterKeyPos: 1}} +} + +func (cmd *ClusterSlotCmd) Val() []ClusterSlotInfo { + return cmd.val +} + +func (cmd *ClusterSlotCmd) Result() ([]ClusterSlotInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotCmd) reset() { + cmd.val = nil + cmd.err = nil +} + +func (cmd *ClusterSlotCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseClusterSlotInfoSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]ClusterSlotInfo) + return nil +} diff --git a/vendor/gopkg.in/redis.v3/commands.go b/vendor/gopkg.in/redis.v3/commands.go new file mode 100644 index 0000000..887fd73 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/commands.go @@ -0,0 +1,1573 @@ +package redis + +import ( + "io" + "log" + "strconv" + "time" +) + +func formatInt(i int64) string { + return strconv.FormatInt(i, 10) +} + +func formatUint(i uint64) string { + return strconv.FormatUint(i, 10) +} + +func formatFloat(f float64) string { + return strconv.FormatFloat(f, 'f', -1, 64) +} + +func readTimeout(timeout time.Duration) time.Duration { + if timeout == 0 { + return 0 + } + return timeout + time.Second +} + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(dur time.Duration) string { + if dur > 0 && dur < time.Millisecond { + log.Printf( + "redis: specified duration is %s, but minimal supported value is %s", + dur, time.Millisecond, + ) + } + return formatInt(int64(dur / time.Millisecond)) +} + +func formatSec(dur time.Duration) string { + if dur > 0 && dur < time.Second { + log.Printf( + "redis: specified duration is %s, but minimal supported value is %s", + dur, time.Second, + ) + } + return formatInt(int64(dur / time.Second)) +} + +type commandable struct { + process func(cmd Cmder) +} + +func (c *commandable) Process(cmd Cmder) { + c.process(cmd) +} + +//------------------------------------------------------------------------------ + +func (c *commandable) Auth(password string) *StatusCmd { + cmd := newKeylessStatusCmd("AUTH", password) + c.Process(cmd) + return cmd +} + +func (c *commandable) Echo(message string) *StringCmd { + cmd := NewStringCmd("ECHO", message) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) Ping() *StatusCmd { + cmd := newKeylessStatusCmd("PING") + c.Process(cmd) + return cmd +} + +func (c *commandable) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *commandable) Select(index int64) *StatusCmd { + cmd := newKeylessStatusCmd("SELECT", formatInt(index)) + c.Process(cmd) + return cmd +} + 
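The formatMs, formatSec, and usePrecise helpers above decide how a time.Duration is rendered for Redis, and later drive whether SET sends EX (seconds) or PX (milliseconds). A self-contained sketch of that arithmetic — the same truncation rules restated, not the library code itself:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond

	// Durations below one second, or not a whole number of seconds,
	// need millisecond precision, so SET falls back to PX.
	precise := d < time.Second || d%time.Second != 0
	fmt.Println(precise) // true

	fmt.Println(int64(d / time.Millisecond)) // 1500 -> value sent with PX
	fmt.Println(int64(d / time.Second))      // 1    -> would silently truncate with EX
}
```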
+//------------------------------------------------------------------------------ + +func (c *commandable) Del(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "DEL" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) Dump(key string) *StringCmd { + cmd := NewStringCmd("DUMP", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) Exists(key string) *BoolCmd { + cmd := NewBoolCmd("EXISTS", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) Expire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("EXPIRE", key, formatSec(expiration)) + c.Process(cmd) + return cmd +} + +func (c *commandable) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("EXPIREAT", key, formatInt(tm.Unix())) + c.Process(cmd) + return cmd +} + +func (c *commandable) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("KEYS", pattern) + c.Process(cmd) + return cmd +} + +func (c *commandable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + "MIGRATE", + host, + port, + key, + formatInt(db), + formatMs(timeout), + ) + cmd._clusterKeyPos = 3 + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *commandable) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("MOVE", key, formatInt(db)) + c.Process(cmd) + return cmd +} + +func (c *commandable) ObjectRefCount(keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "OBJECT" + args[1] = "REFCOUNT" + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + cmd._clusterKeyPos = 2 + c.Process(cmd) + return cmd +} + +func (c *commandable) ObjectEncoding(keys ...string) *StringCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "OBJECT" + args[1] = "ENCODING" + for i, key := range keys { + args[2+i] = key + } + cmd := NewStringCmd(args...) + cmd._clusterKeyPos = 2 + c.Process(cmd) + return cmd +} + +func (c *commandable) ObjectIdleTime(keys ...string) *DurationCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "OBJECT" + args[1] = "IDLETIME" + for i, key := range keys { + args[2+i] = key + } + cmd := NewDurationCmd(time.Second, args...) 
+ cmd._clusterKeyPos = 2 + c.Process(cmd) + return cmd +} + +func (c *commandable) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("PERSIST", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) PExpire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("PEXPIRE", key, formatMs(expiration)) + c.Process(cmd) + return cmd +} + +func (c *commandable) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "PEXPIREAT", + key, + formatInt(tm.UnixNano()/int64(time.Millisecond)), + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "PTTL", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) RandomKey() *StringCmd { + cmd := NewStringCmd("RANDOMKEY") + c.Process(cmd) + return cmd +} + +func (c *commandable) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("RENAME", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *commandable) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("RENAMENX", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *commandable) Restore(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "RESTORE", + key, + formatMs(ttl), + value, + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "RESTORE", + key, + formatMs(ttl), + value, + "REPLACE", + ) + c.Process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count float64 + Get []string + Order string + IsAlpha bool + Store string +} + +func (c *commandable) Sort(key string, sort Sort) *StringSliceCmd { + args := []interface{}{"SORT", key} + if sort.By != "" { + args = append(args, "BY", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count)) + } + for _, get := range sort.Get { + args = append(args, "GET", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.IsAlpha { + args = append(args, "ALPHA") + } + if sort.Store != "" { + args = append(args, "STORE", sort.Store) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "TTL", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) Type(key string) *StatusCmd { + cmd := NewStatusCmd("TYPE", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) Scan(cursor int64, match string, count int64) *ScanCmd { + args := []interface{}{"SCAN", formatInt(cursor)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", formatInt(count)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []interface{}{"SSCAN", key, formatInt(cursor)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", formatInt(count)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) HScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []interface{}{"HSCAN", key, formatInt(cursor)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", formatInt(count)) + } + cmd := NewScanCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *commandable) ZScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []interface{}{"ZSCAN", key, formatInt(cursor)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", formatInt(count)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) Append(key, value string) *IntCmd { + cmd := NewIntCmd("APPEND", key, value) + c.Process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *commandable) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"BITCOUNT", key} + if bitCount != nil { + args = append( + args, + formatInt(bitCount.Start), + formatInt(bitCount.End), + ) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) bitOp(op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "BITOP" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("AND", destKey, keys...) +} + +func (c *commandable) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("OR", destKey, keys...) +} + +func (c *commandable) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("XOR", destKey, keys...) +} + +func (c *commandable) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("NOT", destKey, key) +} + +func (c *commandable) BitPos(key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "BITPOS" + args[1] = key + args[2] = formatInt(bit) + switch len(pos) { + case 0: + case 1: + args[3] = formatInt(pos[0]) + case 2: + args[3] = formatInt(pos[0]) + args[4] = formatInt(pos[1]) + default: + panic("too many arguments") + } + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *commandable) Decr(key string) *IntCmd { + cmd := NewIntCmd("DECR", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("DECRBY", key, formatInt(decrement)) + c.Process(cmd) + return cmd +} + +func (c *commandable) Get(key string) *StringCmd { + cmd := NewStringCmd("GET", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("GETBIT", key, formatInt(offset)) + c.Process(cmd) + return cmd +} + +func (c *commandable) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd( + "GETRANGE", + key, + formatInt(start), + formatInt(end), + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) GetSet(key string, value interface{}) *StringCmd { + cmd := NewStringCmd("GETSET", key, value) + c.Process(cmd) + return cmd +} + +func (c *commandable) Incr(key string) *IntCmd { + cmd := NewIntCmd("INCR", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("INCRBY", key, formatInt(value)) + c.Process(cmd) + return cmd +} + +func (c *commandable) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value)) + c.Process(cmd) + return cmd +} + +func (c *commandable) MGet(keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "MGET" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) MSet(pairs ...string) *StatusCmd { + args := make([]interface{}, 1+len(pairs)) + args[0] = "MSET" + for i, pair := range pairs { + args[1+i] = pair + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) MSetNX(pairs ...string) *BoolCmd { + args := make([]interface{}, 1+len(pairs)) + args[0] = "MSETNX" + for i, pair := range pairs { + args[1+i] = pair + } + cmd := NewBoolCmd(args...) + c.Process(cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Zero expiration means the key has no expiration time. +func (c *commandable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "SET" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "PX", formatMs(expiration)) + } else { + args = append(args, "EX", formatSec(expiration)) + } + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "SETBIT", + key, + formatInt(offset), + formatInt(int64(value)), + ) + c.Process(cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +func (c *commandable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd("SETNX", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("SET", key, value, "PX", formatMs(expiration), "NX") + } else { + cmd = NewBoolCmd("SET", key, value, "EX", formatSec(expiration), "NX") + } + } + c.Process(cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. 
+func (c *Client) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if usePrecise(expiration) { + cmd = NewBoolCmd("SET", key, value, "PX", formatMs(expiration), "XX") + } else { + cmd = NewBoolCmd("SET", key, value, "EX", formatSec(expiration), "XX") + } + c.Process(cmd) + return cmd +} + +func (c *commandable) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("SETRANGE", key, formatInt(offset), value) + c.Process(cmd) + return cmd +} + +func (c *commandable) StrLen(key string) *IntCmd { + cmd := NewIntCmd("STRLEN", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) HDel(key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "HDEL" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("HEXISTS", key, field) + c.Process(cmd) + return cmd +} + +func (c *commandable) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("HGET", key, field) + c.Process(cmd) + return cmd +} + +func (c *commandable) HGetAll(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) HGetAllMap(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("HINCRBY", key, field, formatInt(incr)) + c.Process(cmd) + return cmd +} + +func (c *commandable) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr)) + c.Process(cmd) + return cmd +} + +func (c *commandable) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HKEYS", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) HLen(key string) *IntCmd { + cmd := NewIntCmd("HLEN", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) HMGet(key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "HMGET" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) HMSet(key, field, value string, pairs ...string) *StatusCmd { + args := make([]interface{}, 4+len(pairs)) + args[0] = "HMSET" + args[1] = key + args[2] = field + args[3] = value + for i, pair := range pairs { + args[4+i] = pair + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) HSet(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSET", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *commandable) HSetNX(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSETNX", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *commandable) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HVALS", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "BLPOP" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) 
+ cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *commandable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "BRPOP" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *commandable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + "BRPOPLPUSH", + source, + destination, + formatSec(timeout), + ) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *commandable) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("LINDEX", key, formatInt(index)) + c.Process(cmd) + return cmd +} + +func (c *commandable) LInsert(key, op, pivot, value string) *IntCmd { + cmd := NewIntCmd("LINSERT", key, op, pivot, value) + c.Process(cmd) + return cmd +} + +func (c *commandable) LLen(key string) *IntCmd { + cmd := NewIntCmd("LLEN", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) LPop(key string) *StringCmd { + cmd := NewStringCmd("LPOP", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) LPush(key string, values ...string) *IntCmd { + args := make([]interface{}, 2+len(values)) + args[0] = "LPUSH" + args[1] = key + for i, value := range values { + args[2+i] = value + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) LPushX(key, value string) *IntCmd { + cmd := NewIntCmd("LPUSHX", key, value) + c.Process(cmd) + return cmd +} + +func (c *commandable) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "LRANGE", + key, + formatInt(start), + formatInt(stop), + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) LRem(key string, count int64, value string) *IntCmd { + cmd := NewIntCmd("LREM", key, formatInt(count), value) + c.Process(cmd) + return cmd +} + +func (c *commandable) LSet(key string, index int64, value string) *StatusCmd { + cmd := NewStatusCmd("LSET", key, formatInt(index), value) + c.Process(cmd) + return cmd +} + +func (c *commandable) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "LTRIM", + key, + formatInt(start), + formatInt(stop), + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) RPop(key string) *StringCmd { + cmd := NewStringCmd("RPOP", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("RPOPLPUSH", source, destination) + c.Process(cmd) + return cmd +} + +func (c *commandable) RPush(key string, values ...string) *IntCmd { + args := make([]interface{}, 2+len(values)) + args[0] = "RPUSH" + args[1] = key + for i, value := range values { + args[2+i] = value + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) RPushX(key string, value string) *IntCmd { + cmd := NewIntCmd("RPUSHX", key, value) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) SAdd(key string, members ...string) *IntCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "SADD" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *commandable) SCard(key string) *IntCmd { + cmd := NewIntCmd("SCARD", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) SDiff(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "SDIFF" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SDiffStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "SDIFFSTORE" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SInter(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "SINTER" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SInterStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "SINTERSTORE" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SIsMember(key, member string) *BoolCmd { + cmd := NewBoolCmd("SISMEMBER", key, member) + c.Process(cmd) + return cmd +} + +func (c *commandable) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("SMEMBERS", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) SMove(source, destination, member string) *BoolCmd { + cmd := NewBoolCmd("SMOVE", source, destination, member) + c.Process(cmd) + return cmd +} + +func (c *commandable) SPop(key string) *StringCmd { + cmd := NewStringCmd("SPOP", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("SRANDMEMBER", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) SRem(key string, members ...string) *IntCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "SREM" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SUnion(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "SUNION" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) SUnionStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "SUNIONSTORE" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// Sorted set store operation. +type ZStore struct { + Weights []int64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +func (c *commandable) ZAdd(key string, members ...Z) *IntCmd { + args := make([]interface{}, 2+2*len(members)) + args[0] = "ZADD" + args[1] = key + for i, m := range members { + args[2+2*i] = formatFloat(m.Score) + args[2+2*i+1] = m.Member + } + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *commandable) ZCard(key string) *IntCmd { + cmd := NewIntCmd("ZCARD", key) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZCOUNT", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZInterStore( + destination string, + store ZStore, + keys ...string, +) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "ZINTERSTORE" + args[1] = destination + args[2] = strconv.Itoa(len(keys)) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, formatInt(weight)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []interface{}{ + "ZRANGE", + key, + formatInt(start), + formatInt(stop), + } + if withScores { + args = append(args, "WITHSCORES") + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *commandable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + args := []interface{}{ + "ZRANGE", + key, + formatInt(start), + formatInt(stop), + "WITHSCORES", + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +type ZRangeByScore struct { + Min, Max string + Offset, Count int64 +} + +func (c *commandable) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd { + args := []interface{}{"ZRANGEBYSCORE", key, opt.Min, opt.Max} + if withScores { + args = append(args, "WITHSCORES") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + formatInt(opt.Offset), + formatInt(opt.Count), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + return c.zRangeByScore(key, opt, false) +} + +func (c *commandable) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []interface{}{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + formatInt(opt.Offset), + formatInt(opt.Count), + ) + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRem(key string, members ...string) *IntCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "ZREM" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *commandable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "ZREMRANGEBYRANK", + key, + formatInt(start), + formatInt(stop), + ) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRevRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd("ZREVRANGE", key, formatInt(start), formatInt(stop)) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("ZREVRANGE", key, formatInt(start), formatInt(stop), "WITHSCORES") + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + args := []interface{}{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + formatInt(opt.Offset), + formatInt(opt.Count), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []interface{}{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + formatInt(opt.Offset), + formatInt(opt.Count), + ) + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZREVRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("ZSCORE", key, member) + c.Process(cmd) + return cmd +} + +func (c *commandable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "ZUNIONSTORE" + args[1] = dest + args[2] = strconv.Itoa(len(keys)) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, formatInt(weight)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("BGREWRITEAOF") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) BgSave() *StatusCmd { + cmd := NewStatusCmd("BGSAVE") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("CLIENT", "KILL", ipPort) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClientList() *StringCmd { + cmd := NewStringCmd("CLIENT", "LIST") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClientPause(dur time.Duration) *BoolCmd { + cmd := NewBoolCmd("CLIENT", "PAUSE", formatMs(dur)) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ConfigGet(parameter string) *SliceCmd { + cmd := NewSliceCmd("CONFIG", "GET", parameter) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ConfigResetStat() *StatusCmd { + cmd := NewStatusCmd("CONFIG", "RESETSTAT") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ConfigSet(parameter, value string) *StatusCmd { + cmd := NewStatusCmd("CONFIG", "SET", parameter, value) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) DbSize() *IntCmd { + cmd := NewIntCmd("DBSIZE") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) FlushAll() *StatusCmd { + cmd := newKeylessStatusCmd("FLUSHALL") + c.Process(cmd) + return cmd +} + +func (c *commandable) FlushDb() *StatusCmd { + cmd := newKeylessStatusCmd("FLUSHDB") + c.Process(cmd) + return cmd +} + +func (c *commandable) Info() *StringCmd { + cmd := NewStringCmd("INFO") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) LastSave() *IntCmd { + cmd := NewIntCmd("LASTSAVE") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) Save() *StatusCmd { + cmd := newKeylessStatusCmd("SAVE") + c.Process(cmd) + return cmd +} + +func (c *commandable) shutdown(modifier string) *StatusCmd { + var args []interface{} + if modifier == "" { + args = []interface{}{"SHUTDOWN"} + } else { + args = []interface{}{"SHUTDOWN", modifier} + } + cmd := newKeylessStatusCmd(args...) + c.Process(cmd) + if err := cmd.Err(); err != nil { + if err == io.EOF { + // Server quit as expected. + cmd.err = nil + } + } else { + // Server did not quit. String reply contains the reason. 
+ cmd.err = errorf(cmd.val) + cmd.val = "" + } + return cmd +} + +func (c *commandable) Shutdown() *StatusCmd { + return c.shutdown("") +} + +func (c *commandable) ShutdownSave() *StatusCmd { + return c.shutdown("SAVE") +} + +func (c *commandable) ShutdownNoSave() *StatusCmd { + return c.shutdown("NOSAVE") +} + +func (c *commandable) SlaveOf(host, port string) *StatusCmd { + cmd := newKeylessStatusCmd("SLAVEOF", host, port) + c.Process(cmd) + return cmd +} + +func (c *commandable) SlowLog() { + panic("not implemented") +} + +func (c *commandable) Sync() { + panic("not implemented") +} + +func (c *commandable) Time() *StringSliceCmd { + cmd := NewStringSliceCmd("TIME") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) Eval(script string, keys []string, args []string) *Cmd { + cmdArgs := make([]interface{}, 3+len(keys)+len(args)) + cmdArgs[0] = "EVAL" + cmdArgs[1] = script + cmdArgs[2] = strconv.Itoa(len(keys)) + for i, key := range keys { + cmdArgs[3+i] = key + } + pos := 3 + len(keys) + for i, arg := range args { + cmdArgs[pos+i] = arg + } + cmd := NewCmd(cmdArgs...) + if len(keys) > 0 { + cmd._clusterKeyPos = 3 + } + c.Process(cmd) + return cmd +} + +func (c *commandable) EvalSha(sha1 string, keys []string, args []string) *Cmd { + cmdArgs := make([]interface{}, 3+len(keys)+len(args)) + cmdArgs[0] = "EVALSHA" + cmdArgs[1] = sha1 + cmdArgs[2] = strconv.Itoa(len(keys)) + for i, key := range keys { + cmdArgs[3+i] = key + } + pos := 3 + len(keys) + for i, arg := range args { + cmdArgs[pos+i] = arg + } + cmd := NewCmd(cmdArgs...) + if len(keys) > 0 { + cmd._clusterKeyPos = 3 + } + c.Process(cmd) + return cmd +} + +func (c *commandable) ScriptExists(scripts ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(scripts)) + args[0] = "SCRIPT" + args[1] = "EXISTS" + for i, script := range scripts { + args[2+i] = script + } + cmd := NewBoolSliceCmd(args...) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ScriptFlush() *StatusCmd { + cmd := newKeylessStatusCmd("SCRIPT", "FLUSH") + c.Process(cmd) + return cmd +} + +func (c *commandable) ScriptKill() *StatusCmd { + cmd := newKeylessStatusCmd("SCRIPT", "KILL") + c.Process(cmd) + return cmd +} + +func (c *commandable) ScriptLoad(script string) *StringCmd { + cmd := NewStringCmd("SCRIPT", "LOAD", script) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) DebugObject(key string) *StringCmd { + cmd := NewStringCmd("DEBUG", "OBJECT", key) + cmd._clusterKeyPos = 2 + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) PubSubChannels(pattern string) *StringSliceCmd { + args := []interface{}{"PUBSUB", "CHANNELS"} + if pattern != "*" { + args = append(args, pattern) + } + cmd := NewStringSliceCmd(args...) + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) PubSubNumSub(channels ...string) *StringIntMapCmd { + args := make([]interface{}, 2+len(channels)) + args[0] = "PUBSUB" + args[1] = "NUMSUB" + for i, channel := range channels { + args[2+i] = channel + } + cmd := NewStringIntMapCmd(args...) 
+ cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) PubSubNumPat() *IntCmd { + cmd := NewIntCmd("PUBSUB", "NUMPAT") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *commandable) ClusterSlots() *ClusterSlotCmd { + cmd := NewClusterSlotCmd("CLUSTER", "slots") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterNodes() *StringCmd { + cmd := NewStringCmd("CLUSTER", "nodes") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterMeet(host, port string) *StatusCmd { + cmd := newKeylessStatusCmd("CLUSTER", "meet", host, port) + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterReplicate(nodeID string) *StatusCmd { + cmd := newKeylessStatusCmd("CLUSTER", "replicate", nodeID) + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterInfo() *StringCmd { + cmd := NewStringCmd("CLUSTER", "info") + cmd._clusterKeyPos = 0 + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterFailover() *StatusCmd { + cmd := newKeylessStatusCmd("CLUSTER", "failover") + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterAddSlots(slots ...int) *StatusCmd { + args := make([]interface{}, 2+len(slots)) + args[0] = "CLUSTER" + args[1] = "ADDSLOTS" + for i, num := range slots { + args[2+i] = strconv.Itoa(num) + } + cmd := newKeylessStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *commandable) ClusterAddSlotsRange(min, max int) *StatusCmd { + size := max - min + 1 + slots := make([]int, size) + for i := 0; i < size; i++ { + slots[i] = min + i + } + return c.ClusterAddSlots(slots...) +} diff --git a/vendor/gopkg.in/redis.v3/conn.go b/vendor/gopkg.in/redis.v3/conn.go new file mode 100644 index 0000000..9dc2ede --- /dev/null +++ b/vendor/gopkg.in/redis.v3/conn.go @@ -0,0 +1,104 @@ +package redis + +import ( + "net" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + zeroTime = time.Time{} +) + +type conn struct { + netcn net.Conn + rd *bufio.Reader + buf []byte + + usedAt time.Time + ReadTimeout time.Duration + WriteTimeout time.Duration +} + +func newConnDialer(opt *Options) func() (*conn, error) { + dialer := opt.getDialer() + return func() (*conn, error) { + netcn, err := dialer() + if err != nil { + return nil, err + } + cn := &conn{ + netcn: netcn, + buf: make([]byte, 0, 64), + } + cn.rd = bufio.NewReader(cn) + return cn, cn.init(opt) + } +} + +func (cn *conn) init(opt *Options) error { + if opt.Password == "" && opt.DB == 0 { + return nil + } + + // Use connection to connect to Redis. + pool := newSingleConnPoolConn(cn) + + // Client is not closed because we want to reuse underlying connection. 
+ client := newClient(opt, pool) + + if opt.Password != "" { + if err := client.Auth(opt.Password).Err(); err != nil { + return err + } + } + + if opt.DB > 0 { + if err := client.Select(opt.DB).Err(); err != nil { + return err + } + } + + return nil +} + +func (cn *conn) writeCmds(cmds ...Cmder) error { + buf := cn.buf[:0] + for _, cmd := range cmds { + var err error + buf, err = appendArgs(buf, cmd.args()) + if err != nil { + return err + } + } + + _, err := cn.Write(buf) + return err +} + +func (cn *conn) Read(b []byte) (int, error) { + if cn.ReadTimeout != 0 { + cn.netcn.SetReadDeadline(time.Now().Add(cn.ReadTimeout)) + } else { + cn.netcn.SetReadDeadline(zeroTime) + } + return cn.netcn.Read(b) +} + +func (cn *conn) Write(b []byte) (int, error) { + if cn.WriteTimeout != 0 { + cn.netcn.SetWriteDeadline(time.Now().Add(cn.WriteTimeout)) + } else { + cn.netcn.SetWriteDeadline(zeroTime) + } + return cn.netcn.Write(b) +} + +func (cn *conn) RemoteAddr() net.Addr { + return cn.netcn.RemoteAddr() +} + +func (cn *conn) Close() error { + return cn.netcn.Close() +} diff --git a/vendor/gopkg.in/redis.v3/crc16.go b/vendor/gopkg.in/redis.v3/crc16.go new file mode 100644 index 0000000..a7f3b56 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/crc16.go @@ -0,0 +1,47 @@ +package redis + +// CRC16 implementation according to CCITT standards. +// Copyright 2001-2010 Georges Menie (www.menie.org) +// Copyright 2013 The Go Authors. All rights reserved. +// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+} diff --git a/vendor/gopkg.in/redis.v3/doc.go b/vendor/gopkg.in/redis.v3/doc.go new file mode 100644 index 0000000..5526253 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/doc.go @@ -0,0 +1,4 @@ +/*
+Package redis implements a Redis client.
+*/
+package redis diff --git a/vendor/gopkg.in/redis.v3/error.go b/vendor/gopkg.in/redis.v3/error.go new file mode 100644 index 0000000..9e5d973 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/error.go @@ -0,0 +1,63 @@ +package redis
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// Redis nil reply, e.g. when key does not exist.
+var Nil = errorf("redis: nil")
+
+// Redis transaction failed.
+var TxFailedErr = errorf("redis: transaction failed")
+
+type redisError struct {
+ s string
+}
+
+func errorf(s string, args ...interface{}) redisError {
+ return redisError{s: fmt.Sprintf(s, args...)}
+}
+
+func (err redisError) Error() string {
+ return err.s
+}
+
+func isNetworkError(err error) bool {
+ if _, ok := err.(net.Error); ok || err == io.EOF {
+ return true
+ }
+ return false
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if _, ok := err.(redisError); !ok {
+ return
+ }
+
+ parts := strings.SplitN(err.Error(), " ", 3)
+ if len(parts) != 3 {
+ return
+ }
+
+ switch parts[0] {
+ case "MOVED":
+ moved = true
+ addr = parts[2]
+ case "ASK":
+ ask = true
+ addr = parts[2]
+ }
+
+ return
+}
+
+// shouldRetry reports whether failed command should be retried.
+func shouldRetry(err error) bool {
+ if err == nil {
+ return false
+ }
+ return isNetworkError(err)
+} diff --git a/vendor/gopkg.in/redis.v3/internal/consistenthash/consistenthash.go b/vendor/gopkg.in/redis.v3/internal/consistenthash/consistenthash.go new file mode 100644 index 0000000..a9c56f0 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/internal/consistenthash/consistenthash.go @@ -0,0 +1,81 @@ +/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+ "hash/crc32"
+ "sort"
+ "strconv"
+)
+
+type Hash func(data []byte) uint32
+
+type Map struct {
+ hash Hash
+ replicas int
+ keys []int // Sorted
+ hashMap map[int]string
+}
+
+func New(replicas int, fn Hash) *Map {
+ m := &Map{
+ replicas: replicas,
+ hash: fn,
+ hashMap: make(map[int]string),
+ }
+ if m.hash == nil {
+ m.hash = crc32.ChecksumIEEE
+ }
+ return m
+}
+
+// Returns true if there are no items available.
+func (m *Map) IsEmpty() bool {
+ return len(m.keys) == 0
+}
+
+// Adds some keys to the hash. 
+func (m *Map) Add(keys ...string) { + for _, key := range keys { + for i := 0; i < m.replicas; i++ { + hash := int(m.hash([]byte(strconv.Itoa(i) + key))) + m.keys = append(m.keys, hash) + m.hashMap[hash] = key + } + } + sort.Ints(m.keys) +} + +// Gets the closest item in the hash to the provided key. +func (m *Map) Get(key string) string { + if m.IsEmpty() { + return "" + } + + hash := int(m.hash([]byte(key))) + + // Binary search for appropriate replica. + idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) + + // Means we have cycled back to the first replica. + if idx == len(m.keys) { + idx = 0 + } + + return m.hashMap[m.keys[idx]] +} diff --git a/vendor/gopkg.in/redis.v3/multi.go b/vendor/gopkg.in/redis.v3/multi.go new file mode 100644 index 0000000..63ecdd5 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/multi.go @@ -0,0 +1,154 @@ +package redis + +import ( + "errors" + "fmt" + "log" +) + +var errDiscard = errors.New("redis: Discard can be used only inside Exec") + +// Multi implements Redis transactions as described in +// http://redis.io/topics/transactions. +type Multi struct { + commandable + + base *baseClient + cmds []Cmder +} + +func (c *Client) Multi() *Multi { + multi := &Multi{ + base: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, true), + }, + } + multi.commandable.process = multi.process + return multi +} + +func (c *Multi) process(cmd Cmder) { + if c.cmds == nil { + c.base.process(cmd) + } else { + c.cmds = append(c.cmds, cmd) + } +} + +func (c *Multi) Close() error { + if err := c.Unwatch().Err(); err != nil { + log.Printf("redis: Unwatch failed: %s", err) + } + return c.base.Close() +} + +func (c *Multi) Watch(keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "WATCH" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Unwatch(keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "UNWATCH" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Discard() error { + if c.cmds == nil { + return errDiscard + } + c.cmds = c.cmds[:1] + return nil +} + +// Exec always returns list of commands. If transaction fails +// TxFailedErr is returned. Otherwise Exec returns error of the first +// failed command or nil. +func (c *Multi) Exec(f func() error) ([]Cmder, error) { + c.cmds = []Cmder{NewStatusCmd("MULTI")} + if err := f(); err != nil { + return nil, err + } + c.cmds = append(c.cmds, NewSliceCmd("EXEC")) + + cmds := c.cmds + c.cmds = nil + + if len(cmds) == 2 { + return []Cmder{}, nil + } + + cn, err := c.base.conn() + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return cmds[1 : len(cmds)-1], err + } + + err = c.execCmds(cn, cmds) + c.base.putConn(cn, err) + return cmds[1 : len(cmds)-1], err +} + +func (c *Multi) execCmds(cn *conn, cmds []Cmder) error { + err := cn.writeCmds(cmds...) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + + statusCmd := NewStatusCmd() + + // Omit last command (EXEC). + cmdsLen := len(cmds) - 1 + + // Parse queued replies. + for i := 0; i < cmdsLen; i++ { + if err := statusCmd.parseReply(cn.rd); err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + } + + // Parse number of replies. 
+ line, err := readLine(cn.rd) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if line[0] != '*' { + err := fmt.Errorf("redis: expected '*', but got line %q", line) + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr) + return TxFailedErr + } + + var firstCmdErr error + + // Parse replies. + // Loop starts from 1 to omit MULTI cmd. + for i := 1; i < cmdsLen; i++ { + cmd := cmds[i] + if err := cmd.parseReply(cn.rd); err != nil { + if firstCmdErr == nil { + firstCmdErr = err + } + } + } + + return firstCmdErr +} diff --git a/vendor/gopkg.in/redis.v3/parser.go b/vendor/gopkg.in/redis.v3/parser.go new file mode 100644 index 0000000..32646ff --- /dev/null +++ b/vendor/gopkg.in/redis.v3/parser.go @@ -0,0 +1,529 @@ +package redis + +import ( + "errors" + "fmt" + "net" + "strconv" + + "gopkg.in/bufio.v1" +) + +type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error) + +var ( + errReaderTooSmall = errors.New("redis: reader is too small") +) + +//------------------------------------------------------------------------------ + +// Copy of encoding.BinaryMarshaler. +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// Copy of encoding.BinaryUnmarshaler. +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +func appendString(b []byte, s string) []byte { + b = append(b, '$') + b = strconv.AppendUint(b, uint64(len(s)), 10) + b = append(b, '\r', '\n') + b = append(b, s...) + b = append(b, '\r', '\n') + return b +} + +func appendBytes(b, bb []byte) []byte { + b = append(b, '$') + b = strconv.AppendUint(b, uint64(len(bb)), 10) + b = append(b, '\r', '\n') + b = append(b, bb...) 
+ b = append(b, '\r', '\n') + return b +} + +func appendArg(b []byte, val interface{}) ([]byte, error) { + switch v := val.(type) { + case nil: + b = appendString(b, "") + case string: + b = appendString(b, v) + case []byte: + b = appendBytes(b, v) + case int: + b = appendString(b, formatInt(int64(v))) + case int8: + b = appendString(b, formatInt(int64(v))) + case int16: + b = appendString(b, formatInt(int64(v))) + case int32: + b = appendString(b, formatInt(int64(v))) + case int64: + b = appendString(b, formatInt(v)) + case uint: + b = appendString(b, formatUint(uint64(v))) + case uint8: + b = appendString(b, formatUint(uint64(v))) + case uint16: + b = appendString(b, formatUint(uint64(v))) + case uint32: + b = appendString(b, formatUint(uint64(v))) + case uint64: + b = appendString(b, formatUint(v)) + case float32: + b = appendString(b, formatFloat(float64(v))) + case float64: + b = appendString(b, formatFloat(v)) + case bool: + if v { + b = appendString(b, "1") + } else { + b = appendString(b, "0") + } + default: + if bm, ok := val.(binaryMarshaler); ok { + bb, err := bm.MarshalBinary() + if err != nil { + return nil, err + } + b = appendBytes(b, bb) + } else { + err := fmt.Errorf( + "redis: can't marshal %T (consider implementing BinaryMarshaler)", val) + return nil, err + } + } + return b, nil +} + +func appendArgs(b []byte, args []interface{}) ([]byte, error) { + b = append(b, '*') + b = strconv.AppendUint(b, uint64(len(args)), 10) + b = append(b, '\r', '\n') + for _, arg := range args { + var err error + b, err = appendArg(b, arg) + if err != nil { + return nil, err + } + } + return b, nil +} + +func scan(b []byte, val interface{}) error { + switch v := val.(type) { + case nil: + return errorf("redis: Scan(nil)") + case *string: + *v = bytesToString(b) + return nil + case *[]byte: + *v = b + return nil + case *int: + var err error + *v, err = strconv.Atoi(bytesToString(b)) + return err + case *int8: + n, err := strconv.ParseInt(bytesToString(b), 10, 8) + if err != nil { + return err + } + *v = int8(n) + return nil + case *int16: + n, err := strconv.ParseInt(bytesToString(b), 10, 16) + if err != nil { + return err + } + *v = int16(n) + return nil + case *int32: + n, err := strconv.ParseInt(bytesToString(b), 10, 16) + if err != nil { + return err + } + *v = int32(n) + return nil + case *int64: + n, err := strconv.ParseInt(bytesToString(b), 10, 64) + if err != nil { + return err + } + *v = n + return nil + case *uint: + n, err := strconv.ParseUint(bytesToString(b), 10, 64) + if err != nil { + return err + } + *v = uint(n) + return nil + case *uint8: + n, err := strconv.ParseUint(bytesToString(b), 10, 8) + if err != nil { + return err + } + *v = uint8(n) + return nil + case *uint16: + n, err := strconv.ParseUint(bytesToString(b), 10, 16) + if err != nil { + return err + } + *v = uint16(n) + return nil + case *uint32: + n, err := strconv.ParseUint(bytesToString(b), 10, 32) + if err != nil { + return err + } + *v = uint32(n) + return nil + case *uint64: + n, err := strconv.ParseUint(bytesToString(b), 10, 64) + if err != nil { + return err + } + *v = n + return nil + case *float32: + n, err := strconv.ParseFloat(bytesToString(b), 32) + if err != nil { + return err + } + *v = float32(n) + return err + case *float64: + var err error + *v, err = strconv.ParseFloat(bytesToString(b), 64) + return err + case *bool: + *v = len(b) == 1 && b[0] == '1' + return nil + default: + if bu, ok := val.(binaryUnmarshaler); ok { + return bu.UnmarshalBinary(b) + } + err := fmt.Errorf( + "redis: can't 
unmarshal %T (consider implementing BinaryUnmarshaler)", val) + return err + } +} + +//------------------------------------------------------------------------------ + +func readLine(rd *bufio.Reader) ([]byte, error) { + line, isPrefix, err := rd.ReadLine() + if err != nil { + return line, err + } + if isPrefix { + return line, errReaderTooSmall + } + return line, nil +} + +func readN(rd *bufio.Reader, n int) ([]byte, error) { + b, err := rd.ReadN(n) + if err == bufio.ErrBufferFull { + tmp := make([]byte, n) + r := copy(tmp, b) + b = tmp + + for { + nn, err := rd.Read(b[r:]) + r += nn + if r >= n { + // Ignore error if we read enough. + break + } + if err != nil { + return nil, err + } + } + } else if err != nil { + return nil, err + } + return b, nil +} + +//------------------------------------------------------------------------------ + +func parseReq(rd *bufio.Reader) ([]string, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + if line[0] != '*' { + return []string{string(line)}, nil + } + numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + args := make([]string, 0, numReplies) + for i := int64(0); i < numReplies; i++ { + line, err = readLine(rd) + if err != nil { + return nil, err + } + if line[0] != '$' { + return nil, fmt.Errorf("redis: expected '$', but got %q", line) + } + + argLen, err := strconv.ParseInt(string(line[1:]), 10, 32) + if err != nil { + return nil, err + } + + arg, err := readN(rd, int(argLen)+2) + if err != nil { + return nil, err + } + args = append(args, string(arg[:argLen])) + } + return args, nil +} + +//------------------------------------------------------------------------------ + +func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + switch line[0] { + case '-': + return nil, errorf(string(line[1:])) + case '+': + return line[1:], nil + case ':': + v, err := strconv.ParseInt(bytesToString(line[1:]), 10, 64) + if err != nil { + return nil, err + } + return v, nil + case '$': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + replyLen, err := strconv.Atoi(string(line[1:])) + if err != nil { + return nil, err + } + + b, err := readN(rd, replyLen+2) + if err != nil { + return nil, err + } + return b[:replyLen], nil + case '*': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + repliesNum, err := strconv.ParseInt(bytesToString(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + return p(rd, repliesNum) + } + return nil, fmt.Errorf("redis: can't parse %q", line) +} + +func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := parseReply(rd, parseSlice) + if err == Nil { + vals = append(vals, nil) + } else if err != nil { + return nil, err + } else { + switch vv := v.(type) { + case []byte: + vals = append(vals, string(vv)) + default: + vals = append(vals, v) + } + } + } + return vals, nil +} + +func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]string, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", viface) + } + vals = append(vals, string(v)) + } + return vals, nil +} + +func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, 
error) { + vals := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.(int64) + if !ok { + return nil, fmt.Errorf("got %T, expected int64", viface) + } + vals = append(vals, v == 1) + } + return vals, nil +} + +func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + keyiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + key, ok := keyiface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", keyiface) + } + + valueiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + value, ok := valueiface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", valueiface) + } + + m[string(key)] = string(value) + } + return m, nil +} + +func parseStringIntMap(rd *bufio.Reader, n int64) (interface{}, error) { + m := make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + keyiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + key, ok := keyiface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", keyiface) + } + + valueiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + switch value := valueiface.(type) { + case int64: + m[string(key)] = value + case string: + m[string(key)], err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("got %v, expected number", value) + } + default: + return nil, fmt.Errorf("got %T, expected number or string", valueiface) + } + } + return m, nil +} + +func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + z := &zz[i/2] + + memberiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + member, ok := memberiface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", memberiface) + } + z.Member = string(member) + + scoreiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + scoreb, ok := scoreiface.([]byte) + if !ok { + return nil, fmt.Errorf("got %T, expected string", scoreiface) + } + score, err := strconv.ParseFloat(bytesToString(scoreb), 64) + if err != nil { + return nil, err + } + z.Score = score + } + return zz, nil +} + +func parseClusterSlotInfoSlice(rd *bufio.Reader, n int64) (interface{}, error) { + infos := make([]ClusterSlotInfo, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, parseSlice) + if err != nil { + return nil, err + } + + item, ok := viface.([]interface{}) + if !ok { + return nil, fmt.Errorf("got %T, expected []interface{}", viface) + } else if len(item) < 3 { + return nil, fmt.Errorf("got %v, expected {int64, int64, string...}", item) + } + + start, ok := item[0].(int64) + if !ok || start < 0 || start > hashSlots { + return nil, fmt.Errorf("got %v, expected {int64, int64, string...}", item) + } + end, ok := item[1].(int64) + if !ok || end < 0 || end > hashSlots { + return nil, fmt.Errorf("got %v, expected {int64, int64, string...}", item) + } + + info := ClusterSlotInfo{int(start), int(end), make([]string, len(item)-2)} + for n, ipair := range item[2:] { + pair, ok := ipair.([]interface{}) + if !ok || len(pair) != 2 { + return nil, fmt.Errorf("got %v, expected []interface{host, port}", viface) + } + + ip, ok := pair[0].(string) + if !ok || len(ip) < 1 { + return nil, fmt.Errorf("got %v, expected IP PORT pair", pair) + } + port, ok := 
pair[1].(int64) + if !ok || port < 1 { + return nil, fmt.Errorf("got %v, expected IP PORT pair", pair) + } + + info.Addrs[n] = net.JoinHostPort(ip, strconv.FormatInt(port, 10)) + } + infos = append(infos, info) + } + return infos, nil +} diff --git a/vendor/gopkg.in/redis.v3/pipeline.go b/vendor/gopkg.in/redis.v3/pipeline.go new file mode 100644 index 0000000..8981cb5 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/pipeline.go @@ -0,0 +1,113 @@ +package redis + +// Pipeline implements pipelining as described in +// http://redis.io/topics/pipelining. +// +// Pipeline is not thread-safe. +type Pipeline struct { + commandable + + client *baseClient + + cmds []Cmder + closed bool +} + +func (c *Client) Pipeline() *Pipeline { + pipe := &Pipeline{ + client: c.baseClient, + cmds: make([]Cmder, 0, 10), + } + pipe.commandable.process = pipe.process + return pipe +} + +func (c *Client) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) { + pipe := c.Pipeline() + if err := fn(pipe); err != nil { + return nil, err + } + cmds, err := pipe.Exec() + pipe.Close() + return cmds, err +} + +func (pipe *Pipeline) process(cmd Cmder) { + pipe.cmds = append(pipe.cmds, cmd) +} + +func (pipe *Pipeline) Close() error { + pipe.Discard() + pipe.closed = true + return nil +} + +// Discard resets the pipeline and discards queued commands. +func (pipe *Pipeline) Discard() error { + if pipe.closed { + return errClosed + } + pipe.cmds = pipe.cmds[:0] + return nil +} + +// Exec always returns list of commands and error of the first failed +// command if any. +func (pipe *Pipeline) Exec() (cmds []Cmder, retErr error) { + if pipe.closed { + return nil, errClosed + } + if len(pipe.cmds) == 0 { + return pipe.cmds, nil + } + + cmds = pipe.cmds + pipe.cmds = make([]Cmder, 0, 10) + + failedCmds := cmds + for i := 0; i <= pipe.client.opt.MaxRetries; i++ { + cn, err := pipe.client.conn() + if err != nil { + setCmdsErr(failedCmds, err) + return cmds, err + } + + if i > 0 { + resetCmds(failedCmds) + } + failedCmds, err = execCmds(cn, failedCmds) + pipe.client.putConn(cn, err) + if err != nil && retErr == nil { + retErr = err + } + if len(failedCmds) == 0 { + break + } + } + + return cmds, retErr +} + +func execCmds(cn *conn, cmds []Cmder) ([]Cmder, error) { + if err := cn.writeCmds(cmds...); err != nil { + setCmdsErr(cmds, err) + return cmds, err + } + + var firstCmdErr error + var failedCmds []Cmder + for _, cmd := range cmds { + err := cmd.parseReply(cn.rd) + if err == nil { + continue + } + if firstCmdErr == nil { + firstCmdErr = err + } + if shouldRetry(err) { + failedCmds = append(failedCmds, cmd) + } + } + + return failedCmds, firstCmdErr +} diff --git a/vendor/gopkg.in/redis.v3/pool.go b/vendor/gopkg.in/redis.v3/pool.go new file mode 100644 index 0000000..71ac456 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/pool.go @@ -0,0 +1,442 @@ +package redis + +import ( + "errors" + "fmt" + "log" + "sync" + "sync/atomic" + "time" + + "gopkg.in/bsm/ratelimit.v1" +) + +var ( + errClosed = errors.New("redis: client is closed") + errPoolTimeout = errors.New("redis: connection pool timeout") +) + +type pool interface { + First() *conn + Get() (*conn, error) + Put(*conn) error + Remove(*conn) error + Len() int + FreeLen() int + Close() error +} + +type connList struct { + cns []*conn + mx sync.Mutex + len int32 // atomic + size int32 +} + +func newConnList(size int) *connList { + return &connList{ + cns: make([]*conn, 0, size), + size: int32(size), + } +} + +func (l *connList) Len() int { + return int(atomic.LoadInt32(&l.len)) +} + +// Reserve 
reserves place in the list and returns true on success. The +// caller must add or remove connection if place was reserved. +func (l *connList) Reserve() bool { + len := atomic.AddInt32(&l.len, 1) + reserved := len <= l.size + if !reserved { + atomic.AddInt32(&l.len, -1) + } + return reserved +} + +// Add adds connection to the list. The caller must reserve place first. +func (l *connList) Add(cn *conn) { + l.mx.Lock() + l.cns = append(l.cns, cn) + l.mx.Unlock() +} + +// Remove closes connection and removes it from the list. +func (l *connList) Remove(cn *conn) error { + defer l.mx.Unlock() + l.mx.Lock() + + if cn == nil { + atomic.AddInt32(&l.len, -1) + return nil + } + + for i, c := range l.cns { + if c == cn { + l.cns = append(l.cns[:i], l.cns[i+1:]...) + atomic.AddInt32(&l.len, -1) + return cn.Close() + } + } + + if l.closed() { + return nil + } + panic("conn not found in the list") +} + +func (l *connList) Replace(cn, newcn *conn) error { + defer l.mx.Unlock() + l.mx.Lock() + + for i, c := range l.cns { + if c == cn { + l.cns[i] = newcn + return cn.Close() + } + } + + if l.closed() { + return newcn.Close() + } + panic("conn not found in the list") +} + +func (l *connList) Close() (retErr error) { + l.mx.Lock() + for _, c := range l.cns { + if err := c.Close(); err != nil { + retErr = err + } + } + l.cns = nil + atomic.StoreInt32(&l.len, 0) + l.mx.Unlock() + return retErr +} + +func (l *connList) closed() bool { + return l.cns == nil +} + +type connPool struct { + dialer func() (*conn, error) + + rl *ratelimit.RateLimiter + opt *Options + conns *connList + freeConns chan *conn + + _closed int32 + + lastDialErr error +} + +func newConnPool(opt *Options) *connPool { + p := &connPool{ + dialer: newConnDialer(opt), + + rl: ratelimit.New(2*opt.getPoolSize(), time.Second), + opt: opt, + conns: newConnList(opt.getPoolSize()), + freeConns: make(chan *conn, opt.getPoolSize()), + } + if p.opt.getIdleTimeout() > 0 { + go p.reaper() + } + return p +} + +func (p *connPool) closed() bool { + return atomic.LoadInt32(&p._closed) == 1 +} + +func (p *connPool) isIdle(cn *conn) bool { + return p.opt.getIdleTimeout() > 0 && time.Since(cn.usedAt) > p.opt.getIdleTimeout() +} + +// First returns first non-idle connection from the pool or nil if +// there are no connections. +func (p *connPool) First() *conn { + for { + select { + case cn := <-p.freeConns: + if p.isIdle(cn) { + p.conns.Remove(cn) + continue + } + return cn + default: + return nil + } + } + panic("not reached") +} + +// wait waits for free non-idle connection. It returns nil on timeout. +func (p *connPool) wait() *conn { + deadline := time.After(p.opt.getPoolTimeout()) + for { + select { + case cn := <-p.freeConns: + if p.isIdle(cn) { + p.Remove(cn) + continue + } + return cn + case <-deadline: + return nil + } + } + panic("not reached") +} + +// Establish a new connection +func (p *connPool) new() (*conn, error) { + if p.rl.Limit() { + err := fmt.Errorf( + "redis: you open connections too fast (last error: %v)", + p.lastDialErr, + ) + return nil, err + } + + cn, err := p.dialer() + if err != nil { + p.lastDialErr = err + return nil, err + } + + return cn, nil +} + +// Get returns existed connection from the pool or creates a new one. +func (p *connPool) Get() (*conn, error) { + if p.closed() { + return nil, errClosed + } + + // Fetch first non-idle connection, if available. + if cn := p.First(); cn != nil { + return cn, nil + } + + // Try to create a new one. 
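+	// Reserve atomically claims a slot in the connection list; if the dial below fails,
+	// Remove(nil) releases that reservation so the pool size stays consistent.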
+ if p.conns.Reserve() { + cn, err := p.new() + if err != nil { + p.conns.Remove(nil) + return nil, err + } + p.conns.Add(cn) + return cn, nil + } + + // Otherwise, wait for the available connection. + if cn := p.wait(); cn != nil { + return cn, nil + } + + return nil, errPoolTimeout +} + +func (p *connPool) Put(cn *conn) error { + if cn.rd.Buffered() != 0 { + b, _ := cn.rd.ReadN(cn.rd.Buffered()) + log.Printf("redis: connection has unread data: %q", b) + return p.Remove(cn) + } + if p.opt.getIdleTimeout() > 0 { + cn.usedAt = time.Now() + } + p.freeConns <- cn + return nil +} + +func (p *connPool) Remove(cn *conn) error { + // Replace existing connection with new one and unblock waiter. + newcn, err := p.new() + if err != nil { + log.Printf("redis: new failed: %s", err) + return p.conns.Remove(cn) + } + err = p.conns.Replace(cn, newcn) + p.freeConns <- newcn + return err +} + +// Len returns total number of connections. +func (p *connPool) Len() int { + return p.conns.Len() +} + +// FreeLen returns number of free connections. +func (p *connPool) FreeLen() int { + return len(p.freeConns) +} + +func (p *connPool) Close() (retErr error) { + if !atomic.CompareAndSwapInt32(&p._closed, 0, 1) { + return errClosed + } + // Wait for app to free connections, but don't close them immediately. + for i := 0; i < p.Len(); i++ { + if cn := p.wait(); cn == nil { + break + } + } + // Close all connections. + if err := p.conns.Close(); err != nil { + retErr = err + } + return retErr +} + +func (p *connPool) reaper() { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for _ = range ticker.C { + if p.closed() { + break + } + + // pool.First removes idle connections from the pool and + // returns first non-idle connection. So just put returned + // connection back. 
+ if cn := p.First(); cn != nil { + p.Put(cn) + } + } +} + +//------------------------------------------------------------------------------ + +type singleConnPool struct { + pool pool + reusable bool + + cn *conn + closed bool + mx sync.Mutex +} + +func newSingleConnPool(pool pool, reusable bool) *singleConnPool { + return &singleConnPool{ + pool: pool, + reusable: reusable, + } +} + +func newSingleConnPoolConn(cn *conn) *singleConnPool { + return &singleConnPool{ + cn: cn, + } +} + +func (p *singleConnPool) First() *conn { + p.mx.Lock() + cn := p.cn + p.mx.Unlock() + return cn +} + +func (p *singleConnPool) Get() (*conn, error) { + defer p.mx.Unlock() + p.mx.Lock() + + if p.closed { + return nil, errClosed + } + if p.cn != nil { + return p.cn, nil + } + + cn, err := p.pool.Get() + if err != nil { + return nil, err + } + p.cn = cn + + return p.cn, nil +} + +func (p *singleConnPool) put() (err error) { + if p.pool != nil { + err = p.pool.Put(p.cn) + } + p.cn = nil + return err +} + +func (p *singleConnPool) Put(cn *conn) error { + defer p.mx.Unlock() + p.mx.Lock() + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return nil +} + +func (p *singleConnPool) remove() (err error) { + if p.pool != nil { + err = p.pool.Remove(p.cn) + } + p.cn = nil + return err +} + +func (p *singleConnPool) Remove(cn *conn) error { + defer p.mx.Unlock() + p.mx.Lock() + if p.cn == nil { + panic("p.cn == nil") + } + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return p.remove() +} + +func (p *singleConnPool) Len() int { + defer p.mx.Unlock() + p.mx.Lock() + if p.cn == nil { + return 0 + } + return 1 +} + +func (p *singleConnPool) FreeLen() int { + defer p.mx.Unlock() + p.mx.Lock() + if p.cn == nil { + return 1 + } + return 0 +} + +func (p *singleConnPool) Close() error { + defer p.mx.Unlock() + p.mx.Lock() + if p.closed { + return errClosed + } + p.closed = true + var err error + if p.cn != nil { + if p.reusable { + err = p.put() + } else { + err = p.remove() + } + } + return err +} diff --git a/vendor/gopkg.in/redis.v3/pubsub.go b/vendor/gopkg.in/redis.v3/pubsub.go new file mode 100644 index 0000000..1f4f5b6 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/pubsub.go @@ -0,0 +1,190 @@ +package redis + +import ( + "fmt" + "time" +) + +// Posts a message to the given channel. +func (c *Client) Publish(channel, message string) *IntCmd { + req := NewIntCmd("PUBLISH", channel, message) + c.Process(req) + return req +} + +// PubSub implements Pub/Sub commands as described in +// http://redis.io/topics/pubsub. +type PubSub struct { + *baseClient +} + +// Deprecated. Use Subscribe/PSubscribe instead. +func (c *Client) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +// Subscribes the client to the specified channels. +func (c *Client) Subscribe(channels ...string) (*PubSub, error) { + pubsub := c.PubSub() + return pubsub, pubsub.Subscribe(channels...) +} + +// Subscribes the client to the given patterns. +func (c *Client) PSubscribe(channels ...string) (*PubSub, error) { + pubsub := c.PubSub() + return pubsub, pubsub.PSubscribe(channels...) +} + +func (c *PubSub) Ping(payload string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := []interface{}{"PING"} + if payload != "" { + args = append(args, payload) + } + cmd := NewCmd(args...) + return cn.writeCmds(cmd) +} + +// Message received after a successful subscription to channel. 
+type Subscription struct { + // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". + Kind string + // Channel name we have subscribed to. + Channel string + // Number of channels we are currently subscribed to. + Count int +} + +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} + +// Message received as result of a PUBLISH command issued by another client. +type Message struct { + Channel string + Payload string +} + +func (m *Message) String() string { + return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) +} + +// Message matching a pattern-matching subscription received as result +// of a PUBLISH command issued by another client. +type PMessage struct { + Channel string + Pattern string + Payload string +} + +func (m *PMessage) String() string { + return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload) +} + +// Pong received as result of a PING command issued by another client. +type Pong struct { + Payload string +} + +func (p *Pong) String() string { + if p.Payload != "" { + return fmt.Sprintf("Pong<%s>", p.Payload) + } + return "Pong" +} + +// Returns a message as a Subscription, Message, PMessage, Pong or +// error. See PubSub example for details. +func (c *PubSub) Receive() (interface{}, error) { + return c.ReceiveTimeout(0) +} + +func newMessage(reply []interface{}) (interface{}, error) { + switch kind := reply[0].(string); kind { + case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": + return &Subscription{ + Kind: kind, + Channel: reply[1].(string), + Count: int(reply[2].(int64)), + }, nil + case "message": + return &Message{ + Channel: reply[1].(string), + Payload: reply[2].(string), + }, nil + case "pmessage": + return &PMessage{ + Pattern: reply[1].(string), + Channel: reply[2].(string), + Payload: reply[3].(string), + }, nil + case "pong": + return &Pong{ + Payload: reply[1].(string), + }, nil + default: + return nil, fmt.Errorf("redis: unsupported pubsub notification: %q", kind) + } +} + +// ReceiveTimeout acts like Receive but returns an error if message +// is not received in time. +func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { + cn, err := c.conn() + if err != nil { + return nil, err + } + cn.ReadTimeout = timeout + + cmd := NewSliceCmd() + if err := cmd.parseReply(cn.rd); err != nil { + return nil, err + } + return newMessage(cmd.Val()) +} + +func (c *PubSub) subscribe(cmd string, channels ...string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := make([]interface{}, 1+len(channels)) + args[0] = cmd + for i, channel := range channels { + args[1+i] = channel + } + req := NewSliceCmd(args...) + return cn.writeCmds(req) +} + +// Subscribes the client to the specified channels. +func (c *PubSub) Subscribe(channels ...string) error { + return c.subscribe("SUBSCRIBE", channels...) +} + +// Subscribes the client to the given patterns. +func (c *PubSub) PSubscribe(patterns ...string) error { + return c.subscribe("PSUBSCRIBE", patterns...) +} + +// Unsubscribes the client from the given channels, or from all of +// them if none is given. +func (c *PubSub) Unsubscribe(channels ...string) error { + return c.subscribe("UNSUBSCRIBE", channels...) +} + +// Unsubscribes the client from the given patterns, or from all of +// them if none is given. +func (c *PubSub) PUnsubscribe(patterns ...string) error { + return c.subscribe("PUNSUBSCRIBE", patterns...) 
+} diff --git a/vendor/gopkg.in/redis.v3/redis.go b/vendor/gopkg.in/redis.v3/redis.go new file mode 100644 index 0000000..a6e12f5 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/redis.go @@ -0,0 +1,192 @@ +package redis + +import ( + "fmt" + "log" + "net" + "time" +) + +type baseClient struct { + connPool pool + opt *Options +} + +func (c *baseClient) String() string { + return fmt.Sprintf("Redis<%s db:%d>", c.opt.Addr, c.opt.DB) +} + +func (c *baseClient) conn() (*conn, error) { + return c.connPool.Get() +} + +func (c *baseClient) putConn(cn *conn, ei error) { + var err error + if cn.rd.Buffered() > 0 { + err = c.connPool.Remove(cn) + } else if ei == nil { + err = c.connPool.Put(cn) + } else if _, ok := ei.(redisError); ok { + err = c.connPool.Put(cn) + } else { + err = c.connPool.Remove(cn) + } + if err != nil { + log.Printf("redis: putConn failed: %s", err) + } +} + +func (c *baseClient) process(cmd Cmder) { + for i := 0; i <= c.opt.MaxRetries; i++ { + if i > 0 { + cmd.reset() + } + + cn, err := c.conn() + if err != nil { + cmd.setErr(err) + return + } + + if timeout := cmd.writeTimeout(); timeout != nil { + cn.WriteTimeout = *timeout + } else { + cn.WriteTimeout = c.opt.WriteTimeout + } + + if timeout := cmd.readTimeout(); timeout != nil { + cn.ReadTimeout = *timeout + } else { + cn.ReadTimeout = c.opt.ReadTimeout + } + + if err := cn.writeCmds(cmd); err != nil { + c.putConn(cn, err) + cmd.setErr(err) + if shouldRetry(err) { + continue + } + return + } + + err = cmd.parseReply(cn.rd) + c.putConn(cn, err) + if shouldRetry(err) { + continue + } + + return + } +} + +// Close closes the client, releasing any open resources. +func (c *baseClient) Close() error { + return c.connPool.Close() +} + +//------------------------------------------------------------------------------ + +type Options struct { + // The network type, either tcp or unix. + // Default is tcp. + Network string + // host:port address. + Addr string + + // Dialer creates new network connection and has priority over + // Network and Addr options. + Dialer func() (net.Conn, error) + + // An optional password. Must match the password specified in the + // requirepass server configuration option. + Password string + // A database to be selected after connecting to server. + DB int64 + + // The maximum number of retries before giving up. + // Default is to not retry failed commands. + MaxRetries int + + // Sets the deadline for establishing new connections. If reached, + // dial will fail with a timeout. + DialTimeout time.Duration + // Sets the deadline for socket reads. If reached, commands will + // fail with a timeout instead of blocking. + ReadTimeout time.Duration + // Sets the deadline for socket writes. If reached, commands will + // fail with a timeout instead of blocking. + WriteTimeout time.Duration + + // The maximum number of socket connections. + // Default is 10 connections. + PoolSize int + // Specifies amount of time client waits for connection if all + // connections are busy before returning an error. + // Default is 5 seconds. + PoolTimeout time.Duration + // Specifies amount of time after which client closes idle + // connections. Should be less than server's timeout. + // Default is to not close idle connections. 
+ IdleTimeout time.Duration +} + +func (opt *Options) getNetwork() string { + if opt.Network == "" { + return "tcp" + } + return opt.Network +} + +func (opt *Options) getDialer() func() (net.Conn, error) { + if opt.Dialer == nil { + opt.Dialer = func() (net.Conn, error) { + return net.DialTimeout(opt.getNetwork(), opt.Addr, opt.getDialTimeout()) + } + } + return opt.Dialer +} + +func (opt *Options) getPoolSize() int { + if opt.PoolSize == 0 { + return 10 + } + return opt.PoolSize +} + +func (opt *Options) getDialTimeout() time.Duration { + if opt.DialTimeout == 0 { + return 5 * time.Second + } + return opt.DialTimeout +} + +func (opt *Options) getPoolTimeout() time.Duration { + if opt.PoolTimeout == 0 { + return 1 * time.Second + } + return opt.PoolTimeout +} + +func (opt *Options) getIdleTimeout() time.Duration { + return opt.IdleTimeout +} + +//------------------------------------------------------------------------------ + +type Client struct { + *baseClient + commandable +} + +func newClient(opt *Options, pool pool) *Client { + base := &baseClient{opt: opt, connPool: pool} + return &Client{ + baseClient: base, + commandable: commandable{process: base.process}, + } +} + +func NewClient(opt *Options) *Client { + pool := newConnPool(opt) + return newClient(opt, pool) +} diff --git a/vendor/gopkg.in/redis.v3/ring.go b/vendor/gopkg.in/redis.v3/ring.go new file mode 100644 index 0000000..4b20e7a --- /dev/null +++ b/vendor/gopkg.in/redis.v3/ring.go @@ -0,0 +1,349 @@ +package redis + +import ( + "errors" + "fmt" + "log" + "sync" + "time" + + "gopkg.in/redis.v3/internal/consistenthash" +) + +var ( + errRingShardsDown = errors.New("redis: all ring shards are down") +) + +// RingOptions are used to configure a ring client and should be +// passed to NewRing. +type RingOptions struct { + // A map of name => host:port addresses of ring shards. + Addrs map[string]string + + // Following options are copied from Options struct. + + DB int64 + Password string + + MaxRetries int + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration +} + +func (opt *RingOptions) clientOptions() *Options { + return &Options{ + DB: opt.DB, + Password: opt.Password, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + } +} + +type ringShard struct { + Client *Client + down int +} + +func (shard *ringShard) String() string { + var state string + if shard.IsUp() { + state = "up" + } else { + state = "down" + } + return fmt.Sprintf("%s is %s", shard.Client, state) +} + +func (shard *ringShard) IsDown() bool { + const threshold = 5 + return shard.down >= threshold +} + +func (shard *ringShard) IsUp() bool { + return !shard.IsDown() +} + +// Vote votes to set shard state and returns true if state was changed. +func (shard *ringShard) Vote(up bool) bool { + if up { + changed := shard.IsDown() + shard.down = 0 + return changed + } + + if shard.IsDown() { + return false + } + + shard.down++ + return shard.IsDown() +} + +// Ring is a Redis client that uses constistent hashing to distribute +// keys across multiple Redis servers (shards). +// +// It monitors the state of each shard and removes dead shards from +// the ring. When shard comes online it is added back to the ring. 
This +// gives you maximum availability and partition tolerance, but no +// consistency between different shards or even clients. Each client +// uses shards that are available to the client and does not do any +// coordination when shard state is changed. +// +// Ring should be used when you use multiple Redis servers for caching +// and can tolerate losing data when one of the servers dies. +// Otherwise you should use Redis Cluster. +type Ring struct { + commandable + + opt *RingOptions + nreplicas int + + mx sync.RWMutex + hash *consistenthash.Map + shards map[string]*ringShard + + closed bool +} + +func NewRing(opt *RingOptions) *Ring { + const nreplicas = 100 + ring := &Ring{ + opt: opt, + nreplicas: nreplicas, + + hash: consistenthash.New(nreplicas, nil), + shards: make(map[string]*ringShard), + } + ring.commandable.process = ring.process + for name, addr := range opt.Addrs { + clopt := opt.clientOptions() + clopt.Addr = addr + ring.addClient(name, NewClient(clopt)) + } + go ring.heartbeat() + return ring +} + +func (ring *Ring) addClient(name string, cl *Client) { + ring.mx.Lock() + ring.hash.Add(name) + ring.shards[name] = &ringShard{Client: cl} + ring.mx.Unlock() +} + +func (ring *Ring) getClient(key string) (*Client, error) { + ring.mx.RLock() + + if ring.closed { + return nil, errClosed + } + + name := ring.hash.Get(hashKey(key)) + if name == "" { + ring.mx.RUnlock() + return nil, errRingShardsDown + } + + cl := ring.shards[name].Client + ring.mx.RUnlock() + return cl, nil +} + +func (ring *Ring) process(cmd Cmder) { + cl, err := ring.getClient(cmd.clusterKey()) + if err != nil { + cmd.setErr(err) + return + } + cl.baseClient.process(cmd) +} + +// rebalance removes dead shards from the ring. +func (ring *Ring) rebalance() { + defer ring.mx.Unlock() + ring.mx.Lock() + + ring.hash = consistenthash.New(ring.nreplicas, nil) + for name, shard := range ring.shards { + if shard.IsUp() { + ring.hash.Add(name) + } + } +} + +// heartbeat monitors state of each shard in the ring. +func (ring *Ring) heartbeat() { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for _ = range ticker.C { + var rebalance bool + + ring.mx.RLock() + + if ring.closed { + ring.mx.RUnlock() + break + } + + for _, shard := range ring.shards { + err := shard.Client.Ping().Err() + if shard.Vote(err == nil || err == errPoolTimeout) { + log.Printf("redis: ring shard state changed: %s", shard) + rebalance = true + } + } + + ring.mx.RUnlock() + + if rebalance { + ring.rebalance() + } + } +} + +// Close closes the ring client, releasing any open resources. +// +// It is rare to Close a Client, as the Client is meant to be +// long-lived and shared between many goroutines. +func (ring *Ring) Close() (retErr error) { + defer ring.mx.Unlock() + ring.mx.Lock() + + if ring.closed { + return nil + } + ring.closed = true + + for _, shard := range ring.shards { + if err := shard.Client.Close(); err != nil { + retErr = err + } + } + ring.hash = nil + ring.shards = nil + + return retErr +} + +// RingPipeline creates a new pipeline which is able to execute commands +// against multiple shards. 
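+// Commands are grouped per shard with the same hash ring as Ring and are
+// retried up to MaxRetries times when a network error occurs.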
+type RingPipeline struct { + commandable + + ring *Ring + + cmds []Cmder + closed bool +} + +func (ring *Ring) Pipeline() *RingPipeline { + pipe := &RingPipeline{ + ring: ring, + cmds: make([]Cmder, 0, 10), + } + pipe.commandable.process = pipe.process + return pipe +} + +func (ring *Ring) Pipelined(fn func(*RingPipeline) error) ([]Cmder, error) { + pipe := ring.Pipeline() + if err := fn(pipe); err != nil { + return nil, err + } + cmds, err := pipe.Exec() + pipe.Close() + return cmds, err +} + +func (pipe *RingPipeline) process(cmd Cmder) { + pipe.cmds = append(pipe.cmds, cmd) +} + +// Discard resets the pipeline and discards queued commands. +func (pipe *RingPipeline) Discard() error { + if pipe.closed { + return errClosed + } + pipe.cmds = pipe.cmds[:0] + return nil +} + +// Exec always returns list of commands and error of the first failed +// command if any. +func (pipe *RingPipeline) Exec() (cmds []Cmder, retErr error) { + if pipe.closed { + return nil, errClosed + } + if len(pipe.cmds) == 0 { + return pipe.cmds, nil + } + + cmds = pipe.cmds + pipe.cmds = make([]Cmder, 0, 10) + + cmdsMap := make(map[string][]Cmder) + for _, cmd := range cmds { + name := pipe.ring.hash.Get(hashKey(cmd.clusterKey())) + if name == "" { + cmd.setErr(errRingShardsDown) + if retErr == nil { + retErr = errRingShardsDown + } + continue + } + cmdsMap[name] = append(cmdsMap[name], cmd) + } + + for i := 0; i <= pipe.ring.opt.MaxRetries; i++ { + failedCmdsMap := make(map[string][]Cmder) + + for name, cmds := range cmdsMap { + client := pipe.ring.shards[name].Client + cn, err := client.conn() + if err != nil { + setCmdsErr(cmds, err) + if retErr == nil { + retErr = err + } + continue + } + + if i > 0 { + resetCmds(cmds) + } + failedCmds, err := execCmds(cn, cmds) + client.putConn(cn, err) + if err != nil && retErr == nil { + retErr = err + } + if len(failedCmds) > 0 { + failedCmdsMap[name] = failedCmds + } + } + + if len(failedCmdsMap) == 0 { + break + } + cmdsMap = failedCmdsMap + } + + return cmds, retErr +} + +func (pipe *RingPipeline) Close() error { + pipe.Discard() + pipe.closed = true + return nil +} diff --git a/vendor/gopkg.in/redis.v3/safe.go b/vendor/gopkg.in/redis.v3/safe.go new file mode 100644 index 0000000..d66dc56 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/safe.go @@ -0,0 +1,7 @@ +// +build appengine + +package redis + +func bytesToString(b []byte) string { + return string(b) +} diff --git a/vendor/gopkg.in/redis.v3/script.go b/vendor/gopkg.in/redis.v3/script.go new file mode 100644 index 0000000..3f22f46 --- /dev/null +++ b/vendor/gopkg.in/redis.v3/script.go @@ -0,0 +1,52 @@ +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +type scripter interface { + Eval(script string, keys []string, args []string) *Cmd + EvalSha(sha1 string, keys []string, args []string) *Cmd + ScriptExists(scripts ...string) *BoolSliceCmd + ScriptLoad(script string) *StringCmd +} + +type Script struct { + src, hash string +} + +func NewScript(src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{ + src: src, + hash: hex.EncodeToString(h.Sum(nil)), + } +} + +func (s *Script) Load(c scripter) *StringCmd { + return c.ScriptLoad(s.src) +} + +func (s *Script) Exists(c scripter) *BoolSliceCmd { + return c.ScriptExists(s.src) +} + +func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd { + return c.Eval(s.src, keys, args) +} + +func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd { + return c.EvalSha(s.hash, keys, args) +} + +func (s 
*Script) Run(c scripter, keys []string, args []string) *Cmd { + r := s.EvalSha(c, keys, args) + if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { + return s.Eval(c, keys, args) + } + return r +} diff --git a/vendor/gopkg.in/redis.v3/sentinel.go b/vendor/gopkg.in/redis.v3/sentinel.go new file mode 100644 index 0000000..255416e --- /dev/null +++ b/vendor/gopkg.in/redis.v3/sentinel.go @@ -0,0 +1,304 @@ +package redis + +import ( + "errors" + "log" + "net" + "strings" + "sync" + "time" +) + +//------------------------------------------------------------------------------ + +// FailoverOptions are used to configure a failover client and should +// be passed to NewFailoverClient. +type FailoverOptions struct { + // The master name. + MasterName string + // A seed list of host:port addresses of sentinel nodes. + SentinelAddrs []string + + // Following options are copied from Options struct. + + Password string + DB int64 + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration + + MaxRetries int +} + +func (opt *FailoverOptions) options() *Options { + return &Options{ + Addr: "FailoverClient", + + DB: opt.DB, + Password: opt.Password, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + + MaxRetries: opt.MaxRetries, + } +} + +// NewFailoverClient returns a Redis client with automatic failover +// capabilities using Redis Sentinel. +func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + opt := failoverOpt.options() + failover := &sentinelFailover{ + masterName: failoverOpt.MasterName, + sentinelAddrs: failoverOpt.SentinelAddrs, + + opt: opt, + } + return newClient(opt, failover.Pool()) +} + +//------------------------------------------------------------------------------ + +type sentinelClient struct { + commandable + *baseClient +} + +func newSentinel(opt *Options) *sentinelClient { + base := &baseClient{ + opt: opt, + connPool: newConnPool(opt), + } + return &sentinelClient{ + baseClient: base, + commandable: commandable{process: base.process}, + } +} + +func (c *sentinelClient) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { + cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name) + c.Process(cmd) + return cmd +} + +func (c *sentinelClient) Sentinels(name string) *SliceCmd { + cmd := NewSliceCmd("SENTINEL", "sentinels", name) + c.Process(cmd) + return cmd +} + +type sentinelFailover struct { + masterName string + sentinelAddrs []string + + opt *Options + + pool pool + poolOnce sync.Once + + lock sync.RWMutex + _sentinel *sentinelClient +} + +func (d *sentinelFailover) dial() (net.Conn, error) { + addr, err := d.MasterAddr() + if err != nil { + return nil, err + } + return net.DialTimeout("tcp", addr, d.opt.DialTimeout) +} + +func (d *sentinelFailover) Pool() pool { + d.poolOnce.Do(func() { + d.opt.Dialer = d.dial + d.pool = newConnPool(d.opt) + }) + return d.pool +} + +func (d *sentinelFailover) MasterAddr() (string, error) { + defer d.lock.Unlock() + d.lock.Lock() + + // Try last working sentinel. 
+ if d._sentinel != nil { + addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + d.resetSentinel() + } else { + addr := net.JoinHostPort(addr[0], addr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + } + + for i, sentinelAddr := range d.sentinelAddrs { + sentinel := newSentinel(&Options{ + Addr: sentinelAddr, + + DialTimeout: d.opt.DialTimeout, + ReadTimeout: d.opt.ReadTimeout, + WriteTimeout: d.opt.WriteTimeout, + + PoolSize: d.opt.PoolSize, + PoolTimeout: d.opt.PoolTimeout, + IdleTimeout: d.opt.IdleTimeout, + }) + masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + sentinel.Close() + continue + } + + // Push working sentinel to the top. + d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0] + + d.setSentinel(sentinel) + addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + + return "", errors.New("redis: all sentinels are unreachable") +} + +func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) { + d.discoverSentinels(sentinel) + d._sentinel = sentinel + go d.listen() +} + +func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) { + sentinels, err := sentinel.Sentinels(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err) + return + } + for _, sentinel := range sentinels { + vals := sentinel.([]interface{}) + for i := 0; i < len(vals); i += 2 { + key := vals[i].(string) + if key == "name" { + sentinelAddr := vals[i+1].(string) + if !contains(d.sentinelAddrs, sentinelAddr) { + log.Printf( + "redis-sentinel: discovered new %q sentinel: %s", + d.masterName, sentinelAddr, + ) + d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr) + } + } + } + } +} + +// closeOldConns closes connections to the old master after failover switch. +func (d *sentinelFailover) closeOldConns(newMaster string) { + // Good connections that should be put back to the pool. They + // can't be put immediately, because pool.First will return them + // again on next iteration. 
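+	// Connections that already point at the new master are collected here and
+	// put back only after the pool has been drained.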
+ cnsToPut := make([]*conn, 0) + + for { + cn := d.pool.First() + if cn == nil { + break + } + if cn.RemoteAddr().String() != newMaster { + log.Printf( + "redis-sentinel: closing connection to the old master %s", + cn.RemoteAddr(), + ) + d.pool.Remove(cn) + } else { + cnsToPut = append(cnsToPut, cn) + } + } + + for _, cn := range cnsToPut { + d.pool.Put(cn) + } +} + +func (d *sentinelFailover) listen() { + var pubsub *PubSub + for { + if pubsub == nil { + pubsub = d._sentinel.PubSub() + if err := pubsub.Subscribe("+switch-master"); err != nil { + log.Printf("redis-sentinel: Subscribe failed: %s", err) + d.lock.Lock() + d.resetSentinel() + d.lock.Unlock() + return + } + } + + msgIface, err := pubsub.Receive() + if err != nil { + log.Printf("redis-sentinel: Receive failed: %s", err) + pubsub.Close() + return + } + + switch msg := msgIface.(type) { + case *Message: + switch msg.Channel { + case "+switch-master": + parts := strings.Split(msg.Payload, " ") + if parts[0] != d.masterName { + log.Printf("redis-sentinel: ignore new %s addr", parts[0]) + continue + } + addr := net.JoinHostPort(parts[3], parts[4]) + log.Printf( + "redis-sentinel: new %q addr is %s", + d.masterName, addr, + ) + + d.closeOldConns(addr) + default: + log.Printf("redis-sentinel: unsupported message: %s", msg) + } + case *Subscription: + // Ignore. + default: + log.Printf("redis-sentinel: unsupported message: %s", msgIface) + } + } +} + +func (d *sentinelFailover) resetSentinel() { + d._sentinel.Close() + d._sentinel = nil +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/redis.v3/unsafe.go b/vendor/gopkg.in/redis.v3/unsafe.go new file mode 100644 index 0000000..3cd8d1c --- /dev/null +++ b/vendor/gopkg.in/redis.v3/unsafe.go @@ -0,0 +1,14 @@ +// +build !appengine + +package redis + +import ( + "reflect" + "unsafe" +) + +func bytesToString(b []byte) string { + bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + strHeader := reflect.StringHeader{bytesHeader.Data, bytesHeader.Len} + return *(*string)(unsafe.Pointer(&strHeader)) +}
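The hunks above only vendor the library. For orientation, here is a minimal usage sketch of the gopkg.in/redis.v3 API introduced in this diff (NewClient, Options, Pipelined). The address is a placeholder, and concrete commands such as Ping are provided elsewhere in the package rather than in these hunks; Ring and Sentinel clients are constructed the same way via NewRing(&redis.RingOptions{...}) and NewFailoverClient(&redis.FailoverOptions{...}).

```go
package main

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/redis.v3"
)

func main() {
	// Connect to a single Redis server. Addr/Password/DB mirror the Options
	// struct in redis.go; unset pool and timeout fields fall back to the
	// defaults documented there (10 connections, 1s pool timeout, no idle reaping).
	client := redis.NewClient(&redis.Options{
		Addr:        "localhost:6379", // assumption: local test server
		Password:    "",               // no AUTH
		DB:          0,
		ReadTimeout: 3 * time.Second,
	})
	defer client.Close()

	// Each command goes through baseClient.process: a pooled connection is
	// fetched, the command is written, the reply is parsed, and the
	// connection is returned to the pool.
	if err := client.Ping().Err(); err != nil {
		log.Fatalf("redis ping failed: %v", err)
	}

	// Pipelined batches queued commands on one connection and returns the
	// executed Cmders together with the first failed command's error, if any.
	cmds, err := client.Pipelined(func(pipe *redis.Pipeline) error {
		pipe.Ping()
		pipe.Ping()
		return nil
	})
	fmt.Println(len(cmds), err)
}
```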