Merge pull request #86 from micro/pool

Connection Pooling

Asim Aslam committed Jun 7, 2016
2 parents 69aa853 + ff0bd76 commit d31cd76
Showing 8 changed files with 277 additions and 43 deletions.
6 changes: 4 additions & 2 deletions client/client.go
@@ -66,14 +66,16 @@ type RequestOption func(*RequestOptions)
var (
// DefaultClient is a default client to use out of the box
DefaultClient Client = newRpcClient()

// DefaultBackoff is the default backoff function for retries
DefaultBackoff = exponentialBackoff

// DefaultRetries is the default number of times a request is tried
DefaultRetries = 1
// DefaultRequestTimeout is the default request timeout
DefaultRequestTimeout = time.Second * 5
// DefaultPoolSize sets the connection pool size
DefaultPoolSize = 0
// DefaultPoolTTL sets the connection pool ttl
DefaultPoolTTL = time.Minute
)

// Makes a synchronous call to a service using the default client
2 changes: 1 addition & 1 deletion client/mock/mock.go
@@ -123,7 +123,7 @@ func (m *MockClient) CallRemote(ctx context.Context, addr string, req client.Req
}

v.Set(reflect.ValueOf(r.Response))

return nil
}

20 changes: 20 additions & 0 deletions client/options.go
@@ -23,6 +23,10 @@ type Options struct {
Selector selector.Selector
Transport transport.Transport

// Connection Pool
PoolSize int
PoolTTL time.Duration

// Middleware for client
Wrappers []Wrapper

@@ -74,6 +78,8 @@ func newOptions(options ...Option) Options {
RequestTimeout: DefaultRequestTimeout,
DialTimeout: transport.DefaultDialTimeout,
},
PoolSize: DefaultPoolSize,
PoolTTL: DefaultPoolTTL,
}

for _, o := range options {
@@ -126,6 +132,20 @@ func ContentType(ct string) Option {
}
}

// PoolSize sets the connection pool size
func PoolSize(d int) Option {
return func(o *Options) {
o.PoolSize = d
}
}

// PoolTTL sets the connection pool ttl
func PoolTTL(d time.Duration) Option {
return func(o *Options) {
o.PoolTTL = d
}
}

// Registry to find nodes for a given service
func Registry(r registry.Registry) Option {
return func(o *Options) {
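As a usage sketch (not part of this commit), the new pool options can be passed when constructing a client. This assumes the client package's exported NewClient constructor, which wraps newRpcClient; the sizes and durations are illustrative:

package main

import (
	"time"

	"github.com/micro/go-micro/client"
)

func main() {
	// Hypothetical configuration: keep up to 10 idle connections per
	// address and discard any idle connection older than 30 seconds.
	c := client.NewClient(
		client.PoolSize(10),
		client.PoolTTL(30*time.Second),
	)
	_ = c
}
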
24 changes: 20 additions & 4 deletions client/rpc_client.go
@@ -19,16 +19,16 @@ import (
type rpcClient struct {
once sync.Once
opts Options
pool *pool
}

func newRpcClient(opt ...Option) Client {
var once sync.Once

opts := newOptions(opt...)

rc := &rpcClient{
once: once,
once: sync.Once{},
opts: opts,
pool: newPool(opts.PoolSize, opts.PoolTTL),
}

c := Client(rc)
@@ -73,10 +73,15 @@ func (r *rpcClient) call(ctx context.Context, address string, req Request, resp
return errors.InternalServerError("go.micro.client", err.Error())
}

c, err := r.opts.Transport.Dial(address, transport.WithTimeout(opts.DialTimeout))
var grr error
c, err := r.pool.getConn(address, r.opts.Transport, transport.WithTimeout(opts.DialTimeout))
if err != nil {
return errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
defer func() {
// defer execution of release
r.pool.release(address, c, grr)
}()

stream := &rpcStream{
context: ctx,
@@ -107,8 +112,10 @@

select {
case err := <-ch:
grr = err
return err
case <-ctx.Done():
grr = ctx.Err()
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
}
}
@@ -171,9 +178,18 @@ func (r *rpcClient) stream(ctx context.Context, address string, req Request, opt
}

func (r *rpcClient) Init(opts ...Option) error {
size := r.opts.PoolSize
ttl := r.opts.PoolTTL

for _, o := range opts {
o(&r.opts)
}

// recreate the pool if the options changed
if size != r.opts.PoolSize || ttl != r.opts.PoolTTL {
r.pool = newPool(r.opts.PoolSize, r.opts.PoolTTL)
}

return nil
}

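One design detail in the call path above: release is deferred inside a closure so that it reads the final value of grr when call returns, rather than the nil value grr holds when the defer statement executes. A minimal standalone Go sketch of that pattern (illustrative only, not code from this commit):

package main

import (
	"errors"
	"fmt"
)

// release stands in for pool.release: a nil error means the
// connection may be reused, a non-nil error means it is discarded.
func release(err error) {
	if err != nil {
		fmt.Println("discarding connection:", err)
		return
	}
	fmt.Println("returning connection to the pool")
}

func call() error {
	var grr error

	// The closure evaluates grr at return time, so release sees the
	// real outcome. A plain "defer release(grr)" would capture the
	// current (nil) value of grr instead.
	defer func() { release(grr) }()

	grr = errors.New("request failed")
	return grr
}

func main() {
	_ = call()
}
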
87 changes: 87 additions & 0 deletions client/rpc_pool.go
@@ -0,0 +1,87 @@
package client

import (
"sync"
"time"

"github.com/micro/go-micro/transport"
)

type pool struct {
size int
ttl int64

sync.Mutex
conns map[string][]*poolConn
}

type poolConn struct {
transport.Client
created int64
}

func newPool(size int, ttl time.Duration) *pool {
return &pool{
size: size,
ttl: int64(ttl.Seconds()),
conns: make(map[string][]*poolConn),
}
}

// NoOp the Close since we manage it
func (p *poolConn) Close() error {
return nil
}

func (p *pool) getConn(addr string, tr transport.Transport, opts ...transport.DialOption) (*poolConn, error) {
p.Lock()
conns := p.conns[addr]
now := time.Now().Unix()

// while we have conns check age and then return one
// otherwise we'll create a new conn
for len(conns) > 0 {
conn := conns[len(conns)-1]
conns = conns[:len(conns)-1]
p.conns[addr] = conns

// if conn is old kill it and move on
if d := now - conn.created; d > p.ttl {
conn.Client.Close()
continue
}

// we got a good conn, lets unlock and return it
p.Unlock()

return conn, nil
}

p.Unlock()

// create new conn
c, err := tr.Dial(addr, opts...)
if err != nil {
return nil, err
}
return &poolConn{c, time.Now().Unix()}, nil
}

func (p *pool) release(addr string, conn *poolConn, err error) {
// don't store the conn if it has errored
if err != nil {
conn.Client.Close()
return
}

// otherwise put it back for reuse
p.Lock()
conns := p.conns[addr]
if len(conns) >= p.size {
p.Unlock()
conn.Client.Close()
return
}
p.conns[addr] = append(conns, conn)
p.Unlock()
}
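
A behavioral note on the pool above (an observation from the code, not stated in the commit message): with the default PoolSize of 0, the len(conns) >= p.size check in release is always true, so every released connection is closed immediately and reuse only takes effect once a positive pool size is configured, for example via the PoolSize option or the client_pool_size flag added below.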
84 changes: 84 additions & 0 deletions client/rpc_pool_test.go
@@ -0,0 +1,84 @@
package client

import (
"testing"
"time"

"github.com/micro/go-micro/transport"
"github.com/micro/go-micro/transport/mock"
)

func testPool(t *testing.T, size int, ttl time.Duration) {
// create a pool with the given size and ttl
p := newPool(size, ttl)

// mock transport
tr := mock.NewTransport()

// listen
l, err := tr.Listen(":0")
if err != nil {
t.Fatal(err)
}
defer l.Close()

// accept loop
go func() {
for {
if err := l.Accept(func(s transport.Socket) {
for {
var msg transport.Message
if err := s.Recv(&msg); err != nil {
return
}
if err := s.Send(&msg); err != nil {
return
}
}
}); err != nil {
return
}
}
}()

for i := 0; i < 10; i++ {
// get a conn
c, err := p.getConn(l.Addr(), tr)
if err != nil {
t.Fatal(err)
}

msg := &transport.Message{
Body: []byte(`hello world`),
}

if err := c.Send(msg); err != nil {
t.Fatal(err)
}

var rcv transport.Message

if err := c.Recv(&rcv); err != nil {
t.Fatal(err)
}

if string(rcv.Body) != string(msg.Body) {
t.Fatalf("got %v, expected %v", rcv.Body, msg.Body)
}

// release the conn
p.release(l.Addr(), c, nil)

p.Lock()
if i := len(p.conns[l.Addr()]); i > size {
p.Unlock()
t.Fatal("pool size %d is greater than expected %d", i, size)
}
p.Unlock()
}
}

func TestRPCPool(t *testing.T) {
testPool(t, 0, time.Minute)
testPool(t, 2, time.Minute)
}
22 changes: 22 additions & 0 deletions cmd/cmd.go
@@ -62,6 +62,16 @@ var (
EnvVar: "MICRO_CLIENT_RETRIES",
Usage: "Sets the client retries. Default: 1",
},
cli.IntFlag{
Name: "client_pool_size",
EnvVar: "MICRO_CLIENT_POOL_SIZE",
Usage: "Sets the client connection pool size. Default: 0",
},
cli.StringFlag{
Name: "client_pool_ttl",
EnvVar: "MICRO_CLIENT_POOL_TTL",
Usage: "Sets the client connection pool ttl. e.g 500ms, 5s, 1m. Default: 1m",
},
cli.StringFlag{
Name: "server_name",
EnvVar: "MICRO_SERVER_NAME",
@@ -337,6 +347,18 @@ func (c *cmd) Before(ctx *cli.Context) error {
clientOpts = append(clientOpts, client.RequestTimeout(d))
}

if r := ctx.Int("client_pool_size"); r > 0 {
clientOpts = append(clientOpts, client.PoolSize(r))
}

if t := ctx.String("client_pool_ttl"); len(t) > 0 {
d, err := time.ParseDuration(t)
if err != nil {
return fmt.Errorf("failed to parse client_pool_ttl: %v", t)
}
clientOpts = append(clientOpts, client.PoolTTL(d))
}

// We have some command line opts for the server.
// Lets set it up
if len(serverOpts) > 0 {
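For reference, the new flags above can be supplied on the command line as --client_pool_size=10 --client_pool_ttl=30s, or through the environment as MICRO_CLIENT_POOL_SIZE=10 and MICRO_CLIENT_POOL_TTL=30s; the values here are illustrative examples only.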
