Merge pull request #86 from micro/pool

Connection Pooling
Asim Aslam 2016-06-07 01:35:25 +01:00
commit d31cd76b8c
8 changed files with 283 additions and 49 deletions

View File

@ -66,14 +66,16 @@ type RequestOption func(*RequestOptions)
var (
// DefaultClient is a default client to use out of the box
DefaultClient Client = newRpcClient()
// DefaultBackoff is the default backoff function for retries
DefaultBackoff = exponentialBackoff
// DefaultRetries is the default number of times a request is tried
DefaultRetries = 1
// DefaultRequestTimeout is the default request timeout
DefaultRequestTimeout = time.Second * 5
// DefaultPoolSize sets the connection pool size
DefaultPoolSize = 0
// DefaultPoolTTL sets the connection pool ttl
DefaultPoolTTL = time.Minute
)
// Makes a synchronous call to a service using the default client
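For context, a minimal sketch (package client, function name hypothetical) of how these defaults behave: they are read when a client is constructed, and DefaultClient is built at package init, so changing them later only affects clients created afterwards. With the default PoolSize of 0 the pool retains no idle connections, so pooling is effectively opt-in.

package client

import "time"

// poolDefaultsSketch is illustrative only.
func poolDefaultsSketch() {
	// DefaultClient was already constructed at package init with PoolSize 0,
	// so it is unaffected by the assignments below.
	DefaultPoolSize = 5
	DefaultPoolTTL = 30 * time.Second
	// clients constructed from here on keep up to 5 idle connections per
	// address and recycle idle connections older than 30 seconds
	c := newRpcClient()
	_ = c
}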

View File

@ -123,7 +123,7 @@ func (m *MockClient) CallRemote(ctx context.Context, addr string, req client.Req
}
v.Set(reflect.ValueOf(r.Response))
return nil
}

View File

@ -23,6 +23,10 @@ type Options struct {
Selector selector.Selector
Transport transport.Transport
// Connection Pool
PoolSize int
PoolTTL time.Duration
// Middleware for client
Wrappers []Wrapper
@ -74,6 +78,8 @@ func newOptions(options ...Option) Options {
RequestTimeout: DefaultRequestTimeout,
DialTimeout: transport.DefaultDialTimeout,
},
PoolSize: DefaultPoolSize,
PoolTTL: DefaultPoolTTL,
}
for _, o := range options {
@ -126,6 +132,20 @@ func ContentType(ct string) Option {
}
}
// PoolSize sets the connection pool size
func PoolSize(d int) Option {
return func(o *Options) {
o.PoolSize = d
}
}
// PoolTTL sets the connection pool ttl
func PoolTTL(d time.Duration) Option {
return func(o *Options) {
o.PoolTTL = d
}
}
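Combined with the Init change further down in this diff (which rebuilds the pool when these values change), a minimal usage sketch from an importing package, with illustrative values:

package main

import (
	"time"

	"github.com/micro/go-micro/client"
)

func main() {
	// enable pooling on the default client: keep up to 5 idle connections
	// per address and recycle any idle connection older than 30 seconds
	if err := client.DefaultClient.Init(
		client.PoolSize(5),
		client.PoolTTL(30*time.Second),
	); err != nil {
		panic(err)
	}
}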
// Registry to find nodes for a given service
func Registry(r registry.Registry) Option {
return func(o *Options) {

View File

@ -19,16 +19,16 @@ import (
type rpcClient struct {
once sync.Once
opts Options
pool *pool
}
func newRpcClient(opt ...Option) Client {
var once sync.Once
opts := newOptions(opt...)
rc := &rpcClient{
once: once,
once: sync.Once{},
opts: opts,
pool: newPool(opts.PoolSize, opts.PoolTTL),
}
c := Client(rc)
@ -73,10 +73,15 @@ func (r *rpcClient) call(ctx context.Context, address string, req Request, resp
return errors.InternalServerError("go.micro.client", err.Error())
}
c, err := r.opts.Transport.Dial(address, transport.WithTimeout(opts.DialTimeout))
var grr error
c, err := r.pool.getConn(address, r.opts.Transport, transport.WithTimeout(opts.DialTimeout))
if err != nil {
return errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
defer func() {
// defer execution of release
r.pool.release(address, c, grr)
}()
stream := &rpcStream{
context: ctx,
@ -107,8 +112,10 @@ func (r *rpcClient) call(ctx context.Context, address string, req Request, resp
select {
case err := <-ch:
grr = err
return err
case <-ctx.Done():
grr = ctx.Err()
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
}
}
@ -171,9 +178,18 @@ func (r *rpcClient) stream(ctx context.Context, address string, req Request, opt
}
func (r *rpcClient) Init(opts ...Option) error {
size := r.opts.PoolSize
ttl := r.opts.PoolTTL
for _, o := range opts {
o(&r.opts)
}
// recreate the pool if the options changed
if size != r.opts.PoolSize || ttl != r.opts.PoolTTL {
r.pool = newPool(r.opts.PoolSize, r.opts.PoolTTL)
}
return nil
}
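For reference, a minimal sketch (package client, names hypothetical) of the acquire/release pattern call() now follows: the named error return plays the role of grr, so a failed Send or Recv makes release close the connection instead of returning it to the pool.

package client

import "github.com/micro/go-micro/transport"

// pooledExchange is illustrative only.
func pooledExchange(p *pool, tr transport.Transport, addr string, req *transport.Message) (rsp transport.Message, err error) {
	c, cerr := p.getConn(addr, tr)
	if cerr != nil {
		return rsp, cerr
	}
	// release sees the final value of err: nil puts the conn back in the
	// pool, non-nil closes the underlying transport client
	defer func() {
		p.release(addr, c, err)
	}()
	if err = c.Send(req); err != nil {
		return rsp, err
	}
	err = c.Recv(&rsp)
	return rsp, err
}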

87
client/rpc_pool.go Normal file
View File

@ -0,0 +1,87 @@
package client
import (
"sync"
"time"
"github.com/micro/go-micro/transport"
)
type pool struct {
size int
ttl int64
sync.Mutex
conns map[string][]*poolConn
}
type poolConn struct {
transport.Client
created int64
}
func newPool(size int, ttl time.Duration) *pool {
return &pool{
size: size,
ttl: int64(ttl.Seconds()),
conns: make(map[string][]*poolConn),
}
}
// Close is a no-op since the pool manages the underlying transport connection
func (p *poolConn) Close() error {
return nil
}
func (p *pool) getConn(addr string, tr transport.Transport, opts ...transport.DialOption) (*poolConn, error) {
p.Lock()
conns := p.conns[addr]
now := time.Now().Unix()
// while we have conns check age and then return one
// otherwise we'll create a new conn
for len(conns) > 0 {
conn := conns[len(conns)-1]
conns = conns[:len(conns)-1]
p.conns[addr] = conns
// if conn is old kill it and move on
if d := now - conn.created; d > p.ttl {
conn.Client.Close()
continue
}
// we got a good conn, lets unlock and return it
p.Unlock()
return conn, nil
}
p.Unlock()
// create new conn
c, err := tr.Dial(addr, opts...)
if err != nil {
return nil, err
}
return &poolConn{c, time.Now().Unix()}, nil
}
func (p *pool) release(addr string, conn *poolConn, err error) {
// don't store the conn if it has errored
if err != nil {
conn.Client.Close()
return
}
// otherwise put it back for reuse
p.Lock()
conns := p.conns[addr]
if len(conns) >= p.size {
p.Unlock()
conn.Client.Close()
return
}
p.conns[addr] = append(conns, conn)
p.Unlock()
}
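A hedged sketch of the pool's semantics (package client, using the mock transport exactly as the test below does): size caps idle connections per address rather than in-flight ones, so with the default size of 0 every release closes the connection; and poolConn.Close is a no-op, so only the pool itself ever closes the underlying transport client.

package client

import (
	"time"

	"github.com/micro/go-micro/transport"
	"github.com/micro/go-micro/transport/mock"
)

// poolSemanticsSketch is illustrative only.
func poolSemanticsSketch() error {
	tr := mock.NewTransport()
	l, err := tr.Listen(":0")
	if err != nil {
		return err
	}
	defer l.Close()
	// accept loop, as in the test below; the handler is a no-op here
	go func() {
		for {
			if err := l.Accept(func(s transport.Socket) {}); err != nil {
				return
			}
		}
	}()
	// a pool of size 0: connections are dialled on demand but never retained
	p := newPool(0, time.Minute)
	c, err := p.getConn(l.Addr(), tr)
	if err != nil {
		return err
	}
	_ = c.Close()               // no-op: the transport connection stays open
	p.release(l.Addr(), c, nil) // size is 0, so release closes the conn instead of caching it
	return nil
}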

84
client/rpc_pool_test.go Normal file
View File

@ -0,0 +1,84 @@
package client
import (
"testing"
"time"
"github.com/micro/go-micro/transport"
"github.com/micro/go-micro/transport/mock"
)
func testPool(t *testing.T, size int, ttl time.Duration) {
// zero pool
p := newPool(size, ttl)
// mock transport
tr := mock.NewTransport()
// listen
l, err := tr.Listen(":0")
if err != nil {
t.Fatal(err)
}
defer l.Close()
// accept loop
go func() {
for {
if err := l.Accept(func(s transport.Socket) {
for {
var msg transport.Message
if err := s.Recv(&msg); err != nil {
return
}
if err := s.Send(&msg); err != nil {
return
}
}
}); err != nil {
return
}
}
}()
for i := 0; i < 10; i++ {
// get a conn
c, err := p.getConn(l.Addr(), tr)
if err != nil {
t.Fatal(err)
}
msg := &transport.Message{
Body: []byte(`hello world`),
}
if err := c.Send(msg); err != nil {
t.Fatal(err)
}
var rcv transport.Message
if err := c.Recv(&rcv); err != nil {
t.Fatal(err)
}
if string(rcv.Body) != string(msg.Body) {
t.Fatalf("got %v, expected %v", rcv.Body, msg.Body)
}
// release the conn
p.release(l.Addr(), c, nil)
p.Lock()
if i := len(p.conns[l.Addr()]); i > size {
p.Unlock()
t.Fatalf("pool size %d is greater than expected %d", i, size)
}
p.Unlock()
}
}
func TestRPCPool(t *testing.T) {
testPool(t, 0, time.Minute)
testPool(t, 2, time.Minute)
}

View File

@ -62,6 +62,16 @@ var (
EnvVar: "MICRO_CLIENT_RETRIES",
Usage: "Sets the client retries. Default: 1",
},
cli.IntFlag{
Name: "client_pool_size",
EnvVar: "MICRO_CLIENT_POOL_SIZE",
Usage: "Sets the client connection pool size. Default: 0",
},
cli.StringFlag{
Name: "client_pool_ttl",
EnvVar: "MICRO_CLIENT_POOL_TTL",
Usage: "Sets the client connection pool ttl. e.g 500ms, 5s, 1m. Default: 1m",
},
cli.StringFlag{
Name: "server_name",
EnvVar: "MICRO_SERVER_NAME",
@ -337,6 +347,18 @@ func (c *cmd) Before(ctx *cli.Context) error {
clientOpts = append(clientOpts, client.RequestTimeout(d))
}
if r := ctx.Int("client_pool_size"); r > 0 {
clientOpts = append(clientOpts, client.PoolSize(r))
}
if t := ctx.String("client_pool_ttl"); len(t) > 0 {
d, err := time.ParseDuration(t)
if err != nil {
return fmt.Errorf("failed to parse client_pool_ttl: %v", t)
}
clientOpts = append(clientOpts, client.PoolTTL(d))
}
// We have some command line opts for the server.
// Lets set it up
if len(serverOpts) > 0 {

View File

@ -55,50 +55,53 @@ func (s *rpcServer) accept(sock transport.Socket) {
}
}()
var msg transport.Message
if err := sock.Recv(&msg); err != nil {
return
}
// we use this Timeout header to set a server deadline
to := msg.Header["Timeout"]
// we use this Content-Type header to identify the codec needed
ct := msg.Header["Content-Type"]
cf, err := s.newCodec(ct)
// TODO: needs better error handling
if err != nil {
sock.Send(&transport.Message{
Header: map[string]string{
"Content-Type": "text/plain",
},
Body: []byte(err.Error()),
})
return
}
codec := newRpcPlusCodec(&msg, sock, cf)
// strip our headers
hdr := make(map[string]string)
for k, v := range msg.Header {
hdr[k] = v
}
delete(hdr, "Content-Type")
delete(hdr, "Timeout")
ctx := metadata.NewContext(context.Background(), hdr)
// set the timeout if we have it
if len(to) > 0 {
if n, err := strconv.ParseUint(to, 10, 64); err == nil {
ctx, _ = context.WithTimeout(ctx, time.Duration(n))
for {
var msg transport.Message
if err := sock.Recv(&msg); err != nil {
return
}
}
// TODO: needs better error handling
if err := s.rpc.serveRequest(ctx, codec, ct); err != nil {
log.Printf("Unexpected error serving request, closing socket: %v", err)
// we use this Timeout header to set a server deadline
to := msg.Header["Timeout"]
// we use this Content-Type header to identify the codec needed
ct := msg.Header["Content-Type"]
cf, err := s.newCodec(ct)
// TODO: needs better error handling
if err != nil {
sock.Send(&transport.Message{
Header: map[string]string{
"Content-Type": "text/plain",
},
Body: []byte(err.Error()),
})
return
}
codec := newRpcPlusCodec(&msg, sock, cf)
// strip our headers
hdr := make(map[string]string)
for k, v := range msg.Header {
hdr[k] = v
}
delete(hdr, "Content-Type")
delete(hdr, "Timeout")
ctx := metadata.NewContext(context.Background(), hdr)
// set the timeout if we have it
if len(to) > 0 {
if n, err := strconv.ParseUint(to, 10, 64); err == nil {
ctx, _ = context.WithTimeout(ctx, time.Duration(n))
}
}
// TODO: needs better error handling
if err := s.rpc.serveRequest(ctx, codec, ct); err != nil {
log.Printf("Unexpected error serving request, closing socket: %v", err)
return
}
}
}