Merge branch 'master' of ssh://github.com/micro/go-micro

Asim Aslam 2019-12-03 20:32:10 +00:00
commit 3f8af8c1e0
23 changed files with 114 additions and 96 deletions

26 .golangci.yml Normal file
View File

@ -0,0 +1,26 @@
run:
deadline: 10m
linters:
disable-all: false
enable-all: false
enable:
- megacheck
- staticcheck
- deadcode
- varcheck
- gosimple
- unused
- prealloc
- scopelint
- gocritic
- goimports
- unconvert
- govet
- nakedret
- structcheck
- gosec
disable:
- maligned
- interfacer
- typecheck
- dupl

View File

@ -3,6 +3,12 @@ go:
- 1.13.x
env:
- GO111MODULE=on IN_TRAVIS_CI=yes
before_script:
- go install github.com/golangci/golangci-lint/cmd/golangci-lint
script:
- golangci-lint run || true
- go test -v -race ./... || true
- go test -v ./...
notifications:
slack:
secure: aEvhLbhujaGaKSrOokiG3//PaVHTIrc3fBpoRbCRqfZpyq6WREoapJJhF+tIpWWOwaC9GmChbD6aHo/jMUgwKXVyPSaNjiEL87YzUUpL8B2zslNp1rgfTg/LrzthOx3Q1TYwpaAl3to0fuHUVFX4yMeC2vuThq7WSXgMMxFCtbc=

View File

@ -44,11 +44,9 @@ func (tc *telegramConn) run() {
tc.recv = updates
tc.syncCond.Signal()
for {
select {
case <-tc.exit:
return
}
select {
case <-tc.exit:
return
}
}
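
The hunk above drops a for loop that wrapped a select whose only case returns once tc.exit fires; staticcheck flags loops whose body always exits after the first iteration. As a minimal sketch (names assumed, not the go-micro types), a one-case select with no default is equivalent to a plain channel receive, so blocking on an exit channel needs no loop at all:

package main

import "fmt"

// wait blocks until the exit channel is closed (or receives a value).
// A single-case select with no default behaves exactly like this receive,
// and wrapping it in a for loop only re-runs a branch that always returns.
func wait(exit <-chan struct{}) {
    <-exit
}

func main() {
    exit := make(chan struct{})
    go func() { close(exit) }()
    wait(exit)
    fmt.Println("exited")
}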

View File

@ -339,7 +339,9 @@ func (g *grpcClient) Call(ctx context.Context, req client.Request, rsp interface
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
ctx, _ = context.WithTimeout(ctx, callOpts.RequestTimeout)
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, callOpts.RequestTimeout)
defer cancel()
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
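
The same fix, capturing the CancelFunc returned by context.WithTimeout and deferring it, recurs in several hunks below (rpc client, mdns registry, grpc and rpc servers). A minimal sketch of why it matters, with an assumed doWork helper that is not part of go-micro: govet's lostcancel check reports the discarded cancel, because without calling it the timer and the derived context are only released when the timeout itself expires.

package main

import (
    "context"
    "fmt"
    "time"
)

// doWork waits for either the work to "finish" or the context to be done.
func doWork(ctx context.Context) error {
    select {
    case <-time.After(50 * time.Millisecond):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func main() {
    // ctx, _ = context.WithTimeout(ctx, d) keeps the timer alive until d elapses.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel() // releases the context's resources as soon as we return
    fmt.Println(doWork(ctx))
}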

View File

@ -162,7 +162,6 @@ func (r *rpcClient) call(ctx context.Context, node *registry.Node, req Request,
select {
case err := <-ch:
grr = err
return err
case <-ctx.Done():
grr = errors.Timeout("go.micro.client", fmt.Sprintf("%v", ctx.Err()))
@ -378,7 +377,9 @@ func (r *rpcClient) Call(ctx context.Context, request Request, response interfac
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
ctx, _ = context.WithTimeout(ctx, callOpts.RequestTimeout)
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, callOpts.RequestTimeout)
defer cancel()
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along

View File

@ -29,12 +29,10 @@ func (c *Codec) ReadBody(b interface{}) error {
return err
}
switch b.(type) {
switch v := b.(type) {
case *[]byte:
v := b.(*[]byte)
*v = buf
case *Frame:
v := b.(*Frame)
v.Data = buf
default:
return fmt.Errorf("failed to read body: %v is not type of *[]byte", b)
@ -45,14 +43,13 @@ func (c *Codec) ReadBody(b interface{}) error {
func (c *Codec) Write(m *codec.Message, b interface{}) error {
var v []byte
switch b.(type) {
switch vb := b.(type) {
case *Frame:
v = b.(*Frame).Data
v = vb.Data
case *[]byte:
ve := b.(*[]byte)
v = *ve
v = *vb
case []byte:
v = b.([]byte)
v = vb
default:
return fmt.Errorf("failed to write: %v is not type of *[]byte or []byte", b)
}
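
The codec hunks above replace switch b.(type) followed by per-case type assertions with a bound type switch (switch v := b.(type)), which linters such as gocritic report as a simplification. A small standalone sketch of the pattern, independent of the actual go-micro codec types:

package main

import "fmt"

type Frame struct{ Data []byte }

// payload returns the raw bytes carried by b; the bound switch variable v
// already has the concrete type in each case, so no second assertion is needed.
func payload(b interface{}) ([]byte, error) {
    switch v := b.(type) {
    case *[]byte:
        return *v, nil
    case []byte:
        return v, nil
    case *Frame:
        return v.Data, nil
    default:
        return nil, fmt.Errorf("unsupported type %T", b)
    }
}

func main() {
    data, _ := payload(&Frame{Data: []byte("hello")})
    fmt.Println(string(data))
}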

View File

@ -12,25 +12,22 @@ type Message struct {
}
func (n Marshaler) Marshal(v interface{}) ([]byte, error) {
switch v.(type) {
switch ve := v.(type) {
case *[]byte:
ve := v.(*[]byte)
return *ve, nil
case []byte:
return v.([]byte), nil
return ve, nil
case *Message:
return v.(*Message).Body, nil
return ve.Body, nil
}
return nil, errors.New("invalid message")
}
func (n Marshaler) Unmarshal(d []byte, v interface{}) error {
switch v.(type) {
switch ve := v.(type) {
case *[]byte:
ve := v.(*[]byte)
*ve = d
case *Message:
ve := v.(*Message)
ve.Body = d
}
return errors.New("invalid message")

View File

@ -29,15 +29,12 @@ func (c *Codec) ReadBody(b interface{}) error {
return err
}
switch b.(type) {
switch v := b.(type) {
case *string:
v := b.(*string)
*v = string(buf)
case *[]byte:
v := b.(*[]byte)
*v = buf
case *Frame:
v := b.(*Frame)
v.Data = buf
default:
return fmt.Errorf("failed to read body: %v is not type of *[]byte", b)
@ -48,20 +45,17 @@ func (c *Codec) ReadBody(b interface{}) error {
func (c *Codec) Write(m *codec.Message, b interface{}) error {
var v []byte
switch b.(type) {
switch ve := b.(type) {
case *Frame:
v = b.(*Frame).Data
v = ve.Data
case *[]byte:
ve := b.(*[]byte)
v = *ve
case *string:
ve := b.(*string)
v = []byte(*ve)
case string:
ve := b.(string)
v = []byte(ve)
case []byte:
v = b.([]byte)
v = ve
default:
return fmt.Errorf("failed to write: %v is not type of *[]byte or []byte", b)
}

View File

@ -798,7 +798,7 @@ func (n *network) processCtrlChan(listener tunnel.Listener) {
log.Tracef("Network metric for router %s and gateway %s: %v", event.Route.Router, event.Route.Gateway, metric)
// check we don't overflow max int 64
if d := route.Metric + metric; d > math.MaxInt64 || d <= 0 {
if d := route.Metric + metric; d <= 0 {
// set to max int64 if we overflow
route.Metric = math.MaxInt64
} else {
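
The dropped condition d > math.MaxInt64 can never be true for an int64, which is what the linter complains about; the overflow signal that remains is d <= 0, because adding two positive int64 values that overflow wraps around to a negative number (signed overflow is well defined in Go). A tiny sketch:

package main

import (
    "fmt"
    "math"
)

func main() {
    // An int64 can never exceed math.MaxInt64, so "d > math.MaxInt64" is
    // always false. Adding two positive int64s that overflow wraps to a
    // negative value instead, which is what "d <= 0" detects.
    var a, b int64 = math.MaxInt64, 1
    d := a + b
    fmt.Println(d, d <= 0) // -9223372036854775808 true
}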

View File

@ -94,11 +94,6 @@ func (n *Network) Connect(ctx context.Context, req *pbNet.ConnectRequest, resp *
// Nodes returns the list of nodes
func (n *Network) Nodes(ctx context.Context, req *pbNet.NodesRequest, resp *pbNet.NodesResponse) error {
depth := uint(req.Depth)
if depth <= 0 || depth > network.MaxDepth {
depth = network.MaxDepth
}
// root node
nodes := map[string]network.Node{}

View File

@ -274,7 +274,7 @@ func (p *Proxy) watchRoutes() {
return
}
if err := p.manageRoutes(event.Route, fmt.Sprintf("%s", event.Type)); err != nil {
if err := p.manageRoutes(event.Route, event.Type.String()); err != nil {
// TODO: should we bail here?
continue
}
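
Replacing fmt.Sprintf("%s", event.Type) with event.Type.String() is the gosimple S1025 fix: when a value already implements fmt.Stringer, calling String() directly avoids the formatting machinery. A tiny sketch with an assumed EventType, not the router's actual type:

package main

import "fmt"

type EventType int

const (
    Create EventType = iota
    Update
    Delete
)

// String implements fmt.Stringer.
func (t EventType) String() string {
    return [...]string{"create", "update", "delete"}[t]
}

func main() {
    e := Update
    // fmt.Sprintf("%s", e) works, but it just calls e.String() with extra overhead.
    fmt.Println(e.String()) // update
}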
@ -562,12 +562,9 @@ func NewProxy(opts ...options.Option) proxy.Proxy {
defer t.Stop()
// we must refresh route metrics since they do not trigger new events
for {
select {
case <-t.C:
// refresh route metrics
p.refreshMetrics()
}
for range t.C {
// refresh route metrics
p.refreshMetrics()
}
}()
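
The hunk above collapses a for loop containing a single-case select on t.C into for range t.C, which is what gosimple suggests for a one-case select inside a loop. A minimal runnable sketch (the refresh work is stubbed with a print):

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTicker(100 * time.Millisecond)
    defer t.Stop()

    // Ranging over the ticker channel is equivalent to the removed
    // for { select { case <-t.C: ... } } loop with a single case.
    count := 0
    for range t.C {
        fmt.Println("refresh") // e.g. refresh route metrics
        count++
        if count == 3 {
            return
        }
    }
}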

View File

@ -230,7 +230,9 @@ func (m *mdnsRegistry) GetService(service string) ([]*Service, error) {
p := mdns.DefaultParams(service)
// set context with timeout
p.Context, _ = context.WithTimeout(context.Background(), m.opts.Timeout)
var cancel context.CancelFunc
p.Context, cancel = context.WithTimeout(context.Background(), m.opts.Timeout)
defer cancel()
// set entries channel
p.Entries = entries
// set the domain
@ -308,7 +310,9 @@ func (m *mdnsRegistry) ListServices() ([]*Service, error) {
p := mdns.DefaultParams("_services")
// set context with timeout
p.Context, _ = context.WithTimeout(context.Background(), m.opts.Timeout)
var cancel context.CancelFunc
p.Context, cancel = context.WithTimeout(context.Background(), m.opts.Timeout)
defer cancel()
// set entries channel
p.Entries = entries
// set domain

View File

@ -87,9 +87,8 @@ func (m *Registry) ttlPrune() {
}
func (m *Registry) sendEvent(r *registry.Result) {
watchers := make([]*Watcher, 0, len(m.watchers))
m.RLock()
watchers := make([]*Watcher, 0, len(m.watchers))
for _, w := range m.watchers {
watchers = append(watchers, w)
}
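
This hunk moves the snapshot allocation under m.RLock() so that len(m.watchers) is read while the lock is held; reading the map size unlocked while other goroutines add or remove watchers is a data race. A small sketch of the copy-under-read-lock pattern with assumed types:

package main

import (
    "fmt"
    "sync"
)

type Watcher struct{ id string }

type Registry struct {
    sync.RWMutex
    watchers map[string]*Watcher
}

// snapshot copies the current watchers while holding the read lock,
// including the len() used to pre-size the slice, so concurrent
// register/deregister calls cannot race with it.
func (m *Registry) snapshot() []*Watcher {
    m.RLock()
    defer m.RUnlock()
    watchers := make([]*Watcher, 0, len(m.watchers))
    for _, w := range m.watchers {
        watchers = append(watchers, w)
    }
    return watchers
}

func main() {
    m := &Registry{watchers: map[string]*Watcher{"a": {id: "a"}}}
    fmt.Println(len(m.snapshot()))
}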

View File

@ -11,24 +11,22 @@ type serviceWatcher struct {
}
func (s *serviceWatcher) Next() (*registry.Result, error) {
for {
// check if closed
select {
case <-s.closed:
return nil, registry.ErrWatcherStopped
default:
}
r, err := s.stream.Recv()
if err != nil {
return nil, err
}
return &registry.Result{
Action: r.Action,
Service: ToService(r.Service),
}, nil
// check if closed
select {
case <-s.closed:
return nil, registry.ErrWatcherStopped
default:
}
r, err := s.stream.Recv()
if err != nil {
return nil, err
}
return &registry.Result{
Action: r.Action,
Service: ToService(r.Service),
}, nil
}
func (s *serviceWatcher) Stop() {

View File

@ -513,25 +513,28 @@ func (r *router) advertiseEvents() error {
// close closes exit channels
func (r *router) close() {
// notify all goroutines to finish
close(r.exit)
log.Debugf("Router closing remaining channels")
// drain the advertise channel only if advertising
if r.status.Code == Advertising {
// drain the event channel
for range r.eventChan {
}
r.sub.RLock()
// close advert subscribers
for id, sub := range r.subscribers {
select {
case <-sub:
default:
}
// close the channel
close(sub)
// delete the subscriber
r.sub.Lock()
delete(r.subscribers, id)
r.sub.Unlock()
}
r.sub.RUnlock()
}
// mark the router as Stopped and set its Error to nil
@ -552,6 +555,9 @@ func (r *router) watchErrors() {
defer r.Unlock()
// if the router is not stopped, stop it
if r.status.Code != Stopped {
// notify all goroutines to finish
close(r.exit)
// close all the channels
r.close()
// set the status error
@ -718,7 +724,7 @@ func (r *router) Process(a *Advert) error {
route := event.Route
action := event.Type
log.Debugf("Router %s applying %s from router %s for service %s %s", r.options.Id, action, route.Router, route.Service, route.Address)
if err := r.manageRoute(route, fmt.Sprintf("%s", action)); err != nil {
if err := r.manageRoute(route, action.String()); err != nil {
return fmt.Errorf("failed applying action %s to routing table: %s", action, err)
}
}
@ -857,6 +863,9 @@ func (r *router) Stop() error {
r.Unlock()
return r.status.Error
case Running, Advertising:
// notify all goroutines to finish
close(r.exit)
// close all the channels
// NOTE: close marks the router status as Stopped
r.close()
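
These hunks move close(r.exit) out of r.close() and into its callers (Stop and watchErrors), so the exit channel is closed exactly once before the remaining channels are drained and released. A rough sketch of the signal-then-drain shutdown shape, with assumed names rather than the router's real fields:

package main

import "fmt"

type router struct {
    exit   chan struct{}
    events chan string
}

// close releases resources; it assumes the caller has already closed exit.
func (r *router) close() {
    // drain anything still buffered on the event channel
    for {
        select {
        case <-r.events:
        default:
            return
        }
    }
}

func (r *router) stop() {
    close(r.exit) // signal all goroutines to finish, exactly once
    r.close()
}

func main() {
    r := &router{exit: make(chan struct{}), events: make(chan string, 1)}
    r.events <- "route update"
    r.stop()
    fmt.Println("stopped")
}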

View File

@ -222,7 +222,9 @@ func (g *grpcServer) handler(srv interface{}, stream grpc.ServerStream) error {
// set the timeout if we have it
if len(to) > 0 {
if n, err := strconv.ParseUint(to, 10, 64); err == nil {
ctx, _ = context.WithTimeout(ctx, time.Duration(n))
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(n))
defer cancel()
}
}

View File

@ -277,7 +277,9 @@ func (s *rpcServer) ServeConn(sock transport.Socket) {
// set the timeout from the header if we have it
if len(to) > 0 {
if n, err := strconv.ParseUint(to, 10, 64); err == nil {
ctx, _ = context.WithTimeout(ctx, time.Duration(n))
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(n))
defer cancel()
}
}

View File

@ -201,7 +201,7 @@ func Run() error {
}
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
log.Logf("Received signal %s", <-ch)
return Stop()
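
SIGKILL (like SIGSTOP) cannot be caught, blocked, or ignored, so passing syscall.SIGKILL to signal.Notify is a no-op that linters flag; this hunk and the one in the last file simply drop it. A minimal sketch of the remaining, effective handler:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    ch := make(chan os.Signal, 1)
    // SIGTERM and SIGINT are deliverable to the process; SIGKILL is not,
    // so listing it here would never fire.
    signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
    fmt.Println("received", <-ch)
}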

View File

@ -39,9 +39,7 @@ func (e *etcdLeader) Elect(id string, opts ...leader.ElectOption) (leader.Electe
l := cc.NewElection(s, path)
ctx, _ := context.WithCancel(context.Background())
if err := l.Campaign(ctx, id); err != nil {
if err := l.Campaign(context.TODO(), id); err != nil {
return nil, err
}
@ -63,14 +61,8 @@ func (e *etcdLeader) Follow() chan string {
ech := l.Observe(context.Background())
go func() {
for {
select {
case r, ok := <-ech:
if !ok {
return
}
ch <- string(r.Kvs[0].Value)
}
for r := range ech {
ch <- string(r.Kvs[0].Value)
}
}()
@ -82,8 +74,7 @@ func (e *etcdLeader) String() string {
}
func (e *etcdElected) Reelect() error {
ctx, _ := context.WithCancel(context.Background())
return e.e.Campaign(ctx, e.id)
return e.e.Campaign(context.TODO(), e.id)
}
func (e *etcdElected) Revoked() chan bool {

View File

@ -49,9 +49,7 @@ func (e *etcdLock) Acquire(id string, opts ...lock.AcquireOption) error {
m := cc.NewMutex(s, path)
ctx, _ := context.WithCancel(context.Background())
if err := m.Lock(ctx); err != nil {
if err := m.Lock(context.TODO()); err != nil {
return err
}

View File

@ -63,7 +63,9 @@ func (s Schedule) Run() <-chan time.Time {
}
// start ticker
for t := range time.Tick(s.Interval) {
ticker := time.NewTicker(s.Interval)
defer ticker.Stop()
for t := range ticker.C {
ch <- t
}
}()
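
time.Tick gives no way to stop the underlying Ticker, so a goroutine that uses it and later returns leaks the ticker for the life of the program; the hunk switches to time.NewTicker with a deferred Stop. A short sketch:

package main

import (
    "fmt"
    "time"
)

func main() {
    // time.Tick(d) cannot be stopped; NewTicker plus a deferred Stop
    // releases the ticker when the surrounding function returns.
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()

    for i := 0; i < 3; i++ {
        t := <-ticker.C
        fmt.Println("tick at", t.Format(time.RFC3339))
    }
}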

View File

@ -228,7 +228,7 @@ func (l *link) manage() {
}
// check the type of message
switch {
case bytes.Compare(p.message.Body, linkRequest) == 0:
case bytes.Equal(p.message.Body, linkRequest):
log.Tracef("Link %s received link request %v", l.id, p.message.Body)
// send response
if err := send(linkResponse); err != nil {
@ -236,7 +236,7 @@ func (l *link) manage() {
l.errCount++
l.Unlock()
}
case bytes.Compare(p.message.Body, linkResponse) == 0:
case bytes.Equal(p.message.Body, linkResponse):
// set round trip time
d := time.Since(now)
log.Tracef("Link %s received link response in %v", p.message.Body, d)
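
bytes.Compare(a, b) == 0 is flagged by gosimple (S1004) because bytes.Equal states the intent directly and is cheaper. A short sketch; the payload value below is assumed, not go-micro's actual linkRequest bytes:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    linkRequest := []byte("link-request") // assumed payload for illustration
    body := []byte("link-request")

    // bytes.Equal is the direct equality test; Compare is for ordering.
    fmt.Println(bytes.Equal(body, linkRequest))        // true
    fmt.Println(bytes.Compare(body, linkRequest) == 0) // equivalent but indirect
}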

View File

@ -376,7 +376,7 @@ func (s *service) Run() error {
go s.run(ex)
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
select {
// wait on kill signal