Compare commits

32 commits (SHA1):

d9b822deff
0e66688f8f
9213fd212f
aa360dcf51
2df259b5b8
15e9310368
16d8cf3434
9704ef2e5e
94e8f90f00
34d1587881
bf4143cde5
36b7b9f5fb
ae97023092
115ca6a018
89cf4ef8af
2a6ce6d4da
ad19fe2b90
49055a28ea
d1c6e121c1
7cd7fb0c0a
77eb5b5264
929e46c087
1fb5673d27
3bbb0cbc72
71fe0df73f
f1b8ecbdb3
fd2b2762e9
82d269cfb4
6641463eed
faf2454f0a
de9e4d73f5
4ae7277140
@@ -1,24 +1,26 @@
name: lint

on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches:
      - master
      - v3
      - v4

jobs:
  lint:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: setup-go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v5
        with:
          go-version: 1.21
          go-version: 'stable'
      - name: checkout
        uses: actions/checkout@v3
      - name: deps
        run: go get -v -d ./...
      - name: lint
        uses: https://github.com/golangci/golangci-lint-action@v3.4.0
        continue-on-error: true
        uses: https://github.com/golangci/golangci-lint-action@v6
        with:
          version: v1.52
          version: 'latest'
@@ -1,22 +1,30 @@
name: pr
name: test

on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches:
      - master
      - v3
      - v4
  push:
    branches:
      - master
      - v3
      - v4

jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: 'stable'
      - name: checkout
        uses: actions/checkout@v3
      - name: setup-go
        uses: actions/setup-go@v3
        with:
          go-version: 1.21
      - name: deps
        run: go get -v -t -d ./...
        run: go get -v -d ./...
      - name: test
        env:
          INTEGRATION_TESTS: yes
@@ -1,44 +1,5 @@
run:
  concurrency: 4
  concurrency: 8
  deadline: 5m
  issues-exit-code: 1
  tests: true

linters-settings:
  govet:
    check-shadowing: true
    enable:
      - fieldalignment

linters:
  enable:
    - govet
    - deadcode
    - errcheck
    - govet
    - ineffassign
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck
    - bodyclose
    - gci
    - goconst
    - gocritic
    - gosimple
    - gofmt
    - gofumpt
    - goimports
    - revive
    - gosec
    - makezero
    - misspell
    - nakedret
    - nestif
    - nilerr
    - noctx
    - prealloc
    - unconvert
    - unparam
  disable-all: false
@@ -1,5 +1,5 @@
// Package broker is an interface used for asynchronous messaging
package broker // import "go.unistack.org/micro/v3/broker"
package broker

import (
"context"

@@ -46,6 +46,12 @@ type Broker interface {
BatchSubscribe(ctx context.Context, topic string, h BatchHandler, opts ...SubscribeOption) (Subscriber, error)
// String type of broker
String() string
// Live returns broker liveness
Live() bool
// Ready returns broker readiness
Ready() bool
// Health returns broker health
Health() bool
}

type (
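The hunk above adds Live(), Ready() and Health() to the Broker interface. As an illustrative sketch only (not part of this change), these methods can be surfaced as liveness/readiness probe endpoints; the HTTP wiring below is an assumption, only broker.Broker and its three new methods come from the diff:

package main

import (
	"net/http"

	"go.unistack.org/micro/v3/broker"
)

// probeHandlers exposes the new Broker health methods as HTTP probes.
func probeHandlers(b broker.Broker) *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/live", func(w http.ResponseWriter, _ *http.Request) {
		if !b.Live() { // broker liveness
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	mux.HandleFunc("/ready", func(w http.ResponseWriter, _ *http.Request) {
		if !b.Ready() { // broker readiness
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	return mux
}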
@@ -206,7 +206,7 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
}
} else if sub.opts.AutoAck {
if err = ms.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
// single processing

@@ -217,11 +217,11 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
if eh != nil {
_ = eh(p)
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
m.opts.Logger.Error(m.opts.Context, "broker handler error", err)
}
} else if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
}

@@ -339,6 +339,18 @@ func (m *memoryBroker) Name() string {
return m.opts.Name
}

func (m *memoryBroker) Live() bool {
return true
}

func (m *memoryBroker) Ready() bool {
return true
}

func (m *memoryBroker) Health() bool {
return true
}

func (m *memoryEvent) Topic() string {
return m.topic
}
@@ -25,6 +25,18 @@ func NewBroker(opts ...Option) *NoopBroker {
return b
}

func (b *NoopBroker) Health() bool {
return true
}

func (b *NoopBroker) Live() bool {
return true
}

func (b *NoopBroker) Ready() bool {
return true
}

func (b *NoopBroker) Name() string {
return b.opts.Name
}
@@ -1,5 +1,5 @@
// Package client is an interface for an RPC client
package client // import "go.unistack.org/micro/v3/client"
package client

import (
"context"
@@ -298,7 +298,7 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
return errors.InternalServerError("go.micro.client", "%s", err.Error())
}

// only sleep if greater than 0

@@ -312,7 +312,7 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
// TODO apply any filtering here
routes, err = n.opts.Lookup(ctx, req, callOpts)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
return errors.InternalServerError("go.micro.client", "%s", err.Error())
}

// balance the list of nodes

@@ -466,7 +466,7 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
// call backoff first. Someone may want an initial start delay
t, cerr := callOpts.Backoff(ctx, req, i)
if cerr != nil {
return nil, errors.InternalServerError("go.micro.client", cerr.Error())
return nil, errors.InternalServerError("go.micro.client", "%s", cerr.Error())
}

// only sleep if greater than 0

@@ -480,7 +480,7 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
// TODO apply any filtering here
routes, err = n.opts.Lookup(ctx, req, callOpts)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
return nil, errors.InternalServerError("go.micro.client", "%s", err.Error())
}

// balance the list of nodes

@@ -609,13 +609,13 @@ func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishO
// use codec for payload
cf, err := n.newCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
return errors.InternalServerError("go.micro.client", "%s", err.Error())
}

// set the body
b, err := cf.Marshal(p.Payload())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
return errors.InternalServerError("go.micro.client", "%s", err.Error())
}
body = b
}
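Every call site above switches from passing err.Error() directly to passing an explicit "%s" format plus the error text as an argument. This keeps any '%' verbs inside the error text from being interpreted as formatting directives and satisfies printf-style vet checks. A hedged sketch of the difference, using only the errors helper shown in this diff (the failure text is hypothetical):

package main

import (
	stderrors "errors"

	"go.unistack.org/micro/v3/errors"
)

func example() error {
	err := stderrors.New("decode failed: invalid escape %q in payload") // error text happens to contain a % verb

	// Old form: err.Error() becomes the format string, so "%q" would be (mis)interpreted.
	// return errors.InternalServerError("go.micro.client", err.Error())

	// New form in this change: the text is passed as data for an explicit "%s" verb.
	return errors.InternalServerError("go.micro.client", "%s", err.Error())
}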
@@ -11,7 +11,6 @@ import (
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/network/transport"
"go.unistack.org/micro/v3/options"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/router"

@@ -22,8 +21,6 @@ import (

// Options holds client options
type Options struct {
// Transport used for transfer messages
Transport transport.Transport
// Selector used to select needed address
Selector selector.Selector
// Logger used to log messages

@@ -194,18 +191,16 @@ func NewOptions(opts ...Option) Options {
Retry: DefaultRetry,
Retries: DefaultRetries,
RequestTimeout: DefaultRequestTimeout,
DialTimeout: transport.DefaultDialTimeout,
},
Lookup: LookupRoute,
PoolSize: DefaultPoolSize,
PoolTTL: DefaultPoolTTL,
Selector: random.NewSelector(),
Logger: logger.DefaultLogger,
Broker: broker.DefaultBroker,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
Router: router.DefaultRouter,
Transport: transport.DefaultTransport,
Lookup: LookupRoute,
PoolSize: DefaultPoolSize,
PoolTTL: DefaultPoolTTL,
Selector: random.NewSelector(),
Logger: logger.DefaultLogger,
Broker: broker.DefaultBroker,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
Router: router.DefaultRouter,
}

for _, o := range opts {

@@ -278,13 +273,6 @@ func PoolTTL(d time.Duration) Option {
}
}

// Transport to use for communication e.g http, rabbitmq, etc
func Transport(t transport.Transport) Option {
return func(o *Options) {
o.Transport = t
}
}

// Register sets the routers register
func Register(r register.Register) Option {
return func(o *Options) {

@@ -334,14 +322,6 @@ func TLSConfig(t *tls.Config) Option {
return func(o *Options) {
// set the internal tls
o.TLSConfig = t

// set the default transport if one is not
// already set. Required for Init call below.

// set the transport tls
_ = o.Transport.Init(
transport.TLSConfig(t),
)
}
}

@@ -507,13 +487,6 @@ func WithAuthToken(t string) CallOption {
}
}

// WithNetwork is a CallOption which sets the network attribute
func WithNetwork(n string) CallOption {
return func(o *CallOptions) {
o.Network = n
}
}

// WithRouter sets the router to use for this call
func WithRouter(r router.Router) CallOption {
return func(o *CallOptions) {
@@ -38,4 +38,10 @@ type Cluster interface {
Broadcast(ctx context.Context, msg Message, filter ...string) error
// Unicast send message to single member in cluster
Unicast(ctx context.Context, node Node, msg Message) error
// Live returns cluster liveness
Live() bool
// Ready returns cluster readiness
Ready() bool
// Health returns cluster health
Health() bool
}
@@ -1,5 +1,5 @@
// Package config is an interface for dynamic configuration.
package config // import "go.unistack.org/micro/v3/config"
package config

import (
"context"

@@ -138,7 +138,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeLoad error", err)
if !c.Options().AllowFail {
return err
}

@@ -153,7 +153,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterLoad error", err)
if !c.Options().AllowFail {
return err
}

@@ -168,7 +168,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeSave error", err)
if !c.Options().AllowFail {
return err
}

@@ -183,7 +183,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterSave error", err)
if !c.Options().AllowFail {
return err
}

@@ -198,7 +198,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeInit error", err)
if !c.Options().AllowFail {
return err
}

@@ -213,7 +213,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterInit error", err)
if !c.Options().AllowFail {
return err
}
@@ -1,6 +1,6 @@
// Package errors provides a way to return detailed information
// for an RPC request error. The error is normally JSON encoded.
package errors // import "go.unistack.org/micro/v3/errors"
package errors

import (
"bytes"

@@ -2,6 +2,7 @@ package errors

import (
"encoding/json"
"errors"
er "errors"
"fmt"
"net/http"

@@ -26,7 +27,7 @@ func TestMarshalJSON(t *testing.T) {
func TestEmpty(t *testing.T) {
msg := "test"
var err *Error
err = FromError(fmt.Errorf(msg))
err = FromError(errors.New(msg))
if err.Detail != msg {
t.Fatalf("invalid error %v", err)
}
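In TestEmpty, fmt.Errorf(msg) becomes errors.New(msg). As a brief aside (standard library only, not specific to this repo): fmt.Errorf treats its first argument as a printf format string, which printf-style vet/lint checks flag when that argument is not a constant, while errors.New takes the text verbatim.

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "test"

	// Flagged by printf checks: msg is used as a non-constant format string.
	_ = fmt.Errorf(msg)

	// Idiomatic when there is nothing to format: the text is taken as-is.
	_ = errors.New(msg)
}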
@@ -188,7 +188,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
return "", err
}

@@ -212,7 +212,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
done := make(chan struct{})

if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
return eid, werr
}
for idx := range steps {

@@ -237,7 +237,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
w.opts.Logger.Trace(nctx, fmt.Sprintf("will be executed %v", steps[idx][nidx]))
}
cstep := steps[idx][nidx]
// nolint: nestif

@@ -257,21 +257,21 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}

@@ -290,16 +290,16 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}

@@ -317,7 +317,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return eid, nil
}

logger.Tracef(ctx, "wait for finish or error")
logger.DefaultLogger.Trace(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()

@@ -333,15 +333,15 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
}

@@ -1,5 +1,5 @@
// Package flow is an interface used for saga pattern microservice workflow
package flow // import "go.unistack.org/micro/v3/flow"
package flow

import (
"context"

@@ -1,4 +1,4 @@
package fsm // import "go.unistack.org/micro/v3/fsm"
package fsm

import (
"context"

@@ -17,7 +17,7 @@ func TestFSMStart(t *testing.T) {

wrapper := func(next StateFunc) StateFunc {
return func(sctx context.Context, s State, opts ...StateOption) (State, error) {
sctx = logger.NewContext(sctx, logger.Fields("state", s.Name()))
sctx = logger.NewContext(sctx, logger.DefaultLogger.Fields("state", s.Name()))
return next(sctx, s, opts...)
}
}
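The FSM test now builds its field-scoped logger from an explicit instance (logger.DefaultLogger.Fields) instead of the removed package-level logger.Fields helper, and stashes it in the context with logger.NewContext. A hedged sketch of the same pattern outside the test; retrieving the logger back out assumes the package's usual FromContext counterpart:

// Attach a logger carrying the current state name to the context.
sctx = logger.NewContext(sctx, logger.DefaultLogger.Fields("state", s.Name()))

// Downstream code that only receives the context can pick it up again
// (FromContext is assumed here as the usual counterpart of NewContext).
if l, ok := logger.FromContext(sctx); ok {
	l.Info(sctx, "entering state")
}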
go.mod (16)

@@ -1,20 +1,32 @@
module go.unistack.org/micro/v3

go 1.20
go 1.22

require (
dario.cat/mergo v1.0.0
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/KimMachineGun/automemlimit v0.6.1
github.com/google/uuid v1.3.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v3 v3.4.1
golang.org/x/sync v0.3.0
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
google.golang.org/protobuf v1.33.0
)

require (
github.com/cilium/ebpf v0.9.1 // indirect
github.com/containerd/cgroups/v3 v3.0.1 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect
)
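Two of the new direct dependencies, go.uber.org/automaxprocs and github.com/KimMachineGun/automemlimit, are typically activated by side-effect imports. The sketch below shows that common wiring as an assumption; the diff above only covers go.mod, not where or how this module imports them.

package main

import (
	// Adjusts GOMAXPROCS to the container CPU quota during package init.
	_ "go.uber.org/automaxprocs"

	// Sets GOMEMLIMIT from the cgroup memory limit during package init.
	_ "github.com/KimMachineGun/automemlimit"
)

func main() {
	// Nothing else to do: both packages configure the runtime in init().
}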
49
go.sum
49
go.sum
@@ -2,23 +2,68 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
|
||||
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
|
||||
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
|
||||
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
|
||||
github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
|
||||
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
|
||||
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||
@@ -26,8 +71,8 @@ google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
logger/logger.go (135)

@@ -14,8 +14,6 @@ var (
DefaultLogger Logger = NewLogger()
// DefaultLevel used by logger
DefaultLevel = InfoLevel
// DefaultCallerSkipCount used by logger
DefaultCallerSkipCount = 2
)

// Logger is a generic logging interface

@@ -33,33 +31,19 @@ type Logger interface {
// Fields set fields to always be logged with keyval pairs
Fields(fields ...interface{}) Logger
// Info level message
Info(ctx context.Context, args ...interface{})
Info(ctx context.Context, msg string, args ...interface{})
// Trace level message
Trace(ctx context.Context, args ...interface{})
Trace(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debug(ctx context.Context, args ...interface{})
Debug(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warn(ctx context.Context, args ...interface{})
Warn(ctx context.Context, msg string, args ...interface{})
// Error level message
Error(ctx context.Context, args ...interface{})
Error(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatal(ctx context.Context, args ...interface{})
// Infof level message
Infof(ctx context.Context, msg string, args ...interface{})
// Tracef level message
Tracef(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debugf(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warnf(ctx context.Context, msg string, args ...interface{})
// Error level message
Errorf(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatalf(ctx context.Context, msg string, args ...interface{})
Fatal(ctx context.Context, msg string, args ...interface{})
// Log logs message with needed level
Log(ctx context.Context, level Level, args ...interface{})
// Logf logs message with needed level
Logf(ctx context.Context, level Level, msg string, args ...interface{})
Log(ctx context.Context, level Level, msg string, args ...interface{})
// Name returns broker instance name
Name() string
// String returns the type of logger

@@ -68,108 +52,3 @@ type Logger interface {

// Field contains keyval pair
type Field interface{}

// Info writes msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Info(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Info(ctx, args...)
}

// Error writes msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Error(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Error(ctx, args...)
}

// Debug writes msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debug(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debug(ctx, args...)
}

// Warn writes msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warn(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warn(ctx, args...)
}

// Trace writes msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Trace(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Trace(ctx, args...)
}

// Fatal writes msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatal(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatal(ctx, args...)
}

// Infof writes formatted msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Infof(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Infof(ctx, msg, args...)
}

// Errorf writes formatted msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Errorf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Errorf(ctx, msg, args...)
}

// Debugf writes formatted msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debugf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debugf(ctx, msg, args...)
}

// Warnf writes formatted msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warnf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warnf(ctx, msg, args...)
}

// Tracef writes formatted msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Tracef(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Tracef(ctx, msg, args...)
}

// Fatalf writes formatted msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatalf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatalf(ctx, msg, args...)
}

// V returns true if passed level enabled in default logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func V(level Level) bool {
return DefaultLogger.V(level)
}

// Init initialize logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Init(opts ...Option) error {
return DefaultLogger.Init(opts...)
}

// Fields create logger with specific fields
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fields(fields ...interface{}) Logger {
return DefaultLogger.Fields(fields...)
}
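The interface above drops the printf-style *f variants and moves every level method to a message-plus-attributes signature, and the deprecated package-level helpers are removed outright. A hedged migration sketch for a typical call site; the surrounding variables are illustrative:

log := logger.DefaultLogger

// Before this change (printf style, now removed from the interface):
//   log.Errorf(ctx, "ack failed: %v", err)

// After: a static message, with the error passed as an attribute value.
log.Error(ctx, "broker ack error", err)

// When formatting is genuinely needed, the caller does it explicitly.
log.Trace(ctx, fmt.Sprintf("will be executed %v", step))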
@@ -4,12 +4,17 @@ import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultCallerSkipCount = 2
|
||||
)
|
||||
|
||||
type noopLogger struct {
|
||||
opts Options
|
||||
}
|
||||
|
||||
func NewLogger(opts ...Option) Logger {
|
||||
options := NewOptions(opts...)
|
||||
options.CallerSkipCount = defaultCallerSkipCount
|
||||
return &noopLogger{opts: options}
|
||||
}
|
||||
|
||||
@@ -51,44 +56,23 @@ func (l *noopLogger) String() string {
|
||||
return "noop"
|
||||
}
|
||||
|
||||
func (l *noopLogger) Log(ctx context.Context, lvl Level, attrs ...interface{}) {
|
||||
func (l *noopLogger) Log(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Info(ctx context.Context, attrs ...interface{}) {
|
||||
func (l *noopLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Debug(ctx context.Context, attrs ...interface{}) {
|
||||
func (l *noopLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Error(ctx context.Context, attrs ...interface{}) {
|
||||
func (l *noopLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Trace(ctx context.Context, attrs ...interface{}) {
|
||||
func (l *noopLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Warn(ctx context.Context, attrs ...interface{}) {
|
||||
func (l *noopLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Fatal(ctx context.Context, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Logf(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
||||
func (l *noopLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
func (l *noopLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
}
|
||||
|
@@ -23,7 +23,7 @@ type Options struct {
|
||||
Name string
|
||||
// Fields holds additional metadata
|
||||
Fields []interface{}
|
||||
// CallerSkipCount number of frmaes to skip
|
||||
// callerSkipCount number of frmaes to skip
|
||||
CallerSkipCount int
|
||||
// ContextAttrFuncs contains funcs that executed before log func on context
|
||||
ContextAttrFuncs []ContextAttrFunc
|
||||
@@ -57,7 +57,6 @@ func NewOptions(opts ...Option) Options {
|
||||
Level: DefaultLevel,
|
||||
Fields: make([]interface{}, 0, 6),
|
||||
Out: os.Stderr,
|
||||
CallerSkipCount: DefaultCallerSkipCount,
|
||||
Context: context.Background(),
|
||||
ContextAttrFuncs: DefaultContextAttrFuncs,
|
||||
AddSource: true,
|
||||
@@ -81,6 +80,13 @@ func WithContextAttrFuncs(fncs ...ContextAttrFunc) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithAddFields add fields for the logger
|
||||
func WithAddFields(fields ...interface{}) Option {
|
||||
return func(o *Options) {
|
||||
o.Fields = append(o.Fields, fields...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithFields set default fields for the logger
|
||||
func WithFields(fields ...interface{}) Option {
|
||||
return func(o *Options) {
|
||||
@@ -102,27 +108,20 @@ func WithOutput(out io.Writer) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WitAddStacktrace controls writing stacktrace on error
|
||||
// WithAddStacktrace controls writing stacktrace on error
|
||||
func WithAddStacktrace(v bool) Option {
|
||||
return func(o *Options) {
|
||||
o.AddStacktrace = v
|
||||
}
|
||||
}
|
||||
|
||||
// WitAddSource controls writing source file and pos in log
|
||||
// WithAddSource controls writing source file and pos in log
|
||||
func WithAddSource(v bool) Option {
|
||||
return func(o *Options) {
|
||||
o.AddSource = v
|
||||
}
|
||||
}
|
||||
|
||||
// WithCallerSkipCount set frame count to skip
|
||||
func WithCallerSkipCount(c int) Option {
|
||||
return func(o *Options) {
|
||||
o.CallerSkipCount = c
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext set context
|
||||
func WithContext(ctx context.Context) Option {
|
||||
return func(o *Options) {
|
||||
@@ -198,6 +197,8 @@ func WithMicroKeys() Option {
|
||||
// WithAddCallerSkipCount add skip count for copy logger
|
||||
func WithAddCallerSkipCount(n int) Option {
|
||||
return func(o *Options) {
|
||||
o.CallerSkipCount += n
|
||||
if n > 0 {
|
||||
o.CallerSkipCount += n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -2,19 +2,26 @@ package slog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
"go.unistack.org/micro/v3/semconv"
|
||||
"go.unistack.org/micro/v3/tracer"
|
||||
)
|
||||
|
||||
const (
|
||||
badKey = "!BADKEY"
|
||||
// defaultCallerSkipCount used by logger
|
||||
defaultCallerSkipCount = 3
|
||||
)
|
||||
|
||||
var reTrace = regexp.MustCompile(`.*/slog/logger\.go.*\n`)
|
||||
|
||||
var (
|
||||
@@ -26,6 +33,27 @@ var (
|
||||
fatalValue = slog.StringValue("fatal")
|
||||
)
|
||||
|
||||
type wrapper struct {
|
||||
h slog.Handler
|
||||
level atomic.Int64
|
||||
}
|
||||
|
||||
func (h *wrapper) Enabled(ctx context.Context, level slog.Level) bool {
|
||||
return level >= slog.Level(int(h.level.Load()))
|
||||
}
|
||||
|
||||
func (h *wrapper) Handle(ctx context.Context, rec slog.Record) error {
|
||||
return h.h.Handle(ctx, rec)
|
||||
}
|
||||
|
||||
func (h *wrapper) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
return h.h.WithAttrs(attrs)
|
||||
}
|
||||
|
||||
func (h *wrapper) WithGroup(name string) slog.Handler {
|
||||
return h.h.WithGroup(name)
|
||||
}
|
||||
|
||||
func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
|
||||
switch a.Key {
|
||||
case slog.SourceKey:
|
||||
@@ -62,8 +90,7 @@ func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
|
||||
}
|
||||
|
||||
type slogLogger struct {
|
||||
leveler *slog.LevelVar
|
||||
handler slog.Handler
|
||||
handler *wrapper
|
||||
opts logger.Options
|
||||
mu sync.RWMutex
|
||||
}
|
||||
@@ -77,51 +104,52 @@ func (s *slogLogger) Clone(opts ...logger.Option) logger.Logger {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
l := &slogLogger{
|
||||
opts: options,
|
||||
if len(options.ContextAttrFuncs) == 0 {
|
||||
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
|
||||
}
|
||||
|
||||
l.leveler = new(slog.LevelVar)
|
||||
handleOpt := &slog.HandlerOptions{
|
||||
ReplaceAttr: l.renameAttr,
|
||||
Level: l.leveler,
|
||||
AddSource: l.opts.AddSource,
|
||||
attrs, _ := s.argsAttrs(options.Fields)
|
||||
l := &slogLogger{
|
||||
handler: &wrapper{h: s.handler.h.WithAttrs(attrs)},
|
||||
opts: options,
|
||||
}
|
||||
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
|
||||
l.handler = slog.New(slog.NewJSONHandler(options.Out, handleOpt)).With(options.Fields...).Handler()
|
||||
l.handler.level.Store(int64(loggerToSlogLevel(options.Level)))
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
func (s *slogLogger) V(level logger.Level) bool {
|
||||
return s.opts.Level.Enabled(level)
|
||||
s.mu.Lock()
|
||||
v := s.opts.Level.Enabled(level)
|
||||
s.mu.Unlock()
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *slogLogger) Level(level logger.Level) {
|
||||
s.leveler.Set(loggerToSlogLevel(level))
|
||||
s.mu.Lock()
|
||||
s.opts.Level = level
|
||||
s.handler.level.Store(int64(loggerToSlogLevel(level)))
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *slogLogger) Options() logger.Options {
|
||||
return s.opts
|
||||
}
|
||||
|
||||
func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
|
||||
func (s *slogLogger) Fields(fields ...interface{}) logger.Logger {
|
||||
s.mu.RLock()
|
||||
level := s.leveler.Level()
|
||||
options := s.opts
|
||||
s.mu.RUnlock()
|
||||
|
||||
l := &slogLogger{opts: options}
|
||||
l.leveler = new(slog.LevelVar)
|
||||
l.leveler.Set(level)
|
||||
|
||||
handleOpt := &slog.HandlerOptions{
|
||||
ReplaceAttr: l.renameAttr,
|
||||
Level: l.leveler,
|
||||
AddSource: l.opts.AddSource,
|
||||
if len(options.ContextAttrFuncs) == 0 {
|
||||
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
|
||||
}
|
||||
|
||||
l.handler = slog.New(slog.NewJSONHandler(l.opts.Out, handleOpt)).With(attrs...).Handler()
|
||||
attrs, _ := s.argsAttrs(fields)
|
||||
l.handler = &wrapper{h: s.handler.h.WithAttrs(attrs)}
|
||||
l.handler.level.Store(int64(loggerToSlogLevel(l.opts.Level)))
|
||||
|
||||
return l
|
||||
}
|
||||
@@ -129,407 +157,77 @@ func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
|
||||
func (s *slogLogger) Init(opts ...logger.Option) error {
|
||||
s.mu.Lock()
|
||||
|
||||
if len(s.opts.ContextAttrFuncs) == 0 {
|
||||
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(&s.opts)
|
||||
}
|
||||
|
||||
s.leveler = new(slog.LevelVar)
|
||||
if len(s.opts.ContextAttrFuncs) == 0 {
|
||||
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
|
||||
}
|
||||
|
||||
handleOpt := &slog.HandlerOptions{
|
||||
ReplaceAttr: s.renameAttr,
|
||||
Level: s.leveler,
|
||||
Level: loggerToSlogLevel(logger.TraceLevel),
|
||||
AddSource: s.opts.AddSource,
|
||||
}
|
||||
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
|
||||
s.handler = slog.New(slog.NewJSONHandler(s.opts.Out, handleOpt)).With(s.opts.Fields...).Handler()
|
||||
|
||||
attrs, _ := s.argsAttrs(s.opts.Fields)
|
||||
|
||||
var h slog.Handler
|
||||
if s.opts.Context != nil {
|
||||
if v, ok := s.opts.Context.Value(handlerKey{}).(slog.Handler); ok && v != nil {
|
||||
h = v
|
||||
}
|
||||
|
||||
if fn := s.opts.Context.Value(handlerFnKey{}); fn != nil {
|
||||
if rfn := reflect.ValueOf(fn); rfn.Kind() == reflect.Func {
|
||||
if ret := rfn.Call([]reflect.Value{reflect.ValueOf(s.opts.Out), reflect.ValueOf(handleOpt)}); len(ret) == 1 {
|
||||
if iface, ok := ret[0].Interface().(slog.Handler); ok && iface != nil {
|
||||
h = iface
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if h == nil {
|
||||
h = slog.NewJSONHandler(s.opts.Out, handleOpt)
|
||||
}
|
||||
|
||||
s.handler = &wrapper{h: h.WithAttrs(attrs)}
|
||||
s.handler.level.Store(int64(loggerToSlogLevel(s.opts.Level)))
|
||||
s.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
|
||||
if !s.V(lvl) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, lvl, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Logf(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
|
||||
if !s.V(lvl) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, (slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1])))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.InfoLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Info(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
|
||||
if !s.V(logger.InfoLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.DebugLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
|
||||
if !s.V(logger.InfoLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.TraceLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Debug(ctx context.Context, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
    if !s.V(logger.DebugLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, fmt.Sprintf("%s", attrs[0]), pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)

func (s *slogLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
    s.printLog(ctx, logger.ErrorLevel, msg, attrs...)
}

func (s *slogLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
    if !s.V(logger.DebugLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, msg, pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs...)
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Trace(ctx context.Context, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
    if !s.V(logger.TraceLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, fmt.Sprintf("%s", attrs[0]), pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
    if !s.V(logger.TraceLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, msg, pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Error(ctx context.Context, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
    if !s.V(logger.ErrorLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, fmt.Sprintf("%s", attrs[0]), pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    if s.opts.AddStacktrace {
        stackInfo := make([]byte, 1024*1024)
        if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
            traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
            if len(traceLines) != 0 {
                attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
            }
        }
    }
    r.Add(attrs[1:]...)
    r.Attrs(func(a slog.Attr) bool {
        if a.Key == s.opts.ErrorKey {
            if span, ok := tracer.SpanFromContext(ctx); ok {
                span.SetStatus(tracer.SpanStatusError, a.Value.String())
                return false
            }
        }
        return true
    })
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
    if !s.V(logger.ErrorLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, msg, pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    if s.opts.AddStacktrace {
        stackInfo := make([]byte, 1024*1024)
        if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
            traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
            if len(traceLines) != 0 {
                attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
            }
        }
    }
    r.Add(attrs...)
    r.Attrs(func(a slog.Attr) bool {
        if a.Key == s.opts.ErrorKey {
            if span, ok := tracer.SpanFromContext(ctx); ok {
                span.SetStatus(tracer.SpanStatusError, a.Value.String())
                return false
            }
        }
        return true
    })
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Fatal(ctx context.Context, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
    if !s.V(logger.FatalLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, fmt.Sprintf("%s", attrs[0]), pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)

func (s *slogLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
    s.printLog(ctx, logger.FatalLevel, msg, attrs...)
    os.Exit(1)
}

func (s *slogLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
    if !s.V(logger.FatalLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, msg, pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs...)
    _ = s.handler.Handle(ctx, r)
    os.Exit(1)
}

func (s *slogLogger) Warn(ctx context.Context, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
    if !s.V(logger.WarnLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, fmt.Sprintf("%s", attrs[0]), pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)
}

func (s *slogLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
    if !s.V(logger.WarnLevel) {
        return
    }
    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
    r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, msg, pcs[0])
    for _, fn := range s.opts.ContextAttrFuncs {
        attrs = append(attrs, fn(ctx)...)
    }

    for idx, attr := range attrs {
        if ve, ok := attr.(error); ok && ve != nil {
            attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
            break
        }
    }
    r.Add(attrs[1:]...)
    _ = s.handler.Handle(ctx, r)

func (s *slogLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
    s.printLog(ctx, logger.WarnLevel, msg, attrs...)
}

func (s *slogLogger) Name() string {
@@ -540,10 +238,59 @@ func (s *slogLogger) String() string {
    return "slog"
}

func (s *slogLogger) printLog(ctx context.Context, lvl logger.Level, msg string, args ...interface{}) {
    if !s.V(lvl) {
        return
    }
    var argError error

    s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()

    attrs, err := s.argsAttrs(args)
    if err != nil {
        argError = err
    }
    if argError != nil {
        if span, ok := tracer.SpanFromContext(ctx); ok {
            span.SetStatus(tracer.SpanStatusError, argError.Error())
        }
    }

    for _, fn := range s.opts.ContextAttrFuncs {
        ctxAttrs, err := s.argsAttrs(fn(ctx))
        if err != nil {
            argError = err
        }
        attrs = append(attrs, ctxAttrs...)
    }
    if argError != nil {
        if span, ok := tracer.SpanFromContext(ctx); ok {
            span.SetStatus(tracer.SpanStatusError, argError.Error())
        }
    }

    if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
        stackInfo := make([]byte, 1024*1024)
        if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
            traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
            if len(traceLines) != 0 {
                attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
            }
        }
    }

    var pcs [1]uintptr
    runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, printLog, LogLvlMethod]
    r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
    r.AddAttrs(attrs...)
    _ = s.handler.Handle(ctx, r)
}

func NewLogger(opts ...logger.Option) logger.Logger {
    s := &slogLogger{
        opts: logger.NewOptions(opts...),
    }
    s.opts.CallerSkipCount = defaultCallerSkipCount

    return s
}
@@ -581,3 +328,39 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
        return logger.InfoLevel
    }
}

func (s *slogLogger) argsAttrs(args []interface{}) ([]slog.Attr, error) {
    attrs := make([]slog.Attr, 0, len(args))
    var err error

    for idx := 0; idx < len(args); idx++ {
        switch arg := args[idx].(type) {
        case slog.Attr:
            attrs = append(attrs, arg)
        case string:
            if idx+1 < len(args) {
                attrs = append(attrs, slog.Any(arg, args[idx+1]))
                idx += 1
            } else {
                attrs = append(attrs, slog.String(badKey, arg))
            }
        case error:
            attrs = append(attrs, slog.String(s.opts.ErrorKey, arg.Error()))
            err = arg
        }
    }

    return attrs, err
}

type handlerKey struct{}

func WithHandler(h slog.Handler) logger.Option {
    return logger.SetOption(handlerKey{}, h)
}

type handlerFnKey struct{}

func WithHandlerFunc(fn any) logger.Option {
    return logger.SetOption(handlerFnKey{}, fn)
}
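A minimal usage sketch of the refactored logger follows. It only uses options and calls that appear in this change (NewLogger, WithHandlerFunc, logger.WithLevel, logger.WithOutput, Info, Error); the import path of the slog logger package, the buffer, the message text and the attribute keys are assumptions for illustration.

package main

import (
    "bytes"
    "context"
    "errors"
    "log/slog"

    "go.unistack.org/micro/v3/logger"
    mslog "go.unistack.org/micro/v3/logger/slog" // assumed import path for this package
)

func main() {
    ctx := context.TODO()
    buf := bytes.NewBuffer(nil)

    // Construct the slog-backed logger; WithHandlerFunc selects the slog handler constructor.
    l := mslog.NewLogger(
        logger.WithLevel(logger.DebugLevel),
        logger.WithOutput(buf),
        mslog.WithHandlerFunc(slog.NewTextHandler),
    )
    if err := l.Init(); err != nil {
        panic(err)
    }

    // Key/value pairs become slog attributes via argsAttrs; a bare error value is
    // stored under the configured error key and, if a span is in the context,
    // marks that span as failed.
    l.Info(ctx, "request finished", "key", "val")
    l.Error(ctx, "request failed", errors.New("boom"))
}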
@@ -3,13 +3,114 @@ package slog
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.unistack.org/micro/v3/metadata"
|
||||
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
)
|
||||
|
||||
func TestWithHandlerFunc(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf),
|
||||
WithHandlerFunc(slog.NewTextHandler),
|
||||
)
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Info(ctx, "msg1")
|
||||
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`msg=msg1`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithAddFields(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Info(ctx, "msg1")
|
||||
|
||||
if err := l.Init(logger.WithAddFields("key1", "val1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
l.Info(ctx, "msg2")
|
||||
|
||||
if err := l.Init(logger.WithAddFields("key2", "val2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
l.Info(ctx, "msg3")
|
||||
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key1"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key2"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultipleFieldsWithLevel(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l = l.Fields("key", "val")
|
||||
|
||||
l.Info(ctx, "msg1")
|
||||
nl := l.Clone(logger.WithLevel(logger.DebugLevel))
|
||||
nl.Debug(ctx, "msg2")
|
||||
l.Debug(ctx, "msg3")
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"msg1"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"msg2"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
if bytes.Contains(buf.Bytes(), []byte(`"msg3"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultipleFields(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l = l.Fields("key", "val")
|
||||
|
||||
l = l.Fields("key1", "val1")
|
||||
|
||||
l.Info(ctx, "msg")
|
||||
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key1":"val1"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
buf := bytes.NewBuffer(nil)
|
||||
@@ -29,13 +130,22 @@ func TestError(t *testing.T) {
|
||||
|
||||
func TestErrorf(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithAddStacktrace(true))
|
||||
if err := l.Init(); err != nil {
|
||||
if err := l.Init(logger.WithContextAttrFuncs(func(ctx context.Context) []interface{} {
|
||||
return nil
|
||||
})); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Errorf(ctx, "message", fmt.Errorf("error message"))
|
||||
l.Log(ctx, logger.ErrorLevel, "message", errors.New("error msg"))
|
||||
|
||||
l.Log(ctx, logger.ErrorLevel, "", errors.New("error msg"))
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"error":"error msg"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
|
||||
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
@@ -99,6 +209,11 @@ func TestFromContextWithFields(t *testing.T) {
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
|
||||
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
|
||||
l.Info(ctx, "test", "uncorrected number attributes")
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"!BADKEY":"uncorrected number attributes"`)) {
|
||||
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
@@ -174,3 +289,52 @@ func TestLogger(t *testing.T) {
|
||||
t.Fatalf("logger warn, buf %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func Test_WithContextAttrFunc(t *testing.T) {
|
||||
loggerContextAttrFuncs := []logger.ContextAttrFunc{
|
||||
func(ctx context.Context) []interface{} {
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
attrs := make([]interface{}, 0, 10)
|
||||
for k, v := range md {
|
||||
switch k {
|
||||
case "X-Request-Id", "Phone", "External-Id", "Source-Service", "X-App-Install-Id", "Client-Id", "Client-Ip":
|
||||
attrs = append(attrs, strings.ToLower(k), v)
|
||||
}
|
||||
}
|
||||
return attrs
|
||||
},
|
||||
}
|
||||
|
||||
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
|
||||
|
||||
ctx := context.TODO()
|
||||
ctx = metadata.AppendIncomingContext(ctx, "X-Request-Id", uuid.New().String(),
|
||||
"Source-Service", "Test-System")
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Info(ctx, "test message")
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test message"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"x-request-id":"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"source-service":"Test-System"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
buf.Reset()
|
||||
imd, _ := metadata.FromIncomingContext(ctx)
|
||||
l.Info(ctx, "test message1")
|
||||
imd.Set("Source-Service", "Test-System2")
|
||||
l.Info(ctx, "test message2")
|
||||
|
||||
// t.Logf("xxx %s", buf.Bytes())
|
||||
}
|
||||
|
@@ -36,14 +36,14 @@ var (
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
filteredBytes = []byte("<filtered>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("{")
|
||||
closeMapBytes = []byte("}")
|
||||
// openBracketBytes = []byte("[")
|
||||
// closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("{")
|
||||
closeMapBytes = []byte("}")
|
||||
)
|
||||
|
||||
type protoMessage interface {
|
||||
|
@@ -82,12 +82,12 @@ func TestTagged(t *testing.T) {
|
||||
func TestTaggedNested(t *testing.T) {
|
||||
type val struct {
|
||||
key string `logger:"take"`
|
||||
val string `logger:"omit"`
|
||||
// val string `logger:"omit"`
|
||||
unk string
|
||||
}
|
||||
type str struct {
|
||||
key string `logger:"omit"`
|
||||
val *val `logger:"take"`
|
||||
// key string `logger:"omit"`
|
||||
val *val `logger:"take"`
|
||||
}
|
||||
|
||||
var iface interface{}
|
||||
|
@@ -1,399 +0,0 @@
|
||||
// Package wrapper provides wrapper for Logger
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.unistack.org/micro/v3/client"
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
"go.unistack.org/micro/v3/server"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultClientCallObserver called by wrapper in client Call
|
||||
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientStreamObserver called by wrapper in client Stream
|
||||
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientPublishObserver called by wrapper in client Publish
|
||||
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, err error) []string {
|
||||
labels := []string{"endpoint", msg.Topic()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultServerHandlerObserver called by wrapper in server Handler
|
||||
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultServerSubscriberObserver called by wrapper in server Subscriber
|
||||
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, err error) []string {
|
||||
labels := []string{"endpoint", msg.Topic()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientCallFuncObserver called by wrapper in client CallFunc
|
||||
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultSkipEndpoints wrapper not called for this endpoints
|
||||
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
|
||||
)
|
||||
|
||||
type lWrapper struct {
|
||||
client.Client
|
||||
serverHandler server.HandlerFunc
|
||||
serverSubscriber server.SubscriberFunc
|
||||
clientCallFunc client.CallFunc
|
||||
opts Options
|
||||
}
|
||||
|
||||
type (
|
||||
// ClientCallObserver func signature
|
||||
ClientCallObserver func(context.Context, client.Request, interface{}, []client.CallOption, error) []string
|
||||
// ClientStreamObserver func signature
|
||||
ClientStreamObserver func(context.Context, client.Request, []client.CallOption, client.Stream, error) []string
|
||||
// ClientPublishObserver func signature
|
||||
ClientPublishObserver func(context.Context, client.Message, []client.PublishOption, error) []string
|
||||
// ClientCallFuncObserver func signature
|
||||
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, error) []string
|
||||
// ServerHandlerObserver func signature
|
||||
ServerHandlerObserver func(context.Context, server.Request, interface{}, error) []string
|
||||
// ServerSubscriberObserver func signature
|
||||
ServerSubscriberObserver func(context.Context, server.Message, error) []string
|
||||
)
|
||||
|
||||
// Options struct for wrapper
|
||||
type Options struct {
|
||||
// Logger that used for log
|
||||
Logger logger.Logger
|
||||
// ServerHandlerObservers funcs
|
||||
ServerHandlerObservers []ServerHandlerObserver
|
||||
// ServerSubscriberObservers funcs
|
||||
ServerSubscriberObservers []ServerSubscriberObserver
|
||||
// ClientCallObservers funcs
|
||||
ClientCallObservers []ClientCallObserver
|
||||
// ClientStreamObservers funcs
|
||||
ClientStreamObservers []ClientStreamObserver
|
||||
// ClientPublishObservers funcs
|
||||
ClientPublishObservers []ClientPublishObserver
|
||||
// ClientCallFuncObservers funcs
|
||||
ClientCallFuncObservers []ClientCallFuncObserver
|
||||
// SkipEndpoints
|
||||
SkipEndpoints []string
|
||||
// Level for logger
|
||||
Level logger.Level
|
||||
// Enabled flag
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
// Option func signature
|
||||
type Option func(*Options)
|
||||
|
||||
// NewOptions creates Options from Option slice
|
||||
func NewOptions(opts ...Option) Options {
|
||||
options := Options{
|
||||
Logger: logger.DefaultLogger,
|
||||
Level: logger.TraceLevel,
|
||||
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
|
||||
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
|
||||
ClientPublishObservers: []ClientPublishObserver{DefaultClientPublishObserver},
|
||||
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
|
||||
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
|
||||
ServerSubscriberObservers: []ServerSubscriberObserver{DefaultServerSubscriberObserver},
|
||||
SkipEndpoints: DefaultSkipEndpoints,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
// WithEnabled enable/diable flag
|
||||
func WithEnabled(b bool) Option {
|
||||
return func(o *Options) {
|
||||
o.Enabled = b
|
||||
}
|
||||
}
|
||||
|
||||
// WithLevel log level
|
||||
func WithLevel(l logger.Level) Option {
|
||||
return func(o *Options) {
|
||||
o.Level = l
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogger logger
|
||||
func WithLogger(l logger.Logger) Option {
|
||||
return func(o *Options) {
|
||||
o.Logger = l
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientCallObservers funcs
|
||||
func WithClientCallObservers(ob ...ClientCallObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientCallObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientStreamObservers funcs
|
||||
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientStreamObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientPublishObservers funcs
|
||||
func WithClientPublishObservers(ob ...ClientPublishObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientPublishObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientCallFuncObservers funcs
|
||||
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientCallFuncObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithServerHandlerObservers funcs
|
||||
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ServerHandlerObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithServerSubscriberObservers funcs
|
||||
func WithServerSubscriberObservers(ob ...ServerSubscriberObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ServerSubscriberObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// SkipEndpoins
|
||||
func SkipEndpoints(eps ...string) Option {
|
||||
return func(o *Options) {
|
||||
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
|
||||
err := l.Client.Call(ctx, req, rsp, opts...)
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ClientCallObservers {
|
||||
labels = append(labels, o(ctx, req, rsp, opts, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *lWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
|
||||
stream, err := l.Client.Stream(ctx, req, opts...)
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return stream, err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return stream, err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ClientStreamObservers {
|
||||
labels = append(labels, o(ctx, req, opts, stream, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return stream, err
|
||||
}
|
||||
|
||||
func (l *lWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
|
||||
err := l.Client.Publish(ctx, msg, opts...)
|
||||
|
||||
endpoint := msg.Topic()
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ClientPublishObservers {
|
||||
labels = append(labels, o(ctx, msg, opts, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *lWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
|
||||
err := l.serverHandler(ctx, req, rsp)
|
||||
|
||||
endpoint := req.Endpoint()
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ServerHandlerObservers {
|
||||
labels = append(labels, o(ctx, req, rsp, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *lWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
|
||||
err := l.serverSubscriber(ctx, msg)
|
||||
|
||||
endpoint := msg.Topic()
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ServerSubscriberObservers {
|
||||
labels = append(labels, o(ctx, msg, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NewClientWrapper accepts an open options and returns a Client Wrapper
|
||||
func NewClientWrapper(opts ...Option) client.Wrapper {
|
||||
return func(c client.Client) client.Client {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
return &lWrapper{opts: options, Client: c}
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientCallWrapper accepts an options and returns a Call Wrapper
|
||||
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
|
||||
return func(h client.CallFunc) client.CallFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
l := &lWrapper{opts: options, clientCallFunc: h}
|
||||
return l.ClientCallFunc
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
|
||||
err := l.clientCallFunc(ctx, addr, req, rsp, opts)
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range l.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !l.opts.Enabled {
|
||||
return err
|
||||
}
|
||||
|
||||
var labels []string
|
||||
for _, o := range l.opts.ClientCallFuncObservers {
|
||||
labels = append(labels, o(ctx, addr, req, rsp, opts, err)...)
|
||||
}
|
||||
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NewServerHandlerWrapper accepts an options and returns a Handler Wrapper
|
||||
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
|
||||
return func(h server.HandlerFunc) server.HandlerFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
l := &lWrapper{opts: options, serverHandler: h}
|
||||
return l.ServerHandler
|
||||
}
|
||||
}
|
||||
|
||||
// NewServerSubscriberWrapper accepts an options and returns a Subscriber Wrapper
|
||||
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
|
||||
return func(h server.SubscriberFunc) server.SubscriberFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
l := &lWrapper{opts: options, serverSubscriber: h}
|
||||
return l.ServerSubscriber
|
||||
}
|
||||
}
|
@@ -55,10 +55,7 @@ func NewContext(ctx context.Context, md Metadata) context.Context {
    if ctx == nil {
        ctx = context.Background()
    }
    ctx = context.WithValue(ctx, mdKey{}, &rawMetadata{md})
    ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{})
    ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{})
    return ctx
    return context.WithValue(ctx, mdKey{}, &rawMetadata{md})
}

// SetOutgoingContext modify outgoing context with given metadata
@@ -90,11 +87,7 @@ func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
    if ctx == nil {
        ctx = context.Background()
    }
    ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{md})
    if v, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata); !ok || v == nil {
        ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{})
    }
    return ctx
    return context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{md})
}

// NewOutgoingContext creates a new context with outcoming metadata attached
@@ -102,11 +95,7 @@ func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
    if ctx == nil {
        ctx = context.Background()
    }
    ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{md})
    if v, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata); !ok || v == nil {
        ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{})
    }
    return ctx
    return context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{md})
}

// AppendOutgoingContext apends new md to context
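With this change each constructor only attaches its own metadata and no longer pre-creates the opposite direction, so code that needs both incoming and outgoing metadata has to set both explicitly. A short sketch of the new usage; the key and value are arbitrary.

package main

import (
    "context"

    "go.unistack.org/micro/v3/metadata"
)

func main() {
    md := metadata.New(1)
    md.Set("X-Request-Id", "1234")

    ctx := context.TODO()
    // Attach each direction separately; neither call creates the other anymore.
    ctx = metadata.NewIncomingContext(ctx, metadata.Copy(md))
    ctx = metadata.NewOutgoingContext(ctx, metadata.Copy(md))

    // Without the explicit NewOutgoingContext call above this lookup would fail.
    if _, ok := metadata.FromOutgoingContext(ctx); !ok {
        panic("missing outgoing metadata")
    }
}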
@@ -1,5 +1,5 @@
// Package metadata is a way of defining message headers
package metadata // import "go.unistack.org/micro/v3/metadata"
package metadata

import (
    "net/textproto"
@@ -5,6 +5,28 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMultipleUsage(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
md := New(0)
|
||||
md.Set("key1_1", "val1_1", "key1_2", "val1_2", "key1_3", "val1_3")
|
||||
ctx = NewIncomingContext(ctx, Copy(md))
|
||||
ctx = NewOutgoingContext(ctx, Copy(md))
|
||||
imd, _ := FromIncomingContext(ctx)
|
||||
omd, _ := FromOutgoingContext(ctx)
|
||||
_ = func(x context.Context) context.Context {
|
||||
m, _ := FromIncomingContext(x)
|
||||
m.Del("key1_2")
|
||||
return ctx
|
||||
}(ctx)
|
||||
_ = func(x context.Context) context.Context {
|
||||
m, _ := FromIncomingContext(x)
|
||||
m.Del("key1_3")
|
||||
return ctx
|
||||
}(ctx)
|
||||
t.Logf("imd %#+v", imd)
|
||||
t.Logf("omd %#+v", omd)
|
||||
}
|
||||
|
||||
func TestMetadataSetMultiple(t *testing.T) {
|
||||
md := New(4)
|
||||
md.Set("key1", "val1", "key2", "val2", "key3")
|
||||
@@ -58,6 +80,14 @@ func TestPassing(t *testing.T) {
|
||||
ctx = NewIncomingContext(ctx, md1)
|
||||
testCtx(ctx)
|
||||
md, ok := FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
t.Fatalf("create outgoing context")
|
||||
}
|
||||
_ = md
|
||||
|
||||
ctx = NewOutgoingContext(ctx, New(1))
|
||||
testCtx(ctx)
|
||||
md, ok = FromOutgoingContext(ctx)
|
||||
if !ok {
|
||||
t.Fatalf("missing metadata from outgoing context")
|
||||
}
|
||||
|
@@ -16,10 +16,8 @@ var (
|
||||
DefaultAddress = ":9090"
|
||||
// DefaultPath the meter endpoint where the Meter data will be made available
|
||||
DefaultPath = "/metrics"
|
||||
// DefaultMetricPrefix holds the string that prepends to all metrics
|
||||
DefaultMetricPrefix = "micro_"
|
||||
// DefaultLabelPrefix holds the string that prepends to all labels
|
||||
DefaultLabelPrefix = "micro_"
|
||||
// DefaultMeterStatsInterval specifies interval for meter updating
|
||||
DefaultMeterStatsInterval = 5 * time.Second
|
||||
// DefaultSummaryQuantiles is the default spread of stats for summary
|
||||
DefaultSummaryQuantiles = []float64{0.5, 0.9, 0.97, 0.99, 1}
|
||||
// DefaultSummaryWindow is the default window for summary
|
||||
|
@@ -17,10 +17,6 @@ type Options struct {
|
||||
Address string
|
||||
// Path holds the path for metrics
|
||||
Path string
|
||||
// MetricPrefix holds the prefix for all metrics
|
||||
MetricPrefix string
|
||||
// LabelPrefix holds the prefix for all labels
|
||||
LabelPrefix string
|
||||
// Labels holds the default labels
|
||||
Labels []string
|
||||
// WriteProcessMetrics flag to write process metrics
|
||||
@@ -32,11 +28,9 @@ type Options struct {
|
||||
// NewOptions prepares a set of options:
|
||||
func NewOptions(opt ...Option) Options {
|
||||
opts := Options{
|
||||
Address: DefaultAddress,
|
||||
Path: DefaultPath,
|
||||
Context: context.Background(),
|
||||
MetricPrefix: DefaultMetricPrefix,
|
||||
LabelPrefix: DefaultLabelPrefix,
|
||||
Address: DefaultAddress,
|
||||
Path: DefaultPath,
|
||||
Context: context.Background(),
|
||||
}
|
||||
|
||||
for _, o := range opt {
|
||||
@@ -46,20 +40,6 @@ func NewOptions(opt ...Option) Options {
|
||||
return opts
|
||||
}
|
||||
|
||||
// LabelPrefix sets the labels prefix
|
||||
func LabelPrefix(pref string) Option {
|
||||
return func(o *Options) {
|
||||
o.LabelPrefix = pref
|
||||
}
|
||||
}
|
||||
|
||||
// MetricPrefix sets the metric prefix
|
||||
func MetricPrefix(pref string) Option {
|
||||
return func(o *Options) {
|
||||
o.MetricPrefix = pref
|
||||
}
|
||||
}
|
||||
|
||||
// Context sets the metrics context
|
||||
func Context(ctx context.Context) Option {
|
||||
return func(o *Options) {
|
||||
@@ -90,7 +70,7 @@ func TimingObjectives(value map[float64]float64) Option {
|
||||
}
|
||||
*/
|
||||
|
||||
// Labels sets the meter labels
|
||||
// Labels add the meter labels
|
||||
func Labels(ls ...string) Option {
|
||||
return func(o *Options) {
|
||||
o.Labels = append(o.Labels, ls...)
|
||||
|
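Since Labels appends to the existing label set, meter options can be layered; a small sketch inside the meter package (label names are arbitrary).

// Both pairs end up in opts.Labels because Labels appends rather than replaces.
opts := NewOptions(
    Labels("service", "example"),
    Labels("version", "v3"),
)
_ = opts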
@@ -1,347 +0,0 @@
|
||||
package wrapper // import "go.unistack.org/micro/v3/meter/wrapper"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.unistack.org/micro/v3/client"
|
||||
"go.unistack.org/micro/v3/meter"
|
||||
"go.unistack.org/micro/v3/server"
|
||||
)
|
||||
|
||||
var (
|
||||
// ClientRequestDurationSeconds specifies meter metric name
|
||||
ClientRequestDurationSeconds = "client_request_duration_seconds"
|
||||
// ClientRequestLatencyMicroseconds specifies meter metric name
|
||||
ClientRequestLatencyMicroseconds = "client_request_latency_microseconds"
|
||||
// ClientRequestTotal specifies meter metric name
|
||||
ClientRequestTotal = "client_request_total"
|
||||
// ClientRequestInflight specifies meter metric name
|
||||
ClientRequestInflight = "client_request_inflight"
|
||||
// ServerRequestDurationSeconds specifies meter metric name
|
||||
ServerRequestDurationSeconds = "server_request_duration_seconds"
|
||||
// ServerRequestLatencyMicroseconds specifies meter metric name
|
||||
ServerRequestLatencyMicroseconds = "server_request_latency_microseconds"
|
||||
// ServerRequestTotal specifies meter metric name
|
||||
ServerRequestTotal = "server_request_total"
|
||||
// ServerRequestInflight specifies meter metric name
|
||||
ServerRequestInflight = "server_request_inflight"
|
||||
// PublishMessageDurationSeconds specifies meter metric name
|
||||
PublishMessageDurationSeconds = "publish_message_duration_seconds"
|
||||
// PublishMessageLatencyMicroseconds specifies meter metric name
|
||||
PublishMessageLatencyMicroseconds = "publish_message_latency_microseconds"
|
||||
// PublishMessageTotal specifies meter metric name
|
||||
PublishMessageTotal = "publish_message_total"
|
||||
// PublishMessageInflight specifies meter metric name
|
||||
PublishMessageInflight = "publish_message_inflight"
|
||||
// SubscribeMessageDurationSeconds specifies meter metric name
|
||||
SubscribeMessageDurationSeconds = "subscribe_message_duration_seconds"
|
||||
// SubscribeMessageLatencyMicroseconds specifies meter metric name
|
||||
SubscribeMessageLatencyMicroseconds = "subscribe_message_latency_microseconds"
|
||||
// SubscribeMessageTotal specifies meter metric name
|
||||
SubscribeMessageTotal = "subscribe_message_total"
|
||||
// SubscribeMessageInflight specifies meter metric name
|
||||
SubscribeMessageInflight = "subscribe_message_inflight"
|
||||
|
||||
labelSuccess = "success"
|
||||
labelFailure = "failure"
|
||||
labelStatus = "status"
|
||||
labelEndpoint = "endpoint"
|
||||
|
||||
// DefaultSkipEndpoints contains list of endpoints that not evaluted by wrapper
|
||||
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
|
||||
)
|
||||
|
||||
// Options struct
|
||||
type Options struct {
|
||||
Meter meter.Meter
|
||||
lopts []meter.Option
|
||||
SkipEndpoints []string
|
||||
}
|
||||
|
||||
// Option func signature
|
||||
type Option func(*Options)
|
||||
|
||||
// NewOptions creates new Options struct
|
||||
func NewOptions(opts ...Option) Options {
|
||||
options := Options{
|
||||
Meter: meter.DefaultMeter,
|
||||
lopts: make([]meter.Option, 0, 5),
|
||||
SkipEndpoints: DefaultSkipEndpoints,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// ServiceName passes service name to meter label
|
||||
func ServiceName(name string) Option {
|
||||
return func(o *Options) {
|
||||
o.lopts = append(o.lopts, meter.Labels("name", name))
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceVersion passes service version to meter label
|
||||
func ServiceVersion(version string) Option {
|
||||
return func(o *Options) {
|
||||
o.lopts = append(o.lopts, meter.Labels("version", version))
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceID passes service id to meter label
|
||||
func ServiceID(id string) Option {
|
||||
return func(o *Options) {
|
||||
o.lopts = append(o.lopts, meter.Labels("id", id))
|
||||
}
|
||||
}
|
||||
|
||||
// Meter passes meter
|
||||
func Meter(m meter.Meter) Option {
|
||||
return func(o *Options) {
|
||||
o.Meter = m
|
||||
}
|
||||
}
|
||||
|
||||
// SkipEndoints add endpoint to skip
|
||||
func SkipEndoints(eps ...string) Option {
|
||||
return func(o *Options) {
|
||||
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
|
||||
}
|
||||
}
|
||||
|
||||
type wrapper struct {
|
||||
client.Client
|
||||
callFunc client.CallFunc
|
||||
opts Options
|
||||
}
|
||||
|
||||
// NewClientWrapper create new client wrapper
|
||||
func NewClientWrapper(opts ...Option) client.Wrapper {
|
||||
return func(c client.Client) client.Client {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
Client: c,
|
||||
}
|
||||
return handler
|
||||
}
|
||||
}
|
||||
|
||||
// NewCallWrapper create new call wrapper
|
||||
func NewCallWrapper(opts ...Option) client.CallWrapper {
|
||||
return func(fn client.CallFunc) client.CallFunc {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
callFunc: fn,
|
||||
}
|
||||
return handler.CallFunc
|
||||
}
|
||||
}
|
||||
|
||||
func (w *wrapper) CallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range w.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return w.callFunc(ctx, addr, req, rsp, opts)
|
||||
}
|
||||
}
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := w.callFunc(ctx, addr, req, rsp, opts)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *wrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range w.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return w.Client.Call(ctx, req, rsp, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := w.Client.Call(ctx, req, rsp, opts...)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *wrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range w.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return w.Client.Stream(ctx, req, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
stream, err := w.Client.Stream(ctx, req, opts...)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
|
||||
|
||||
return stream, err
|
||||
}
|
||||
|
||||
func (w *wrapper) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
|
||||
endpoint := p.Topic()
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(PublishMessageInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := w.Client.Publish(ctx, p, opts...)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(PublishMessageInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(PublishMessageLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(PublishMessageDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(PublishMessageTotal, labels...).Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NewHandlerWrapper create new server handler wrapper
|
||||
// deprecated
|
||||
func NewHandlerWrapper(opts ...Option) server.HandlerWrapper {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
}
|
||||
return handler.HandlerFunc
|
||||
}
|
||||
|
||||
// NewServerHandlerWrapper create new server handler wrapper
|
||||
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
}
|
||||
return handler.HandlerFunc
|
||||
}
|
||||
|
||||
func (w *wrapper) HandlerFunc(fn server.HandlerFunc) server.HandlerFunc {
|
||||
return func(ctx context.Context, req server.Request, rsp interface{}) error {
|
||||
endpoint := req.Service() + "." + req.Endpoint()
|
||||
for _, ep := range w.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return fn(ctx, req, rsp)
|
||||
}
|
||||
}
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(ServerRequestInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := fn(ctx, req, rsp)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(ServerRequestInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(ServerRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(ServerRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(ServerRequestTotal, labels...).Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// NewSubscriberWrapper create server subscribe wrapper
|
||||
// deprecated
|
||||
func NewSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
}
|
||||
return handler.SubscriberFunc
|
||||
}
|
||||
|
||||
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
|
||||
handler := &wrapper{
|
||||
opts: NewOptions(opts...),
|
||||
}
|
||||
return handler.SubscriberFunc
|
||||
}
|
||||
|
||||
func (w *wrapper) SubscriberFunc(fn server.SubscriberFunc) server.SubscriberFunc {
|
||||
return func(ctx context.Context, msg server.Message) error {
|
||||
endpoint := msg.Topic()
|
||||
|
||||
labels := make([]string, 0, 4)
|
||||
labels = append(labels, labelEndpoint, endpoint)
|
||||
|
||||
w.opts.Meter.Counter(SubscribeMessageInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := fn(ctx, msg)
|
||||
te := time.Since(ts)
|
||||
w.opts.Meter.Counter(SubscribeMessageInflight, labels...).Dec()
|
||||
|
||||
w.opts.Meter.Summary(SubscribeMessageLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
w.opts.Meter.Histogram(SubscribeMessageDurationSeconds, labels...).Update(te.Seconds())
|
||||
|
||||
if err == nil {
|
||||
labels = append(labels, labelStatus, labelSuccess)
|
||||
} else {
|
||||
labels = append(labels, labelStatus, labelFailure)
|
||||
}
|
||||
w.opts.Meter.Counter(SubscribeMessageTotal, labels...).Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
micro.go
@@ -65,6 +65,8 @@ func As(b any, target any) bool {
        break
    case targetType.Implements(routerType):
        break
    case targetType.Implements(tracerType):
        break
    default:
        return false
    }
@@ -76,19 +78,21 @@ func As(b any, target any) bool {
    return false
}

var brokerType = reflect.TypeOf((*broker.Broker)(nil)).Elem()
var loggerType = reflect.TypeOf((*logger.Logger)(nil)).Elem()
var clientType = reflect.TypeOf((*client.Client)(nil)).Elem()
var serverType = reflect.TypeOf((*server.Server)(nil)).Elem()
var codecType = reflect.TypeOf((*codec.Codec)(nil)).Elem()
var flowType = reflect.TypeOf((*flow.Flow)(nil)).Elem()
var fsmType = reflect.TypeOf((*fsm.FSM)(nil)).Elem()
var meterType = reflect.TypeOf((*meter.Meter)(nil)).Elem()
var registerType = reflect.TypeOf((*register.Register)(nil)).Elem()
var resolverType = reflect.TypeOf((*resolver.Resolver)(nil)).Elem()
var routerType = reflect.TypeOf((*router.Router)(nil)).Elem()
var selectorType = reflect.TypeOf((*selector.Selector)(nil)).Elem()
var storeType = reflect.TypeOf((*store.Store)(nil)).Elem()
var syncType = reflect.TypeOf((*sync.Sync)(nil)).Elem()
var tracerType = reflect.TypeOf((*tracer.Tracer)(nil)).Elem()
var serviceType = reflect.TypeOf((*Service)(nil)).Elem()
var (
    brokerType = reflect.TypeOf((*broker.Broker)(nil)).Elem()
    loggerType = reflect.TypeOf((*logger.Logger)(nil)).Elem()
    clientType = reflect.TypeOf((*client.Client)(nil)).Elem()
    serverType = reflect.TypeOf((*server.Server)(nil)).Elem()
    codecType = reflect.TypeOf((*codec.Codec)(nil)).Elem()
    flowType = reflect.TypeOf((*flow.Flow)(nil)).Elem()
    fsmType = reflect.TypeOf((*fsm.FSM)(nil)).Elem()
    meterType = reflect.TypeOf((*meter.Meter)(nil)).Elem()
    registerType = reflect.TypeOf((*register.Register)(nil)).Elem()
    resolverType = reflect.TypeOf((*resolver.Resolver)(nil)).Elem()
    routerType = reflect.TypeOf((*router.Router)(nil)).Elem()
    selectorType = reflect.TypeOf((*selector.Selector)(nil)).Elem()
    storeType = reflect.TypeOf((*store.Store)(nil)).Elem()
    syncType = reflect.TypeOf((*sync.Sync)(nil)).Elem()
    tracerType = reflect.TypeOf((*tracer.Tracer)(nil)).Elem()
    serviceType = reflect.TypeOf((*Service)(nil)).Elem()
)
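The var block above feeds the type switch in As; a hedged sketch of a typical call follows, where the value being inspected and the surrounding context are illustrative only.

// Sketch: pull a concrete logger.Logger out of an untyped component value.
// As reports false when the target pointer's interface type is not one of the
// kinds handled by the switch above.
var l logger.Logger
if As(component, &l) { // component is any value that may satisfy logger.Logger
    l.Info(ctx, "logger extracted via As")
}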
@@ -66,6 +66,12 @@ type bro struct {

func (p *bro) Name() string { return p.name }

func (p *bro) Live() bool { return true }

func (p *bro) Ready() bool { return true }

func (p *bro) Health() bool { return true }

func (p *bro) Init(opts ...broker.Option) error { return nil }

// Options returns broker options

@@ -1,4 +1,4 @@
package mtls // import "go.unistack.org/micro/v3/mtls"
package mtls

import (
    "bytes"

@@ -1,5 +1,5 @@
// Package network is for creating internetworks
package network // import "go.unistack.org/micro/v3/network"
package network

import (
    "go.unistack.org/micro/v3/client"

@@ -1,5 +1,5 @@
// Package transport is an interface for synchronous connection based communication
package transport // import "go.unistack.org/micro/v3/network/transport"
package transport

import (
    "context"

@@ -1,5 +1,5 @@
// Package broker is a tunnel broker
package broker // import "go.unistack.org/micro/v3/network/tunnel/broker"
package broker

import (
    "context"
@@ -45,6 +45,18 @@ type (
    tunnelAddr struct{}
)

func (t *tunBroker) Live() bool {
    return true
}

func (t *tunBroker) Ready() bool {
    return true
}

func (t *tunBroker) Health() bool {
    return true
}
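Live, Ready and Health give every broker the same health surface; a minimal sketch of a readiness helper built on them (the helper name is made up).

// brokerUsable is a hypothetical helper that mirrors the trivial
// implementations above: the broker counts as usable only when it is
// live, ready and healthy.
func brokerUsable(b broker.Broker) bool {
    return b.Live() && b.Ready() && b.Health()
}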
func (t *tunBroker) Init(opts ...broker.Option) error {
    for _, o := range opts {
        o(&t.opts)
@@ -177,12 +189,12 @@ func (t *tunBatchSubscriber) run() {
        // receive message
        m := new(transport.Message)
        if err := c.Recv(m); err != nil {
            if logger.V(logger.ErrorLevel) {
                logger.Error(t.opts.Context, err.Error())
            if logger.DefaultLogger.V(logger.ErrorLevel) {
                logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
            }
            if err = c.Close(); err != nil {
                if logger.V(logger.ErrorLevel) {
                    logger.Error(t.opts.Context, err.Error())
                if logger.DefaultLogger.V(logger.ErrorLevel) {
                    logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
                }
            }
            continue
@@ -222,12 +234,12 @@ func (t *tunSubscriber) run() {
        // receive message
        m := new(transport.Message)
        if err := c.Recv(m); err != nil {
            if logger.V(logger.ErrorLevel) {
                logger.Error(t.opts.Context, err.Error())
            if logger.DefaultLogger.V(logger.ErrorLevel) {
                logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
            }
            if err = c.Close(); err != nil {
                if logger.V(logger.ErrorLevel) {
                    logger.Error(t.opts.Context, err.Error())
                if logger.DefaultLogger.V(logger.ErrorLevel) {
                    logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
                }
            }
            continue
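The hunks above replace the package-level logging helpers with explicit calls on logger.DefaultLogger and pass the error value as an attribute; the same migration, shown in isolation as a sketch.

// before
if logger.V(logger.ErrorLevel) {
    logger.Error(ctx, err.Error())
}

// after: the level check and the call go through DefaultLogger, and the
// error itself is passed so it is recorded under the logger's error key.
if logger.DefaultLogger.V(logger.ErrorLevel) {
    logger.DefaultLogger.Error(ctx, err.Error(), err)
}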
@@ -1,5 +1,5 @@
|
||||
// Package transport provides a tunnel transport
|
||||
package transport // import "go.unistack.org/micro/v3/network/tunnel/transport"
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,5 +1,5 @@
|
||||
// Package tunnel provides gre network tunnelling
|
||||
package tunnel // import "go.unistack.org/micro/v3/network/transport/tunnel"
|
||||
package tunnel
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -269,7 +269,7 @@ func Logger(l logger.Logger, opts ...LoggerOption) Option {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
for _, trc := range o.Tracers {
|
||||
for _, ot := range lopts.tracers {
|
||||
if trc.Name() == ot || all {
|
||||
@@ -294,8 +294,8 @@ type loggerOptions struct {
|
||||
brokers []string
|
||||
registers []string
|
||||
stores []string
|
||||
meters []string
|
||||
tracers []string
|
||||
// meters []string
|
||||
tracers []string
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -1,5 +1,5 @@
|
||||
// Package http enables the http profiler
|
||||
package http // import "go.unistack.org/micro/v3/profiler/http"
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,5 +1,5 @@
|
||||
// Package pprof provides a pprof profiler which writes output to /tmp/[name].{cpu,mem}.pprof
|
||||
package pprof // import "go.unistack.org/micro/v3/profiler/pprof"
|
||||
package pprof
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
@@ -1,5 +1,5 @@
|
||||
// Package profiler is for profilers
|
||||
package profiler // import "go.unistack.org/micro/v3/profiler"
|
||||
package profiler
|
||||
|
||||
// Profiler interface
|
||||
type Profiler interface {
|
||||
|
@@ -1,5 +1,5 @@
// Package proxy is a transparent proxy built on the micro/server
package proxy // import "go.unistack.org/micro/v3/proxy"
package proxy

import (
	"context"
@@ -2,6 +2,7 @@ package register

import (
	"context"
	"fmt"
	"sync"
	"time"

@@ -64,7 +65,7 @@ func (m *memory) ttlPrune() {
|
||||
for id, n := range record.Nodes {
|
||||
if n.TTL != 0 && time.Since(n.LastSeen) > n.TTL {
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register TTL expired for node %s of service %s", n.ID, service)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register TTL expired for node %s of service %s", n.ID, service))
|
||||
}
|
||||
delete(m.records[domain][service][version].Nodes, id)
|
||||
}
|
||||
@@ -151,7 +152,7 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi
|
||||
if _, ok := srvs[s.Name][s.Version]; !ok {
|
||||
srvs[s.Name][s.Version] = r
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new service: %s, version: %s", s.Name, s.Version))
|
||||
}
|
||||
m.records[options.Domain] = srvs
|
||||
go m.sendEvent(®ister.Result{Action: "create", Service: s})
|
||||
@@ -191,14 +192,14 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi
|
||||
|
||||
if addedNodes {
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new node to service: %s, version: %s", s.Name, s.Version))
|
||||
}
|
||||
go m.sendEvent(®ister.Result{Action: "update", Service: s})
|
||||
} else {
|
||||
// refresh TTL and timestamp
|
||||
for _, n := range s.Nodes {
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Updated registration for service: %s, version: %s", s.Name, s.Version)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Updated registration for service: %s, version: %s", s.Name, s.Version))
|
||||
}
|
||||
srvs[s.Name][s.Version].Nodes[n.ID].TTL = options.TTL
|
||||
srvs[s.Name][s.Version].Nodes[n.ID].LastSeen = time.Now()
|
||||
@@ -243,7 +244,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
|
||||
for _, n := range s.Nodes {
|
||||
if _, ok := version.Nodes[n.ID]; ok {
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register removed node from service: %s, version: %s", s.Name, s.Version)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed node from service: %s, version: %s", s.Name, s.Version))
|
||||
}
|
||||
delete(version.Nodes, n.ID)
|
||||
}
|
||||
@@ -264,7 +265,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
|
||||
go m.sendEvent(®ister.Result{Action: "delete", Service: s})
|
||||
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s", s.Name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -273,7 +274,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
|
||||
delete(m.records[options.Domain][s.Name], s.Version)
|
||||
go m.sendEvent(®ister.Result{Action: "delete", Service: s})
|
||||
if m.opts.Logger.V(logger.DebugLevel) {
|
||||
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version)
|
||||
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s, version: %s", s.Name, s.Version))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -468,9 +469,7 @@ func serviceToRecord(s *register.Service, ttl time.Duration) *record {
|
||||
}
|
||||
|
||||
endpoints := make([]*register.Endpoint, len(s.Endpoints))
|
||||
for i, e := range s.Endpoints {
|
||||
endpoints[i] = e
|
||||
}
|
||||
copy(endpoints, s.Endpoints)
|
||||
|
||||
return &record{
|
||||
Name: s.Name,
|
||||
|
@@ -290,27 +290,25 @@ func TestWatcher(t *testing.T) {
|
||||
|
||||
ctx := context.TODO()
|
||||
m := NewRegister()
|
||||
m.Init()
|
||||
m.Connect(ctx)
|
||||
_ = m.Init()
|
||||
_ = m.Connect(ctx)
|
||||
wc, err := m.Watch(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("cant watch: %v", err)
|
||||
}
|
||||
defer wc.Stop()
|
||||
|
||||
cherr := make(chan error, 10)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
for {
|
||||
_, err := wc.Next()
|
||||
if err != nil {
|
||||
t.Fatal("unexpected err", err)
|
||||
}
|
||||
// t.Logf("changes %#+v", ch.Service)
|
||||
wc.Stop()
|
||||
wg.Done()
|
||||
return
|
||||
_, err := wc.Next()
|
||||
if err != nil {
|
||||
cherr <- fmt.Errorf("unexpected err %v", err)
|
||||
}
|
||||
// t.Logf("changes %#+v", ch.Service)
|
||||
wc.Stop()
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
if err := m.Register(ctx, testSrv); err != nil {
|
||||
|
@@ -1,5 +1,5 @@
// Package register is an interface for service discovery
package register // import "go.unistack.org/micro/v3/register"
package register

import (
	"context"
@@ -29,17 +29,32 @@ var (
// and an abstraction over varying implementations
// {consul, etcd, zookeeper, ...}
type Register interface {
	// Name returns register name
	Name() string
	// Init initializes register
	Init(...Option) error
	// Options returns options for register
	Options() Options
	// Connect initializes connection to register
	Connect(context.Context) error
	// Disconnect initializes disconnection from register
	Disconnect(context.Context) error
	// Register service in registry
	Register(context.Context, *Service, ...RegisterOption) error
	// Deregister service from registry
	Deregister(context.Context, *Service, ...DeregisterOption) error
	// LookupService in registry
	LookupService(context.Context, string, ...LookupOption) ([]*Service, error)
	// ListServices in registry
	ListServices(context.Context, ...ListOption) ([]*Service, error)
	// Watch registry events
	Watch(context.Context, ...WatchOption) (Watcher, error)
	// String returns registry string representation
	String() string
	// Live returns register liveness
	// Live() bool
	// Ready returns register readiness
	// Ready() bool
}

// Service holds service register info
@@ -1,5 +1,5 @@
|
||||
// Package dns resolves names to dns records
|
||||
package dns // import "go.unistack.org/micro/v3/resolver/dns"
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
|
||||
// Resolver is a DNS network resolve
|
||||
type Resolver struct {
|
||||
sync.RWMutex
|
||||
goresolver *net.Resolver
|
||||
Address string
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Resolve tries to resolve endpoint address
|
||||
@@ -39,12 +39,12 @@ func (r *Resolver) Resolve(name string) ([]*resolver.Record, error) {
|
||||
return []*resolver.Record{rec}, nil
|
||||
}
|
||||
|
||||
r.RLock()
|
||||
r.mu.RLock()
|
||||
goresolver := r.goresolver
|
||||
r.RUnlock()
|
||||
r.mu.RUnlock()
|
||||
|
||||
if goresolver == nil {
|
||||
r.Lock()
|
||||
r.mu.Lock()
|
||||
r.goresolver = &net.Resolver{
|
||||
Dial: func(ctx context.Context, _ string, _ string) (net.Conn, error) {
|
||||
d := net.Dialer{
|
||||
@@ -53,7 +53,7 @@ func (r *Resolver) Resolve(name string) ([]*resolver.Record, error) {
|
||||
return d.DialContext(ctx, "udp", r.Address)
|
||||
},
|
||||
}
|
||||
r.Unlock()
|
||||
r.mu.Unlock()
|
||||
}
|
||||
|
||||
addrs, err := goresolver.LookupIP(context.TODO(), "ip", host)
|
||||
|
@@ -1,5 +1,5 @@
// Package dnssrv resolves names to dns srv records
package dnssrv // import "go.unistack.org/micro/v3/resolver/dnssrv"
package dnssrv

import (
	"fmt"
@@ -1,5 +1,5 @@
// Package http resolves names to network addresses using a http request
package http // import "go.unistack.org/micro/v3/resolver/http"
package http

import (
	"encoding/json"
@@ -1,5 +1,5 @@
// Package noop is a noop resolver
package noop // import "go.unistack.org/micro/v3/resolver/noop"
package noop

import (
	"go.unistack.org/micro/v3/resolver"
@@ -1,5 +1,5 @@
// Package register resolves names using the micro register
package register // import "go.unistack.org/micro/v3/resolver/registry"
package register

import (
	"context"
@@ -1,5 +1,5 @@
// Package static is a static resolver
package static // import "go.unistack.org/micro/v3/resolver/static"
package static

import (
	"go.unistack.org/micro/v3/resolver"
@@ -1,5 +1,5 @@
// Package router provides a network routing control plane
package router // import "go.unistack.org/micro/v3/router"
package router

import (
	"errors"
@@ -1,4 +1,4 @@
package random // import "go.unistack.org/micro/v3/selector/random"
package random

import (
	"go.unistack.org/micro/v3/selector"
@@ -1,4 +1,4 @@
package roundrobin // import "go.unistack.org/micro/v3/selector/roundrobin"
package roundrobin

import (
	"go.unistack.org/micro/v3/selector"
@@ -1,5 +1,5 @@
// Package selector is for node selection and load balancing
package selector // import "go.unistack.org/micro/v3/selector"
package selector

import (
	"errors"
@@ -2,21 +2,21 @@ package semconv

var (
	// PublishMessageDurationSeconds specifies meter metric name
	PublishMessageDurationSeconds = "publish_message_duration_seconds"
	PublishMessageDurationSeconds = "micro_publish_message_duration_seconds"
	// PublishMessageLatencyMicroseconds specifies meter metric name
	PublishMessageLatencyMicroseconds = "publish_message_latency_microseconds"
	PublishMessageLatencyMicroseconds = "micro_publish_message_latency_microseconds"
	// PublishMessageTotal specifies meter metric name
	PublishMessageTotal = "publish_message_total"
	PublishMessageTotal = "micro_publish_message_total"
	// PublishMessageInflight specifies meter metric name
	PublishMessageInflight = "publish_message_inflight"
	PublishMessageInflight = "micro_publish_message_inflight"
	// SubscribeMessageDurationSeconds specifies meter metric name
	SubscribeMessageDurationSeconds = "subscribe_message_duration_seconds"
	SubscribeMessageDurationSeconds = "micro_subscribe_message_duration_seconds"
	// SubscribeMessageLatencyMicroseconds specifies meter metric name
	SubscribeMessageLatencyMicroseconds = "subscribe_message_latency_microseconds"
	SubscribeMessageLatencyMicroseconds = "micro_subscribe_message_latency_microseconds"
	// SubscribeMessageTotal specifies meter metric name
	SubscribeMessageTotal = "subscribe_message_total"
	SubscribeMessageTotal = "micro_subscribe_message_total"
	// SubscribeMessageInflight specifies meter metric name
	SubscribeMessageInflight = "subscribe_message_inflight"
	SubscribeMessageInflight = "micro_subscribe_message_inflight"
	// BrokerGroupLag specifies broker lag
	BrokerGroupLag = "broker_group_lag"
	BrokerGroupLag = "micro_broker_group_lag"
)
@@ -2,11 +2,11 @@ package semconv

var (
	// ClientRequestDurationSeconds specifies meter metric name
	ClientRequestDurationSeconds = "client_request_duration_seconds"
	ClientRequestDurationSeconds = "micro_client_request_duration_seconds"
	// ClientRequestLatencyMicroseconds specifies meter metric name
	ClientRequestLatencyMicroseconds = "client_request_latency_microseconds"
	ClientRequestLatencyMicroseconds = "micro_client_request_latency_microseconds"
	// ClientRequestTotal specifies meter metric name
	ClientRequestTotal = "client_request_total"
	ClientRequestTotal = "micro_client_request_total"
	// ClientRequestInflight specifies meter metric name
	ClientRequestInflight = "client_request_inflight"
	ClientRequestInflight = "micro_client_request_inflight"
)
@@ -1,4 +1,4 @@
package semconv

// LoggerMessageTotal specifies meter metric name for logger messages
var LoggerMessageTotal = "logger_message_total"
var LoggerMessageTotal = "micro_logger_message_total"
12	semconv/pool.go	Normal file
@@ -0,0 +1,12 @@
package semconv

var (
	// PoolGetTotal specifies meter metric name for total number of pool get ops
	PoolGetTotal = "micro_pool_get_total"
	// PoolPutTotal specifies meter metric name for total number of pool put ops
	PoolPutTotal = "micro_pool_put_total"
	// PoolMisTotal specifies meter metric name for total number of pool misses
	PoolMisTotal = "micro_pool_mis_total"
	// PoolRetTotal specifies meter metric name for total number of pool returned to gc
	PoolRetTotal = "micro_pool_ret_total"
)
@@ -2,11 +2,11 @@ package semconv

var (
	// ServerRequestDurationSeconds specifies meter metric name
	ServerRequestDurationSeconds = "server_request_duration_seconds"
	ServerRequestDurationSeconds = "micro_server_request_duration_seconds"
	// ServerRequestLatencyMicroseconds specifies meter metric name
	ServerRequestLatencyMicroseconds = "server_request_latency_microseconds"
	ServerRequestLatencyMicroseconds = "micro_server_request_latency_microseconds"
	// ServerRequestTotal specifies meter metric name
	ServerRequestTotal = "server_request_total"
	ServerRequestTotal = "micro_server_request_total"
	// ServerRequestInflight specifies meter metric name
	ServerRequestInflight = "server_request_inflight"
	ServerRequestInflight = "micro_server_request_inflight"
)
@@ -2,11 +2,11 @@ package semconv

var (
	// StoreRequestDurationSeconds specifies meter metric name
	StoreRequestDurationSeconds = "store_request_duration_seconds"
	StoreRequestDurationSeconds = "micro_store_request_duration_seconds"
	// StoreRequestLatencyMicroseconds specifies meter metric name
	StoreRequestLatencyMicroseconds = "store_request_latency_microseconds"
	StoreRequestLatencyMicroseconds = "micro_store_request_latency_microseconds"
	// StoreRequestTotal specifies meter metric name
	StoreRequestTotal = "store_request_total"
	StoreRequestTotal = "micro_store_request_total"
	// StoreRequestInflight specifies meter metric name
	StoreRequestInflight = "store_request_inflight"
	StoreRequestInflight = "micro_store_request_inflight"
)
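Editorial note: every semconv metric name in the hunks above gains a micro_ prefix, so dashboards, alerts and recording rules keyed on the old names will stop matching after an upgrade. The sketch below is illustrative only; the old-to-new mapping for the store metrics is copied verbatim from this diff, while the package, function and variable names are assumptions made for the example.

package main

import "fmt"

// renamedStoreMetrics maps the old store metric names to the new
// micro_-prefixed names introduced in this change (values copied from the diff).
var renamedStoreMetrics = map[string]string{
	"store_request_duration_seconds":     "micro_store_request_duration_seconds",
	"store_request_latency_microseconds": "micro_store_request_latency_microseconds",
	"store_request_total":                "micro_store_request_total",
	"store_request_inflight":             "micro_store_request_inflight",
}

func main() {
	// Print the renames, e.g. to review PromQL expressions that need updating.
	for oldName, newName := range renamedStoreMetrics {
		fmt.Printf("%s -> %s\n", oldName, newName)
	}
}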
@@ -121,6 +121,18 @@ func (n *noopServer) newCodec(contentType string) (codec.Codec, error) {
|
||||
return nil, codec.ErrUnknownContentType
|
||||
}
|
||||
|
||||
func (n *noopServer) Live() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *noopServer) Ready() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *noopServer) Health() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *noopServer) Handle(handler Handler) error {
|
||||
n.h = handler
|
||||
return nil
|
||||
@@ -159,7 +171,6 @@ type rpcMessage struct {
|
||||
header metadata.Metadata
|
||||
topic string
|
||||
contentType string
|
||||
body []byte
|
||||
}
|
||||
|
||||
func (r *rpcMessage) ContentType() string {
|
||||
@@ -459,7 +470,7 @@ func (n *noopServer) Start() error {
|
||||
}
|
||||
} else if rerr != nil && !registered {
|
||||
if config.Logger.V(logger.ErrorLevel) {
|
||||
config.Logger.Errorf(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
|
||||
config.Logger.Error(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@@ -38,7 +38,7 @@ func TestNoopSub(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
logger.DefaultLogger.Init(logger.WithLevel(logger.ErrorLevel))
|
||||
_ = logger.DefaultLogger.Init(logger.WithLevel(logger.ErrorLevel))
|
||||
s := server.NewServer(
|
||||
server.Broker(b),
|
||||
server.Codec("application/octet-stream", codec.NewCodec()),
|
||||
|
@@ -12,7 +12,6 @@ import (
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
"go.unistack.org/micro/v3/metadata"
|
||||
"go.unistack.org/micro/v3/meter"
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
"go.unistack.org/micro/v3/options"
|
||||
"go.unistack.org/micro/v3/register"
|
||||
msync "go.unistack.org/micro/v3/sync"
|
||||
@@ -37,8 +36,6 @@ type Options struct {
|
||||
Logger logger.Logger
|
||||
// Meter holds the meter
|
||||
Meter meter.Meter
|
||||
// Transport holds the transport
|
||||
Transport transport.Transport
|
||||
|
||||
/*
|
||||
// Router for requests
|
||||
@@ -100,7 +97,6 @@ func NewOptions(opts ...Option) Options {
|
||||
Tracer: tracer.DefaultTracer,
|
||||
Broker: broker.DefaultBroker,
|
||||
Register: register.DefaultRegister,
|
||||
Transport: transport.DefaultTransport,
|
||||
Address: DefaultAddress,
|
||||
Name: DefaultName,
|
||||
Version: DefaultVersion,
|
||||
@@ -209,13 +205,6 @@ func Tracer(t tracer.Tracer) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// Transport mechanism for communication e.g http, rabbitmq, etc
|
||||
func Transport(t transport.Transport) Option {
|
||||
return func(o *Options) {
|
||||
o.Transport = t
|
||||
}
|
||||
}
|
||||
|
||||
// Metadata associated with the server
|
||||
func Metadata(md metadata.Metadata) Option {
|
||||
return func(o *Options) {
|
||||
@@ -249,14 +238,6 @@ func TLSConfig(t *tls.Config) Option {
|
||||
return func(o *Options) {
|
||||
// set the internal tls
|
||||
o.TLSConfig = t
|
||||
|
||||
// set the default transport if one is not
|
||||
// already set. Required for Init call below.
|
||||
|
||||
// set the transport tls
|
||||
_ = o.Transport.Init(
|
||||
transport.TLSConfig(t),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,5 +1,5 @@
// Package server is an interface for a micro server
package server // import "go.unistack.org/micro/v3/server"
package server

import (
	"context"
@@ -11,7 +11,9 @@ import (
)

// DefaultServer default server
var DefaultServer Server = NewServer()
var (
	DefaultServer Server = NewServer()
)

var (
	// DefaultAddress will be used if no address passed, use secure localhost
@@ -60,6 +62,12 @@ type Server interface {
	Stop() error
	// Server implementation
	String() string
	// Live returns server liveness
	Live() bool
	// Ready returns server readiness
	Ready() bool
	// Health returns server health
	Health() bool
}
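Editorial note: Live, Ready and Health are now part of the Server interface, which makes the obvious consumer a Kubernetes-style probe endpoint. The sketch below is hedged: it declares its own minimal prober interface instead of importing server.Server, and the route names are arbitrary.

package probes

import "net/http"

// prober mirrors the Live/Ready/Health methods added to the Server interface;
// it is declared locally so the sketch stays self-contained.
type prober interface {
	Live() bool
	Ready() bool
	Health() bool
}

// probeHandler turns a bool-returning probe into an HTTP handler:
// 200 OK when the probe passes, 503 Service Unavailable otherwise.
func probeHandler(probe func() bool) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		if probe() {
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusServiceUnavailable)
	}
}

// RegisterProbes mounts /live, /ready and /health for the given prober.
func RegisterProbes(mux *http.ServeMux, p prober) {
	mux.Handle("/live", probeHandler(p.Live))
	mux.Handle("/ready", probeHandler(p.Ready))
	mux.Handle("/health", probeHandler(p.Health))
}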

type (
106	service.go
@@ -1,10 +1,14 @@
|
||||
// Package micro is a pluggable framework for microservices
|
||||
package micro // import "go.unistack.org/micro/v3"
|
||||
package micro
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/KimMachineGun/automemlimit/memlimit"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
"go.unistack.org/micro/v3/broker"
|
||||
"go.unistack.org/micro/v3/client"
|
||||
"go.unistack.org/micro/v3/config"
|
||||
@@ -15,8 +19,24 @@ import (
|
||||
"go.unistack.org/micro/v3/server"
|
||||
"go.unistack.org/micro/v3/store"
|
||||
"go.unistack.org/micro/v3/tracer"
|
||||
utildns "go.unistack.org/micro/v3/util/dns"
|
||||
)
|
||||
|
||||
func init() {
|
||||
_, _ = maxprocs.Set()
|
||||
_, _ = memlimit.SetGoMemLimitWithOpts(
|
||||
memlimit.WithRatio(0.9),
|
||||
memlimit.WithProvider(
|
||||
memlimit.ApplyFallback(
|
||||
memlimit.FromCgroup,
|
||||
memlimit.FromSystem,
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
net.DefaultResolver = utildns.NewNetResolver(utildns.Timeout(1 * time.Second))
|
||||
}
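Editorial note: the new init() wires GOMAXPROCS and the Go memory limit to the container limits automatically. If an application wants the same behaviour outside this framework, a hedged, self-contained equivalent (the option set is copied from the init above; the error logging is added only for the example) looks like this:

package main

import (
	"log"
	"runtime"

	"github.com/KimMachineGun/automemlimit/memlimit"
	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Mirror what the framework's init() now does, with explicit error logging.
	if _, err := maxprocs.Set(); err != nil {
		log.Printf("maxprocs: %v", err)
	}
	if _, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9),
		memlimit.WithProvider(
			memlimit.ApplyFallback(
				memlimit.FromCgroup,
				memlimit.FromSystem,
			),
		),
	); err != nil {
		log.Printf("memlimit: %v", err)
	}

	log.Printf("GOMAXPROCS=%d", runtime.GOMAXPROCS(0))
}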
|
||||
|
||||
// Service is an interface that wraps the lower level components.
|
||||
// Its works as container with building blocks for service.
|
||||
type Service interface {
|
||||
@@ -57,8 +77,14 @@ type Service interface {
|
||||
Start() error
|
||||
// Stop the service
|
||||
Stop() error
|
||||
// The service implementation
|
||||
// String service representation
|
||||
String() string
|
||||
// Live returns service liveness
|
||||
Live() bool
|
||||
// Ready returns service readiness
|
||||
Ready() bool
|
||||
// Health returns service health
|
||||
Health() bool
|
||||
}
|
||||
|
||||
// RegisterHandler is syntactic sugar for registering a handler
|
||||
@@ -72,22 +98,21 @@ func RegisterSubscriber(topic string, s server.Server, h interface{}, opts ...se
|
||||
}
|
||||
|
||||
type service struct {
|
||||
done chan struct{}
|
||||
opts Options
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// NewService creates and returns a new Service based on the packages within.
|
||||
func NewService(opts ...Option) Service {
|
||||
return &service{opts: NewOptions(opts...)}
|
||||
return &service{opts: NewOptions(opts...), done: make(chan struct{})}
|
||||
}
|
||||
|
||||
func (s *service) Name() string {
|
||||
return s.opts.Name
|
||||
}
|
||||
|
||||
// Init initialises options. Additionally it calls cmd.Init
|
||||
// which parses command line flags. cmd.Init is only called
|
||||
// on first Init.
|
||||
// Init initialises options.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (s *service) Init(opts ...Option) error {
|
||||
@@ -236,6 +261,63 @@ func (s *service) String() string {
|
||||
return s.opts.Name
|
||||
}
|
||||
|
||||
func (s *service) Live() bool {
|
||||
for _, v := range s.opts.Brokers {
|
||||
if !v.Live() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Servers {
|
||||
if !v.Live() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Stores {
|
||||
if !v.Live() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *service) Ready() bool {
|
||||
for _, v := range s.opts.Brokers {
|
||||
if !v.Ready() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Servers {
|
||||
if !v.Ready() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Stores {
|
||||
if !v.Ready() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *service) Health() bool {
|
||||
for _, v := range s.opts.Brokers {
|
||||
if !v.Health() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Servers {
|
||||
if !v.Health() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, v := range s.opts.Stores {
|
||||
if !v.Health() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func (s *service) Start() error {
|
||||
var err error
|
||||
@@ -262,11 +344,7 @@ func (s *service) Start() error {
|
||||
}
|
||||
|
||||
if config.Loggers[0].V(logger.InfoLevel) {
|
||||
config.Loggers[0].Infof(s.opts.Context, "starting [service] %s version %s", s.Options().Name, s.Options().Version)
|
||||
}
|
||||
|
||||
if len(s.opts.Servers) == 0 {
|
||||
return fmt.Errorf("cant start nil server")
|
||||
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("starting [service] %s version %s", s.Options().Name, s.Options().Version))
|
||||
}
|
||||
|
||||
for _, reg := range s.opts.Registers {
|
||||
@@ -308,7 +386,7 @@ func (s *service) Stop() error {
|
||||
s.RUnlock()
|
||||
|
||||
if config.Loggers[0].V(logger.InfoLevel) {
|
||||
		config.Loggers[0].Infof(s.opts.Context, "stopping [service] %s", s.Name())
		config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stopping [service] %s", s.Name()))
|
||||
}
|
||||
|
||||
var err error
|
||||
@@ -348,6 +426,8 @@ func (s *service) Stop() error {
|
||||
}
|
||||
}
|
||||
|
||||
close(s.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -371,7 +451,7 @@ func (s *service) Run() error {
|
||||
}
|
||||
|
||||
// wait on context cancel
|
||||
<-s.opts.Context.Done()
|
||||
<-s.done
|
||||
|
||||
return s.Stop()
|
||||
}
|
||||
|
@@ -134,7 +134,7 @@ func TestNewService(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := NewService(tt.args.opts...); !reflect.DeepEqual(got, tt.want) {
|
||||
if got := NewService(tt.args.opts...); got.Name() != tt.want.Name() {
|
||||
t.Errorf("NewService() = %v, want %v", got.Options().Name, tt.want.Options().Name)
|
||||
}
|
||||
})
|
||||
|
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
@@ -20,7 +21,10 @@ func NewStore(opts ...store.Option) store.Store {
|
||||
}
|
||||
|
||||
func (m *memoryStore) Connect(ctx context.Context) error {
|
||||
return nil
|
||||
if m.opts.LazyConnect {
|
||||
return nil
|
||||
}
|
||||
return m.connect(ctx)
|
||||
}
|
||||
|
||||
func (m *memoryStore) Disconnect(ctx context.Context) error {
|
||||
@@ -29,13 +33,14 @@ func (m *memoryStore) Disconnect(ctx context.Context) error {
|
||||
}
|
||||
|
||||
type memoryStore struct {
|
||||
funcRead store.FuncRead
|
||||
funcWrite store.FuncWrite
|
||||
funcExists store.FuncExists
|
||||
funcList store.FuncList
|
||||
funcDelete store.FuncDelete
|
||||
store *cache.Cache
|
||||
opts store.Options
|
||||
funcRead store.FuncRead
|
||||
funcWrite store.FuncWrite
|
||||
funcExists store.FuncExists
|
||||
funcList store.FuncList
|
||||
funcDelete store.FuncDelete
|
||||
store *cache.Cache
|
||||
opts store.Options
|
||||
isConnected atomic.Int32
|
||||
}
|
||||
|
||||
func (m *memoryStore) key(prefix, key string) string {
|
||||
@@ -144,7 +149,24 @@ func (m *memoryStore) Name() string {
|
||||
return m.opts.Name
|
||||
}
|
||||
|
||||
func (m *memoryStore) Live() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *memoryStore) Ready() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *memoryStore) Health() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *memoryStore) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
|
||||
if m.opts.LazyConnect {
|
||||
if err := m.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return m.funcExists(ctx, key, opts...)
|
||||
}
|
||||
|
||||
@@ -157,6 +179,11 @@ func (m *memoryStore) fnExists(ctx context.Context, key string, opts ...store.Ex
|
||||
}
|
||||
|
||||
func (m *memoryStore) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
|
||||
if m.opts.LazyConnect {
|
||||
if err := m.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return m.funcRead(ctx, key, val, opts...)
|
||||
}
|
||||
|
||||
@@ -169,6 +196,11 @@ func (m *memoryStore) fnRead(ctx context.Context, key string, val interface{}, o
|
||||
}
|
||||
|
||||
func (m *memoryStore) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
|
||||
if m.opts.LazyConnect {
|
||||
if err := m.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return m.funcWrite(ctx, key, val, opts...)
|
||||
}
|
||||
|
||||
@@ -193,6 +225,11 @@ func (m *memoryStore) fnWrite(ctx context.Context, key string, val interface{},
|
||||
}
|
||||
|
||||
func (m *memoryStore) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
|
||||
if m.opts.LazyConnect {
|
||||
if err := m.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return m.funcDelete(ctx, key, opts...)
|
||||
}
|
||||
|
||||
@@ -211,6 +248,11 @@ func (m *memoryStore) Options() store.Options {
|
||||
}
|
||||
|
||||
func (m *memoryStore) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
|
||||
if m.opts.LazyConnect {
|
||||
if err := m.connect(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return m.funcList(ctx, opts...)
|
||||
}
|
||||
|
||||
@@ -244,3 +286,21 @@ func (m *memoryStore) fnList(ctx context.Context, opts ...store.ListOption) ([]s
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (m *memoryStore) connect(ctx context.Context) error {
|
||||
m.isConnected.CompareAndSwap(0, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memoryStore) Watch(ctx context.Context, opts ...store.WatchOption) (store.Watcher, error) {
|
||||
return &watcher{}, nil
|
||||
}
|
||||
|
||||
type watcher struct{}
|
||||
|
||||
func (w *watcher) Next() (store.Event, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (w *watcher) Stop() {
|
||||
}
|
||||
|
113	store/noop.go
@@ -2,19 +2,37 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.unistack.org/micro/v3/options"
|
||||
"go.unistack.org/micro/v3/util/id"
|
||||
)
|
||||
|
||||
var _ Store = (*noopStore)(nil)
|
||||
|
||||
type noopStore struct {
|
||||
funcRead FuncRead
|
||||
funcWrite FuncWrite
|
||||
funcExists FuncExists
|
||||
funcList FuncList
|
||||
funcDelete FuncDelete
|
||||
opts Options
|
||||
mu sync.Mutex
|
||||
watchers map[string]Watcher
|
||||
funcRead FuncRead
|
||||
funcWrite FuncWrite
|
||||
funcExists FuncExists
|
||||
funcList FuncList
|
||||
funcDelete FuncDelete
|
||||
opts Options
|
||||
isConnected atomic.Int32
|
||||
}
|
||||
|
||||
func (n *noopStore) Live() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *noopStore) Ready() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *noopStore) Health() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func NewStore(opts ...Option) *noopStore {
|
||||
@@ -52,12 +70,10 @@ func (n *noopStore) Init(opts ...Option) error {
|
||||
}
|
||||
|
||||
func (n *noopStore) Connect(ctx context.Context) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
if n.opts.LazyConnect {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
return n.connect(ctx)
|
||||
}
|
||||
|
||||
func (n *noopStore) Disconnect(ctx context.Context) error {
|
||||
@@ -70,6 +86,11 @@ func (n *noopStore) Disconnect(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (n *noopStore) Read(ctx context.Context, key string, val interface{}, opts ...ReadOption) error {
|
||||
if n.opts.LazyConnect {
|
||||
if err := n.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n.funcRead(ctx, key, val, opts...)
|
||||
}
|
||||
|
||||
@@ -83,6 +104,11 @@ func (n *noopStore) fnRead(ctx context.Context, key string, val interface{}, opt
|
||||
}
|
||||
|
||||
func (n *noopStore) Delete(ctx context.Context, key string, opts ...DeleteOption) error {
|
||||
if n.opts.LazyConnect {
|
||||
if err := n.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n.funcDelete(ctx, key, opts...)
|
||||
}
|
||||
|
||||
@@ -96,6 +122,11 @@ func (n *noopStore) fnDelete(ctx context.Context, key string, opts ...DeleteOpti
|
||||
}
|
||||
|
||||
func (n *noopStore) Exists(ctx context.Context, key string, opts ...ExistsOption) error {
|
||||
if n.opts.LazyConnect {
|
||||
if err := n.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n.funcExists(ctx, key, opts...)
|
||||
}
|
||||
|
||||
@@ -109,6 +140,11 @@ func (n *noopStore) fnExists(ctx context.Context, key string, opts ...ExistsOpti
|
||||
}
|
||||
|
||||
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {
|
||||
if n.opts.LazyConnect {
|
||||
if err := n.connect(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n.funcWrite(ctx, key, val, opts...)
|
||||
}
|
||||
|
||||
@@ -122,6 +158,11 @@ func (n *noopStore) fnWrite(ctx context.Context, key string, val interface{}, op
|
||||
}
|
||||
|
||||
func (n *noopStore) List(ctx context.Context, opts ...ListOption) ([]string, error) {
|
||||
if n.opts.LazyConnect {
|
||||
if err := n.connect(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return n.funcList(ctx, opts...)
|
||||
}
|
||||
|
||||
@@ -145,3 +186,53 @@ func (n *noopStore) String() string {
|
||||
func (n *noopStore) Options() Options {
|
||||
return n.opts
|
||||
}
|
||||
|
||||
func (n *noopStore) connect(ctx context.Context) error {
|
||||
if n.isConnected.CompareAndSwap(0, 1) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watcher struct {
|
||||
exit chan bool
|
||||
id string
|
||||
ch chan Event
|
||||
opts WatchOptions
|
||||
}
|
||||
|
||||
func (m *noopStore) Watch(ctx context.Context, opts ...WatchOption) (Watcher, error) {
|
||||
id, err := id.New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wo, err := NewWatchOptions(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// construct the watcher
|
||||
w := &watcher{
|
||||
exit: make(chan bool),
|
||||
ch: make(chan Event),
|
||||
id: id,
|
||||
opts: wo,
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.watchers[w.id] = w
|
||||
m.mu.Unlock()
|
||||
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *watcher) Next() (Event, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (w *watcher) Stop() {
|
||||
}
|
||||
|
@@ -41,6 +41,8 @@ type Options struct {
	Timeout time.Duration
	// Hooks can be run before/after store Read/List/Write/Exists/Delete
	Hooks options.Hooks
	// LazyConnect creates a connection when using store
	LazyConnect bool
}

// NewOptions creates options struct
@@ -132,6 +134,13 @@ func Timeout(td time.Duration) Option {
	}
}

// LazyConnect initialize connection only when needed
func LazyConnect(b bool) Option {
	return func(o *Options) {
		o.LazyConnect = b
	}
}
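Editorial note: LazyConnect defers the real connection until the first store operation; the memory and noop stores in this change implement it with an atomic flag and a connect helper invoked from Read, Write, Exists, Delete and List. A self-contained sketch of that pattern (not the library's actual types) follows:

package lazystore

import (
	"context"
	"sync/atomic"
)

// lazyConn sketches the lazy-connect pattern used by the memory and noop stores:
// Connect is a no-op when lazy connection is enabled, and the first operation
// performs the real connect exactly once.
type lazyConn struct {
	lazy        bool
	isConnected atomic.Int32
}

func (l *lazyConn) connect(ctx context.Context) error {
	if l.isConnected.CompareAndSwap(0, 1) {
		// the real dial/handshake would happen here, honouring ctx
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
	}
	return nil
}

// Connect mirrors Store.Connect: skipped entirely when LazyConnect is set.
func (l *lazyConn) Connect(ctx context.Context) error {
	if l.lazy {
		return nil
	}
	return l.connect(ctx)
}

// Read mirrors the guard added to every data operation in this change.
func (l *lazyConn) Read(ctx context.Context, key string) error {
	if l.lazy {
		if err := l.connect(ctx); err != nil {
			return err
		}
	}
	_ = key // the actual read is elided in this sketch
	return nil
}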

// Addrs contains the addresses or other connection information of the backing storage.
// For example, an etcd implementation would contain the nodes of the cluster.
// A SQL implementation could contain one or more connection strings.
@@ -1,12 +1,16 @@
// Package store is an interface for distributed data storage.
package store // import "go.unistack.org/micro/v3/store"
package store

import (
	"context"
	"errors"
	"time"
)

var (
	ErrWatcherStopped = errors.New("watcher stopped")
	// ErrNotConnected is returned when a store is not connected
	ErrNotConnected = errors.New("not connected")
	// ErrNotFound is returned when a key doesn't exist
	ErrNotFound = errors.New("not found")
	// ErrInvalidKey is returned when a key has empty or have invalid format
@@ -41,6 +45,14 @@ type Store interface {
	Disconnect(ctx context.Context) error
	// String returns the name of the implementation.
	String() string
	// Watch returns events watcher
	Watch(ctx context.Context, opts ...WatchOption) (Watcher, error)
	// Live returns store liveness
	Live() bool
	// Ready returns store readiness
	Ready() bool
	// Health returns store health
	Health() bool
}

type (
@@ -55,3 +67,45 @@ type (
	FuncList func(ctx context.Context, opts ...ListOption) ([]string, error)
	HookList func(next FuncList) FuncList
)

type EventType int

const (
	EventTypeUnknown = iota
	EventTypeConnect
	EventTypeDisconnect
	EventTypeOpError
)

type Event interface {
	Timestamp() time.Time
	Error() error
	Type() EventType
}

type Watcher interface {
	// Next is a blocking call
	Next() (Event, error)
	// Stop stops the watcher
	Stop()
}

type WatchOption func(*WatchOptions) error

type WatchOptions struct{}

func NewWatchOptions(opts ...WatchOption) (WatchOptions, error) {
	options := WatchOptions{}
	var err error
	for _, o := range opts {
		if err = o(&options); err != nil {
			break
		}
	}

	return options, err
}

func Watch(context.Context) (Watcher, error) {
	return nil, nil
}
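Editorial note: the Watcher contract added above is deliberately small: Next blocks until an event or error, Stop ends the watch. A hedged consumption sketch follows; it relies only on the types declared above and assumes an implementation returns ErrWatcherStopped from Next once Stop has been called.

package watchutil

import (
	"errors"
	"log"

	"go.unistack.org/micro/v3/store"
)

// Consume drains events from a store watcher until it is stopped or fails.
func Consume(w store.Watcher) {
	defer w.Stop()
	for {
		ev, err := w.Next()
		if errors.Is(err, store.ErrWatcherStopped) {
			return
		}
		if err != nil {
			log.Printf("watch error: %v", err)
			return
		}
		if ev == nil {
			// guard against nil events (the noop watcher returns nil, nil)
			return
		}
		log.Printf("store event type=%v ts=%s err=%v", ev.Type(), ev.Timestamp(), ev.Error())
	}
}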
@@ -67,16 +67,18 @@ func (w *NamespaceStore) String() string {
|
||||
return w.s.String()
|
||||
}
|
||||
|
||||
// type NamespaceWrapper struct{}
|
||||
|
||||
// func NewNamespaceWrapper() Wrapper {
|
||||
// return &NamespaceWrapper{}
|
||||
// }
|
||||
|
||||
/*
|
||||
func (w *OmitWrapper) Logf(fn LogfFunc) LogfFunc {
|
||||
return func(ctx context.Context, level Level, msg string, args ...interface{}) {
|
||||
fn(ctx, level, msg, getArgs(args)...)
|
||||
}
|
||||
func (w *NamespaceStore) Watch(ctx context.Context, opts ...WatchOption) (Watcher, error) {
|
||||
return w.s.Watch(ctx, opts...)
|
||||
}
|
||||
|
||||
func (w *NamespaceStore) Live() bool {
|
||||
return w.s.Live()
|
||||
}
|
||||
|
||||
func (w *NamespaceStore) Ready() bool {
|
||||
return w.s.Ready()
|
||||
}
|
||||
|
||||
func (w *NamespaceStore) Health() bool {
|
||||
return w.s.Health()
|
||||
}
|
||||
*/
|
||||
|
@@ -1,5 +1,5 @@
// Package sync is an interface for distributed synchronization
package sync // import "go.unistack.org/micro/v3/sync"
package sync

import (
	"errors"
@@ -17,14 +17,14 @@ func TestLoggerWithTracer(t *testing.T) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
logger.DefaultLogger = slog.NewLogger(logger.WithOutput(buf))
|
||||
|
||||
if err := logger.Init(); err != nil {
|
||||
if err := logger.DefaultLogger.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var span tracer.Span
|
||||
tr := NewTracer()
|
||||
ctx, span = tr.Start(ctx, "test1")
|
||||
|
||||
logger.Error(ctx, "my test error", fmt.Errorf("error"))
|
||||
logger.DefaultLogger.Error(ctx, "my test error", fmt.Errorf("error"))
|
||||
|
||||
if !strings.Contains(buf.String(), span.TraceID()) {
|
||||
t.Fatalf("log does not contains trace id: %s", buf.Bytes())
|
||||
|
@@ -1,5 +1,5 @@
// Package tracer provides an interface for distributed tracing
package tracer // import "go.unistack.org/micro/v3/tracer"
package tracer

import (
	"context"
@@ -1,415 +0,0 @@
|
||||
// Package wrapper provides wrapper for Tracer
|
||||
package wrapper // import "go.unistack.org/micro/v3/tracer/wrapper"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"go.unistack.org/micro/v3/client"
|
||||
"go.unistack.org/micro/v3/metadata"
|
||||
"go.unistack.org/micro/v3/server"
|
||||
"go.unistack.org/micro/v3/tracer"
|
||||
)
|
||||
|
||||
var DefaultHeadersExctract = []string{metadata.HeaderXRequestID}
|
||||
|
||||
func ExtractDefaultLabels(md metadata.Metadata) []interface{} {
|
||||
labels := make([]interface{}, 0, len(DefaultHeadersExctract))
|
||||
for _, k := range DefaultHeadersExctract {
|
||||
if v, ok := md.Get(k); ok {
|
||||
labels = append(labels, strings.ToLower(k), v)
|
||||
}
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
var (
|
||||
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, sp tracer.Span, err error) {
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, sp tracer.Span, err error) {
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, sp tracer.Span, err error) {
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
labels = append(labels, ExtractDefaultLabels(msg.Metadata())...)
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, sp tracer.Span, err error) {
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromIncomingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, sp tracer.Span, err error) {
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromIncomingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
labels = append(labels, ExtractDefaultLabels(msg.Header())...)
|
||||
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, sp tracer.Span, err error) {
|
||||
sp.SetName(fmt.Sprintf("%s.%s call", req.Service(), req.Method()))
|
||||
var labels []interface{}
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
labels = append(labels, ExtractDefaultLabels(md)...)
|
||||
}
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
sp.AddLabels(labels...)
|
||||
}
|
||||
|
||||
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
|
||||
)
|
||||
|
||||
type tWrapper struct {
|
||||
client.Client
|
||||
serverHandler server.HandlerFunc
|
||||
serverSubscriber server.SubscriberFunc
|
||||
clientCallFunc client.CallFunc
|
||||
opts Options
|
||||
}
|
||||
|
||||
type (
|
||||
ClientCallObserver func(context.Context, client.Request, interface{}, []client.CallOption, tracer.Span, error)
|
||||
ClientStreamObserver func(context.Context, client.Request, []client.CallOption, client.Stream, tracer.Span, error)
|
||||
ClientPublishObserver func(context.Context, client.Message, []client.PublishOption, tracer.Span, error)
|
||||
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, tracer.Span, error)
|
||||
ServerHandlerObserver func(context.Context, server.Request, interface{}, tracer.Span, error)
|
||||
ServerSubscriberObserver func(context.Context, server.Message, tracer.Span, error)
|
||||
)
|
||||
|
||||
// Options struct
|
||||
type Options struct {
|
||||
// Tracer that used for tracing
|
||||
Tracer tracer.Tracer
|
||||
// ClientCallObservers funcs
|
||||
ClientCallObservers []ClientCallObserver
|
||||
// ClientStreamObservers funcs
|
||||
ClientStreamObservers []ClientStreamObserver
|
||||
// ClientPublishObservers funcs
|
||||
ClientPublishObservers []ClientPublishObserver
|
||||
// ClientCallFuncObservers funcs
|
||||
ClientCallFuncObservers []ClientCallFuncObserver
|
||||
// ServerHandlerObservers funcs
|
||||
ServerHandlerObservers []ServerHandlerObserver
|
||||
// ServerSubscriberObservers funcs
|
||||
ServerSubscriberObservers []ServerSubscriberObserver
|
||||
// SkipEndpoints
|
||||
SkipEndpoints []string
|
||||
}
|
||||
|
||||
// Option func signature
|
||||
type Option func(*Options)
|
||||
|
||||
// NewOptions create Options from Option slice
|
||||
func NewOptions(opts ...Option) Options {
|
||||
options := Options{
|
||||
Tracer: tracer.DefaultTracer,
|
||||
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
|
||||
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
|
||||
ClientPublishObservers: []ClientPublishObserver{DefaultClientPublishObserver},
|
||||
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
|
||||
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
|
||||
ServerSubscriberObservers: []ServerSubscriberObserver{DefaultServerSubscriberObserver},
|
||||
SkipEndpoints: DefaultSkipEndpoints,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
// WithTracer pass tracer
|
||||
func WithTracer(t tracer.Tracer) Option {
|
||||
return func(o *Options) {
|
||||
o.Tracer = t
|
||||
}
|
||||
}
|
||||
|
||||
// SkipEndponts
|
||||
func SkipEndpoins(eps ...string) Option {
|
||||
return func(o *Options) {
|
||||
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientCallObservers funcs
|
||||
func WithClientCallObservers(ob ...ClientCallObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientCallObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientStreamObservers funcs
|
||||
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientStreamObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientPublishObservers funcs
|
||||
func WithClientPublishObservers(ob ...ClientPublishObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientPublishObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientCallFuncObservers funcs
|
||||
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ClientCallFuncObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithServerHandlerObservers funcs
|
||||
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ServerHandlerObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
// WithServerSubscriberObservers funcs
|
||||
func WithServerSubscriberObservers(ob ...ServerSubscriberObserver) Option {
|
||||
return func(o *Options) {
|
||||
o.ServerSubscriberObservers = ob
|
||||
}
|
||||
}
|
||||
|
||||
func (ot *tWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range ot.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return ot.Client.Call(ctx, req, rsp, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
|
||||
tracer.WithSpanKind(tracer.SpanKindClient),
|
||||
tracer.WithSpanLabels(
|
||||
"rpc.service", req.Service(),
|
||||
"rpc.method", req.Method(),
|
||||
"rpc.flavor", "rpc",
|
||||
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
|
||||
"rpc.call_type", "unary",
|
||||
),
|
||||
)
|
||||
defer sp.Finish()
|
||||
|
||||
err := ot.Client.Call(nctx, req, rsp, opts...)
|
||||
|
||||
for _, o := range ot.opts.ClientCallObservers {
|
||||
o(nctx, req, rsp, opts, sp, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ot *tWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
|
||||
for _, ep := range ot.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return ot.Client.Stream(ctx, req, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
|
||||
tracer.WithSpanKind(tracer.SpanKindClient),
|
||||
tracer.WithSpanLabels(
|
||||
"rpc.service", req.Service(),
|
||||
"rpc.method", req.Method(),
|
||||
"rpc.flavor", "rpc",
|
||||
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
|
||||
"rpc.call_type", "stream",
|
||||
),
|
||||
)
|
||||
defer sp.Finish()
|
||||
|
||||
stream, err := ot.Client.Stream(nctx, req, opts...)
|
||||
|
||||
for _, o := range ot.opts.ClientStreamObservers {
|
||||
o(nctx, req, opts, stream, sp, err)
|
||||
}
|
||||
|
||||
return stream, err
|
||||
}
|
||||
|
||||
func (ot *tWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" publish", tracer.WithSpanKind(tracer.SpanKindProducer))
|
||||
defer sp.Finish()
|
||||
sp.AddLabels("messaging.destination.name", msg.Topic())
|
||||
sp.AddLabels("messaging.operation", "publish")
|
||||
err := ot.Client.Publish(nctx, msg, opts...)
|
||||
|
||||
for _, o := range ot.opts.ClientPublishObservers {
|
||||
o(nctx, msg, opts, sp, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ot *tWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Method())
|
||||
for _, ep := range ot.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return ot.serverHandler(ctx, req, rsp)
|
||||
}
|
||||
}
|
||||
|
||||
callType := "unary"
|
||||
if req.Stream() {
|
||||
callType = "stream"
|
||||
}
|
||||
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-server", req.Service(), req.Method()),
|
||||
tracer.WithSpanKind(tracer.SpanKindServer),
|
||||
tracer.WithSpanLabels(
|
||||
"rpc.service", req.Service(),
|
||||
"rpc.method", req.Method(),
|
||||
"rpc.flavor", "rpc",
|
||||
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
|
||||
"rpc.call_type", callType,
|
||||
),
|
||||
)
|
||||
defer sp.Finish()
|
||||
|
||||
err := ot.serverHandler(nctx, req, rsp)
|
||||
|
||||
for _, o := range ot.opts.ServerHandlerObservers {
|
||||
o(nctx, req, rsp, sp, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ot *tWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" process", tracer.WithSpanKind(tracer.SpanKindConsumer))
|
||||
defer sp.Finish()
|
||||
sp.AddLabels("messaging.operation", "process")
|
||||
sp.AddLabels("messaging.source.name", msg.Topic())
|
||||
err := ot.serverSubscriber(nctx, msg)
|
||||
|
||||
for _, o := range ot.opts.ServerSubscriberObservers {
|
||||
o(nctx, msg, sp, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NewClientWrapper accepts an open tracing Trace and returns a Client Wrapper
|
||||
func NewClientWrapper(opts ...Option) client.Wrapper {
|
||||
return func(c client.Client) client.Client {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
return &tWrapper{opts: options, Client: c}
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientCallWrapper accepts an opentracing Tracer and returns a Call Wrapper
|
||||
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
|
||||
return func(h client.CallFunc) client.CallFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
ot := &tWrapper{opts: options, clientCallFunc: h}
|
||||
return ot.ClientCallFunc
|
||||
}
|
||||
}
|
||||
|
||||
func (ot *tWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
|
||||
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Method())
|
||||
for _, ep := range ot.opts.SkipEndpoints {
|
||||
if ep == endpoint {
|
||||
return ot.ClientCallFunc(ctx, addr, req, rsp, opts)
|
||||
}
|
||||
}
|
||||
|
||||
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
|
||||
tracer.WithSpanKind(tracer.SpanKindClient),
|
||||
tracer.WithSpanLabels(
|
||||
"rpc.service", req.Service(),
|
||||
"rpc.method", req.Method(),
|
||||
"rpc.flavor", "rpc",
|
||||
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
|
||||
"rpc.call_type", "unary",
|
||||
),
|
||||
)
|
||||
|
||||
defer sp.Finish()
|
||||
|
||||
err := ot.clientCallFunc(nctx, addr, req, rsp, opts)
|
||||
|
||||
for _, o := range ot.opts.ClientCallFuncObservers {
|
||||
o(nctx, addr, req, rsp, opts, sp, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// NewServerHandlerWrapper accepts an options and returns a Handler Wrapper
|
||||
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
|
||||
return func(h server.HandlerFunc) server.HandlerFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
ot := &tWrapper{opts: options, serverHandler: h}
|
||||
return ot.ServerHandler
|
||||
}
|
||||
}
|
||||
|
||||
// NewServerSubscriberWrapper accepts an opentracing Tracer and returns a Subscriber Wrapper
|
||||
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
|
||||
return func(h server.SubscriberFunc) server.SubscriberFunc {
|
||||
options := NewOptions()
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
ot := &tWrapper{opts: options, serverSubscriber: h}
|
||||
return ot.ServerSubscriber
|
||||
}
|
||||
}
|
@@ -1,4 +1,4 @@
package addr // import "go.unistack.org/micro/v3/util/addr"
package addr

import (
	"fmt"
@@ -58,6 +58,7 @@ func IsLocal(addr string) bool {
|
||||
}
|
||||
|
||||
// Extract returns a real ip
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func Extract(addr string) (string, error) {
|
||||
// if addr specified then its returned
|
||||
|
@@ -1,5 +1,5 @@
// Package backoff provides backoff functionality
package backoff // import "go.unistack.org/micro/v3/util/backoff"
package backoff

import (
	"math"
@@ -1,4 +1,4 @@
package buf // import "go.unistack.org/micro/v3/util/buf"
package buf

import (
	"bytes"
377	util/dns/cache.go	Normal file
@@ -0,0 +1,377 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DialFunc is a [net.Resolver.Dial] function.
|
||||
type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
|
||||
|
||||
// NewNetResolver creates a caching [net.Resolver] that uses parent to resolve names.
|
||||
func NewNetResolver(opts ...Option) *net.Resolver {
|
||||
options := Options{Resolver: &net.Resolver{}}
|
||||
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
return &net.Resolver{
|
||||
PreferGo: true,
|
||||
StrictErrors: options.Resolver.StrictErrors,
|
||||
Dial: NewNetDialer(options.Resolver.Dial, append(opts, Resolver(options.Resolver))...),
|
||||
}
|
||||
}
|
||||
|
||||
// NewNetDialer adds caching to a [net.Resolver.Dial] function.
|
||||
func NewNetDialer(parent DialFunc, opts ...Option) DialFunc {
|
||||
cache := cache{dial: parent, opts: Options{}}
|
||||
for _, o := range opts {
|
||||
o(&cache.opts)
|
||||
}
|
||||
if cache.opts.MaxCacheEntries == 0 {
|
||||
cache.opts.MaxCacheEntries = DefaultMaxCacheEntries
|
||||
}
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
conn := &dnsConn{}
|
||||
conn.roundTrip = cachingRoundTrip(&cache, network, address)
|
||||
return conn, nil
|
||||
}
|
||||
}
|
||||
|
||||
const DefaultMaxCacheEntries = 300
|
||||
|
||||
// A Option customizes the resolver cache.
|
||||
type Option func(*Options)
|
||||
|
||||
type Options struct {
|
||||
Resolver *net.Resolver
|
||||
MaxCacheEntries int
|
||||
MaxCacheTTL time.Duration
|
||||
MinCacheTTL time.Duration
|
||||
NegativeCache bool
|
||||
PreferIPV4 bool
|
||||
PreferIPV6 bool
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// MaxCacheEntries sets the maximum number of entries to cache.
|
||||
// If zero, [DefaultMaxCacheEntries] is used; negative means no limit.
|
||||
func MaxCacheEntries(n int) Option {
|
||||
return func(o *Options) {
|
||||
o.MaxCacheEntries = n
|
||||
}
|
||||
}
|
||||
|
||||
// MaxCacheTTL sets the maximum time-to-live for entries in the cache.
|
||||
func MaxCacheTTL(td time.Duration) Option {
|
||||
return func(o *Options) {
|
||||
o.MaxCacheTTL = td
|
||||
}
|
||||
}
|
||||
|
||||
// MinCacheTTL sets the minimum time-to-live for entries in the cache.
|
||||
func MinCacheTTL(td time.Duration) Option {
|
||||
return func(o *Options) {
|
||||
o.MinCacheTTL = td
|
||||
}
|
||||
}
|
||||
|
||||
// NegativeCache sets whether to cache negative responses.
|
||||
func NegativeCache(b bool) Option {
|
||||
return func(o *Options) {
|
||||
o.NegativeCache = b
|
||||
}
|
||||
}
|
||||
|
||||
// Timeout sets the timeout for queries to the upstream resolver.
|
||||
func Timeout(td time.Duration) Option {
|
||||
return func(o *Options) {
|
||||
o.Timeout = td
|
||||
}
|
||||
}
|
||||
|
||||
// Resolver sets upstream *net.Resolver.
|
||||
func Resolver(r *net.Resolver) Option {
|
||||
return func(o *Options) {
|
||||
o.Resolver = r
|
||||
}
|
||||
}
|
||||
|
||||
// PreferIPV4 sets whether to prefer IPv4 records.
|
||||
func PreferIPV4(b bool) Option {
|
||||
return func(o *Options) {
|
||||
o.PreferIPV4 = b
|
||||
}
|
||||
}
|
||||
|
||||
// PreferIPV6 sets whether to prefer IPv6 records.
|
||||
func PreferIPV6(b bool) Option {
|
||||
return func(o *Options) {
|
||||
o.PreferIPV6 = b
|
||||
}
|
||||
}
|
||||
|
||||
type cache struct {
|
||||
sync.RWMutex
|
||||
|
||||
dial DialFunc
|
||||
entries map[string]cacheEntry
|
||||
|
||||
opts Options
|
||||
}
|
||||
|
||||
type cacheEntry struct {
|
||||
deadline time.Time
|
||||
value string
|
||||
}
|
||||
|
||||
func (c *cache) put(req string, res string) {
|
||||
	// ignore invalid/unparseable messages
|
||||
if invalid(req, res) {
|
||||
return
|
||||
}
|
||||
|
||||
	// skip caching name errors unless negative caching is enabled
|
||||
if nameError(res) && !c.opts.NegativeCache {
|
||||
return
|
||||
}
|
||||
|
||||
// ignore uncacheable/unparseable answers
|
||||
ttl := getTTL(res)
|
||||
if ttl <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// adjust TTL
|
||||
if ttl < c.opts.MinCacheTTL {
|
||||
ttl = c.opts.MinCacheTTL
|
||||
}
|
||||
// maxTTL overrides minTTL
|
||||
if ttl > c.opts.MaxCacheTTL && c.opts.MaxCacheTTL != 0 {
|
||||
ttl = c.opts.MaxCacheTTL
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if c.entries == nil {
|
||||
c.entries = make(map[string]cacheEntry)
|
||||
}
|
||||
|
||||
	// do some cache eviction
|
||||
var tested, evicted int
|
||||
for k, e := range c.entries {
|
||||
if time.Until(e.deadline) <= 0 {
|
||||
// delete expired entry
|
||||
delete(c.entries, k)
|
||||
evicted++
|
||||
}
|
||||
tested++
|
||||
|
||||
if tested < 8 {
|
||||
continue
|
||||
}
|
||||
if evicted == 0 && c.opts.MaxCacheEntries > 0 && len(c.entries) >= c.opts.MaxCacheEntries {
|
||||
// delete at least one entry
|
||||
delete(c.entries, k)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// remove message IDs
|
||||
c.entries[req[2:]] = cacheEntry{
|
||||
deadline: time.Now().Add(ttl),
|
||||
value: res[2:],
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cache) get(req string) (res string) {
|
||||
// ignore invalid messages
|
||||
if len(req) < 12 {
|
||||
return ""
|
||||
}
|
||||
if req[2] >= 0x7f {
|
||||
return ""
|
||||
}
|
||||
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
if c.entries == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// remove message ID
|
||||
entry, ok := c.entries[req[2:]]
|
||||
if ok && time.Until(entry.deadline) > 0 {
|
||||
// prepend correct ID
|
||||
return req[:2] + entry.value
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func invalid(req string, res string) bool {
|
||||
if len(req) < 12 || len(res) < 12 { // header size
|
||||
return true
|
||||
}
|
||||
if req[0] != res[0] || req[1] != res[1] { // IDs match
|
||||
return true
|
||||
}
|
||||
if req[2] >= 0x7f || res[2] < 0x7f { // query, response
|
||||
return true
|
||||
}
|
||||
if req[2]&0x7a != 0 || res[2]&0x7a != 0 { // standard query, not truncated
|
||||
return true
|
||||
}
|
||||
if res[3]&0xf != 0 && res[3]&0xf != 3 { // no error, or name error
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func nameError(res string) bool {
|
||||
return res[3]&0xf == 3
|
||||
}
|
||||
|
||||
func getTTL(msg string) time.Duration {
|
||||
ttl := math.MaxInt32
|
||||
|
||||
qdcount := getUint16(msg[4:])
|
||||
ancount := getUint16(msg[6:])
|
||||
nscount := getUint16(msg[8:])
|
||||
arcount := getUint16(msg[10:])
|
||||
rdcount := ancount + nscount + arcount
|
||||
|
||||
msg = msg[12:] // skip header
|
||||
|
||||
// skip questions
|
||||
for i := 0; i < qdcount; i++ {
|
||||
name := getNameLen(msg)
|
||||
if name < 0 || name+4 > len(msg) {
|
||||
return -1
|
||||
}
|
||||
msg = msg[name+4:]
|
||||
}
|
||||
|
||||
// parse records
|
||||
for i := 0; i < rdcount; i++ {
|
||||
name := getNameLen(msg)
|
||||
if name < 0 || name+10 > len(msg) {
|
||||
return -1
|
||||
}
|
||||
rtyp := getUint16(msg[name+0:])
|
||||
rttl := getUint32(msg[name+4:])
|
||||
rlen := getUint16(msg[name+8:])
|
||||
if name+10+rlen > len(msg) {
|
||||
return -1
|
||||
}
|
||||
// skip EDNS OPT since it doesn't have a TTL
|
||||
if rtyp != 41 && rttl < ttl {
|
||||
ttl = rttl
|
||||
}
|
||||
msg = msg[name+10+rlen:]
|
||||
}
|
||||
|
||||
return time.Duration(ttl) * time.Second
|
||||
}
|
||||
|
||||
func getNameLen(msg string) int {
|
||||
i := 0
|
||||
for i < len(msg) {
|
||||
if msg[i] == 0 {
|
||||
// end of name
|
||||
i += 1
|
||||
break
|
||||
}
|
||||
if msg[i] >= 0xc0 {
|
||||
// compressed name
|
||||
i += 2
|
||||
break
|
||||
}
|
||||
if msg[i] >= 0x40 {
|
||||
// reserved
|
||||
return -1
|
||||
}
|
||||
i += int(msg[i] + 1)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func getUint16(s string) int {
|
||||
return int(s[1]) | int(s[0])<<8
|
||||
}
|
||||
|
||||
func getUint32(s string) int {
|
||||
return int(s[3]) | int(s[2])<<8 | int(s[1])<<16 | int(s[0])<<24
|
||||
}
|
||||
|
||||
func cachingRoundTrip(cache *cache, network, address string) roundTripper {
|
||||
return func(ctx context.Context, req string) (res string, err error) {
|
||||
// check cache
|
||||
if res := cache.get(req); res != "" {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case cache.opts.PreferIPV4 && cache.opts.PreferIPV6:
|
||||
network = "udp"
|
||||
case cache.opts.PreferIPV4:
|
||||
network = "udp4"
|
||||
case cache.opts.PreferIPV6:
|
||||
network = "udp6"
|
||||
default:
|
||||
network = "udp"
|
||||
}
|
||||
|
||||
if cache.opts.Timeout > 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, cache.opts.Timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// dial connection
|
||||
var conn net.Conn
|
||||
if cache.dial != nil {
|
||||
conn, err = cache.dial(ctx, network, address)
|
||||
} else {
|
||||
var d net.Dialer
|
||||
conn, err = d.DialContext(ctx, network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
conn.Close()
|
||||
}()
|
||||
defer cancel()
|
||||
|
||||
if t, ok := ctx.Deadline(); ok {
|
||||
err = conn.SetDeadline(t)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// send request
|
||||
err = writeMessage(conn, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// read response
|
||||
res, err = readMessage(conn)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// cache response
|
||||
cache.put(req, res)
|
||||
return res, nil
|
||||
}
|
||||
}
|
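For context on how the new caching resolver is meant to be consumed: NewNetResolver returns a drop-in *net.Resolver, so it can replace net.DefaultResolver (as the test below does) or be used directly. A minimal usage sketch, not part of this diff; the import path go.unistack.org/micro/v3/util/dns is an assumption and the major-version suffix may differ:

package main

import (
    "context"
    "fmt"
    "time"

    "go.unistack.org/micro/v3/util/dns" // assumed import path; major version suffix may differ
)

func main() {
    // build a caching resolver: prefer IPv4, cap the cache size,
    // and bound each upstream query with a timeout
    r := dns.NewNetResolver(
        dns.PreferIPV4(true),
        dns.MaxCacheEntries(512),
        dns.Timeout(2*time.Second),
    )

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // repeated lookups for the same name are served from the in-process
    // cache until the cached record's TTL expires
    addrs, err := r.LookupHost(ctx, "unistack.org")
    if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    fmt.Println("addrs:", addrs)
}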
16
util/dns/cache_test.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
net.DefaultResolver = NewNetResolver(PreferIPV4(true))
|
||||
|
||||
addrs, err := net.LookupHost("unistack.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("addrs %v", addrs)
|
||||
}
|
183
util/dns/conn.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type dnsConn struct {
|
||||
sync.Mutex
|
||||
|
||||
ibuf bytes.Buffer
|
||||
obuf bytes.Buffer
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
deadline time.Time
|
||||
roundTrip roundTripper
|
||||
}
|
||||
|
||||
type roundTripper func(ctx context.Context, req string) (res string, err error)
|
||||
|
||||
func (c *dnsConn) Read(b []byte) (n int, err error) {
|
||||
imsg, n, err := c.drainBuffers(b)
|
||||
if n != 0 || err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
ctx, cancel := c.childContext()
|
||||
omsg, err := c.roundTrip(ctx, imsg)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return c.fillBuffer(b, omsg)
|
||||
}
|
||||
|
||||
func (c *dnsConn) Write(b []byte) (n int, err error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
return c.ibuf.Write(b)
|
||||
}
|
||||
|
||||
func (c *dnsConn) Close() error {
|
||||
c.Lock()
|
||||
cancel := c.cancel
|
||||
c.Unlock()
|
||||
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) LocalAddr() net.Addr {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) RemoteAddr() net.Addr {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) SetDeadline(t time.Time) error {
|
||||
var err error
|
||||
if err = c.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.SetWriteDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) SetReadDeadline(t time.Time) error {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.deadline = t
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) SetWriteDeadline(t time.Time) error {
|
||||
// writes do not timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// drain the output buffer
|
||||
if c.obuf.Len() > 0 {
|
||||
n, err := c.obuf.Read(b)
|
||||
return "", n, err
|
||||
}
|
||||
|
||||
// otherwise, get the next message from the input buffer
|
||||
sz := c.ibuf.Next(2)
|
||||
if len(sz) < 2 {
|
||||
return "", 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
size := int64(sz[0])<<8 | int64(sz[1])
|
||||
|
||||
var str strings.Builder
|
||||
_, err := io.CopyN(&str, &c.ibuf, size)
|
||||
if err == io.EOF {
|
||||
return "", 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
return str.String(), 0, nil
|
||||
}
|
||||
|
||||
func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.obuf.WriteByte(byte(len(str) >> 8))
|
||||
c.obuf.WriteByte(byte(len(str)))
|
||||
c.obuf.WriteString(str)
|
||||
return c.obuf.Read(b)
|
||||
}
|
||||
|
||||
func (c *dnsConn) childContext() (context.Context, context.CancelFunc) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if c.ctx == nil {
|
||||
c.ctx, c.cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
return context.WithDeadline(c.ctx, c.deadline)
|
||||
}
|
||||
|
||||
func writeMessage(conn net.Conn, msg string) error {
|
||||
var buf []byte
|
||||
if _, ok := conn.(net.PacketConn); ok {
|
||||
buf = []byte(msg)
|
||||
} else {
|
||||
buf = make([]byte, len(msg)+2)
|
||||
buf[0] = byte(len(msg) >> 8)
|
||||
buf[1] = byte(len(msg))
|
||||
copy(buf[2:], msg)
|
||||
}
|
||||
// SHOULD do a single write on TCP (RFC 7766, section 8).
|
||||
// MUST do a single write on UDP.
|
||||
_, err := conn.Write(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func readMessage(c net.Conn) (string, error) {
|
||||
if _, ok := c.(net.PacketConn); ok {
|
||||
// RFC 1035 specifies 512 as the maximum message size for DNS over UDP.
|
||||
		// RFC 6891, on the other hand, suggests 4096 as the maximum payload size for EDNS.
|
||||
b := make([]byte, 4096)
|
||||
n, err := c.Read(b)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b[:n]), nil
|
||||
} else {
|
||||
var sz [2]byte
|
||||
_, err := io.ReadFull(c, sz[:])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
size := int64(sz[0])<<8 | int64(sz[1])
|
||||
|
||||
var str strings.Builder
|
||||
_, err = io.CopyN(&str, c, size)
|
||||
if err == io.EOF {
|
||||
return "", io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return str.String(), nil
|
||||
}
|
||||
}
|
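Because the cache is layered at the Dial level, it can also wrap a custom upstream dialer instead of the system default. A hedged sketch, not part of this diff; the pinned 8.8.8.8:53 upstream address and the import path are illustrative assumptions:

package main

import (
    "context"
    "fmt"
    "net"

    "go.unistack.org/micro/v3/util/dns" // assumed import path; major version suffix may differ
)

func main() {
    // upstream dial function that always talks to one pinned DNS server
    // (the 8.8.8.8:53 address is purely illustrative)
    upstream := func(ctx context.Context, network, _ string) (net.Conn, error) {
        var d net.Dialer
        return d.DialContext(ctx, network, "8.8.8.8:53")
    }

    r := &net.Resolver{
        PreferGo: true,
        // layer the caching dialer from this changeset over the pinned upstream
        Dial: dns.NewNetDialer(upstream, dns.NegativeCache(true)),
    }

    ips, err := r.LookupIPAddr(context.Background(), "unistack.org")
    fmt.Println(ips, err)
}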
@@ -1,4 +1,4 @@
|
||||
package http // import "go.unistack.org/micro/v3/util/http"
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package id // import "go.unistack.org/micro/v3/util/id"
|
||||
package id
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -71,7 +71,7 @@ func New(opts ...Option) (string, error) {
|
||||
func Must(opts ...Option) string {
|
||||
id, err := New(opts...)
|
||||
if err != nil {
|
||||
logger.Fatal(context.TODO(), err)
|
||||
		logger.DefaultLogger.Fatal(context.TODO(), "Must call failed", err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
@@ -1,40 +0,0 @@
|
||||
// Package io is for io management
|
||||
package io // import "go.unistack.org/micro/v3/util/io"
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
)
|
||||
|
||||
type rwc struct {
|
||||
socket transport.Socket
|
||||
}
|
||||
|
||||
func (r *rwc) Read(p []byte) (n int, err error) {
|
||||
m := new(transport.Message)
|
||||
if err := r.socket.Recv(m); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
copy(p, m.Body)
|
||||
return len(m.Body), nil
|
||||
}
|
||||
|
||||
func (r *rwc) Write(p []byte) (n int, err error) {
|
||||
err = r.socket.Send(&transport.Message{
|
||||
Body: p,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (r *rwc) Close() error {
|
||||
return r.socket.Close()
|
||||
}
|
||||
|
||||
// NewRWC returns a new ReadWriteCloser
|
||||
func NewRWC(sock transport.Socket) io.ReadWriteCloser {
|
||||
return &rwc{sock}
|
||||
}
|
@@ -1,5 +1,5 @@
|
||||
// Package jitter provides a random jitter
|
||||
package jitter // import "go.unistack.org/micro/v3/util/jitter"
|
||||
package jitter
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package jitter // import "go.unistack.org/micro/v3/util/jitter"
|
||||
package jitter
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -16,7 +16,6 @@ type Ticker struct {
|
||||
C chan time.Time
|
||||
min int64
|
||||
max int64
|
||||
exp int64
|
||||
exit bool
|
||||
rng rand.Rand
|
||||
}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package net // import "go.unistack.org/micro/v3/util/net"
|
||||
package net
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// Package pki provides all the PKI functions necessary to run micro over an untrusted network
|
||||
// including a CA
|
||||
package pki // import "go.unistack.org/micro/v3/util/pki"
|
||||
package pki
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
@@ -1,118 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
"go.unistack.org/micro/v3/util/id"
|
||||
)
|
||||
|
||||
type pool struct {
|
||||
tr transport.Transport
|
||||
conns map[string][]*poolConn
|
||||
size int
|
||||
ttl time.Duration
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type poolConn struct {
|
||||
created time.Time
|
||||
transport.Client
|
||||
id string
|
||||
}
|
||||
|
||||
func newPool(options Options) *pool {
|
||||
return &pool{
|
||||
size: options.Size,
|
||||
tr: options.Transport,
|
||||
ttl: options.TTL,
|
||||
conns: make(map[string][]*poolConn),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pool) Close() error {
|
||||
p.Lock()
|
||||
for k, c := range p.conns {
|
||||
for _, conn := range c {
|
||||
conn.Client.Close()
|
||||
}
|
||||
delete(p.conns, k)
|
||||
}
|
||||
p.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// NoOp the Close since we manage it
|
||||
func (p *poolConn) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *poolConn) ID() string {
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *poolConn) Created() time.Time {
|
||||
return p.created
|
||||
}
|
||||
|
||||
func (p *pool) Get(ctx context.Context, addr string, opts ...transport.DialOption) (Conn, error) {
|
||||
p.Lock()
|
||||
conns := p.conns[addr]
|
||||
|
||||
// while we have conns check age and then return one
|
||||
// otherwise we'll create a new conn
|
||||
for len(conns) > 0 {
|
||||
conn := conns[len(conns)-1]
|
||||
conns = conns[:len(conns)-1]
|
||||
p.conns[addr] = conns
|
||||
|
||||
// if conn is old kill it and move on
|
||||
if d := time.Since(conn.Created()); d > p.ttl {
|
||||
conn.Client.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// we got a good conn, lets unlock and return it
|
||||
p.Unlock()
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
p.Unlock()
|
||||
|
||||
// create new conn
|
||||
c, err := p.tr.Dial(ctx, addr, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id, err := id.New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &poolConn{
|
||||
Client: c,
|
||||
id: id,
|
||||
created: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *pool) Release(conn Conn, err error) error {
|
||||
// don't store the conn if it has errored
|
||||
if err != nil {
|
||||
return conn.(*poolConn).Client.Close()
|
||||
}
|
||||
|
||||
// otherwise put it back for reuse
|
||||
p.Lock()
|
||||
conns := p.conns[conn.Remote()]
|
||||
if len(conns) >= p.size {
|
||||
p.Unlock()
|
||||
return conn.(*poolConn).Client.Close()
|
||||
}
|
||||
p.conns[conn.Remote()] = append(conns, conn.(*poolConn))
|
||||
p.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
@@ -1,92 +0,0 @@
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
"go.unistack.org/micro/v3/network/transport/memory"
|
||||
)
|
||||
|
||||
func testPool(t *testing.T, size int, ttl time.Duration) {
|
||||
// mock transport
|
||||
tr := memory.NewTransport()
|
||||
|
||||
options := Options{
|
||||
TTL: ttl,
|
||||
Size: size,
|
||||
Transport: tr,
|
||||
}
|
||||
// zero pool
|
||||
p := newPool(options)
|
||||
|
||||
// listen
|
||||
l, err := tr.Listen(":0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
// accept loop
|
||||
go func() {
|
||||
for {
|
||||
if err := l.Accept(func(s transport.Socket) {
|
||||
for {
|
||||
var msg transport.Message
|
||||
if err := s.Recv(&msg); err != nil {
|
||||
return
|
||||
}
|
||||
if err := s.Send(&msg); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// get a conn
|
||||
c, err := p.Get(l.Addr())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msg := &transport.Message{
|
||||
Body: []byte(`hello world`),
|
||||
}
|
||||
|
||||
if err := c.Send(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var rcv transport.Message
|
||||
|
||||
if err := c.Recv(&rcv); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(rcv.Body) != string(msg.Body) {
|
||||
t.Fatalf("got %v, expected %v", rcv.Body, msg.Body)
|
||||
}
|
||||
|
||||
// release the conn
|
||||
p.Release(c, nil)
|
||||
|
||||
p.Lock()
|
||||
if i := len(p.conns[l.Addr()]); i > size {
|
||||
p.Unlock()
|
||||
t.Fatalf("pool size %d is greater than expected %d", i, size)
|
||||
}
|
||||
p.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientPool(t *testing.T) {
|
||||
testPool(t, 0, time.Minute)
|
||||
testPool(t, 2, time.Minute)
|
||||
}
|
@@ -1,38 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
)
|
||||
|
||||
// Options struct
|
||||
type Options struct {
|
||||
Transport transport.Transport
|
||||
TTL time.Duration
|
||||
Size int
|
||||
}
|
||||
|
||||
// Option func signature
|
||||
type Option func(*Options)
|
||||
|
||||
// Size sets the size
|
||||
func Size(i int) Option {
|
||||
return func(o *Options) {
|
||||
o.Size = i
|
||||
}
|
||||
}
|
||||
|
||||
// Transport sets the transport
|
||||
func Transport(t transport.Transport) Option {
|
||||
return func(o *Options) {
|
||||
o.Transport = t
|
||||
}
|
||||
}
|
||||
|
||||
// TTL specifies ttl
|
||||
func TTL(t time.Duration) Option {
|
||||
return func(o *Options) {
|
||||
o.TTL = t
|
||||
}
|
||||
}
|
@@ -1,38 +0,0 @@
|
||||
// Package pool is a connection pool
|
||||
package pool // import "go.unistack.org/micro/v3/util/pool"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"go.unistack.org/micro/v3/network/transport"
|
||||
)
|
||||
|
||||
// Pool is an interface for connection pooling
|
||||
type Pool interface {
|
||||
// Close the pool
|
||||
Close() error
|
||||
// Get a connection
|
||||
Get(ctx context.Context, addr string, opts ...transport.DialOption) (Conn, error)
|
||||
// Release the connection
|
||||
Release(c Conn, status error) error
|
||||
}
|
||||
|
||||
// Conn conn pool interface
|
||||
type Conn interface {
|
||||
// unique id of connection
|
||||
ID() string
|
||||
// time it was created
|
||||
Created() time.Time
|
||||
// embedded connection
|
||||
transport.Client
|
||||
}
|
||||
|
||||
// NewPool creates new connection pool
|
||||
func NewPool(opts ...Option) Pool {
|
||||
options := Options{}
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
return newPool(options)
|
||||
}
|
@@ -1,4 +1,4 @@
|
||||
package rand // import "go.unistack.org/micro/v3/util/rand"
|
||||
package rand
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
|
@@ -44,6 +44,37 @@ func SliceAppend(b bool) Option {
|
||||
}
|
||||
}
|
||||
|
||||
var maxDepth = 32
|
||||
|
||||
func mergeMap(dst, src map[string]interface{}, depth int) map[string]interface{} {
|
||||
if depth > maxDepth {
|
||||
return dst
|
||||
}
|
||||
for key, srcVal := range src {
|
||||
if dstVal, ok := dst[key]; ok {
|
||||
srcMap, srcMapOk := mapify(srcVal)
|
||||
dstMap, dstMapOk := mapify(dstVal)
|
||||
if srcMapOk && dstMapOk {
|
||||
srcVal = mergeMap(dstMap, srcMap, depth+1)
|
||||
}
|
||||
}
|
||||
dst[key] = srcVal
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func mapify(i interface{}) (map[string]interface{}, bool) {
|
||||
value := reflect.ValueOf(i)
|
||||
if value.Kind() == reflect.Map {
|
||||
m := map[string]interface{}{}
|
||||
for _, k := range value.MapKeys() {
|
||||
m[k.String()] = value.MapIndex(k).Interface()
|
||||
}
|
||||
return m, true
|
||||
}
|
||||
return map[string]interface{}{}, false
|
||||
}
|
||||
|
||||
// Merge merges map[string]interface{} into the destination struct or map
|
||||
func Merge(dst interface{}, mp map[string]interface{}, opts ...Option) error {
|
||||
options := Options{}
|
||||
@@ -59,6 +90,11 @@ func Merge(dst interface{}, mp map[string]interface{}, opts ...Option) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if mapper, ok := dst.(map[string]interface{}); ok {
|
||||
mergeMap(mapper, mp, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
var sval reflect.Value
|
||||
var fname string
|
||||
|
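With the new fast path, Merge can deep-merge directly into a plain map[string]interface{}, recursing into nested maps up to maxDepth levels while overwriting scalar values. A small sketch of that map path, not part of this diff; the import path and alias are assumptions, and it mirrors what the tests in the following hunk exercise:

package main

import (
    "fmt"

    rutil "go.unistack.org/micro/v3/util/reflect" // assumed import path and alias
)

func main() {
    dst := map[string]interface{}{
        "broker": map[string]interface{}{"addr": "127.0.0.1:9092"},
        "name":   "svc",
    }
    src := map[string]interface{}{
        "broker": map[string]interface{}{"tls": true}, // merged into the existing nested map
        "name":   "svc-v2",                            // scalar values are simply overwritten
    }

    if err := rutil.Merge(dst, src); err != nil {
        panic(err)
    }
    // expected: "broker" keeps addr and gains tls, "name" becomes "svc-v2"
    fmt.Printf("%#v\n", dst)
}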
@@ -1,9 +1,59 @@
|
||||
package reflect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMergeMapStringInterface(t *testing.T) {
|
||||
var dst interface{} //nolint:gosimple
|
||||
dst = map[string]interface{}{
|
||||
"xx": 11,
|
||||
}
|
||||
|
||||
src := map[string]interface{}{
|
||||
"zz": "aa",
|
||||
}
|
||||
|
||||
if err := Merge(dst, src); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mp, ok := dst.(map[string]interface{})
|
||||
if !ok || mp == nil {
|
||||
t.Fatalf("xxx %#+v\n", dst)
|
||||
}
|
||||
|
||||
if fmt.Sprintf("%v", mp["xx"]) != "11" {
|
||||
t.Fatalf("xxx zzzz %#+v", mp)
|
||||
}
|
||||
|
||||
if fmt.Sprintf("%v", mp["zz"]) != "aa" {
|
||||
t.Fatalf("xxx zzzz %#+v", mp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeMap(t *testing.T) {
|
||||
src := map[string]interface{}{
|
||||
"skey1": "sval1",
|
||||
"skey2": map[string]interface{}{
|
||||
"skey3": "sval3",
|
||||
},
|
||||
}
|
||||
dst := map[string]interface{}{
|
||||
"skey1": "dval1",
|
||||
"skey2": map[string]interface{}{
|
||||
"skey3": "dval3",
|
||||
},
|
||||
}
|
||||
|
||||
if err := Merge(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("%#+v", src)
|
||||
}
|
||||
|
||||
func TestFieldName(t *testing.T) {
|
||||
src := "SomeVar"
|
||||
chk := "some_var"
|
||||
|
Some files were not shown because too many files have changed in this diff.