Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 7cd7fb0c0a | |
| | 77eb5b5264 | |
| | 929e46c087 | |
| | 1fb5673d27 | |
| | 3bbb0cbc72 | |
| | 71fe0df73f | |
| | f1b8ecbdb3 | |
@@ -206,7 +206,7 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
}
} else if sub.opts.AutoAck {
if err = ms.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
// single processing
@@ -217,11 +217,11 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
if eh != nil {
_ = eh(p)
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
m.opts.Logger.Error(m.opts.Context, "broker handler error", err)
}
} else if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
}
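The hunks above show the pattern repeated throughout this comparison: printf-style `Errorf`/`Tracef`/`Logf` calls are replaced by leveled calls that take a static message plus attribute values, with errors passed as values rather than formatted into the string. A minimal sketch of the two call shapes; `demoLogger` is a hypothetical stand-in, not the real go.unistack.org/micro/v3 broker or logger:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// demoLogger is a hypothetical type that exposes both call shapes so the
// before/after styles from this changeset can be compared side by side.
type demoLogger struct{}

// Old shape: printf formatting, the error is flattened into the message.
func (demoLogger) Errorf(_ context.Context, format string, args ...interface{}) {
	fmt.Println("old:", fmt.Sprintf(format, args...))
}

// New shape: a static message plus variadic attributes (error values, key/value pairs).
func (demoLogger) Error(_ context.Context, msg string, attrs ...interface{}) {
	fmt.Println("new:", msg, attrs)
}

func main() {
	ctx := context.Background()
	err := errors.New("ack failed")
	l := demoLogger{}

	l.Errorf(ctx, "ack failed: %v", err)  // style removed by this changeset
	l.Error(ctx, "broker ack error", err) // style introduced by this changeset
}
```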
@@ -4,6 +4,7 @@ package config
import (
"context"
"errors"
"fmt"
"reflect"
"time"
)
@@ -138,7 +139,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeLoad error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@@ -153,7 +154,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterLoad error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@@ -168,7 +169,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeSave error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@@ -183,7 +184,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterSave error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@@ -198,7 +199,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeInit error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@@ -213,7 +214,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterInit error", c.String(), err), err)
if !c.Options().AllowFail {
return err
}
@@ -188,7 +188,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
return "", err
}
@@ -212,7 +212,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
done := make(chan struct{})

if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
return eid, werr
}
for idx := range steps {
@@ -237,7 +237,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
w.opts.Logger.Trace(nctx, fmt.Sprintf("will be executed %v", steps[idx][nidx]))
}
cstep := steps[idx][nidx]
// nolint: nestif
@@ -257,21 +257,21 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
@@ -290,16 +290,16 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
@@ -317,7 +317,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return eid, nil
}

logger.Tracef(ctx, "wait for finish or error")
logger.DefaultLogger.Trace(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
@@ -333,15 +333,15 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
}
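For orientation, the executor above persists one workflow-level `status` record and, per step, a response record and a status record whose keys are built from the step ID and the store separator. A small illustrative helper; the `/` separator and the key composition shown here mirror the calls above but are an assumption about the concrete store configuration:

```go
package main

import "fmt"

// stepKeys mirrors how the step store keys in the hunks above are composed:
// <stepID><separator>"rsp" for the step response and <stepID><separator>"status"
// for its status. The separator value is store-specific; "/" is assumed here.
func stepKeys(stepID, separator string) (rspKey, statusKey string) {
	return stepID + separator + "rsp", stepID + separator + "status"
}

func main() {
	rsp, status := stepKeys("step-1", "/")
	fmt.Println(rsp)    // step-1/rsp
	fmt.Println(status) // step-1/status
}
```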
@@ -17,7 +17,7 @@ func TestFSMStart(t *testing.T) {

wrapper := func(next StateFunc) StateFunc {
return func(sctx context.Context, s State, opts ...StateOption) (State, error) {
sctx = logger.NewContext(sctx, logger.Fields("state", s.Name()))
sctx = logger.NewContext(sctx, logger.DefaultLogger.Fields("state", s.Name()))
return next(sctx, s, opts...)
}
}
go.mod (16 changed lines)

@@ -1,20 +1,32 @@
module go.unistack.org/micro/v3

go 1.20
go 1.22

require (
dario.cat/mergo v1.0.0
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/KimMachineGun/automemlimit v0.6.1
github.com/google/uuid v1.3.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v3 v3.4.1
golang.org/x/sync v0.3.0
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
google.golang.org/protobuf v1.33.0
)

require (
github.com/cilium/ebpf v0.9.1 // indirect
github.com/containerd/cgroups/v3 v3.0.1 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect
)
49
go.sum
49
go.sum
@ -2,23 +2,68 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
|
||||
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
|
||||
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
|
||||
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
|
||||
github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
|
||||
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
|
||||
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||
@ -26,8 +71,8 @@ google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
logger/logger.go (135 changed lines)

@@ -14,8 +14,6 @@ var (
DefaultLogger Logger = NewLogger()
// DefaultLevel used by logger
DefaultLevel = InfoLevel
// DefaultCallerSkipCount used by logger
DefaultCallerSkipCount = 2
)

// Logger is a generic logging interface
@@ -33,33 +31,19 @@ type Logger interface {
// Fields set fields to always be logged with keyval pairs
Fields(fields ...interface{}) Logger
// Info level message
Info(ctx context.Context, args ...interface{})
Info(ctx context.Context, msg string, args ...interface{})
// Trace level message
Trace(ctx context.Context, args ...interface{})
Trace(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debug(ctx context.Context, args ...interface{})
Debug(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warn(ctx context.Context, args ...interface{})
Warn(ctx context.Context, msg string, args ...interface{})
// Error level message
Error(ctx context.Context, args ...interface{})
Error(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatal(ctx context.Context, args ...interface{})
// Infof level message
Infof(ctx context.Context, msg string, args ...interface{})
// Tracef level message
Tracef(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debugf(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warnf(ctx context.Context, msg string, args ...interface{})
// Error level message
Errorf(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatalf(ctx context.Context, msg string, args ...interface{})
Fatal(ctx context.Context, msg string, args ...interface{})
// Log logs message with needed level
Log(ctx context.Context, level Level, args ...interface{})
// Logf logs message with needed level
Logf(ctx context.Context, level Level, msg string, args ...interface{})
Log(ctx context.Context, level Level, msg string, args ...interface{})
// Name returns broker instance name
Name() string
// String returns the type of logger
@@ -68,108 +52,3 @@ type Logger interface {

// Field contains keyval pair
type Field interface{}

// Info writes msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Info(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Info(ctx, args...)
}

// Error writes msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Error(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Error(ctx, args...)
}

// Debug writes msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debug(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debug(ctx, args...)
}

// Warn writes msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warn(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warn(ctx, args...)
}

// Trace writes msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Trace(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Trace(ctx, args...)
}

// Fatal writes msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatal(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatal(ctx, args...)
}

// Infof writes formatted msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Infof(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Infof(ctx, msg, args...)
}

// Errorf writes formatted msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Errorf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Errorf(ctx, msg, args...)
}

// Debugf writes formatted msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debugf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debugf(ctx, msg, args...)
}

// Warnf writes formatted msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warnf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warnf(ctx, msg, args...)
}

// Tracef writes formatted msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Tracef(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Tracef(ctx, msg, args...)
}

// Fatalf writes formatted msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatalf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatalf(ctx, msg, args...)
}

// V returns true if passed level enabled in default logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func V(level Level) bool {
return DefaultLogger.V(level)
}

// Init initialize logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Init(opts ...Option) error {
return DefaultLogger.Init(opts...)
}

// Fields create logger with specific fields
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fields(fields ...interface{}) Logger {
return DefaultLogger.Fields(fields...)
}
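After this change every level has a single signature: a context, a static message, and variadic attributes; the formatted `...f` variants, `Logf`, and the deprecated package-level helpers are gone. A hedged usage sketch against the interface shown above (the in-package `NewLogger` is the no-op implementation, so a concrete logger such as the slog-based one would be used in practice; the attribute keys are illustrative):

```go
package main

import (
	"context"
	"errors"

	"go.unistack.org/micro/v3/logger"
)

func main() {
	ctx := context.Background()

	// NewLogger here returns the no-op logger; a real service would plug in
	// a concrete implementation (for example the slog-based one).
	l := logger.NewLogger(logger.WithLevel(logger.InfoLevel))
	if err := l.Init(); err != nil {
		panic(err)
	}

	// Static message plus key/value attributes.
	l.Info(ctx, "service started", "name", "example")

	// Errors are passed as attribute values instead of being formatted in.
	l.Error(ctx, "store write error", errors.New("connection reset"))

	// Log now also takes the message explicitly.
	l.Log(ctx, logger.ErrorLevel, "generic message")
}
```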
@@ -4,12 +4,17 @@ import (
"context"
)

const (
defaultCallerSkipCount = 2
)

type noopLogger struct {
opts Options
}

func NewLogger(opts ...Option) Logger {
options := NewOptions(opts...)
options.CallerSkipCount = defaultCallerSkipCount
return &noopLogger{opts: options}
}

@@ -51,44 +56,23 @@ func (l *noopLogger) String() string {
return "noop"
}

func (l *noopLogger) Log(ctx context.Context, lvl Level, attrs ...interface{}) {
func (l *noopLogger) Log(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Info(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Debug(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Error(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Trace(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Warn(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Fatal(ctx context.Context, attrs ...interface{}) {
}

func (l *noopLogger) Logf(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
}

func (l *noopLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
func (l *noopLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
}
@@ -23,7 +23,7 @@ type Options struct {
Name string
// Fields holds additional metadata
Fields []interface{}
// CallerSkipCount number of frmaes to skip
// callerSkipCount number of frmaes to skip
CallerSkipCount int
// ContextAttrFuncs contains funcs that executed before log func on context
ContextAttrFuncs []ContextAttrFunc
@@ -57,7 +57,6 @@ func NewOptions(opts ...Option) Options {
Level: DefaultLevel,
Fields: make([]interface{}, 0, 6),
Out: os.Stderr,
CallerSkipCount: DefaultCallerSkipCount,
Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
AddSource: true,
@@ -102,27 +101,20 @@ func WithOutput(out io.Writer) Option {
}
}

// WitAddStacktrace controls writing stacktrace on error
// WithAddStacktrace controls writing stacktrace on error
func WithAddStacktrace(v bool) Option {
return func(o *Options) {
o.AddStacktrace = v
}
}

// WitAddSource controls writing source file and pos in log
// WithAddSource controls writing source file and pos in log
func WithAddSource(v bool) Option {
return func(o *Options) {
o.AddSource = v
}
}

// WithCallerSkipCount set frame count to skip
func WithCallerSkipCount(c int) Option {
return func(o *Options) {
o.CallerSkipCount = c
}
}

// WithContext set context
func WithContext(ctx context.Context) Option {
return func(o *Options) {
@@ -198,6 +190,8 @@ func WithMicroKeys() Option {
// WithAddCallerSkipCount add skip count for copy logger
func WithAddCallerSkipCount(n int) Option {
return func(o *Options) {
o.CallerSkipCount += n
if n > 0 {
o.CallerSkipCount += n
}
}
}
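One behavioural note on the last hunk: `WithAddCallerSkipCount` now only applies positive increments, so zero or negative values leave the configured skip count untouched. A reduced, self-contained sketch of that guard (the types here are simplified stand-ins, not the real Options):

```go
package main

import "fmt"

// options is a reduced stand-in for the logger Options struct.
type options struct{ CallerSkipCount int }

// option mirrors the functional-option pattern used by the logger package.
type option func(*options)

// withAddCallerSkipCount reproduces the guarded behaviour introduced above:
// only positive increments are applied.
func withAddCallerSkipCount(n int) option {
	return func(o *options) {
		if n > 0 {
			o.CallerSkipCount += n
		}
	}
}

func main() {
	o := options{CallerSkipCount: 2}
	withAddCallerSkipCount(1)(&o)  // 2 -> 3
	withAddCallerSkipCount(-5)(&o) // ignored after this change
	fmt.Println(o.CallerSkipCount) // 3
}
```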
@ -2,7 +2,6 @@ package slog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"regexp"
|
||||
@ -15,6 +14,12 @@ import (
|
||||
"go.unistack.org/micro/v3/tracer"
|
||||
)
|
||||
|
||||
const (
|
||||
badKey = "!BADKEY"
|
||||
// defaultCallerSkipCount used by logger
|
||||
defaultCallerSkipCount = 3
|
||||
)
|
||||
|
||||
var reTrace = regexp.MustCompile(`.*/slog/logger\.go.*\n`)
|
||||
|
||||
var (
|
||||
@ -150,386 +155,33 @@ func (s *slogLogger) Init(opts ...logger.Option) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
|
||||
if !s.V(lvl) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, lvl, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Logf(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
|
||||
if !s.V(lvl) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, (slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1])))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.InfoLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Info(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
|
||||
if !s.V(logger.InfoLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.DebugLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
|
||||
if !s.V(logger.InfoLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.TraceLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Debug(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
|
||||
if !s.V(logger.DebugLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.ErrorLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
|
||||
if !s.V(logger.DebugLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Trace(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
|
||||
if !s.V(logger.TraceLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
|
||||
if !s.V(logger.TraceLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Error(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
|
||||
if !s.V(logger.ErrorLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
|
||||
if !s.V(logger.ErrorLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.opts.AddStacktrace {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.Add(attrs...)
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
if a.Key == s.opts.ErrorKey {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, a.Value.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Fatal(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
|
||||
if !s.V(logger.FatalLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.FatalLevel, msg, attrs...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
|
||||
if !s.V(logger.FatalLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Warn(ctx context.Context, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
|
||||
if !s.V(logger.WarnLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, fmt.Sprintf("%s", attrs[0]), pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
|
||||
if !s.V(logger.WarnLevel) {
|
||||
return
|
||||
}
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, msg, pcs[0])
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
attrs = append(attrs, fn(ctx)...)
|
||||
}
|
||||
|
||||
for idx, attr := range attrs {
|
||||
if ve, ok := attr.(error); ok && ve != nil {
|
||||
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
r.Add(attrs[1:]...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
func (s *slogLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
|
||||
s.printLog(ctx, logger.WarnLevel, msg, attrs...)
|
||||
}
|
||||
|
||||
func (s *slogLogger) Name() string {
|
||||
@ -540,10 +192,59 @@ func (s *slogLogger) String() string {
|
||||
return "slog"
|
||||
}
|
||||
|
||||
func (s *slogLogger) printLog(ctx context.Context, lvl logger.Level, msg string, args ...interface{}) {
|
||||
if !s.V(lvl) {
|
||||
return
|
||||
}
|
||||
var argError error
|
||||
|
||||
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
|
||||
|
||||
attrs, err := s.argsAttrs(args)
|
||||
if err != nil {
|
||||
argError = err
|
||||
}
|
||||
if argError != nil {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, argError.Error())
|
||||
}
|
||||
}
|
||||
|
||||
for _, fn := range s.opts.ContextAttrFuncs {
|
||||
ctxAttrs, err := s.argsAttrs(fn(ctx))
|
||||
if err != nil {
|
||||
argError = err
|
||||
}
|
||||
attrs = append(attrs, ctxAttrs...)
|
||||
}
|
||||
if argError != nil {
|
||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||
span.SetStatus(tracer.SpanStatusError, argError.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
|
||||
stackInfo := make([]byte, 1024*1024)
|
||||
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
|
||||
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
|
||||
if len(traceLines) != 0 {
|
||||
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var pcs [1]uintptr
|
||||
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, printLog, LogLvlMethod]
|
||||
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
|
||||
r.AddAttrs(attrs...)
|
||||
_ = s.handler.Handle(ctx, r)
|
||||
}
|
||||
|
||||
func NewLogger(opts ...logger.Option) logger.Logger {
|
||||
s := &slogLogger{
|
||||
opts: logger.NewOptions(opts...),
|
||||
}
|
||||
s.opts.CallerSkipCount = defaultCallerSkipCount
|
||||
|
||||
return s
|
||||
}
|
||||
@ -581,3 +282,27 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
|
||||
return logger.InfoLevel
|
||||
}
|
||||
}
|
||||
|
||||
func (s *slogLogger) argsAttrs(args []interface{}) ([]slog.Attr, error) {
|
||||
attrs := make([]slog.Attr, 0, len(args))
|
||||
var err error
|
||||
|
||||
for idx := 0; idx < len(args); idx++ {
|
||||
switch arg := args[idx].(type) {
|
||||
case slog.Attr:
|
||||
attrs = append(attrs, arg)
|
||||
case string:
|
||||
if idx+1 < len(args) {
|
||||
attrs = append(attrs, slog.Any(arg, args[idx+1]))
|
||||
idx += 1
|
||||
} else {
|
||||
attrs = append(attrs, slog.String(badKey, arg))
|
||||
}
|
||||
case error:
|
||||
attrs = append(attrs, slog.String(s.opts.ErrorKey, arg.Error()))
|
||||
err = arg
|
||||
}
|
||||
}
|
||||
|
||||
return attrs, err
|
||||
}
|
||||
|
@ -3,10 +3,15 @@ package slog
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.unistack.org/micro/v3/metadata"
|
||||
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
)
|
||||
|
||||
@ -29,13 +34,22 @@ func TestError(t *testing.T) {
|
||||
|
||||
func TestErrorf(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithAddStacktrace(true))
|
||||
if err := l.Init(); err != nil {
|
||||
if err := l.Init(logger.WithContextAttrFuncs(func(ctx context.Context) []interface{} {
|
||||
return nil
|
||||
})); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Errorf(ctx, "message", fmt.Errorf("error message"))
|
||||
l.Log(ctx, logger.ErrorLevel, "message", errors.New("error msg"))
|
||||
|
||||
l.Log(ctx, logger.ErrorLevel, "", errors.New("error msg"))
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"error":"error msg"`)) {
|
||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
|
||||
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
@ -99,6 +113,11 @@ func TestFromContextWithFields(t *testing.T) {
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
|
||||
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
|
||||
l.Info(ctx, "test", "uncorrected number attributes")
|
||||
if !bytes.Contains(buf.Bytes(), []byte(`"!BADKEY":"uncorrected number attributes"`)) {
|
||||
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
@ -174,3 +193,45 @@ func TestLogger(t *testing.T) {
|
||||
t.Fatalf("logger warn, buf %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func Test_WithContextAttrFunc(t *testing.T) {
|
||||
loggerContextAttrFuncs := []logger.ContextAttrFunc{
|
||||
func(ctx context.Context) []interface{} {
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
attrs := make([]interface{}, 0, 10)
|
||||
for k, v := range md {
|
||||
switch k {
|
||||
case "X-Request-Id", "Phone", "External-Id", "Source-Service", "X-App-Install-Id", "Client-Id", "Client-Ip":
|
||||
attrs = append(attrs, strings.ToLower(k), v)
|
||||
}
|
||||
}
|
||||
return attrs
|
||||
},
|
||||
}
|
||||
|
||||
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
|
||||
|
||||
ctx := context.TODO()
|
||||
ctx = metadata.AppendIncomingContext(ctx, "X-Request-Id", uuid.New().String(),
|
||||
"Source-Service", "Test-System")
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
|
||||
if err := l.Init(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
l.Info(ctx, "test message")
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test message"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"x-request-id":"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
if !(bytes.Contains(buf.Bytes(), []byte(`"source-service":"Test-System"`))) {
|
||||
t.Fatalf("logger info, buf %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
@ -1,399 +0,0 @@
|
||||
// Package wrapper provides wrapper for Logger
|
||||
package wrapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.unistack.org/micro/v3/client"
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
"go.unistack.org/micro/v3/server"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultClientCallObserver called by wrapper in client Call
|
||||
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientStreamObserver called by wrapper in client Stream
|
||||
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientPublishObserver called by wrapper in client Publish
|
||||
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, err error) []string {
|
||||
labels := []string{"endpoint", msg.Topic()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultServerHandlerObserver called by wrapper in server Handler
|
||||
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultServerSubscriberObserver called by wrapper in server Subscriber
|
||||
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, err error) []string {
|
||||
labels := []string{"endpoint", msg.Topic()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultClientCallFuncObserver called by wrapper in client CallFunc
|
||||
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
|
||||
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
|
||||
if err != nil {
|
||||
labels = append(labels, "error", err.Error())
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// DefaultSkipEndpoints wrapper not called for this endpoints
|
||||
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
|
||||
)
|
||||
|
||||
type lWrapper struct {
|
||||
client.Client
|
||||
serverHandler server.HandlerFunc
|
||||
serverSubscriber server.SubscriberFunc
|
||||
clientCallFunc client.CallFunc
|
||||
opts Options
|
||||
}
|
||||
|
||||
type (
|
||||
// ClientCallObserver func signature
|
||||
ClientCallObserver func(context.Context, client.Request, interface{}, []client.CallOption, error) []string
|
||||
// ClientStreamObserver func signature
|
||||
ClientStreamObserver func(context.Context, client.Request, []client.CallOption, client.Stream, error) []string
|
||||
// ClientPublishObserver func signature
|
||||
ClientPublishObserver func(context.Context, client.Message, []client.PublishOption, error) []string
|
||||
// ClientCallFuncObserver func signature
|
||||
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, error) []string
|
||||
// ServerHandlerObserver func signature
|
||||
ServerHandlerObserver func(context.Context, server.Request, interface{}, error) []string
|
||||
// ServerSubscriberObserver func signature
|
||||
ServerSubscriberObserver func(context.Context, server.Message, error) []string
|
||||
)
|
||||
|
||||
// Options struct for wrapper
type Options struct {
    // Logger used for logging
    Logger logger.Logger
    // ServerHandlerObservers funcs
    ServerHandlerObservers []ServerHandlerObserver
    // ServerSubscriberObservers funcs
    ServerSubscriberObservers []ServerSubscriberObserver
    // ClientCallObservers funcs
    ClientCallObservers []ClientCallObserver
    // ClientStreamObservers funcs
    ClientStreamObservers []ClientStreamObserver
    // ClientPublishObservers funcs
    ClientPublishObservers []ClientPublishObserver
    // ClientCallFuncObservers funcs
    ClientCallFuncObservers []ClientCallFuncObserver
    // SkipEndpoints list of endpoints that are not logged
    SkipEndpoints []string
    // Level for logger
    Level logger.Level
    // Enabled flag
    Enabled bool
}
// Option func signature
type Option func(*Options)

// NewOptions creates Options from Option slice
func NewOptions(opts ...Option) Options {
    options := Options{
        Logger:                    logger.DefaultLogger,
        Level:                     logger.TraceLevel,
        ClientCallObservers:       []ClientCallObserver{DefaultClientCallObserver},
        ClientStreamObservers:     []ClientStreamObserver{DefaultClientStreamObserver},
        ClientPublishObservers:    []ClientPublishObserver{DefaultClientPublishObserver},
        ClientCallFuncObservers:   []ClientCallFuncObserver{DefaultClientCallFuncObserver},
        ServerHandlerObservers:    []ServerHandlerObserver{DefaultServerHandlerObserver},
        ServerSubscriberObservers: []ServerSubscriberObserver{DefaultServerSubscriberObserver},
        SkipEndpoints:             DefaultSkipEndpoints,
    }

    for _, o := range opts {
        o(&options)
    }

    return options
}
// WithEnabled enable/disable flag
func WithEnabled(b bool) Option {
    return func(o *Options) {
        o.Enabled = b
    }
}

// WithLevel log level
func WithLevel(l logger.Level) Option {
    return func(o *Options) {
        o.Level = l
    }
}

// WithLogger logger
func WithLogger(l logger.Logger) Option {
    return func(o *Options) {
        o.Logger = l
    }
}

// WithClientCallObservers funcs
func WithClientCallObservers(ob ...ClientCallObserver) Option {
    return func(o *Options) {
        o.ClientCallObservers = ob
    }
}

// WithClientStreamObservers funcs
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
    return func(o *Options) {
        o.ClientStreamObservers = ob
    }
}

// WithClientPublishObservers funcs
func WithClientPublishObservers(ob ...ClientPublishObserver) Option {
    return func(o *Options) {
        o.ClientPublishObservers = ob
    }
}

// WithClientCallFuncObservers funcs
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
    return func(o *Options) {
        o.ClientCallFuncObservers = ob
    }
}

// WithServerHandlerObservers funcs
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
    return func(o *Options) {
        o.ServerHandlerObservers = ob
    }
}

// WithServerSubscriberObservers funcs
func WithServerSubscriberObservers(ob ...ServerSubscriberObserver) Option {
    return func(o *Options) {
        o.ServerSubscriberObservers = ob
    }
}

// SkipEndpoints appends endpoints that should not be logged
func SkipEndpoints(eps ...string) Option {
    return func(o *Options) {
        o.SkipEndpoints = append(o.SkipEndpoints, eps...)
    }
}
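A hedged example of composing these options; the extra observer and the endpoint name are illustrative and not part of the package:

```go
func exampleOptions() []Option {
    return []Option{
        WithEnabled(true),
        WithLevel(logger.DebugLevel),
        SkipEndpoints("Example.Noisy"),
        WithClientCallObservers(
            DefaultClientCallObserver,
            func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, err error) []string {
                // attach a static label alongside the defaults
                return []string{"component", "example"}
            },
        ),
    }
}
```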
func (l *lWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
    err := l.Client.Call(ctx, req, rsp, opts...)

    endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return err
        }
    }

    if !l.opts.Enabled {
        return err
    }

    var labels []string
    for _, o := range l.opts.ClientCallObservers {
        labels = append(labels, o(ctx, req, rsp, opts, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return err
}

func (l *lWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
    stream, err := l.Client.Stream(ctx, req, opts...)

    endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return stream, err
        }
    }

    if !l.opts.Enabled {
        return stream, err
    }

    var labels []string
    for _, o := range l.opts.ClientStreamObservers {
        labels = append(labels, o(ctx, req, opts, stream, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return stream, err
}

func (l *lWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
    err := l.Client.Publish(ctx, msg, opts...)

    endpoint := msg.Topic()
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return err
        }
    }

    if !l.opts.Enabled {
        return err
    }

    var labels []string
    for _, o := range l.opts.ClientPublishObservers {
        labels = append(labels, o(ctx, msg, opts, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return err
}

func (l *lWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
    err := l.serverHandler(ctx, req, rsp)

    endpoint := req.Endpoint()
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return err
        }
    }

    if !l.opts.Enabled {
        return err
    }

    var labels []string
    for _, o := range l.opts.ServerHandlerObservers {
        labels = append(labels, o(ctx, req, rsp, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return err
}

func (l *lWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
    err := l.serverSubscriber(ctx, msg)

    endpoint := msg.Topic()
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return err
        }
    }

    if !l.opts.Enabled {
        return err
    }

    var labels []string
    for _, o := range l.opts.ServerSubscriberObservers {
        labels = append(labels, o(ctx, msg, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return err
}
// NewClientWrapper accepts options and returns a Client Wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
    return func(c client.Client) client.Client {
        options := NewOptions()
        for _, o := range opts {
            o(&options)
        }
        return &lWrapper{opts: options, Client: c}
    }
}
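A minimal sketch of attaching the wrapper, where `base` stands for any existing client.Client implementation:

```go
func wrapClient(base client.Client) client.Client {
    wrap := NewClientWrapper(WithEnabled(true), WithLevel(logger.InfoLevel))
    return wrap(base)
}
```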
// NewClientCallWrapper accepts options and returns a Call Wrapper
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
    return func(h client.CallFunc) client.CallFunc {
        options := NewOptions()
        for _, o := range opts {
            o(&options)
        }

        l := &lWrapper{opts: options, clientCallFunc: h}
        return l.ClientCallFunc
    }
}
func (l *lWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
    err := l.clientCallFunc(ctx, addr, req, rsp, opts)

    endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
    for _, ep := range l.opts.SkipEndpoints {
        if ep == endpoint {
            return err
        }
    }

    if !l.opts.Enabled {
        return err
    }

    var labels []string
    for _, o := range l.opts.ClientCallFuncObservers {
        labels = append(labels, o(ctx, addr, req, rsp, opts, err)...)
    }
    l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)

    return err
}
// NewServerHandlerWrapper accepts options and returns a Handler Wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
    return func(h server.HandlerFunc) server.HandlerFunc {
        options := NewOptions()
        for _, o := range opts {
            o(&options)
        }

        l := &lWrapper{opts: options, serverHandler: h}
        return l.ServerHandler
    }
}

// NewServerSubscriberWrapper accepts options and returns a Subscriber Wrapper
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
    return func(h server.SubscriberFunc) server.SubscriberFunc {
        options := NewOptions()
        for _, o := range opts {
            o(&options)
        }

        l := &lWrapper{opts: options, serverSubscriber: h}
        return l.ServerSubscriber
    }
}
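The server side mirrors the client wrapper; the wrappers below are then passed wherever the server accepts handler and subscriber wrappers (a sketch, reusing the illustrative options from above):

```go
func serverWrappers() (server.HandlerWrapper, server.SubscriberWrapper) {
    o := []Option{WithEnabled(true), WithLevel(logger.InfoLevel)}
    return NewServerHandlerWrapper(o...), NewServerSubscriberWrapper(o...)
}
```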
@ -177,12 +177,12 @@ func (t *tunBatchSubscriber) run() {
        // receive message
        m := new(transport.Message)
        if err := c.Recv(m); err != nil {
            if logger.V(logger.ErrorLevel) {
                logger.Error(t.opts.Context, err.Error())
            if logger.DefaultLogger.V(logger.ErrorLevel) {
                logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
            }
            if err = c.Close(); err != nil {
                if logger.V(logger.ErrorLevel) {
                    logger.Error(t.opts.Context, err.Error())
                if logger.DefaultLogger.V(logger.ErrorLevel) {
                    logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
                }
            }
            continue
@ -222,12 +222,12 @@ func (t *tunSubscriber) run() {
        // receive message
        m := new(transport.Message)
        if err := c.Recv(m); err != nil {
            if logger.V(logger.ErrorLevel) {
                logger.Error(t.opts.Context, err.Error())
            if logger.DefaultLogger.V(logger.ErrorLevel) {
                logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
            }
            if err = c.Close(); err != nil {
                if logger.V(logger.ErrorLevel) {
                    logger.Error(t.opts.Context, err.Error())
                if logger.DefaultLogger.V(logger.ErrorLevel) {
                    logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
                }
            }
            continue
@ -2,6 +2,7 @@ package register

import (
    "context"
    "fmt"
    "sync"
    "time"

@ -64,7 +65,7 @@ func (m *memory) ttlPrune() {
        for id, n := range record.Nodes {
            if n.TTL != 0 && time.Since(n.LastSeen) > n.TTL {
                if m.opts.Logger.V(logger.DebugLevel) {
                    m.opts.Logger.Debugf(m.opts.Context, "Register TTL expired for node %s of service %s", n.ID, service)
                    m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register TTL expired for node %s of service %s", n.ID, service))
                }
                delete(m.records[domain][service][version].Nodes, id)
            }
@ -151,7 +152,7 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi
    if _, ok := srvs[s.Name][s.Version]; !ok {
        srvs[s.Name][s.Version] = r
        if m.opts.Logger.V(logger.DebugLevel) {
            m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version)
            m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new service: %s, version: %s", s.Name, s.Version))
        }
        m.records[options.Domain] = srvs
        go m.sendEvent(&register.Result{Action: "create", Service: s})
@ -191,14 +192,14 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi

    if addedNodes {
        if m.opts.Logger.V(logger.DebugLevel) {
            m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version)
            m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new node to service: %s, version: %s", s.Name, s.Version))
        }
        go m.sendEvent(&register.Result{Action: "update", Service: s})
    } else {
        // refresh TTL and timestamp
        for _, n := range s.Nodes {
            if m.opts.Logger.V(logger.DebugLevel) {
                m.opts.Logger.Debugf(m.opts.Context, "Updated registration for service: %s, version: %s", s.Name, s.Version)
                m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Updated registration for service: %s, version: %s", s.Name, s.Version))
            }
            srvs[s.Name][s.Version].Nodes[n.ID].TTL = options.TTL
            srvs[s.Name][s.Version].Nodes[n.ID].LastSeen = time.Now()
@ -243,7 +244,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
    for _, n := range s.Nodes {
        if _, ok := version.Nodes[n.ID]; ok {
            if m.opts.Logger.V(logger.DebugLevel) {
                m.opts.Logger.Debugf(m.opts.Context, "Register removed node from service: %s, version: %s", s.Name, s.Version)
                m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed node from service: %s, version: %s", s.Name, s.Version))
            }
            delete(version.Nodes, n.ID)
        }
@ -264,7 +265,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
        go m.sendEvent(&register.Result{Action: "delete", Service: s})

        if m.opts.Logger.V(logger.DebugLevel) {
            m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name)
            m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s", s.Name))
        }
        return nil
    }
@ -273,7 +274,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
    delete(m.records[options.Domain][s.Name], s.Version)
    go m.sendEvent(&register.Result{Action: "delete", Service: s})
    if m.opts.Logger.V(logger.DebugLevel) {
        m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version)
        m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s, version: %s", s.Name, s.Version))
    }

    return nil
@ -459,7 +459,7 @@ func (n *noopServer) Start() error {
            }
        } else if rerr != nil && !registered {
            if config.Logger.V(logger.ErrorLevel) {
                config.Logger.Errorf(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
                config.Logger.Error(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
            }
            continue
        }
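The hunks above and below all follow the same migration: the format-style helpers (Errorf, Debugf, Infof) are replaced by a level guard plus a plain Error/Debug/Info call that takes a pre-formatted message and, for errors, the error value itself. A minimal sketch of the new shape, written in the same package style (the logger value, function name, and message are illustrative):

```go
func logNodeExpired(ctx context.Context, log logger.Logger, node, service string) {
    if log.V(logger.DebugLevel) {
        // the message is formatted up front; no more Debugf
        log.Debug(ctx, fmt.Sprintf("Register TTL expired for node %s of service %s", node, service))
    }
}
```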
service.go
@ -5,6 +5,8 @@ import (
    "fmt"
    "sync"

    "github.com/KimMachineGun/automemlimit/memlimit"
    "go.uber.org/automaxprocs/maxprocs"
    "go.unistack.org/micro/v3/broker"
    "go.unistack.org/micro/v3/client"
    "go.unistack.org/micro/v3/config"
@ -17,6 +19,19 @@ import (
    "go.unistack.org/micro/v3/tracer"
)

func init() {
    maxprocs.Set()
    memlimit.SetGoMemLimitWithOpts(
        memlimit.WithRatio(0.9),
        memlimit.WithProvider(
            memlimit.ApplyFallback(
                memlimit.FromCgroup,
                memlimit.FromSystem,
            ),
        ),
    )
}
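The new init block wires go.uber.org/automaxprocs and automemlimit at import time, so GOMAXPROCS follows the container CPU quota and the Go soft memory limit is set to 90% of the cgroup (or system) memory limit. Roughly the same effect, hard-coded for a hypothetical container with 4 CPUs and a 512 MiB memory limit, can be sketched with the standard library alone:

```go
package main

import (
    "runtime"
    "runtime/debug"
)

func main() {
    runtime.GOMAXPROCS(4)                                // what automaxprocs would derive from the CPU quota
    debug.SetMemoryLimit(int64(float64(512<<20) * 0.9))  // 90% of the limit, as WithRatio(0.9) does
}
```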
// Service is an interface that wraps the lower level components.
// Its works as container with building blocks for service.
type Service interface {
@ -72,13 +87,14 @@ func RegisterSubscriber(topic string, s server.Server, h interface{}, opts ...se
}

type service struct {
    done chan struct{}
    opts Options
    sync.RWMutex
}

// NewService creates and returns a new Service based on the packages within.
func NewService(opts ...Option) Service {
    return &service{opts: NewOptions(opts...)}
    return &service{opts: NewOptions(opts...), done: make(chan struct{})}
}

func (s *service) Name() string {
@ -262,7 +278,7 @@ func (s *service) Start() error {
    }

    if config.Loggers[0].V(logger.InfoLevel) {
        config.Loggers[0].Infof(s.opts.Context, "starting [service] %s version %s", s.Options().Name, s.Options().Version)
        config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("starting [service] %s version %s", s.Options().Name, s.Options().Version))
    }

    if len(s.opts.Servers) == 0 {
@ -308,7 +324,7 @@ func (s *service) Stop() error {
    s.RUnlock()

    if config.Loggers[0].V(logger.InfoLevel) {
        config.Loggers[0].Infof(s.opts.Context, "stoppping [service] %s", s.Name())
        config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name()))
    }

    var err error
@ -348,6 +364,8 @@ func (s *service) Stop() error {
        }
    }

    close(s.done)

    return nil
}

@ -371,7 +389,7 @@ func (s *service) Run() error {
    }

    // wait on context cancel
    <-s.opts.Context.Done()
    <-s.done

    return s.Stop()
}
@ -134,7 +134,7 @@ func TestNewService(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := NewService(tt.args.opts...); !reflect.DeepEqual(got, tt.want) {
            if got := NewService(tt.args.opts...); got.Name() != tt.want.Name() {
                t.Errorf("NewService() = %v, want %v", got.Options().Name, tt.want.Options().Name)
            }
        })
@ -7,6 +7,8 @@ import (
)

var (
    // ErrNotConnected is returned when a store is not connected
    ErrNotConnected = errors.New("not conected")
    // ErrNotFound is returned when a key doesn't exist
    ErrNotFound = errors.New("not found")
    // ErrInvalidKey is returned when a key has empty or have invalid format
@ -17,14 +17,14 @@ func TestLoggerWithTracer(t *testing.T) {
    buf := bytes.NewBuffer(nil)
    logger.DefaultLogger = slog.NewLogger(logger.WithOutput(buf))

    if err := logger.Init(); err != nil {
    if err := logger.DefaultLogger.Init(); err != nil {
        t.Fatal(err)
    }
    var span tracer.Span
    tr := NewTracer()
    ctx, span = tr.Start(ctx, "test1")

    logger.Error(ctx, "my test error", fmt.Errorf("error"))
    logger.DefaultLogger.Error(ctx, "my test error", fmt.Errorf("error"))

    if !strings.Contains(buf.String(), span.TraceID()) {
        t.Fatalf("log does not contains trace id: %s", buf.Bytes())
@ -71,7 +71,7 @@ func New(opts ...Option) (string, error) {
func Must(opts ...Option) string {
    id, err := New(opts...)
    if err != nil {
        logger.Fatal(context.TODO(), err)
        logger.DefaultLogger.Fatal(context.TODO(), "Must call is failed", err)
    }
    return id
}
@ -6,6 +6,8 @@ import (
    "fmt"
    "strconv"
    "time"

    "gopkg.in/yaml.v3"
)

type Duration int64
@ -53,6 +55,31 @@ loop:
    return time.ParseDuration(fmt.Sprintf("%dh%s", hours, s[p:]))
}

func (d Duration) MarshalYAML() (interface{}, error) {
    return time.Duration(d).String(), nil
}

func (d *Duration) UnmarshalYAML(n *yaml.Node) error {
    var v interface{}
    if err := yaml.Unmarshal([]byte(n.Value), &v); err != nil {
        return err
    }
    switch value := v.(type) {
    case float64:
        *d = Duration(time.Duration(value))
        return nil
    case string:
        dv, err := ParseDuration(value)
        if err != nil {
            return err
        }
        *d = Duration(dv)
        return nil
    default:
        return fmt.Errorf("invalid duration")
    }
}

func (d Duration) MarshalJSON() ([]byte, error) {
    return json.Marshal(time.Duration(d).String())
}
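With MarshalYAML/UnmarshalYAML in place, config structs can carry human-readable durations, including the extended units accepted by the package's ParseDuration (the tests below use "1y"). A small sketch, assuming it lives in the same package; the struct and field names are illustrative:

```go
type pollerConfig struct {
    Interval Duration `yaml:"interval"`
    TTL      Duration `yaml:"ttl"`
}

func parsePollerConfig(data []byte) (*pollerConfig, error) {
    cfg := &pollerConfig{}
    // e.g. data = []byte("interval: 30s\nttl: 1y")
    if err := yaml.Unmarshal(data, cfg); err != nil {
        return nil, err
    }
    return cfg, nil
}
```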
@ -5,8 +5,44 @@ import (
    "encoding/json"
    "testing"
    "time"

    "gopkg.in/yaml.v3"
)

func TestMarshalYAML(t *testing.T) {
    d := Duration(10000000)
    buf, err := yaml.Marshal(d)
    if err != nil {
        t.Fatal(err)
    }
    if !bytes.Equal(buf, []byte(`10ms
`)) {
        t.Fatalf("invalid duration: %s != %s", buf, `10ms`)
    }
}

func TestUnmarshalYAML(t *testing.T) {
    type str struct {
        TTL Duration `yaml:"ttl"`
    }
    v := &str{}
    var err error

    err = yaml.Unmarshal([]byte(`{"ttl":"10ms"}`), v)
    if err != nil {
        t.Fatal(err)
    } else if v.TTL != 10000000 {
        t.Fatalf("invalid duration %v != 10000000", v.TTL)
    }

    err = yaml.Unmarshal([]byte(`{"ttl":"1y"}`), v)
    if err != nil {
        t.Fatal(err)
    } else if v.TTL != 31622400000000000 {
        t.Fatalf("invalid duration %v != 31622400000000000", v.TTL)
    }
}

func TestMarshalJSON(t *testing.T) {
    d := Duration(10000000)
    buf, err := json.Marshal(d)