logger: slog improve

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
Vasiliy Tolstov 2023-10-14 18:59:27 +03:00
parent 7b9a4924ad
commit 5c3fa31ad2
14 changed files with 154 additions and 477 deletions


@@ -190,7 +190,7 @@ func (m *MemoryBroker) Publish(ctx context.Context, message interface{}, opts ..
} else {
if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "ack failed: "+err.Error())
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "failure").Inc()
} else {
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "success").Inc()
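The hunk above shows the shape of the mechanical change made across the repository: the formatted helpers (Errorf, Infof, ...) are removed and call sites pass a prebuilt message to the level methods, optionally followed by key/value attributes. A minimal sketch of the two call shapes, assuming the package-level helpers and the go.unistack.org/micro/v4/logger import path that appear later in this commit:

package main

import (
	"context"
	"errors"

	"go.unistack.org/micro/v4/logger"
)

func main() {
	ctx := context.TODO()
	err := errors.New("broken pipe")
	// old, removed in this commit: logger.Errorf(ctx, "ack failed: %v", err)
	// new: a prebuilt message ...
	logger.Error(ctx, "ack failed: "+err.Error())
	// ... or a message followed by key/value attributes
	logger.Error(ctx, "ack failed", "error", err.Error())
}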


@@ -133,7 +133,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeLoad error "+err.Error())
if !c.Options().AllowFail {
return err
}
@@ -148,7 +148,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterLoad error "+err.Error())
if !c.Options().AllowFail {
return err
}
@@ -163,7 +163,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeSave error "+err.Error())
if !c.Options().AllowFail {
return err
}
@@ -178,7 +178,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterSave error "+err.Error())
if !c.Options().AllowFail {
return err
}
@@ -193,7 +193,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" BeforeInit error "+err.Error())
if !c.Options().AllowFail {
return err
}
@@ -208,7 +208,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, c.String()+" AfterInit error "+err.Error())
if !c.Options().AllowFail {
return err
}


@@ -190,7 +190,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", "error", werr.Error())
}
return "", err
}
@@ -214,7 +214,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
done := make(chan struct{})
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", "error", werr.Error())
return eid, werr
}
for idx := range steps {
@@ -239,7 +239,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
w.opts.Logger.Trace(nctx, fmt.Sprintf("step will be executed %v", steps[idx][nidx]))
}
cstep := steps[idx][nidx]
// nolint: nestif
@@ -259,21 +259,21 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
cherr <- werr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
cherr <- werr
return
}
@@ -292,16 +292,16 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", "error", werr.Error())
cherr <- werr
return
}
@@ -319,7 +319,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
return eid, nil
}
logger.Tracef(ctx, "wait for finish or error")
w.opts.Logger.Trace(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
@@ -335,15 +335,15 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...optio
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", "error", werr.Error())
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", "error", werr.Error())
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", "error", werr.Error())
}
}
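The workflow hunks also switch from formatting the error into the message to passing it as an "error" attribute pair. A stdlib-only sketch of how a JSON handler renders such pairs, independent of the micro wrapper:

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	l := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	werr := errors.New("connection reset")
	// prints {"time":"...","level":"ERROR","msg":"store write error","error":"connection reset"}
	l.Error("store write error", "error", werr.Error())
}

Keeping the error in its own attribute leaves the message text constant and the error value queryable in log pipelines.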

go.mod

@@ -8,7 +8,6 @@ require (
github.com/google/uuid v1.3.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/sync v0.3.0
golang.org/x/sys v0.12.0
google.golang.org/grpc v1.58.2

go.sum

@@ -13,8 +13,6 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
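golang.org/x/exp disappears from go.mod and go.sum because log/slog has been part of the standard library since Go 1.21, which is what the logger now imports. A minimal sketch, assuming the module's go directive is at least 1.21 (the module path below is illustrative only):

package main

// go.mod for this sketch needs only:
//   module example.local/slogcheck
//   go 1.21

import "log/slog"

func main() {
	slog.Info("structured logging from the standard library", "pkg", "log/slog")
}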


@@ -32,33 +32,19 @@ type Logger interface {
// Fields set fields to always be logged with keyval pairs
Fields(fields ...interface{}) Logger
// Info level message
Info(ctx context.Context, args ...interface{})
// Trace level message
Trace(ctx context.Context, args ...interface{})
// Debug level message
Debug(ctx context.Context, args ...interface{})
// Warn level message
Warn(ctx context.Context, args ...interface{})
// Error level message
Error(ctx context.Context, args ...interface{})
// Fatal level message
Fatal(ctx context.Context, args ...interface{})
// Infof level message
Infof(ctx context.Context, msg string, args ...interface{})
Info(ctx context.Context, msg string, args ...interface{})
// Tracef level message
Tracef(ctx context.Context, msg string, args ...interface{})
Trace(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debugf(ctx context.Context, msg string, args ...interface{})
Debug(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warnf(ctx context.Context, msg string, args ...interface{})
Warn(ctx context.Context, msg string, args ...interface{})
// Error level message
Errorf(ctx context.Context, msg string, args ...interface{})
Error(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatalf(ctx context.Context, msg string, args ...interface{})
Fatal(ctx context.Context, msg string, args ...interface{})
// Log logs message with needed level
Log(ctx context.Context, level Level, args ...interface{})
// Logf logs message with needed level
Logf(ctx context.Context, level Level, msg string, args ...interface{})
Log(ctx context.Context, level Level, msg string, args ...interface{})
// String returns the name of logger
String() string
}
@@ -66,64 +52,34 @@ type Logger interface {
// Field contains keyval pair
type Field interface{}
// Info writes msg to default logger on info level
func Info(ctx context.Context, args ...interface{}) {
DefaultLogger.Info(ctx, args...)
// Info writes formatted msg to default logger on info level
func Info(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Info(ctx, msg, args...)
}
// Error writes msg to default logger on error level
func Error(ctx context.Context, args ...interface{}) {
DefaultLogger.Error(ctx, args...)
}
// Debug writes msg to default logger on debug level
func Debug(ctx context.Context, args ...interface{}) {
DefaultLogger.Debug(ctx, args...)
}
// Warn writes msg to default logger on warn level
func Warn(ctx context.Context, args ...interface{}) {
DefaultLogger.Warn(ctx, args...)
}
// Trace writes msg to default logger on trace level
func Trace(ctx context.Context, args ...interface{}) {
DefaultLogger.Trace(ctx, args...)
}
// Fatal writes msg to default logger on fatal level
func Fatal(ctx context.Context, args ...interface{}) {
DefaultLogger.Fatal(ctx, args...)
}
// Infof writes formatted msg to default logger on info level
func Infof(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Infof(ctx, msg, args...)
}
// Errorf writes formatted msg to default logger on error level
func Errorf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Errorf(ctx, msg, args...)
// Error writes formatted msg to default logger on error level
func Error(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Error(ctx, msg, args...)
}
// Debugf writes formatted msg to default logger on debug level
func Debugf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Debugf(ctx, msg, args...)
DefaultLogger.Debug(ctx, msg, args...)
}
// Warnf writes formatted msg to default logger on warn level
func Warnf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Warnf(ctx, msg, args...)
// Warn writes formatted msg to default logger on warn level
func Warn(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Warn(ctx, msg, args...)
}
// Tracef writes formatted msg to default logger on trace level
func Tracef(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Tracef(ctx, msg, args...)
// Trace writes formatted msg to default logger on trace level
func Trace(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Trace(ctx, msg, args...)
}
// Fatalf writes formatted msg to default logger on fatal level
func Fatalf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Fatalf(ctx, msg, args...)
// Fatal writes formatted msg to default logger on fatal level
func Fatal(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Fatal(ctx, msg, args...)
}
// V returns true if passed level enabled in default logger
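With this change every level method, and each package-level helper that forwards to DefaultLogger, takes a message string first and treats the variadic tail as key/value attributes rather than Printf arguments. A short usage sketch, assuming the package-level Info and V helpers and the DefaultLogger variable shown in this hunk:

package main

import (
	"context"

	"go.unistack.org/micro/v4/logger"
)

func main() {
	ctx := context.TODO()
	// the variadic tail is key/value attributes, not Printf arguments
	logger.Info(ctx, "service starting", "version", "v4.0.0")
	// level checks still gate expensive log construction
	if logger.V(logger.DebugLevel) {
		logger.DefaultLogger.Debug(ctx, "debug output enabled")
	}
	// Log takes an explicit level plus the same msg/attribute shape
	logger.DefaultLogger.Log(ctx, logger.ErrorLevel, "explicit level", "code", "500")
}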


@@ -94,7 +94,7 @@ func TestRedirectStdLogger(t *testing.T) {
fn := RedirectStdLogger(l, ErrorLevel)
defer fn()
log.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test"`)) {
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
@@ -107,7 +107,7 @@ func TestStdLogger(t *testing.T) {
}
lg := NewStdLogger(l, ErrorLevel)
lg.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test"`)) {
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
@@ -120,21 +120,20 @@ func TestLogger(t *testing.T) {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
// l.Warn(ctx, "warn_msg1")
// l.Fields("error", "test").Info(ctx, "error message")
// l.Warn(ctx, "first second")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"trace","msg":"trace_msg1"`)) {
l.Warn(ctx, "warn_msg1")
l.Fields("error", "test").Info(ctx, "error message")
l.Warn(ctx, "first second")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"trace"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"trace_msg1"`))) {
t.Fatalf("logger tracer, buf %s", buf.Bytes())
}
/*
if !bytes.Contains(buf.Bytes(), []byte(`"warn","msg":"warn_msg1"`)) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"test","level":"info","msg":"error message"`)) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"level":"warn","msg":"first second"`)) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
*/
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"warn_msg1"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"error message","error":"test"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"first second"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
}
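The assertions above now check the level and msg fields with two separate bytes.Contains calls so they no longer depend on the attribute order slog happens to emit. A purely illustrative alternative is to decode the JSON line instead; the helper below is hypothetical and assumes the buffer holds exactly one log line:

package logger_test

import (
	"bytes"
	"encoding/json"
	"testing"
)

// assertJSONField decodes a single JSON log line and checks one field,
// making the assertion independent of attribute ordering.
func assertJSONField(t *testing.T, buf *bytes.Buffer, key, want string) {
	t.Helper()
	var m map[string]interface{}
	if err := json.Unmarshal(bytes.TrimSpace(buf.Bytes()), &m); err != nil {
		t.Fatalf("invalid json log %q: %v", buf.Bytes(), err)
	}
	if got, _ := m[key].(string); got != want {
		t.Fatalf("field %q = %q, want %q", key, got, want)
	}
}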


@@ -2,12 +2,13 @@ package logger
import (
"context"
"fmt"
"log/slog"
"os"
"runtime"
"strconv"
"time"
"go.unistack.org/micro/v4/options"
"golang.org/x/exp/slog"
)
var (
@@ -21,6 +22,13 @@ var (
var renameAttr = func(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.SourceKey:
source := a.Value.Any().(*slog.Source)
a.Value = slog.StringValue(source.File + ":" + strconv.Itoa(source.Line))
a.Key = "caller"
// add func?
// "trace": "<traceid>",
// "span": "<spanid>",
case slog.TimeKey:
a.Key = "timestamp"
case slog.LevelKey:
@@ -73,6 +81,7 @@ func (s *slogLogger) Clone(opts ...options.Option) Logger {
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: l.leveler,
AddSource: true,
}
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
handler := slog.NewJSONHandler(options.Out, handleOpt)
@@ -101,6 +110,7 @@ func (s *slogLogger) Fields(fields ...interface{}) Logger {
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: s.leveler,
AddSource: true,
}
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
@@ -125,6 +135,7 @@ func (s *slogLogger) Init(opts ...options.Option) error {
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: s.leveler,
AddSource: true,
}
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
@@ -133,70 +144,82 @@ func (s *slogLogger) Init(opts ...options.Option) error {
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl Level, args ...any) {
func (s *slogLogger) Log(ctx context.Context, lvl Level, msg string, args ...any) {
if !s.V(lvl) {
return
}
slvl := loggerToSlogLevel(lvl)
msg := fmt.Sprint(args...)
s.slog.Log(ctx, slvl, msg)
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), loggerToSlogLevel(lvl), msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Logf(ctx context.Context, lvl Level, format string, args ...any) {
if !s.V(lvl) {
func (s *slogLogger) Info(ctx context.Context, msg string, args ...interface{}) {
if !s.V(InfoLevel) {
return
}
slvl := loggerToSlogLevel(lvl)
s.slog.Log(ctx, slvl, format, args...)
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelInfo, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Info(ctx context.Context, args ...any) {
s.Log(ctx, InfoLevel, args...)
func (s *slogLogger) Debug(ctx context.Context, msg string, args ...any) {
if !s.V(DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Infof(ctx context.Context, format string, args ...interface{}) {
s.Logf(ctx, InfoLevel, format, args...)
func (s *slogLogger) Trace(ctx context.Context, msg string, args ...interface{}) {
if !s.V(TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug-1, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debug(ctx context.Context, args ...any) {
s.Log(ctx, DebugLevel, args...)
func (s *slogLogger) Error(ctx context.Context, msg string, args ...any) {
if !s.V(ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debugf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, DebugLevel, format, args...)
}
func (s *slogLogger) Error(ctx context.Context, args ...any) {
s.Log(ctx, ErrorLevel, args...)
}
func (s *slogLogger) Trace(ctx context.Context, args ...interface{}) {
s.Log(ctx, TraceLevel, args...)
}
func (s *slogLogger) Tracef(ctx context.Context, msg string, args ...interface{}) {
s.Logf(ctx, TraceLevel, msg, args...)
}
func (s *slogLogger) Errorf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, ErrorLevel, format, args...)
}
func (s *slogLogger) Fatal(ctx context.Context, args ...any) {
s.Log(ctx, FatalLevel, args...)
}
func (s *slogLogger) Fatalf(ctx context.Context, msg string, args ...interface{}) {
s.Logf(ctx, FatalLevel, msg, args...)
func (s *slogLogger) Fatal(ctx context.Context, msg string, args ...interface{}) {
if !s.V(FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError+1, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, args ...any) {
s.Log(ctx, WarnLevel, args...)
}
func (s *slogLogger) Warnf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, WarnLevel, format, args...)
func (s *slogLogger) Warn(ctx context.Context, msg string, args ...any) {
if !s.V(WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelWarn, msg, pcs[0])
r.Add(args...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) String() string {

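The core of the reworked implementation is visible above: handlers are built with AddSource and the renameAttr ReplaceAttr hook, and each level method constructs a slog.Record by hand with a program counter taken CallerSkipCount frames up the stack, so the reported caller is the user's line rather than the wrapper's; Fatal logs at slog.LevelError+1 and then calls os.Exit(1). A stdlib-only sketch of the same pattern (the names below are illustrative, not part of the package):

package main

import (
	"context"
	"log/slog"
	"os"
	"runtime"
	"strconv"
	"time"
)

// logf builds the record by hand so it can report its caller's file:line:
// skip 0 is runtime.Callers itself, 1 is logf, 2 is the caller of logf.
func logf(ctx context.Context, h slog.Handler, lvl slog.Level, msg string, args ...any) {
	var pcs [1]uintptr
	runtime.Callers(2, pcs[:])
	r := slog.NewRecord(time.Now(), lvl, msg, pcs[0])
	r.Add(args...)
	_ = h.Handle(ctx, r)
}

func main() {
	h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		AddSource: true,
		// rename the source and time attributes the same way renameAttr does
		ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
			switch a.Key {
			case slog.SourceKey:
				src := a.Value.Any().(*slog.Source)
				a.Key = "caller"
				a.Value = slog.StringValue(src.File + ":" + strconv.Itoa(src.Line))
			case slog.TimeKey:
				a.Key = "timestamp"
			}
			return a
		},
	})
	logf(context.TODO(), h, slog.LevelError, "store write error", "error", "connection reset")
}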

@@ -12,7 +12,7 @@ type stdLogger struct {
// NewStdLogger returns new *log.Logger baked by logger.Logger implementation
func NewStdLogger(l Logger, level Level) *log.Logger {
return log.New(&stdLogger{l: l, level: level}, "" /* prefix */, 0 /* flags */)
return log.New(&stdLogger{l: l.Clone(WithCallerSkipCount(l.Options().CallerSkipCount + 1)), level: level}, "" /* prefix */, 0 /* flags */)
}
func (sl *stdLogger) Write(p []byte) (int, error) {

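NewStdLogger now clones the wrapped logger with CallerSkipCount+1 because log.Logger inserts its own frames (Print, Output) between the user call and stdLogger.Write; without the extra skip the reported caller would point inside the adapter. A stdlib-only probe that makes those intermediate frames visible:

package main

import (
	"fmt"
	"log"
	"runtime"
)

// callerDump prints every frame above Write, showing the log package frames
// that sit between the user's Print call and the writer; that extra hop is
// the reason for bumping CallerSkipCount by one.
type callerDump struct{}

func (callerDump) Write(p []byte) (int, error) {
	for skip := 0; ; skip++ {
		pc, file, line, ok := runtime.Caller(skip)
		if !ok {
			break
		}
		fmt.Printf("skip=%d %s (%s:%d)\n", skip, runtime.FuncForPC(pc).Name(), file, line)
	}
	return len(p), nil
}

func main() {
	log.New(callerDump{}, "", 0).Print("probe")
}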

@@ -1,298 +0,0 @@
// Package wrapper provides wrapper for Logger
package wrapper
import (
"context"
"fmt"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/server"
)
var (
// DefaultClientCallObserver called by wrapper in client Call
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []options.Option, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientStreamObserver called by wrapper in client Stream
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []options.Option, stream client.Stream, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultServerHandlerObserver called by wrapper in server Handler
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientCallFuncObserver called by wrapper in client CallFunc
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultSkipEndpoints wrapper not called for this endpoints
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
)
type lWrapper struct {
client.Client
serverHandler server.HandlerFunc
clientCallFunc client.CallFunc
opts Options
}
type (
// ClientCallObserver func signature
ClientCallObserver func(context.Context, client.Request, interface{}, []options.Option, error) []string
// ClientStreamObserver func signature
ClientStreamObserver func(context.Context, client.Request, []options.Option, client.Stream, error) []string
// ClientCallFuncObserver func signature
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, error) []string
// ServerHandlerObserver func signature
ServerHandlerObserver func(context.Context, server.Request, interface{}, error) []string
)
// Options struct for wrapper
type Options struct {
// Logger that used for log
Logger logger.Logger
// ServerHandlerObservers funcs
ServerHandlerObservers []ServerHandlerObserver
// ClientCallObservers funcs
ClientCallObservers []ClientCallObserver
// ClientStreamObservers funcs
ClientStreamObservers []ClientStreamObserver
// ClientCallFuncObservers funcs
ClientCallFuncObservers []ClientCallFuncObserver
// SkipEndpoints
SkipEndpoints []string
// Level for logger
Level logger.Level
// Enabled flag
Enabled bool
}
// Option func signature
type Option func(*Options)
// NewOptions creates Options from Option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Level: logger.TraceLevel,
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
SkipEndpoints: DefaultSkipEndpoints,
}
for _, o := range opts {
o(&options)
}
return options
}
// WithEnabled enable/diable flag
func WithEnabled(b bool) Option {
return func(o *Options) {
o.Enabled = b
}
}
// WithLevel log level
func WithLevel(l logger.Level) Option {
return func(o *Options) {
o.Level = l
}
}
// WithLogger logger
func WithLogger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// WithClientCallObservers funcs
func WithClientCallObservers(ob ...ClientCallObserver) Option {
return func(o *Options) {
o.ClientCallObservers = ob
}
}
// WithClientStreamObservers funcs
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
return func(o *Options) {
o.ClientStreamObservers = ob
}
}
// WithClientCallFuncObservers funcs
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
return func(o *Options) {
o.ClientCallFuncObservers = ob
}
}
// WithServerHandlerObservers funcs
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
return func(o *Options) {
o.ServerHandlerObservers = ob
}
}
// SkipEndpoins
func SkipEndpoints(eps ...string) Option {
return func(o *Options) {
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
}
}
func (l *lWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...options.Option) error {
err := l.Client.Call(ctx, req, rsp, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallObservers {
labels = append(labels, o(ctx, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
func (l *lWrapper) Stream(ctx context.Context, req client.Request, opts ...options.Option) (client.Stream, error) {
stream, err := l.Client.Stream(ctx, req, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return stream, err
}
}
if !l.opts.Enabled {
return stream, err
}
var labels []string
for _, o := range l.opts.ClientStreamObservers {
labels = append(labels, o(ctx, req, opts, stream, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return stream, err
}
func (l *lWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
err := l.serverHandler(ctx, req, rsp)
endpoint := req.Endpoint()
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ServerHandlerObservers {
labels = append(labels, o(ctx, req, rsp, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewClientWrapper accepts an open options and returns a Client Wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
return func(c client.Client) client.Client {
options := NewOptions()
for _, o := range opts {
o(&options)
}
return &lWrapper{opts: options, Client: c}
}
}
// NewClientCallWrapper accepts an options and returns a Call Wrapper
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
return func(h client.CallFunc) client.CallFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, clientCallFunc: h}
return l.ClientCallFunc
}
}
func (l *lWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
err := l.clientCallFunc(ctx, addr, req, rsp, opts)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallFuncObservers {
labels = append(labels, o(ctx, addr, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewServerHandlerWrapper accepts an options and returns a Handler Wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
return func(h server.HandlerFunc) server.HandlerFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, serverHandler: h}
return l.ServerHandler
}
}


@@ -63,7 +63,7 @@ func (m *memory) ttlPrune() {
for id, n := range record.Nodes {
if n.TTL != 0 && time.Since(n.LastSeen) > n.TTL {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register TTL expired for node %s of service %s", n.ID, service)
m.opts.Logger.Debug(m.opts.Context, "RegisterTTL expired for node "+n.ID+" of service "+service)
}
delete(m.records[domain][service][version].Nodes, id)
}
@@ -150,7 +150,7 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
if _, ok := srvs[s.Name][s.Version]; !ok {
srvs[s.Name][s.Version] = r
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, "register added new service: "+s.Name+", version "+s.Version)
}
m.records[options.Domain] = srvs
go m.sendEvent(&Result{Action: "create", Service: s})
@@ -190,14 +190,14 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
if addedNodes {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, "register added new node to service: "+s.Name+", version "+s.Version)
}
go m.sendEvent(&Result{Action: "update", Service: s})
} else {
// refresh TTL and timestamp
for _, n := range s.Nodes {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Updated registration for service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, "updated registration for service: "+s.Name+", version "+s.Version)
}
srvs[s.Name][s.Version].Nodes[n.ID].TTL = options.TTL
srvs[s.Name][s.Version].Nodes[n.ID].LastSeen = time.Now()
@@ -242,7 +242,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
for _, n := range s.Nodes {
if _, ok := version.Nodes[n.ID]; ok {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed node from service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, "register removed node from service: "+s.Name+", version "+s.Version)
}
delete(version.Nodes, n.ID)
}
@@ -263,7 +263,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
go m.sendEvent(&Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name)
m.opts.Logger.Debug(m.opts.Context, "register removed service: "+s.Name)
}
return nil
}
@@ -272,7 +272,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
delete(m.records[options.Domain][s.Name], s.Version)
go m.sendEvent(&Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, "register removed service: "+s.Name+", version "+s.Version)
}
return nil
@@ -467,7 +467,7 @@ func serviceToRecord(s *Service, ttl time.Duration) *record {
}
endpoints := make([]*Endpoint, len(s.Endpoints))
for i, e := range s.Endpoints {
for i, e := range s.Endpoints { // TODO: vtolstov use copy
endpoints[i] = e
}
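The register messages above are built by concatenation, matching the rest of the commit. The new signature would also allow carrying the service name and version as attributes; an illustrative variant, not what this hunk does:

package main

import (
	"context"

	"go.unistack.org/micro/v4/logger"
)

func main() {
	ctx := context.TODO()
	name, version := "helloworld", "latest"
	// concatenated message, as in the hunk above
	logger.DefaultLogger.Debug(ctx, "register added new service: "+name+", version "+version)
	// the same data as attributes, which stays filterable in JSON output
	logger.DefaultLogger.Debug(ctx, "register added new service", "service", name, "version", version)
	// note: both lines are dropped unless the configured level is Debug or lower
}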


@@ -122,7 +122,7 @@ func (n *noopServer) Register() error {
if !registered {
if config.Logger.V(logger.InfoLevel) {
config.Logger.Infof(n.opts.Context, "register [%s] Registering node: %s", config.Register.String(), service.Nodes[0].ID)
config.Logger.Info(n.opts.Context, "register ["+config.Register.String()+"] Registering node: "+service.Nodes[0].ID)
}
}
@@ -160,7 +160,7 @@ func (n *noopServer) Deregister() error {
}
if config.Logger.V(logger.InfoLevel) {
config.Logger.Infof(n.opts.Context, "deregistering node: %s", service.Nodes[0].ID)
config.Logger.Info(n.opts.Context, "deregistering node: "+service.Nodes[0].ID)
}
if err := DefaultDeregisterFunc(service, config); err != nil {
@@ -204,7 +204,7 @@ func (n *noopServer) Start() error {
config.Address = addr
if config.Logger.V(logger.InfoLevel) {
config.Logger.Infof(n.opts.Context, "server [noop] Listening on %s", config.Address)
config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address)
}
n.Lock()
@@ -217,13 +217,13 @@ func (n *noopServer) Start() error {
// nolint: nestif
if err := config.RegisterCheck(config.Context); err != nil {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server %s-%s register check error: %s", config.Name, config.ID, err)
config.Logger.Error(n.opts.Context, "server "+config.Name+"-"+config.ID+" register check error: "+err.Error())
}
} else {
// announce self to the world
if err := n.Register(); err != nil {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server register error: %v", err)
config.Logger.Error(n.opts.Context, "server register error: "+err.Error())
}
}
}
@@ -252,23 +252,23 @@ func (n *noopServer) Start() error {
// nolint: nestif
if rerr != nil && registered {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server %s-%s register check error: %s, deregister it", config.Name, config.ID, rerr)
config.Logger.Error(n.opts.Context, "server "+config.Name+"-"+config.ID+" register check error: "+rerr.Error())
}
// deregister self in case of error
if err := n.Deregister(); err != nil {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server %s-%s deregister error: %s", config.Name, config.ID, err)
config.Logger.Error(n.opts.Context, "server "+config.Name+"-"+config.ID+" deregister error: "+err.Error())
}
}
} else if rerr != nil && !registered {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server %s-%s register check error: %s", config.Name, config.ID, rerr)
config.Logger.Error(n.opts.Context, "server "+config.Name+"-"+config.ID+" register check error: "+rerr.Error())
}
continue
}
if err := n.Register(); err != nil {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server %s-%s register error: %s", config.Name, config.ID, err)
config.Logger.Error(n.opts.Context, "server "+config.Name+"-"+config.ID+" register error: "+err.Error())
}
}
// wait for exit
@@ -280,7 +280,7 @@ func (n *noopServer) Start() error {
// deregister self
if err := n.Deregister(); err != nil {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, "server deregister error: ", err)
config.Logger.Error(n.opts.Context, "server deregister error: "+err.Error())
}
}
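Two call shapes are valid with the new Error signature: concatenate the error into the message, or pass it as a complete key/value pair. A lone trailing argument is neither, and slog reports it under the !BADKEY key, which is why the register-check lines above concatenate. A stdlib demonstration:

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	l := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	err := errors.New("register check failed")

	// message and error joined into one string
	l.Error("server register check error: " + err.Error())

	// or a complete key/value pair
	l.Error("server register check error", "error", err.Error())

	// a lone trailing argument has no key and is emitted as "!BADKEY"
	l.Error("server register check error: ", err.Error())
}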


@@ -258,7 +258,7 @@ func (s *service) Start() error {
}
if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Infof(s.opts.Context, "starting [service] %s version %s", s.Options().Name, s.Options().Version)
config.Loggers[0].Info(s.opts.Context, "starting [service] "+s.Options().Name+" version "+s.Options().Version)
}
if len(s.opts.Servers) == 0 {
@@ -304,7 +304,7 @@ func (s *service) Stop() error {
s.RUnlock()
if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Infof(s.opts.Context, "stoppping [service] %s", s.Name())
config.Loggers[0].Info(s.opts.Context, "stopping [service] "+s.Name())
}
var err error


@@ -71,7 +71,7 @@ func New(opts ...Option) (string, error) {
func Must(opts ...Option) string {
id, err := New(opts...)
if err != nil {
logger.Fatal(context.TODO(), err)
logger.Fatal(context.TODO(), err.Error())
}
return id
}
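
Must passes err.Error() now that Fatal takes a message string first. A hypothetical generic variant of the same pattern, assuming only the package-level Fatal helper from earlier in this commit (must and parsePort below are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"

	"go.unistack.org/micro/v4/logger"
)

// must wraps any constructor returning (T, error) and aborts via logger.Fatal
// on failure, rendering the error to a string for the new signature.
func must[T any](v T, err error) T {
	if err != nil {
		logger.Fatal(context.TODO(), err.Error())
	}
	return v
}

func parsePort(s string) (int, error) {
	if s == "" {
		return 0, errors.New("empty port")
	}
	var n int
	_, err := fmt.Sscanf(s, "%d", &n)
	return n, err
}

func main() {
	fmt.Println(must(parsePort("8080")))
}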