Compare commits


29 Commits
v3.10.79 ... v3

Author SHA1 Message Date
89cf4ef8af store: add missing LazyConnect option
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-26 17:48:09 +03:00
2a6ce6d4da add using lazy connect (#361)
#357

Co-authored-by: Василий Толстов <v.tolstov@unistack.org>
Reviewed-on: #361
Reviewed-by: Василий Толстов <v.tolstov@unistack.org>
Co-authored-by: Evstigneev Denis <danteevstigneev@yandex.ru>
Co-committed-by: Evstigneev Denis <danteevstigneev@yandex.ru>
2024-11-26 12:18:17 +03:00
ad19fe2b90 logger/slog: fix race condition with Enabled and Level
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-24 23:40:54 +03:00
49055a28ea logger/slog: wrap handler
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-24 23:28:15 +03:00
d1c6e121c1 logger/slog: fix Clone and Fields methods
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-24 15:31:40 +03:00
7cd7fb0c0a disable logging for automaxprocs
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-20 22:35:36 +03:00
77eb5b5264 add yaml support
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-01 11:23:29 +03:00
929e46c087 improve slog
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-11-01 00:56:40 +03:00
1fb5673d27 fixup graceful stop
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-10-25 17:21:54 +03:00
3bbb0cbc72 update slog/logger (#351)
Changed (logger methods without formatting):

- Added preparation and alignment of attributes for the logger
- Alignment is done by inserting !BADKEY before log/slog processing
- Added reuse of the Log method
- Removed the methods [Logf, Infof, Debugf, Errorf, Warnf, Fatalf, Tracef]
- Updated unit tests
- Removed the wrapper in the logger package
- Changed the logger interface
- Refactored the logger calls across micro (see the sketch after this list)
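A minimal sketch of the new call style from a caller's perspective, based on the interface and tests in this diff; the slog backend import path and the sample messages and attributes below are illustrative, not part of the change itself.

package main

import (
	"context"
	"errors"
	"os"

	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/logger/slog"
)

func main() {
	ctx := context.Background()

	l := slog.NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(os.Stderr))
	if err := l.Init(); err != nil {
		panic(err)
	}

	// Before #351: l.Errorf(ctx, "store error: %v", err)
	// After #351: a plain message followed by key/value attributes; errors are
	// attached as attributes instead of being formatted into the message.
	l.Error(ctx, "store write error", errors.New("connection refused"))

	// Attributes are alternating key/value pairs appended after the message.
	l.Info(ctx, "request handled", "method", "GET", "status", 200)
}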

Co-authored-by: Vasiliy Tolstov <v.tolstov@unistack.org>
Reviewed-on: #351
Co-authored-by: Evstigneev Denis <danteevstigneev@yandex.ru>
Co-committed-by: Evstigneev Denis <danteevstigneev@yandex.ru>
2024-10-12 12:37:43 +03:00
71fe0df73f use automaxprocs and automemlimit
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-10-06 13:50:59 +03:00
f1b8ecbdb3 store: add new ErrNotConnected error
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-10-05 14:46:22 +03:00
fd2b2762e9 fixup missing xpool dep
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-30 09:57:07 +03:00
82d269cfb4 xpool: add metrics
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-29 22:58:53 +03:00
6641463eed util/reflect: add ability to merge maps
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-20 19:22:20 +03:00
faf2454f0a cleanup
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-20 17:54:17 +03:00
de9e4d73f5 change semconv metric names to include micro_ prefix
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-20 08:38:36 +03:00
4ae7277140 meter: remove prefix options
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-20 08:27:25 +03:00
a98618ed5b add codec.Flatten option
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-16 23:10:43 +03:00
3aaf1182cb add codec option
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-16 23:02:45 +03:00
eb1482d789 codec: simplify codec interface
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-16 22:41:47 +03:00
a305f7553f Merge pull request '#347 add test' (#349) from kgorbunov/micro:#347-v3 into v3
Reviewed-on: #349
2024-09-16 14:59:58 +03:00
Gorbunov Kirill Andreevich
d9b2f2a45d #347 add test
2024-09-16 14:48:47 +03:00
3ace7657dc codec: RawMessage Marshal fix
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-10 10:43:45 +03:00
53b40617e2 fixup util/xpool
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-04 23:06:40 +03:00
1a9236caad update meter options
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-04 22:41:10 +03:00
6c68d39081 errors: add RFC9457 problem type
closes #297

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-08-01 01:06:02 +03:00
35e62fbeb0 tracer: add default context attr funcs option
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-06 00:09:27 +03:00
00b3ceb468 semconv: fix naming
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-04 14:56:48 +03:00
89 changed files with 1101 additions and 2056 deletions

View File

@ -1,5 +1,5 @@
// Package broker is an interface used for asynchronous messaging
package broker // import "go.unistack.org/micro/v3/broker"
package broker
import (
"context"

View File

@ -206,7 +206,7 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
}
} else if sub.opts.AutoAck {
if err = ms.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
// single processing
@ -217,11 +217,11 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts
if eh != nil {
_ = eh(p)
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
m.opts.Logger.Error(m.opts.Context, "broker handler error", err)
}
} else if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
}

View File

@ -1,5 +1,5 @@
// Package client is an interface for an RPC client
package client // import "go.unistack.org/micro/v3/client"
package client
import (
"context"

View File

@ -1,19 +1,8 @@
// Package codec is an interface for encoding messages
package codec // import "go.unistack.org/micro/v3/codec"
package codec
import (
"errors"
"io"
"go.unistack.org/micro/v3/metadata"
)
// Message types
const (
Error MessageType = iota
Request
Response
Event
)
var (
@ -24,65 +13,23 @@ var (
)
var (
// DefaultMaxMsgSize specifies how much data codec can handle
DefaultMaxMsgSize = 1024 * 1024 * 4 // 4Mb
// DefaultCodec is the global default codec
DefaultCodec = NewCodec()
// DefaultTagName specifies struct tag name to control codec Marshal/Unmarshal
DefaultTagName = "codec"
)
// MessageType specifies message type for codec
type MessageType int
// Codec encodes/decodes various types of messages used within micro.
// ReadHeader and ReadBody are called in pairs to read requests/responses
// from the connection. Close is called when finished with the
// connection. ReadBody may be called with a nil argument to force the
// body to be read and discarded.
// Codec encodes/decodes various types of messages.
type Codec interface {
ReadHeader(r io.Reader, m *Message, mt MessageType) error
ReadBody(r io.Reader, v interface{}) error
Write(w io.Writer, m *Message, v interface{}) error
Marshal(v interface{}, opts ...Option) ([]byte, error)
Unmarshal(b []byte, v interface{}, opts ...Option) error
String() string
}
// Message represents detailed information about
// the communication, likely followed by the body.
// In the case of an error, body may be nil.
type Message struct {
Header metadata.Metadata
Target string
Method string
Endpoint string
Error string
ID string
Body []byte
Type MessageType
}
// NewMessage creates new codec message
func NewMessage(t MessageType) *Message {
return &Message{Type: t, Header: metadata.New(0)}
}
// MarshalAppend calls codec.Marshal(v) and returns the data appended to buf.
// If codec implements MarshalAppend, that is called instead.
func MarshalAppend(buf []byte, c Codec, v interface{}, opts ...Option) ([]byte, error) {
if nc, ok := c.(interface {
MarshalAppend([]byte, interface{}, ...Option) ([]byte, error)
}); ok {
return nc.MarshalAppend(buf, v, opts...)
}
mbuf, err := c.Marshal(v, opts...)
if err != nil {
return nil, err
}
return append(buf, mbuf...), nil
type CodecV2 interface {
Marshal(buf []byte, v interface{}, opts ...Option) ([]byte, error)
Unmarshal(buf []byte, v interface{}, opts ...Option) error
String() string
}
// RawMessage is a raw encoded JSON value.
@ -93,6 +40,8 @@ type RawMessage []byte
func (m *RawMessage) MarshalJSON() ([]byte, error) {
if m == nil {
return []byte("null"), nil
} else if len(*m) == 0 {
return []byte("null"), nil
}
return *m, nil
}
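Consumer-side sketch of the simplified codec interface above, assuming the exported NewCodec constructor referenced by DefaultCodec; the greeting struct is only an illustration. The noop codec shown in the next file falls back to encoding/json for ordinary values.

package main

import (
	"fmt"

	"go.unistack.org/micro/v3/codec"
)

type greeting struct {
	Name string `json:"name"`
}

func main() {
	c := codec.NewCodec() // same constructor used for DefaultCodec

	// Marshal/Unmarshal replace the old ReadHeader/ReadBody/Write trio.
	buf, err := c.Marshal(&greeting{Name: "micro"})
	if err != nil {
		panic(err)
	}

	var out greeting
	if err := c.Unmarshal(buf, &out); err != nil {
		panic(err)
	}

	fmt.Println(out.Name, c.String()) // micro noop
}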

View File

@ -2,70 +2,14 @@ package codec
import (
"encoding/json"
"io"
codecpb "go.unistack.org/micro-proto/v3/codec"
)
type noopCodec struct {
opts Options
}
func (c *noopCodec) ReadHeader(conn io.Reader, m *Message, t MessageType) error {
return nil
}
func (c *noopCodec) ReadBody(conn io.Reader, b interface{}) error {
// read bytes
buf, err := io.ReadAll(conn)
if err != nil {
return err
}
if b == nil {
return nil
}
switch v := b.(type) {
case *string:
*v = string(buf)
case *[]byte:
*v = buf
case *Frame:
v.Data = buf
default:
return json.Unmarshal(buf, v)
}
return nil
}
func (c *noopCodec) Write(conn io.Writer, m *Message, b interface{}) error {
if b == nil {
return nil
}
var v []byte
switch vb := b.(type) {
case *Frame:
v = vb.Data
case string:
v = []byte(vb)
case *string:
v = []byte(*vb)
case *[]byte:
v = *vb
case []byte:
v = vb
default:
var err error
v, err = json.Marshal(vb)
if err != nil {
return err
}
}
_, err := conn.Write(v)
return err
}
func (c *noopCodec) String() string {
return "noop"
}
@ -91,8 +35,8 @@ func (c *noopCodec) Marshal(v interface{}, opts ...Option) ([]byte, error) {
return ve, nil
case *Frame:
return ve.Data, nil
case *Message:
return ve.Body, nil
case *codecpb.Frame:
return ve.Data, nil
}
return json.Marshal(v)
@ -115,8 +59,8 @@ func (c *noopCodec) Unmarshal(d []byte, v interface{}, opts ...Option) error {
case *Frame:
ve.Data = d
return nil
case *Message:
ve.Body = d
case *codecpb.Frame:
ve.Data = d
return nil
}

View File

@ -23,15 +23,8 @@ type Options struct {
Context context.Context
// TagName specifies tag name in struct to control codec
TagName string
// MaxMsgSize specifies max messages size that reads by codec
MaxMsgSize int
}
// MaxMsgSize sets the max message size
func MaxMsgSize(n int) Option {
return func(o *Options) {
o.MaxMsgSize = n
}
// Flatten specifies that struct must be analyzed for flatten tag
Flatten bool
}
// TagName sets the codec tag name in struct
@ -41,6 +34,13 @@ func TagName(n string) Option {
}
}
// Flatten enables checking for flatten tag name
func Flatten(b bool) Option {
return func(o *Options) {
o.Flatten = b
}
}
// Logger sets the logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
@ -69,8 +69,8 @@ func NewOptions(opts ...Option) Options {
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
MaxMsgSize: DefaultMaxMsgSize,
TagName: DefaultTagName,
Flatten: false,
}
for _, o := range opts {
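For the options change above, a short hedged sketch of how TagName and the new Flatten flag are passed now that MaxMsgSize is gone; the values are illustrative.

package main

import (
	"fmt"

	"go.unistack.org/micro/v3/codec"
)

func main() {
	opts := codec.NewOptions(
		codec.TagName("codec"), // matches DefaultTagName
		codec.Flatten(true),    // ask the codec to honor the flatten tag
	)
	fmt.Println(opts.TagName, opts.Flatten) // codec true
}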

View File

@ -1,9 +1,10 @@
// Package config is an interface for dynamic configuration.
package config // import "go.unistack.org/micro/v3/config"
package config
import (
"context"
"errors"
"fmt"
"reflect"
"time"
)
@ -138,7 +139,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeLoad error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@ -153,7 +154,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterLoad err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterLoad error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@ -168,7 +169,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeSave error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@ -183,7 +184,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterSave err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterSave error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@ -198,7 +199,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s BeforeInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s BeforeInit error", c.String()), err)
if !c.Options().AllowFail {
return err
}
@ -213,7 +214,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Errorf(ctx, "%s AfterInit err: %v", c.String(), err)
c.Options().Logger.Error(ctx, fmt.Sprintf("%s AfterInit error", c.String(), err), err)
if !c.Options().AllowFail {
return err
}

View File

@ -1,6 +1,6 @@
// Package errors provides a way to return detailed information
// for an RPC request error. The error is normally JSON encoded.
package errors // import "go.unistack.org/micro/v3/errors"
package errors
import (
"bytes"
@ -44,6 +44,20 @@ var (
ErrGatewayTimeout = &Error{Code: 504}
)
const ProblemContentType = "application/problem+json"
type Problem struct {
Type string `json:"type,omitempty"`
Title string `json:"title,omitempty"`
Detail string `json:"detail,omitempty"`
Instance string `json:"instance,omitempty"`
Errors []struct {
Title string `json:"title,omitempty"`
Detail string `json:"detail,omitempty"`
} `json:"errors,omitempty"`
Status int `json:"status,omitempty"`
}
// Error type
type Error struct {
// ID holds error id or service, usually someting like my_service or id
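A sketch of emitting an RFC 9457 problem document with the new Problem type and ProblemContentType constant; the handler, type URI, and texts below are illustrative only.

package main

import (
	"encoding/json"
	"net/http"

	"go.unistack.org/micro/v3/errors"
)

func main() {
	http.HandleFunc("/credit", func(w http.ResponseWriter, _ *http.Request) {
		p := &errors.Problem{
			Type:   "https://example.com/probs/out-of-credit",
			Title:  "You do not have enough credit.",
			Detail: "Your current balance is 30, but that costs 50.",
			Status: http.StatusForbidden,
		}
		// application/problem+json, per the ProblemContentType constant above.
		w.Header().Set("Content-Type", errors.ProblemContentType)
		w.WriteHeader(p.Status)
		_ = json.NewEncoder(w).Encode(p)
	})
	_ = http.ListenAndServe(":8080", nil)
}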

View File

@ -188,7 +188,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
return "", err
}
@ -212,7 +212,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
done := make(chan struct{})
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
return eid, werr
}
for idx := range steps {
@ -237,7 +237,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
w.opts.Logger.Trace(nctx, fmt.Sprintf("will be executed %v", steps[idx][nidx]))
}
cstep := steps[idx][nidx]
// nolint: nestif
@ -257,21 +257,21 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
@ -290,16 +290,16 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
@ -317,7 +317,7 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return eid, nil
}
logger.Tracef(ctx, "wait for finish or error")
logger.DefaultLogger.Trace(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
@ -333,15 +333,15 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
}

View File

@ -1,5 +1,5 @@
// Package flow is an interface used for saga pattern microservice workflow
package flow // import "go.unistack.org/micro/v3/flow"
package flow
import (
"context"

View File

@ -1,4 +1,4 @@
package fsm // import "go.unistack.org/micro/v3/fsm"
package fsm
import (
"context"

View File

@ -17,7 +17,7 @@ func TestFSMStart(t *testing.T) {
wrapper := func(next StateFunc) StateFunc {
return func(sctx context.Context, s State, opts ...StateOption) (State, error) {
sctx = logger.NewContext(sctx, logger.Fields("state", s.Name()))
sctx = logger.NewContext(sctx, logger.DefaultLogger.Fields("state", s.Name()))
return next(sctx, s, opts...)
}
}

go.mod (16 changed lines)
View File

@ -1,20 +1,32 @@
module go.unistack.org/micro/v3
go 1.20
go 1.22
require (
dario.cat/mergo v1.0.0
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/KimMachineGun/automemlimit v0.6.1
github.com/google/uuid v1.3.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v3 v3.4.1
golang.org/x/sync v0.3.0
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
google.golang.org/protobuf v1.33.0
)
require (
github.com/cilium/ebpf v0.9.1 // indirect
github.com/containerd/cgroups/v3 v3.0.1 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect
)

go.sum (49 changed lines)
View File

@ -2,23 +2,68 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
@ -26,8 +71,8 @@ google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,5 +1,5 @@
// Package logger provides a log interface
package logger // import "go.unistack.org/micro/v3/logger"
package logger
import (
"context"
@ -14,8 +14,6 @@ var (
DefaultLogger Logger = NewLogger()
// DefaultLevel used by logger
DefaultLevel = InfoLevel
// DefaultCallerSkipCount used by logger
DefaultCallerSkipCount = 2
)
// Logger is a generic logging interface
@ -33,33 +31,19 @@ type Logger interface {
// Fields set fields to always be logged with keyval pairs
Fields(fields ...interface{}) Logger
// Info level message
Info(ctx context.Context, args ...interface{})
Info(ctx context.Context, msg string, args ...interface{})
// Trace level message
Trace(ctx context.Context, args ...interface{})
Trace(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debug(ctx context.Context, args ...interface{})
Debug(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warn(ctx context.Context, args ...interface{})
Warn(ctx context.Context, msg string, args ...interface{})
// Error level message
Error(ctx context.Context, args ...interface{})
Error(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatal(ctx context.Context, args ...interface{})
// Infof level message
Infof(ctx context.Context, msg string, args ...interface{})
// Tracef level message
Tracef(ctx context.Context, msg string, args ...interface{})
// Debug level message
Debugf(ctx context.Context, msg string, args ...interface{})
// Warn level message
Warnf(ctx context.Context, msg string, args ...interface{})
// Error level message
Errorf(ctx context.Context, msg string, args ...interface{})
// Fatal level message
Fatalf(ctx context.Context, msg string, args ...interface{})
Fatal(ctx context.Context, msg string, args ...interface{})
// Log logs message with needed level
Log(ctx context.Context, level Level, args ...interface{})
// Logf logs message with needed level
Logf(ctx context.Context, level Level, msg string, args ...interface{})
Log(ctx context.Context, level Level, msg string, args ...interface{})
// Name returns broker instance name
Name() string
// String returns the type of logger
@ -68,108 +52,3 @@ type Logger interface {
// Field contains keyval pair
type Field interface{}
// Info writes msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Info(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Info(ctx, args...)
}
// Error writes msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Error(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Error(ctx, args...)
}
// Debug writes msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debug(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debug(ctx, args...)
}
// Warn writes msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warn(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warn(ctx, args...)
}
// Trace writes msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Trace(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Trace(ctx, args...)
}
// Fatal writes msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatal(ctx context.Context, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatal(ctx, args...)
}
// Infof writes formatted msg to default logger on info level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Infof(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Infof(ctx, msg, args...)
}
// Errorf writes formatted msg to default logger on error level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Errorf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Errorf(ctx, msg, args...)
}
// Debugf writes formatted msg to default logger on debug level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Debugf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debugf(ctx, msg, args...)
}
// Warnf writes formatted msg to default logger on warn level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Warnf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warnf(ctx, msg, args...)
}
// Tracef writes formatted msg to default logger on trace level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Tracef(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Tracef(ctx, msg, args...)
}
// Fatalf writes formatted msg to default logger on fatal level
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fatalf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatalf(ctx, msg, args...)
}
// V returns true if passed level enabled in default logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func V(level Level) bool {
return DefaultLogger.V(level)
}
// Init initialize logger
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Init(opts ...Option) error {
return DefaultLogger.Init(opts...)
}
// Fields create logger with specific fields
//
// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
func Fields(fields ...interface{}) Logger {
return DefaultLogger.Fields(fields...)
}
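Since the deprecated package-level helpers above are removed, callers hold a logger instance instead; a minimal sketch, assuming DefaultLogger stays the package default as shown at the top of this file, with illustrative messages.

package main

import (
	"context"

	"go.unistack.org/micro/v3/logger"
)

func main() {
	ctx := context.Background()

	// Instead of logger.Infof(ctx, ...) / logger.Errorf(ctx, ...),
	// use an instance; DefaultLogger is the package-wide default.
	l := logger.DefaultLogger

	l.Info(ctx, "service started", "name", "example")

	// Log takes the level explicitly plus the same msg and key/value attributes.
	l.Log(ctx, logger.InfoLevel, "custom level message", "attempt", 1)
}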

View File

@ -4,12 +4,17 @@ import (
"context"
)
const (
defaultCallerSkipCount = 2
)
type noopLogger struct {
opts Options
}
func NewLogger(opts ...Option) Logger {
options := NewOptions(opts...)
options.CallerSkipCount = defaultCallerSkipCount
return &noopLogger{opts: options}
}
@ -51,44 +56,23 @@ func (l *noopLogger) String() string {
return "noop"
}
func (l *noopLogger) Log(ctx context.Context, lvl Level, attrs ...interface{}) {
func (l *noopLogger) Log(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Info(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Debug(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Error(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Trace(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Warn(ctx context.Context, attrs ...interface{}) {
func (l *noopLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Fatal(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Logf(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
func (l *noopLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
}

View File

@ -23,7 +23,7 @@ type Options struct {
Name string
// Fields holds additional metadata
Fields []interface{}
// CallerSkipCount number of frmaes to skip
// callerSkipCount number of frmaes to skip
CallerSkipCount int
// ContextAttrFuncs contains funcs that executed before log func on context
ContextAttrFuncs []ContextAttrFunc
@ -57,7 +57,6 @@ func NewOptions(opts ...Option) Options {
Level: DefaultLevel,
Fields: make([]interface{}, 0, 6),
Out: os.Stderr,
CallerSkipCount: DefaultCallerSkipCount,
Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
AddSource: true,
@ -74,7 +73,7 @@ func NewOptions(opts ...Option) Options {
return options
}
// WithContextAttrFuncs appends default funcs for the context arrts filler
// WithContextAttrFuncs appends default funcs for the context attrs filler
func WithContextAttrFuncs(fncs ...ContextAttrFunc) Option {
return func(o *Options) {
o.ContextAttrFuncs = append(o.ContextAttrFuncs, fncs...)
@ -102,27 +101,20 @@ func WithOutput(out io.Writer) Option {
}
}
// WitAddStacktrace controls writing stacktrace on error
// WithAddStacktrace controls writing stacktrace on error
func WithAddStacktrace(v bool) Option {
return func(o *Options) {
o.AddStacktrace = v
}
}
// WitAddSource controls writing source file and pos in log
// WithAddSource controls writing source file and pos in log
func WithAddSource(v bool) Option {
return func(o *Options) {
o.AddSource = v
}
}
// WithCallerSkipCount set frame count to skip
func WithCallerSkipCount(c int) Option {
return func(o *Options) {
o.CallerSkipCount = c
}
}
// WithContext set context
func WithContext(ctx context.Context) Option {
return func(o *Options) {
@ -137,6 +129,13 @@ func WithName(n string) Option {
}
}
// WithMeter sets the meter
func WithMeter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// WithTimeFunc sets the func to obtain current time
func WithTimeFunc(fn func() time.Time) Option {
return func(o *Options) {
@ -191,6 +190,8 @@ func WithMicroKeys() Option {
// WithAddCallerSkipCount add skip count for copy logger
func WithAddCallerSkipCount(n int) Option {
return func(o *Options) {
if n > 0 {
o.CallerSkipCount += n
}
}
}
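A combined sketch of the options touched here (source, stacktrace, time func, caller-skip); the slog backend is used only because it is the logger adjusted alongside these options, and the values are illustrative.

package main

import (
	"context"
	"os"
	"time"

	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/logger/slog"
)

func main() {
	l := slog.NewLogger(
		logger.WithLevel(logger.DebugLevel),
		logger.WithOutput(os.Stderr),
		logger.WithAddSource(true),     // record source file and position
		logger.WithAddStacktrace(true), // attach a stacktrace on error level
		logger.WithTimeFunc(time.Now),  // clock used for record timestamps
	)
	// Wrappers can bump the caller skip on Init; only positive values are added.
	if err := l.Init(logger.WithAddCallerSkipCount(1)); err != nil {
		panic(err)
	}
	l.Debug(context.Background(), "options configured")
}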

View File

@ -2,19 +2,25 @@ package slog
import (
"context"
"fmt"
"log/slog"
"os"
"regexp"
"runtime"
"strconv"
"sync"
"sync/atomic"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/tracer"
)
const (
badKey = "!BADKEY"
// defaultCallerSkipCount used by logger
defaultCallerSkipCount = 3
)
var reTrace = regexp.MustCompile(`.*/slog/logger\.go.*\n`)
var (
@ -26,6 +32,27 @@ var (
fatalValue = slog.StringValue("fatal")
)
type wrapper struct {
h slog.Handler
level atomic.Int64
}
func (h *wrapper) Enabled(ctx context.Context, level slog.Level) bool {
return level >= slog.Level(int(h.level.Load()))
}
func (h *wrapper) Handle(ctx context.Context, rec slog.Record) error {
return h.h.Handle(ctx, rec)
}
func (h *wrapper) WithAttrs(attrs []slog.Attr) slog.Handler {
return h.WithAttrs(attrs)
}
func (h *wrapper) WithGroup(name string) slog.Handler {
return h.WithGroup(name)
}
func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.SourceKey:
@ -63,7 +90,7 @@ func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
type slogLogger struct {
leveler *slog.LevelVar
handler slog.Handler
handler *wrapper
opts logger.Options
mu sync.RWMutex
}
@ -77,51 +104,52 @@ func (s *slogLogger) Clone(opts ...logger.Option) logger.Logger {
o(&options)
}
l := &slogLogger{
opts: options,
if len(options.ContextAttrFuncs) == 0 {
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
l.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: l.renameAttr,
Level: l.leveler,
AddSource: l.opts.AddSource,
attrs, _ := s.argsAttrs(options.Fields)
l := &slogLogger{
handler: &wrapper{h: s.handler.h.WithAttrs(attrs)},
opts: options,
}
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
l.handler = slog.New(slog.NewJSONHandler(options.Out, handleOpt)).With(options.Fields...).Handler()
l.handler.level.Store(int64(loggerToSlogLevel(options.Level)))
return l
}
func (s *slogLogger) V(level logger.Level) bool {
return s.opts.Level.Enabled(level)
s.mu.Lock()
v := s.opts.Level.Enabled(level)
s.mu.Unlock()
return v
}
func (s *slogLogger) Level(level logger.Level) {
s.leveler.Set(loggerToSlogLevel(level))
s.mu.Lock()
s.opts.Level = level
s.handler.level.Store(int64(loggerToSlogLevel(level)))
s.mu.Unlock()
}
func (s *slogLogger) Options() logger.Options {
return s.opts
}
func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
func (s *slogLogger) Fields(fields ...interface{}) logger.Logger {
s.mu.RLock()
level := s.leveler.Level()
options := s.opts
s.mu.RUnlock()
l := &slogLogger{opts: options}
l.leveler = new(slog.LevelVar)
l.leveler.Set(level)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: l.renameAttr,
Level: l.leveler,
AddSource: l.opts.AddSource,
if len(options.ContextAttrFuncs) == 0 {
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
l.handler = slog.New(slog.NewJSONHandler(l.opts.Out, handleOpt)).With(attrs...).Handler()
attrs, _ := s.argsAttrs(fields)
l.handler = &wrapper{h: s.handler.h.WithAttrs(attrs)}
l.handler.level.Store(int64(loggerToSlogLevel(l.opts.Level)))
return l
}
@ -129,407 +157,55 @@ func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
func (s *slogLogger) Init(opts ...logger.Option) error {
s.mu.Lock()
if len(s.opts.ContextAttrFuncs) == 0 {
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
for _, o := range opts {
o(&s.opts)
}
s.leveler = new(slog.LevelVar)
if len(s.opts.ContextAttrFuncs) == 0 {
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
handleOpt := &slog.HandlerOptions{
ReplaceAttr: s.renameAttr,
Level: s.leveler,
Level: loggerToSlogLevel(logger.TraceLevel),
AddSource: s.opts.AddSource,
}
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
s.handler = slog.New(slog.NewJSONHandler(s.opts.Out, handleOpt)).With(s.opts.Fields...).Handler()
attrs, _ := s.argsAttrs(s.opts.Fields)
s.handler = &wrapper{h: slog.NewJSONHandler(s.opts.Out, handleOpt).WithAttrs(attrs)}
s.handler.level.Store(int64(loggerToSlogLevel(s.opts.Level)))
s.mu.Unlock()
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
s.printLog(ctx, lvl, msg, attrs...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.handler.Handle(ctx, r)
func (s *slogLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.InfoLevel, msg, attrs...)
}
func (s *slogLogger) Logf(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
func (s *slogLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.DebugLevel, msg, attrs...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, (slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1])))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.handler.Handle(ctx, r)
func (s *slogLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.TraceLevel, msg, attrs...)
}
func (s *slogLogger) Info(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
func (s *slogLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.ErrorLevel, msg, attrs...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.InfoLevel.String()).Inc()
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelInfo, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Debug(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.DebugLevel.String()).Inc()
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Trace(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.TraceLevel.String()).Inc()
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelDebug-1, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Error(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.ErrorLevel.String()).Inc()
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Fatal(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
func (s *slogLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.FatalLevel, msg, attrs...)
os.Exit(1)
}
func (s *slogLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.FatalLevel.String()).Inc()
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelError+1, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.handler.Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
}
func (s *slogLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", logger.WarnLevel.String()).Inc()
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(s.opts.TimeFunc(), slog.LevelWarn, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.handler.Handle(ctx, r)
func (s *slogLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.WarnLevel, msg, attrs...)
}
func (s *slogLogger) Name() string {
@ -540,10 +216,59 @@ func (s *slogLogger) String() string {
return "slog"
}
func (s *slogLogger) printLog(ctx context.Context, lvl logger.Level, msg string, args ...interface{}) {
if !s.V(lvl) {
return
}
var argError error
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
attrs, err := s.argsAttrs(args)
if err != nil {
argError = err
}
if argError != nil {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, argError.Error())
}
}
for _, fn := range s.opts.ContextAttrFuncs {
ctxAttrs, err := s.argsAttrs(fn(ctx))
if err != nil {
argError = err
}
attrs = append(attrs, ctxAttrs...)
}
if argError != nil {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, argError.Error())
}
}
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
}
}
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, printLog, LogLvlMethod]
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
r.AddAttrs(attrs...)
_ = s.handler.Handle(ctx, r)
}
func NewLogger(opts ...logger.Option) logger.Logger {
s := &slogLogger{
opts: logger.NewOptions(opts...),
}
s.opts.CallerSkipCount = defaultCallerSkipCount
return s
}
@ -581,3 +306,27 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
return logger.InfoLevel
}
}
func (s *slogLogger) argsAttrs(args []interface{}) ([]slog.Attr, error) {
attrs := make([]slog.Attr, 0, len(args))
var err error
for idx := 0; idx < len(args); idx++ {
switch arg := args[idx].(type) {
case slog.Attr:
attrs = append(attrs, arg)
case string:
if idx+1 < len(args) {
attrs = append(attrs, slog.Any(arg, args[idx+1]))
idx += 1
} else {
attrs = append(attrs, slog.String(badKey, arg))
}
case error:
attrs = append(attrs, slog.String(s.opts.ErrorKey, arg.Error()))
err = arg
}
}
return attrs, err
}
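Caller-side sketch of how argsAttrs pairs arguments, mirroring the !BADKEY test added below; the output keys and messages are illustrative.

package main

import (
	"context"
	"errors"
	"os"

	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/logger/slog"
)

func main() {
	ctx := context.Background()

	l := slog.NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(os.Stdout))
	if err := l.Init(); err != nil {
		panic(err)
	}

	// Even-length key/value arguments become attributes: "path":"/health","code":200
	l.Info(ctx, "paired attrs", "path", "/health", "code", 200)

	// A dangling key without a value is kept under !BADKEY, as the new test checks.
	l.Info(ctx, "odd attrs", "dangling")

	// Errors are stored under the configured error key and, inside a traced
	// context, also mark the active span with an error status.
	l.Error(ctx, "lookup failed", errors.New("not found"))
}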

View File

@ -3,13 +3,68 @@ package slog
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"strings"
"testing"
"github.com/google/uuid"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/logger"
)
func TestMultipleFieldsWithLevel(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l = l.Fields("key", "val")
l.Info(ctx, "msg1")
nl := l.Clone(logger.WithLevel(logger.DebugLevel))
nl.Debug(ctx, "msg2")
l.Debug(ctx, "msg3")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"msg1"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"msg2"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if bytes.Contains(buf.Bytes(), []byte(`"msg3"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestMultipleFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l = l.Fields("key", "val")
l = l.Fields("key1", "val1")
l.Info(ctx, "msg")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"key1":"val1"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestError(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
@ -29,13 +84,22 @@ func TestError(t *testing.T) {
func TestErrorf(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithAddStacktrace(true))
if err := l.Init(); err != nil {
if err := l.Init(logger.WithContextAttrFuncs(func(ctx context.Context) []interface{} {
return nil
})); err != nil {
t.Fatal(err)
}
l.Errorf(ctx, "message", fmt.Errorf("error message"))
l.Log(ctx, logger.ErrorLevel, "message", errors.New("error msg"))
l.Log(ctx, logger.ErrorLevel, "", errors.New("error msg"))
if !bytes.Contains(buf.Bytes(), []byte(`"error":"error msg"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
}
@ -99,6 +163,11 @@ func TestFromContextWithFields(t *testing.T) {
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
l.Info(ctx, "test", "uncorrected number attributes")
if !bytes.Contains(buf.Bytes(), []byte(`"!BADKEY":"uncorrected number attributes"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
@ -174,3 +243,52 @@ func TestLogger(t *testing.T) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
}
func Test_WithContextAttrFunc(t *testing.T) {
loggerContextAttrFuncs := []logger.ContextAttrFunc{
func(ctx context.Context) []interface{} {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil
}
attrs := make([]interface{}, 0, 10)
for k, v := range md {
switch k {
case "X-Request-Id", "Phone", "External-Id", "Source-Service", "X-App-Install-Id", "Client-Id", "Client-Ip":
attrs = append(attrs, strings.ToLower(k), v)
}
}
return attrs
},
}
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
ctx := context.TODO()
ctx = metadata.AppendIncomingContext(ctx, "X-Request-Id", uuid.New().String(),
"Source-Service", "Test-System")
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Info(ctx, "test message")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test message"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"x-request-id":"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"source-service":"Test-System"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
buf.Reset()
imd, _ := metadata.FromIncomingContext(ctx)
l.Info(ctx, "test message1")
imd.Set("Source-Service", "Test-System2")
l.Info(ctx, "test message2")
// t.Logf("xxx %s", buf.Bytes())
}

View File

@ -1,399 +0,0 @@
// Package wrapper provides wrapper for Logger
package wrapper
import (
"context"
"fmt"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/server"
)
var (
// DefaultClientCallObserver called by wrapper in client Call
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientStreamObserver called by wrapper in client Stream
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientPublishObserver called by wrapper in client Publish
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, err error) []string {
labels := []string{"endpoint", msg.Topic()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultServerHandlerObserver called by wrapper in server Handler
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultServerSubscriberObserver called by wrapper in server Subscriber
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, err error) []string {
labels := []string{"endpoint", msg.Topic()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientCallFuncObserver called by wrapper in client CallFunc
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultSkipEndpoints wrapper not called for this endpoints
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
)
type lWrapper struct {
client.Client
serverHandler server.HandlerFunc
serverSubscriber server.SubscriberFunc
clientCallFunc client.CallFunc
opts Options
}
type (
// ClientCallObserver func signature
ClientCallObserver func(context.Context, client.Request, interface{}, []client.CallOption, error) []string
// ClientStreamObserver func signature
ClientStreamObserver func(context.Context, client.Request, []client.CallOption, client.Stream, error) []string
// ClientPublishObserver func signature
ClientPublishObserver func(context.Context, client.Message, []client.PublishOption, error) []string
// ClientCallFuncObserver func signature
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, error) []string
// ServerHandlerObserver func signature
ServerHandlerObserver func(context.Context, server.Request, interface{}, error) []string
// ServerSubscriberObserver func signature
ServerSubscriberObserver func(context.Context, server.Message, error) []string
)
// Options struct for wrapper
type Options struct {
// Logger that used for log
Logger logger.Logger
// ServerHandlerObservers funcs
ServerHandlerObservers []ServerHandlerObserver
// ServerSubscriberObservers funcs
ServerSubscriberObservers []ServerSubscriberObserver
// ClientCallObservers funcs
ClientCallObservers []ClientCallObserver
// ClientStreamObservers funcs
ClientStreamObservers []ClientStreamObserver
// ClientPublishObservers funcs
ClientPublishObservers []ClientPublishObserver
// ClientCallFuncObservers funcs
ClientCallFuncObservers []ClientCallFuncObserver
// SkipEndpoints
SkipEndpoints []string
// Level for logger
Level logger.Level
// Enabled flag
Enabled bool
}
// Option func signature
type Option func(*Options)
// NewOptions creates Options from Option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Level: logger.TraceLevel,
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
ClientPublishObservers: []ClientPublishObserver{DefaultClientPublishObserver},
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
ServerSubscriberObservers: []ServerSubscriberObserver{DefaultServerSubscriberObserver},
SkipEndpoints: DefaultSkipEndpoints,
}
for _, o := range opts {
o(&options)
}
return options
}
// WithEnabled sets the enable/disable flag
func WithEnabled(b bool) Option {
return func(o *Options) {
o.Enabled = b
}
}
// WithLevel log level
func WithLevel(l logger.Level) Option {
return func(o *Options) {
o.Level = l
}
}
// WithLogger logger
func WithLogger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// WithClientCallObservers funcs
func WithClientCallObservers(ob ...ClientCallObserver) Option {
return func(o *Options) {
o.ClientCallObservers = ob
}
}
// WithClientStreamObservers funcs
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
return func(o *Options) {
o.ClientStreamObservers = ob
}
}
// WithClientPublishObservers funcs
func WithClientPublishObservers(ob ...ClientPublishObserver) Option {
return func(o *Options) {
o.ClientPublishObservers = ob
}
}
// WithClientCallFuncObservers funcs
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
return func(o *Options) {
o.ClientCallFuncObservers = ob
}
}
// WithServerHandlerObservers funcs
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
return func(o *Options) {
o.ServerHandlerObservers = ob
}
}
// WithServerSubscriberObservers funcs
func WithServerSubscriberObservers(ob ...ServerSubscriberObserver) Option {
return func(o *Options) {
o.ServerSubscriberObservers = ob
}
}
// SkipEndpoints appends endpoints to skip
func SkipEndpoints(eps ...string) Option {
return func(o *Options) {
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
}
}
func (l *lWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
err := l.Client.Call(ctx, req, rsp, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallObservers {
labels = append(labels, o(ctx, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
func (l *lWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
stream, err := l.Client.Stream(ctx, req, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return stream, err
}
}
if !l.opts.Enabled {
return stream, err
}
var labels []string
for _, o := range l.opts.ClientStreamObservers {
labels = append(labels, o(ctx, req, opts, stream, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return stream, err
}
func (l *lWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
err := l.Client.Publish(ctx, msg, opts...)
endpoint := msg.Topic()
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientPublishObservers {
labels = append(labels, o(ctx, msg, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
func (l *lWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
err := l.serverHandler(ctx, req, rsp)
endpoint := req.Endpoint()
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ServerHandlerObservers {
labels = append(labels, o(ctx, req, rsp, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
func (l *lWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
err := l.serverSubscriber(ctx, msg)
endpoint := msg.Topic()
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ServerSubscriberObservers {
labels = append(labels, o(ctx, msg, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewClientWrapper accepts an open options and returns a Client Wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
return func(c client.Client) client.Client {
options := NewOptions()
for _, o := range opts {
o(&options)
}
return &lWrapper{opts: options, Client: c}
}
}
// NewClientCallWrapper accepts an options and returns a Call Wrapper
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
return func(h client.CallFunc) client.CallFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, clientCallFunc: h}
return l.ClientCallFunc
}
}
func (l *lWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
err := l.clientCallFunc(ctx, addr, req, rsp, opts)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallFuncObservers {
labels = append(labels, o(ctx, addr, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewServerHandlerWrapper accepts an options and returns a Handler Wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
return func(h server.HandlerFunc) server.HandlerFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, serverHandler: h}
return l.ServerHandler
}
}
// NewServerSubscriberWrapper accepts an options and returns a Subscriber Wrapper
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
return func(h server.SubscriberFunc) server.SubscriberFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, serverSubscriber: h}
return l.ServerSubscriber
}
}

View File

@ -1,5 +1,5 @@
// Package metadata is a way of defining message headers
package metadata // import "go.unistack.org/micro/v3/metadata"
package metadata
import (
"net/textproto"

View File

@ -16,10 +16,8 @@ var (
DefaultAddress = ":9090"
// DefaultPath the meter endpoint where the Meter data will be made available
DefaultPath = "/metrics"
// DefaultMetricPrefix holds the string that prepends to all metrics
DefaultMetricPrefix = "micro_"
// DefaultLabelPrefix holds the string that prepends to all labels
DefaultLabelPrefix = "micro_"
// DefaultMeterStatsInterval specifies interval for meter updating
DefaultMeterStatsInterval = 5 * time.Second
// DefaultSummaryQuantiles is the default spread of stats for summary
DefaultSummaryQuantiles = []float64{0.5, 0.9, 0.97, 0.99, 1}
// DefaultSummaryWindow is the default window for summary

View File

@ -17,10 +17,6 @@ type Options struct {
Address string
// Path holds the path for metrics
Path string
// MetricPrefix holds the prefix for all metrics
MetricPrefix string
// LabelPrefix holds the prefix for all labels
LabelPrefix string
// Labels holds the default labels
Labels []string
// WriteProcessMetrics flag to write process metrics
@ -35,8 +31,6 @@ func NewOptions(opt ...Option) Options {
Address: DefaultAddress,
Path: DefaultPath,
Context: context.Background(),
MetricPrefix: DefaultMetricPrefix,
LabelPrefix: DefaultLabelPrefix,
}
for _, o := range opt {
@ -46,20 +40,6 @@ func NewOptions(opt ...Option) Options {
return opts
}
// LabelPrefix sets the labels prefix
func LabelPrefix(pref string) Option {
return func(o *Options) {
o.LabelPrefix = pref
}
}
// MetricPrefix sets the metric prefix
func MetricPrefix(pref string) Option {
return func(o *Options) {
o.MetricPrefix = pref
}
}
// Context sets the metrics context
func Context(ctx context.Context) Option {
return func(o *Options) {
@ -90,7 +70,7 @@ func TimingObjectives(value map[float64]float64) Option {
}
*/
// Labels sets the meter labels
// Labels appends the meter labels
func Labels(ls ...string) Option {
return func(o *Options) {
o.Labels = append(o.Labels, ls...)

View File

@ -1,347 +0,0 @@
package wrapper // import "go.unistack.org/micro/v3/meter/wrapper"
import (
"context"
"fmt"
"time"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/server"
)
var (
// ClientRequestDurationSeconds specifies meter metric name
ClientRequestDurationSeconds = "client_request_duration_seconds"
// ClientRequestLatencyMicroseconds specifies meter metric name
ClientRequestLatencyMicroseconds = "client_request_latency_microseconds"
// ClientRequestTotal specifies meter metric name
ClientRequestTotal = "client_request_total"
// ClientRequestInflight specifies meter metric name
ClientRequestInflight = "client_request_inflight"
// ServerRequestDurationSeconds specifies meter metric name
ServerRequestDurationSeconds = "server_request_duration_seconds"
// ServerRequestLatencyMicroseconds specifies meter metric name
ServerRequestLatencyMicroseconds = "server_request_latency_microseconds"
// ServerRequestTotal specifies meter metric name
ServerRequestTotal = "server_request_total"
// ServerRequestInflight specifies meter metric name
ServerRequestInflight = "server_request_inflight"
// PublishMessageDurationSeconds specifies meter metric name
PublishMessageDurationSeconds = "publish_message_duration_seconds"
// PublishMessageLatencyMicroseconds specifies meter metric name
PublishMessageLatencyMicroseconds = "publish_message_latency_microseconds"
// PublishMessageTotal specifies meter metric name
PublishMessageTotal = "publish_message_total"
// PublishMessageInflight specifies meter metric name
PublishMessageInflight = "publish_message_inflight"
// SubscribeMessageDurationSeconds specifies meter metric name
SubscribeMessageDurationSeconds = "subscribe_message_duration_seconds"
// SubscribeMessageLatencyMicroseconds specifies meter metric name
SubscribeMessageLatencyMicroseconds = "subscribe_message_latency_microseconds"
// SubscribeMessageTotal specifies meter metric name
SubscribeMessageTotal = "subscribe_message_total"
// SubscribeMessageInflight specifies meter metric name
SubscribeMessageInflight = "subscribe_message_inflight"
labelSuccess = "success"
labelFailure = "failure"
labelStatus = "status"
labelEndpoint = "endpoint"
// DefaultSkipEndpoints contains a list of endpoints that are not evaluated by the wrapper
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
)
// Options struct
type Options struct {
Meter meter.Meter
lopts []meter.Option
SkipEndpoints []string
}
// Option func signature
type Option func(*Options)
// NewOptions creates new Options struct
func NewOptions(opts ...Option) Options {
options := Options{
Meter: meter.DefaultMeter,
lopts: make([]meter.Option, 0, 5),
SkipEndpoints: DefaultSkipEndpoints,
}
for _, o := range opts {
o(&options)
}
return options
}
// ServiceName passes service name to meter label
func ServiceName(name string) Option {
return func(o *Options) {
o.lopts = append(o.lopts, meter.Labels("name", name))
}
}
// ServiceVersion passes service version to meter label
func ServiceVersion(version string) Option {
return func(o *Options) {
o.lopts = append(o.lopts, meter.Labels("version", version))
}
}
// ServiceID passes service id to meter label
func ServiceID(id string) Option {
return func(o *Options) {
o.lopts = append(o.lopts, meter.Labels("id", id))
}
}
// Meter passes meter
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// SkipEndoints add endpoint to skip
func SkipEndoints(eps ...string) Option {
return func(o *Options) {
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
}
}
type wrapper struct {
client.Client
callFunc client.CallFunc
opts Options
}
// NewClientWrapper create new client wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
return func(c client.Client) client.Client {
handler := &wrapper{
opts: NewOptions(opts...),
Client: c,
}
return handler
}
}
// NewCallWrapper create new call wrapper
func NewCallWrapper(opts ...Option) client.CallWrapper {
return func(fn client.CallFunc) client.CallFunc {
handler := &wrapper{
opts: NewOptions(opts...),
callFunc: fn,
}
return handler.CallFunc
}
}
func (w *wrapper) CallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range w.opts.SkipEndpoints {
if ep == endpoint {
return w.callFunc(ctx, addr, req, rsp, opts)
}
}
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
ts := time.Now()
err := w.callFunc(ctx, addr, req, rsp, opts)
te := time.Since(ts)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
return err
}
func (w *wrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range w.opts.SkipEndpoints {
if ep == endpoint {
return w.Client.Call(ctx, req, rsp, opts...)
}
}
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
ts := time.Now()
err := w.Client.Call(ctx, req, rsp, opts...)
te := time.Since(ts)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
return err
}
func (w *wrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range w.opts.SkipEndpoints {
if ep == endpoint {
return w.Client.Stream(ctx, req, opts...)
}
}
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Inc()
ts := time.Now()
stream, err := w.Client.Stream(ctx, req, opts...)
te := time.Since(ts)
w.opts.Meter.Counter(ClientRequestInflight, labels...).Dec()
w.opts.Meter.Summary(ClientRequestLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(ClientRequestDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(ClientRequestTotal, labels...).Inc()
return stream, err
}
func (w *wrapper) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
endpoint := p.Topic()
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(PublishMessageInflight, labels...).Inc()
ts := time.Now()
err := w.Client.Publish(ctx, p, opts...)
te := time.Since(ts)
w.opts.Meter.Counter(PublishMessageInflight, labels...).Dec()
w.opts.Meter.Summary(PublishMessageLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(PublishMessageDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(PublishMessageTotal, labels...).Inc()
return err
}
// NewHandlerWrapper create new server handler wrapper
// deprecated
func NewHandlerWrapper(opts ...Option) server.HandlerWrapper {
handler := &wrapper{
opts: NewOptions(opts...),
}
return handler.HandlerFunc
}
// NewServerHandlerWrapper create new server handler wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
handler := &wrapper{
opts: NewOptions(opts...),
}
return handler.HandlerFunc
}
func (w *wrapper) HandlerFunc(fn server.HandlerFunc) server.HandlerFunc {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
endpoint := req.Service() + "." + req.Endpoint()
for _, ep := range w.opts.SkipEndpoints {
if ep == endpoint {
return fn(ctx, req, rsp)
}
}
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(ServerRequestInflight, labels...).Inc()
ts := time.Now()
err := fn(ctx, req, rsp)
te := time.Since(ts)
w.opts.Meter.Counter(ServerRequestInflight, labels...).Dec()
w.opts.Meter.Summary(ServerRequestLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(ServerRequestDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(ServerRequestTotal, labels...).Inc()
return err
}
}
// NewSubscriberWrapper create server subscribe wrapper
// deprecated
func NewSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
handler := &wrapper{
opts: NewOptions(opts...),
}
return handler.SubscriberFunc
}
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
handler := &wrapper{
opts: NewOptions(opts...),
}
return handler.SubscriberFunc
}
func (w *wrapper) SubscriberFunc(fn server.SubscriberFunc) server.SubscriberFunc {
return func(ctx context.Context, msg server.Message) error {
endpoint := msg.Topic()
labels := make([]string, 0, 4)
labels = append(labels, labelEndpoint, endpoint)
w.opts.Meter.Counter(SubscribeMessageInflight, labels...).Inc()
ts := time.Now()
err := fn(ctx, msg)
te := time.Since(ts)
w.opts.Meter.Counter(SubscribeMessageInflight, labels...).Dec()
w.opts.Meter.Summary(SubscribeMessageLatencyMicroseconds, labels...).Update(te.Seconds())
w.opts.Meter.Histogram(SubscribeMessageDurationSeconds, labels...).Update(te.Seconds())
if err == nil {
labels = append(labels, labelStatus, labelSuccess)
} else {
labels = append(labels, labelStatus, labelFailure)
}
w.opts.Meter.Counter(SubscribeMessageTotal, labels...).Inc()
return err
}
}

View File

@ -1,4 +1,4 @@
package mtls // import "go.unistack.org/micro/v3/mtls"
package mtls
import (
"bytes"

View File

@ -1,5 +1,5 @@
// Package network is for creating internetworks
package network // import "go.unistack.org/micro/v3/network"
package network
import (
"go.unistack.org/micro/v3/client"

View File

@ -1,5 +1,5 @@
// Package transport is an interface for synchronous connection based communication
package transport // import "go.unistack.org/micro/v3/network/transport"
package transport
import (
"context"

View File

@ -1,5 +1,5 @@
// Package broker is a tunnel broker
package broker // import "go.unistack.org/micro/v3/network/tunnel/broker"
package broker
import (
"context"
@ -177,12 +177,12 @@ func (t *tunBatchSubscriber) run() {
// receive message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
if logger.V(logger.ErrorLevel) {
logger.Error(t.opts.Context, err.Error())
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
if err = c.Close(); err != nil {
if logger.V(logger.ErrorLevel) {
logger.Error(t.opts.Context, err.Error())
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
}
continue
@ -222,12 +222,12 @@ func (t *tunSubscriber) run() {
// receive message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
if logger.V(logger.ErrorLevel) {
logger.Error(t.opts.Context, err.Error())
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
if err = c.Close(); err != nil {
if logger.V(logger.ErrorLevel) {
logger.Error(t.opts.Context, err.Error())
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
}
continue

View File

@ -1,5 +1,5 @@
// Package transport provides a tunnel transport
package transport // import "go.unistack.org/micro/v3/network/tunnel/transport"
package transport
import (
"context"

View File

@ -1,5 +1,5 @@
// Package tunnel provides gre network tunnelling
package tunnel // import "go.unistack.org/micro/v3/network/transport/tunnel"
package tunnel
import (
"context"

View File

@ -1,5 +1,5 @@
// Package http enables the http profiler
package http // import "go.unistack.org/micro/v3/profiler/http"
package http
import (
"context"

View File

@ -1,5 +1,5 @@
// Package pprof provides a pprof profiler which writes output to /tmp/[name].{cpu,mem}.pprof
package pprof // import "go.unistack.org/micro/v3/profiler/pprof"
package pprof
import (
"os"

View File

@ -1,5 +1,5 @@
// Package profiler is for profilers
package profiler // import "go.unistack.org/micro/v3/profiler"
package profiler
// Profiler interface
type Profiler interface {

View File

@ -1,5 +1,5 @@
// Package proxy is a transparent proxy built on the micro/server
package proxy // import "go.unistack.org/micro/v3/proxy"
package proxy
import (
"context"

View File

@ -2,6 +2,7 @@ package register
import (
"context"
"fmt"
"sync"
"time"
@ -64,7 +65,7 @@ func (m *memory) ttlPrune() {
for id, n := range record.Nodes {
if n.TTL != 0 && time.Since(n.LastSeen) > n.TTL {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register TTL expired for node %s of service %s", n.ID, service)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register TTL expired for node %s of service %s", n.ID, service))
}
delete(m.records[domain][service][version].Nodes, id)
}
@ -151,7 +152,7 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi
if _, ok := srvs[s.Name][s.Version]; !ok {
srvs[s.Name][s.Version] = r
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new service: %s, version: %s", s.Name, s.Version))
}
m.records[options.Domain] = srvs
go m.sendEvent(&register.Result{Action: "create", Service: s})
@ -191,14 +192,14 @@ func (m *memory) Register(ctx context.Context, s *register.Service, opts ...regi
if addedNodes {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register added new node to service: %s, version: %s", s.Name, s.Version))
}
go m.sendEvent(&register.Result{Action: "update", Service: s})
} else {
// refresh TTL and timestamp
for _, n := range s.Nodes {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Updated registration for service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Updated registration for service: %s, version: %s", s.Name, s.Version))
}
srvs[s.Name][s.Version].Nodes[n.ID].TTL = options.TTL
srvs[s.Name][s.Version].Nodes[n.ID].LastSeen = time.Now()
@ -243,7 +244,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
for _, n := range s.Nodes {
if _, ok := version.Nodes[n.ID]; ok {
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed node from service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed node from service: %s, version: %s", s.Name, s.Version))
}
delete(version.Nodes, n.ID)
}
@ -264,7 +265,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
go m.sendEvent(&register.Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s", s.Name))
}
return nil
}
@ -273,7 +274,7 @@ func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...re
delete(m.records[options.Domain][s.Name], s.Version)
go m.sendEvent(&register.Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version)
m.opts.Logger.Debug(m.opts.Context, fmt.Sprintf("Register removed service: %s, version: %s", s.Name, s.Version))
}
return nil

View File

@ -1,5 +1,5 @@
// Package register is an interface for service discovery
package register // import "go.unistack.org/micro/v3/register"
package register
import (
"context"

View File

@ -1,5 +1,5 @@
// Package dns resolves names to dns records
package dns // import "go.unistack.org/micro/v3/resolver/dns"
package dns
import (
"context"

View File

@ -1,5 +1,5 @@
// Package dnssrv resolves names to dns srv records
package dnssrv // import "go.unistack.org/micro/v3/resolver/dnssrv"
package dnssrv
import (
"fmt"

View File

@ -1,5 +1,5 @@
// Package http resolves names to network addresses using a http request
package http // import "go.unistack.org/micro/v3/resolver/http"
package http
import (
"encoding/json"

View File

@ -1,5 +1,5 @@
// Package noop is a noop resolver
package noop // import "go.unistack.org/micro/v3/resolver/noop"
package noop
import (
"go.unistack.org/micro/v3/resolver"

View File

@ -1,5 +1,5 @@
// Package register resolves names using the micro register
package register // import "go.unistack.org/micro/v3/resolver/registry"
package register
import (
"context"

View File

@ -1,5 +1,5 @@
// Package static is a static resolver
package static // import "go.unistack.org/micro/v3/resolver/static"
package static
import (
"go.unistack.org/micro/v3/resolver"

View File

@ -1,5 +1,5 @@
// Package router provides a network routing control plane
package router // import "go.unistack.org/micro/v3/router"
package router
import (
"errors"

View File

@ -1,4 +1,4 @@
package random // import "go.unistack.org/micro/v3/selector/random"
package random
import (
"go.unistack.org/micro/v3/selector"

View File

@ -1,4 +1,4 @@
package roundrobin // import "go.unistack.org/micro/v3/selector/roundrobin"
package roundrobin
import (
"go.unistack.org/micro/v3/selector"

View File

@ -1,5 +1,5 @@
// Package selector is for node selection and load balancing
package selector // import "go.unistack.org/micro/v3/selector"
package selector
import (
"errors"

View File

@ -2,21 +2,21 @@ package semconv
var (
// PublishMessageDurationSeconds specifies meter metric name
PublishMessageDurationSeconds = "publish_message_duration_seconds"
PublishMessageDurationSeconds = "micro_publish_message_duration_seconds"
// PublishMessageLatencyMicroseconds specifies meter metric name
PublishMessageLatencyMicroseconds = "publish_message_latency_microseconds"
PublishMessageLatencyMicroseconds = "micro_publish_message_latency_microseconds"
// PublishMessageTotal specifies meter metric name
PublishMessageTotal = "publish_message_total"
PublishMessageTotal = "micro_publish_message_total"
// PublishMessageInflight specifies meter metric name
PublishMessageInflight = "publish_message_inflight"
PublishMessageInflight = "micro_publish_message_inflight"
// SubscribeMessageDurationSeconds specifies meter metric name
SubscribeMessageDurationSeconds = "subscribe_message_duration_seconds"
SubscribeMessageDurationSeconds = "micro_subscribe_message_duration_seconds"
// SubscribeMessageLatencyMicroseconds specifies meter metric name
SubscribeMessageLatencyMicroseconds = "subscribe_message_latency_microseconds"
SubscribeMessageLatencyMicroseconds = "micro_subscribe_message_latency_microseconds"
// SubscribeMessageTotal specifies meter metric name
SubscribeMessageTotal = "subscribe_message_total"
SubscribeMessageTotal = "micro_subscribe_message_total"
// SubscribeMessageInflight specifies meter metric name
SubscribeMessageInflight = "subscribe_message_inflight"
SubscribeMessageInflight = "micro_subscribe_message_inflight"
// BrokerGroupLag specifies broker lag
BrokerGroupLag = "broker_group_lag"
BrokerGroupLag = "micro_broker_group_lag"
)

View File

@ -1,12 +0,0 @@
package semconv
var (
// CacheRequestDurationSeconds specifies meter metric name
CacheRequestDurationSeconds = "cache_request_duration_seconds"
// ClientRequestLatencyMicroseconds specifies meter metric name
CacheRequestLatencyMicroseconds = "cache_request_latency_microseconds"
// CacheRequestTotal specifies meter metric name
CacheRequestTotal = "cache_request_total"
// CacheRequestInflight specifies meter metric name
CacheRequestInflight = "cache_request_inflight"
)

View File

@ -2,11 +2,11 @@ package semconv
var (
// ClientRequestDurationSeconds specifies meter metric name
ClientRequestDurationSeconds = "client_request_duration_seconds"
ClientRequestDurationSeconds = "micro_client_request_duration_seconds"
// ClientRequestLatencyMicroseconds specifies meter metric name
ClientRequestLatencyMicroseconds = "client_request_latency_microseconds"
ClientRequestLatencyMicroseconds = "micro_client_request_latency_microseconds"
// ClientRequestTotal specifies meter metric name
ClientRequestTotal = "client_request_total"
ClientRequestTotal = "micro_client_request_total"
// ClientRequestInflight specifies meter metric name
ClientRequestInflight = "client_request_inflight"
ClientRequestInflight = "micro_client_request_inflight"
)

View File

@ -1,4 +1,4 @@
package semconv
// LoggerMessageTotal specifies meter metric name for logger messages
var LoggerMessageTotal = "logger_message_total"
var LoggerMessageTotal = "micro_logger_message_total"

semconv/pool.go Normal file
View File

@ -0,0 +1,12 @@
package semconv
var (
// PoolGetTotal specifies meter metric name for total number of pool get ops
PoolGetTotal = "micro_pool_get_total"
// PoolPutTotal specifies meter metric name for total number of pool put ops
PoolPutTotal = "micro_pool_put_total"
// PoolMisTotal specifies meter metric name for total number of pool misses
PoolMisTotal = "micro_pool_mis_total"
// PoolRetTotal specifies meter metric name for total number of pool returned to gc
PoolRetTotal = "micro_pool_ret_total"
)
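A hedged sketch of how these names are meant to be consumed (the Counter(...).Inc() call shape mirrors the meter wrappers elsewhere in this diff; the "type" label is illustrative only):

    package main

    import (
        "go.unistack.org/micro/v3/meter"
        "go.unistack.org/micro/v3/semconv"
    )

    func main() {
        // Count one pool get and one pool put against the default meter,
        // using the micro_-prefixed pool metric names defined above.
        meter.DefaultMeter.Counter(semconv.PoolGetTotal, "type", "bytes.Buffer").Inc()
        meter.DefaultMeter.Counter(semconv.PoolPutTotal, "type", "bytes.Buffer").Inc()
    }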

View File

@ -2,11 +2,11 @@ package semconv
var (
// ServerRequestDurationSeconds specifies meter metric name
ServerRequestDurationSeconds = "server_request_duration_seconds"
ServerRequestDurationSeconds = "micro_server_request_duration_seconds"
// ServerRequestLatencyMicroseconds specifies meter metric name
ServerRequestLatencyMicroseconds = "server_request_latency_microseconds"
ServerRequestLatencyMicroseconds = "micro_server_request_latency_microseconds"
// ServerRequestTotal specifies meter metric name
ServerRequestTotal = "server_request_total"
ServerRequestTotal = "micro_server_request_total"
// ServerRequestInflight specifies meter metric name
ServerRequestInflight = "server_request_inflight"
ServerRequestInflight = "micro_server_request_inflight"
)

semconv/store.go Normal file
View File

@ -0,0 +1,12 @@
package semconv
var (
// StoreRequestDurationSeconds specifies meter metric name
StoreRequestDurationSeconds = "micro_store_request_duration_seconds"
// StoreRequestLatencyMicroseconds specifies meter metric name
StoreRequestLatencyMicroseconds = "micro_store_request_latency_microseconds"
// StoreRequestTotal specifies meter metric name
StoreRequestTotal = "micro_store_request_total"
// StoreRequestInflight specifies meter metric name
StoreRequestInflight = "micro_store_request_inflight"
)

View File

@ -1,7 +1,6 @@
package server
import (
"bytes"
"context"
"fmt"
"reflect"
@ -460,7 +459,7 @@ func (n *noopServer) Start() error {
}
} else if rerr != nil && !registered {
if config.Logger.V(logger.ErrorLevel) {
config.Logger.Errorf(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
config.Logger.Error(n.opts.Context, fmt.Sprintf("server %s-%s register check error", config.Name, config.ID), rerr)
}
continue
}
@ -691,7 +690,7 @@ func (n *noopServer) createSubHandler(sb *subscriber, opts Options) broker.Handl
req = req.Elem()
}
if err = cf.ReadBody(bytes.NewBuffer(msg.Body), req.Interface()); err != nil {
if err = cf.Unmarshal(msg.Body, req.Interface()); err != nil {
return err
}

View File

@ -1,5 +1,5 @@
// Package server is an interface for a micro server
package server // import "go.unistack.org/micro/v3/server"
package server
import (
"context"
@ -11,7 +11,9 @@ import (
)
// DefaultServer default server
var DefaultServer Server = NewServer()
var (
DefaultServer Server = NewServer()
)
var (
// DefaultAddress will be used if no address passed, use secure localhost

View File

@ -5,6 +5,8 @@ import (
"fmt"
"sync"
"github.com/KimMachineGun/automemlimit/memlimit"
"go.uber.org/automaxprocs/maxprocs"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/config"
@ -17,6 +19,19 @@ import (
"go.unistack.org/micro/v3/tracer"
)
func init() {
maxprocs.Set()
memlimit.SetGoMemLimitWithOpts(
memlimit.WithRatio(0.9),
memlimit.WithProvider(
memlimit.ApplyFallback(
memlimit.FromCgroup,
memlimit.FromSystem,
),
),
)
}
// Service is an interface that wraps the lower level components.
// It works as a container with building blocks for the service.
type Service interface {
@ -72,13 +87,14 @@ func RegisterSubscriber(topic string, s server.Server, h interface{}, opts ...se
}
type service struct {
done chan struct{}
opts Options
sync.RWMutex
}
// NewService creates and returns a new Service based on the packages within.
func NewService(opts ...Option) Service {
return &service{opts: NewOptions(opts...)}
return &service{opts: NewOptions(opts...), done: make(chan struct{})}
}
func (s *service) Name() string {
@ -262,7 +278,7 @@ func (s *service) Start() error {
}
if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Infof(s.opts.Context, "starting [service] %s version %s", s.Options().Name, s.Options().Version)
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("starting [service] %s version %s", s.Options().Name, s.Options().Version))
}
if len(s.opts.Servers) == 0 {
@ -308,7 +324,7 @@ func (s *service) Stop() error {
s.RUnlock()
if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Infof(s.opts.Context, "stopping [service] %s", s.Name())
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stopping [service] %s", s.Name()))
}
var err error
@ -348,6 +364,8 @@ func (s *service) Stop() error {
}
}
close(s.done)
return nil
}
@ -371,7 +389,7 @@ func (s *service) Run() error {
}
// wait on context cancel
<-s.opts.Context.Done()
<-s.done
return s.Stop()
}
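With this change Run blocks on the service's internal done channel (closed by Stop) rather than on the options context; a minimal sketch, assuming the root package is importable as go.unistack.org/micro/v3:

    package main

    import (
        "log"

        micro "go.unistack.org/micro/v3"
    )

    func main() {
        svc := micro.NewService()

        // Run starts the configured servers/brokers and then blocks until the
        // done channel is closed, i.e. until Stop is invoked (for example from
        // a signal handler elsewhere in the program).
        if err := svc.Run(); err != nil {
            log.Fatal(err)
        }
    }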

View File

@ -134,7 +134,7 @@ func TestNewService(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := NewService(tt.args.opts...); !reflect.DeepEqual(got, tt.want) {
if got := NewService(tt.args.opts...); got.Name() != tt.want.Name() {
t.Errorf("NewService() = %v, want %v", got.Options().Name, tt.want.Options().Name)
}
})

View File

@ -4,6 +4,7 @@ import (
"context"
"sort"
"strings"
"sync/atomic"
"time"
cache "github.com/patrickmn/go-cache"
@ -20,8 +21,11 @@ func NewStore(opts ...store.Option) store.Store {
}
func (m *memoryStore) Connect(ctx context.Context) error {
if m.opts.LazyConnect {
return nil
}
return m.connect(ctx)
}
func (m *memoryStore) Disconnect(ctx context.Context) error {
m.store.Flush()
@ -36,6 +40,7 @@ type memoryStore struct {
funcDelete store.FuncDelete
store *cache.Cache
opts store.Options
isConnected atomic.Int32
}
func (m *memoryStore) key(prefix, key string) string {
@ -145,6 +150,11 @@ func (m *memoryStore) Name() string {
}
func (m *memoryStore) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
if m.opts.LazyConnect {
if err := m.connect(ctx); err != nil {
return err
}
}
return m.funcExists(ctx, key, opts...)
}
@ -157,6 +167,11 @@ func (m *memoryStore) fnExists(ctx context.Context, key string, opts ...store.Ex
}
func (m *memoryStore) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
if m.opts.LazyConnect {
if err := m.connect(ctx); err != nil {
return err
}
}
return m.funcRead(ctx, key, val, opts...)
}
@ -169,6 +184,11 @@ func (m *memoryStore) fnRead(ctx context.Context, key string, val interface{}, o
}
func (m *memoryStore) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
if m.opts.LazyConnect {
if err := m.connect(ctx); err != nil {
return err
}
}
return m.funcWrite(ctx, key, val, opts...)
}
@ -193,6 +213,11 @@ func (m *memoryStore) fnWrite(ctx context.Context, key string, val interface{},
}
func (m *memoryStore) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
if m.opts.LazyConnect {
if err := m.connect(ctx); err != nil {
return err
}
}
return m.funcDelete(ctx, key, opts...)
}
@ -211,6 +236,11 @@ func (m *memoryStore) Options() store.Options {
}
func (m *memoryStore) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
if m.opts.LazyConnect {
if err := m.connect(ctx); err != nil {
return nil, err
}
}
return m.funcList(ctx, opts...)
}
@ -244,3 +274,8 @@ func (m *memoryStore) fnList(ctx context.Context, opts ...store.ListOption) ([]s
return keys, nil
}
func (m *memoryStore) connect(ctx context.Context) error {
m.isConnected.CompareAndSwap(0, 1)
return nil
}

View File

@ -2,6 +2,7 @@ package store
import (
"context"
"sync/atomic"
"go.unistack.org/micro/v3/options"
)
@ -15,6 +16,7 @@ type noopStore struct {
funcList FuncList
funcDelete FuncDelete
opts Options
isConnected atomic.Int32
}
func NewStore(opts ...Option) *noopStore {
@ -52,13 +54,11 @@ func (n *noopStore) Init(opts ...Option) error {
}
func (n *noopStore) Connect(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if n.opts.LazyConnect {
return nil
}
return n.connect(ctx)
}
func (n *noopStore) Disconnect(ctx context.Context) error {
select {
@ -70,6 +70,11 @@ func (n *noopStore) Disconnect(ctx context.Context) error {
}
func (n *noopStore) Read(ctx context.Context, key string, val interface{}, opts ...ReadOption) error {
if n.opts.LazyConnect {
if err := n.connect(ctx); err != nil {
return err
}
}
return n.funcRead(ctx, key, val, opts...)
}
@ -83,6 +88,11 @@ func (n *noopStore) fnRead(ctx context.Context, key string, val interface{}, opt
}
func (n *noopStore) Delete(ctx context.Context, key string, opts ...DeleteOption) error {
if n.opts.LazyConnect {
if err := n.connect(ctx); err != nil {
return err
}
}
return n.funcDelete(ctx, key, opts...)
}
@ -96,6 +106,11 @@ func (n *noopStore) fnDelete(ctx context.Context, key string, opts ...DeleteOpti
}
func (n *noopStore) Exists(ctx context.Context, key string, opts ...ExistsOption) error {
if n.opts.LazyConnect {
if err := n.connect(ctx); err != nil {
return err
}
}
return n.funcExists(ctx, key, opts...)
}
@ -109,6 +124,11 @@ func (n *noopStore) fnExists(ctx context.Context, key string, opts ...ExistsOpti
}
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {
if n.opts.LazyConnect {
if err := n.connect(ctx); err != nil {
return err
}
}
return n.funcWrite(ctx, key, val, opts...)
}
@ -122,6 +142,11 @@ func (n *noopStore) fnWrite(ctx context.Context, key string, val interface{}, op
}
func (n *noopStore) List(ctx context.Context, opts ...ListOption) ([]string, error) {
if n.opts.LazyConnect {
if err := n.connect(ctx); err != nil {
return nil, err
}
}
return n.funcList(ctx, opts...)
}
@ -145,3 +170,15 @@ func (n *noopStore) String() string {
func (n *noopStore) Options() Options {
return n.opts
}
func (n *noopStore) connect(ctx context.Context) error {
if n.isConnected.CompareAndSwap(0, 1) {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
}
return nil
}

View File

@ -41,6 +41,8 @@ type Options struct {
Timeout time.Duration
// Hooks can be run before/after store Read/List/Write/Exists/Delete
Hooks options.Hooks
// LazyConnect defers creating the connection until the store is first used
LazyConnect bool
}
// NewOptions creates options struct
@ -132,6 +134,13 @@ func Timeout(td time.Duration) Option {
}
}
// LazyConnect initializes the connection only when needed
func LazyConnect(b bool) Option {
return func(o *Options) {
o.LazyConnect = b
}
}
// Addrs contains the addresses or other connection information of the backing storage.
// For example, an etcd implementation would contain the nodes of the cluster.
// A SQL implementation could contain one or more connection strings.
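A hedged usage sketch of the new LazyConnect option against the noop store in this package (error handling shortened; whether a backend actually defers dialing is up to the concrete store implementation):

    package main

    import (
        "context"
        "log"

        "go.unistack.org/micro/v3/store"
    )

    func main() {
        ctx := context.TODO()

        // With LazyConnect(true) Connect becomes a no-op and the connection is
        // established on the first Read/Write/List/Exists/Delete call instead.
        s := store.NewStore(store.LazyConnect(true))
        if err := s.Init(); err != nil {
            log.Fatal(err)
        }

        // No explicit Connect: this Write triggers the lazy connect path.
        if err := s.Write(ctx, "key", "val"); err != nil {
            log.Fatal(err)
        }
    }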

View File

@ -1,5 +1,5 @@
// Package store is an interface for distributed data storage.
package store // import "go.unistack.org/micro/v3/store"
package store
import (
"context"
@ -7,6 +7,8 @@ import (
)
var (
// ErrNotConnected is returned when a store is not connected
ErrNotConnected = errors.New("not connected")
// ErrNotFound is returned when a key doesn't exist
ErrNotFound = errors.New("not found")
// ErrInvalidKey is returned when a key is empty or has an invalid format

View File

@ -1,5 +1,5 @@
// Package sync is an interface for distributed synchronization
package sync // import "go.unistack.org/micro/v3/sync"
package sync
import (
"errors"

View File

@ -17,14 +17,14 @@ func TestLoggerWithTracer(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger.DefaultLogger = slog.NewLogger(logger.WithOutput(buf))
if err := logger.Init(); err != nil {
if err := logger.DefaultLogger.Init(); err != nil {
t.Fatal(err)
}
var span tracer.Span
tr := NewTracer()
ctx, span = tr.Start(ctx, "test1")
logger.Error(ctx, "my test error", fmt.Errorf("error"))
logger.DefaultLogger.Error(ctx, "my test error", fmt.Errorf("error"))
if !strings.Contains(buf.String(), span.TraceID()) {
t.Fatalf("log does not contains trace id: %s", buf.Bytes())

View File

@ -140,6 +140,8 @@ type Options struct {
Logger logger.Logger
// Name of the tracer
Name string
// ContextAttrFuncs contains funcs that provide tracing attributes
ContextAttrFuncs []ContextAttrFunc
}
// Option func signature
@ -178,6 +180,7 @@ func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
}
for _, o := range opts {
o(&options)

View File

@ -1,5 +1,5 @@
// Package tracer provides an interface for distributed tracing
package tracer // import "go.unistack.org/micro/v3/tracer"
package tracer
import (
"context"
@ -21,8 +21,11 @@ var (
"HealthService.Ready",
"HealthService.Version",
}
DefaultContextAttrFuncs []ContextAttrFunc
)
type ContextAttrFunc func(ctx context.Context) []interface{}
func init() {
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs,
func(ctx context.Context) []interface{} {

View File

@ -1,415 +0,0 @@
// Package wrapper provides wrapper for Tracer
package wrapper // import "go.unistack.org/micro/v3/tracer/wrapper"
import (
"context"
"fmt"
"strings"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/server"
"go.unistack.org/micro/v3/tracer"
)
var DefaultHeadersExctract = []string{metadata.HeaderXRequestID}
func ExtractDefaultLabels(md metadata.Metadata) []interface{} {
labels := make([]interface{}, 0, len(DefaultHeadersExctract))
for _, k := range DefaultHeadersExctract {
if v, ok := md.Get(k); ok {
labels = append(labels, strings.ToLower(k), v)
}
}
return labels
}
var (
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, sp tracer.Span, err error) {
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, sp tracer.Span, err error) {
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, sp tracer.Span, err error) {
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
labels = append(labels, ExtractDefaultLabels(msg.Metadata())...)
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, sp tracer.Span, err error) {
var labels []interface{}
if md, ok := metadata.FromIncomingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, sp tracer.Span, err error) {
var labels []interface{}
if md, ok := metadata.FromIncomingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
labels = append(labels, ExtractDefaultLabels(msg.Header())...)
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, sp tracer.Span, err error) {
sp.SetName(fmt.Sprintf("%s.%s call", req.Service(), req.Method()))
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
sp.AddLabels(labels...)
}
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
)
type tWrapper struct {
client.Client
serverHandler server.HandlerFunc
serverSubscriber server.SubscriberFunc
clientCallFunc client.CallFunc
opts Options
}
type (
ClientCallObserver func(context.Context, client.Request, interface{}, []client.CallOption, tracer.Span, error)
ClientStreamObserver func(context.Context, client.Request, []client.CallOption, client.Stream, tracer.Span, error)
ClientPublishObserver func(context.Context, client.Message, []client.PublishOption, tracer.Span, error)
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, tracer.Span, error)
ServerHandlerObserver func(context.Context, server.Request, interface{}, tracer.Span, error)
ServerSubscriberObserver func(context.Context, server.Message, tracer.Span, error)
)
// Options struct
type Options struct {
// Tracer that used for tracing
Tracer tracer.Tracer
// ClientCallObservers funcs
ClientCallObservers []ClientCallObserver
// ClientStreamObservers funcs
ClientStreamObservers []ClientStreamObserver
// ClientPublishObservers funcs
ClientPublishObservers []ClientPublishObserver
// ClientCallFuncObservers funcs
ClientCallFuncObservers []ClientCallFuncObserver
// ServerHandlerObservers funcs
ServerHandlerObservers []ServerHandlerObserver
// ServerSubscriberObservers funcs
ServerSubscriberObservers []ServerSubscriberObserver
// SkipEndpoints
SkipEndpoints []string
}
// Option func signature
type Option func(*Options)
// NewOptions create Options from Option slice
func NewOptions(opts ...Option) Options {
options := Options{
Tracer: tracer.DefaultTracer,
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
ClientPublishObservers: []ClientPublishObserver{DefaultClientPublishObserver},
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
ServerSubscriberObservers: []ServerSubscriberObserver{DefaultServerSubscriberObserver},
SkipEndpoints: DefaultSkipEndpoints,
}
for _, o := range opts {
o(&options)
}
return options
}
// WithTracer pass tracer
func WithTracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// SkipEndpoins pass endpoint names that must be skipped from tracing
func SkipEndpoins(eps ...string) Option {
return func(o *Options) {
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
}
}
// WithClientCallObservers funcs
func WithClientCallObservers(ob ...ClientCallObserver) Option {
return func(o *Options) {
o.ClientCallObservers = ob
}
}
// WithClientStreamObservers funcs
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
return func(o *Options) {
o.ClientStreamObservers = ob
}
}
// WithClientPublishObservers funcs
func WithClientPublishObservers(ob ...ClientPublishObserver) Option {
return func(o *Options) {
o.ClientPublishObservers = ob
}
}
// WithClientCallFuncObservers funcs
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
return func(o *Options) {
o.ClientCallFuncObservers = ob
}
}
// WithServerHandlerObservers funcs
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
return func(o *Options) {
o.ServerHandlerObservers = ob
}
}
// WithServerSubscriberObservers funcs
func WithServerSubscriberObservers(ob ...ServerSubscriberObserver) Option {
return func(o *Options) {
o.ServerSubscriberObservers = ob
}
}
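As an aside, a brief sketch (the extra label key and the function names here are hypothetical) of how a custom observer can be appended next to the default one through these options:

func exampleObservers() Options {
    // customHandlerObserver is a hypothetical observer that tags failed handler
    // spans with an extra label; it runs after the handler, before the span is finished.
    customHandlerObserver := func(ctx context.Context, req server.Request, rsp interface{}, sp tracer.Span, err error) {
        if err != nil {
            sp.AddLabels("rpc.error", err.Error())
        }
    }
    return NewOptions(
        WithTracer(tracer.DefaultTracer),
        WithServerHandlerObservers(DefaultServerHandlerObserver, customHandlerObserver),
    )
}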
func (ot *tWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range ot.opts.SkipEndpoints {
if ep == endpoint {
return ot.Client.Call(ctx, req, rsp, opts...)
}
}
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels(
"rpc.service", req.Service(),
"rpc.method", req.Method(),
"rpc.flavor", "rpc",
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
"rpc.call_type", "unary",
),
)
defer sp.Finish()
err := ot.Client.Call(nctx, req, rsp, opts...)
for _, o := range ot.opts.ClientCallObservers {
o(nctx, req, rsp, opts, sp, err)
}
return err
}
func (ot *tWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range ot.opts.SkipEndpoints {
if ep == endpoint {
return ot.Client.Stream(ctx, req, opts...)
}
}
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels(
"rpc.service", req.Service(),
"rpc.method", req.Method(),
"rpc.flavor", "rpc",
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
"rpc.call_type", "stream",
),
)
defer sp.Finish()
stream, err := ot.Client.Stream(nctx, req, opts...)
for _, o := range ot.opts.ClientStreamObservers {
o(nctx, req, opts, stream, sp, err)
}
return stream, err
}
func (ot *tWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" publish", tracer.WithSpanKind(tracer.SpanKindProducer))
defer sp.Finish()
sp.AddLabels("messaging.destination.name", msg.Topic())
sp.AddLabels("messaging.operation", "publish")
err := ot.Client.Publish(nctx, msg, opts...)
for _, o := range ot.opts.ClientPublishObservers {
o(nctx, msg, opts, sp, err)
}
return err
}
func (ot *tWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Method())
for _, ep := range ot.opts.SkipEndpoints {
if ep == endpoint {
return ot.serverHandler(ctx, req, rsp)
}
}
callType := "unary"
if req.Stream() {
callType = "stream"
}
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-server", req.Service(), req.Method()),
tracer.WithSpanKind(tracer.SpanKindServer),
tracer.WithSpanLabels(
"rpc.service", req.Service(),
"rpc.method", req.Method(),
"rpc.flavor", "rpc",
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
"rpc.call_type", callType,
),
)
defer sp.Finish()
err := ot.serverHandler(nctx, req, rsp)
for _, o := range ot.opts.ServerHandlerObservers {
o(nctx, req, rsp, sp, err)
}
return err
}
func (ot *tWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" process", tracer.WithSpanKind(tracer.SpanKindConsumer))
defer sp.Finish()
sp.AddLabels("messaging.operation", "process")
sp.AddLabels("messaging.source.name", msg.Topic())
err := ot.serverSubscriber(nctx, msg)
for _, o := range ot.opts.ServerSubscriberObservers {
o(nctx, msg, sp, err)
}
return err
}
// NewClientWrapper accepts Option funcs and returns a Client wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
return func(c client.Client) client.Client {
options := NewOptions()
for _, o := range opts {
o(&options)
}
return &tWrapper{opts: options, Client: c}
}
}
// NewClientCallWrapper accepts Option funcs and returns a CallFunc wrapper
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
return func(h client.CallFunc) client.CallFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
ot := &tWrapper{opts: options, clientCallFunc: h}
return ot.ClientCallFunc
}
}
func (ot *tWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Method())
for _, ep := range ot.opts.SkipEndpoints {
if ep == endpoint {
return ot.clientCallFunc(ctx, addr, req, rsp, opts)
}
}
nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels(
"rpc.service", req.Service(),
"rpc.method", req.Method(),
"rpc.flavor", "rpc",
"rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
"rpc.call_type", "unary",
),
)
defer sp.Finish()
err := ot.clientCallFunc(nctx, addr, req, rsp, opts)
for _, o := range ot.opts.ClientCallFuncObservers {
o(nctx, addr, req, rsp, opts, sp, err)
}
return err
}
// NewServerHandlerWrapper accepts Option funcs and returns a Handler wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
return func(h server.HandlerFunc) server.HandlerFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
ot := &tWrapper{opts: options, serverHandler: h}
return ot.ServerHandler
}
}
// NewServerSubscriberWrapper accepts Option funcs and returns a Subscriber wrapper
func NewServerSubscriberWrapper(opts ...Option) server.SubscriberWrapper {
return func(h server.SubscriberFunc) server.SubscriberFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
ot := &tWrapper{opts: options, serverSubscriber: h}
return ot.ServerSubscriber
}
}
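Putting the pieces together, a minimal usage sketch that applies the wrappers by hand; the skipped endpoint name is made up, and wiring the wrapped values back into client/server construction is left to the surrounding setup:

func wireTracing(c client.Client, h server.HandlerFunc) (client.Client, server.HandlerFunc) {
    opts := []Option{WithTracer(tracer.DefaultTracer), SkipEndpoins("Example.Internal")}
    // NewClientWrapper returns a client.Wrapper (func(client.Client) client.Client);
    // NewServerHandlerWrapper returns a server.HandlerWrapper (func(server.HandlerFunc) server.HandlerFunc).
    wc := NewClientWrapper(opts...)(c)
    wh := NewServerHandlerWrapper(opts...)(h)
    return wc, wh
}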

View File

@ -1,4 +1,4 @@
package addr // import "go.unistack.org/micro/v3/util/addr"
package addr
import (
"fmt"
@ -58,6 +58,7 @@ func IsLocal(addr string) bool {
}
// Extract returns a real ip
//
//nolint:gocyclo
func Extract(addr string) (string, error) {
// if addr is specified then it's returned

View File

@ -1,5 +1,5 @@
// Package backoff provides backoff functionality
package backoff // import "go.unistack.org/micro/v3/util/backoff"
package backoff
import (
"math"

View File

@ -1,4 +1,4 @@
package buf // import "go.unistack.org/micro/v3/util/buf"
package buf
import (
"bytes"

View File

@ -1,4 +1,4 @@
package http // import "go.unistack.org/micro/v3/util/http"
package http
import (
"context"

View File

@ -1,4 +1,4 @@
package id // import "go.unistack.org/micro/v3/util/id"
package id
import (
"context"
@ -71,7 +71,7 @@ func New(opts ...Option) (string, error) {
func Must(opts ...Option) string {
id, err := New(opts...)
if err != nil {
logger.Fatal(context.TODO(), err)
logger.DefaultLogger.Fatal(context.TODO(), "Must call failed", err)
}
return id
}

View File

@ -1,5 +1,5 @@
// Package io is for io management
package io // import "go.unistack.org/micro/v3/util/io"
package io
import (
"io"

View File

@ -1,5 +1,5 @@
// Package jitter provides a random jitter
package jitter // import "go.unistack.org/micro/v3/util/jitter"
package jitter
import (
"time"

View File

@ -1,4 +1,4 @@
package jitter // import "go.unistack.org/micro/v3/util/jitter"
package jitter
import (
"context"

View File

@ -1,4 +1,4 @@
package net // import "go.unistack.org/micro/v3/util/net"
package net
import (
"errors"

View File

@ -1,6 +1,6 @@
// Package pki provides PKI all the PKI functions necessary to run micro over an untrusted network
// including a CA
package pki // import "go.unistack.org/micro/v3/util/pki"
package pki
import (
"bytes"

View File

@ -1,5 +1,5 @@
// Package pool is a connection pool
package pool // import "go.unistack.org/micro/v3/util/pool"
package pool
import (
"context"

View File

@ -1,4 +1,4 @@
package rand // import "go.unistack.org/micro/v3/util/rand"
package rand
import (
crand "crypto/rand"

View File

@ -44,6 +44,37 @@ func SliceAppend(b bool) Option {
}
}
var maxDepth = 32
func mergeMap(dst, src map[string]interface{}, depth int) map[string]interface{} {
if depth > maxDepth {
return dst
}
for key, srcVal := range src {
if dstVal, ok := dst[key]; ok {
srcMap, srcMapOk := mapify(srcVal)
dstMap, dstMapOk := mapify(dstVal)
if srcMapOk && dstMapOk {
srcVal = mergeMap(dstMap, srcMap, depth+1)
}
}
dst[key] = srcVal
}
return dst
}
func mapify(i interface{}) (map[string]interface{}, bool) {
value := reflect.ValueOf(i)
if value.Kind() == reflect.Map {
m := map[string]interface{}{}
for _, k := range value.MapKeys() {
m[k.String()] = value.MapIndex(k).Interface()
}
return m, true
}
return map[string]interface{}{}, false
}
// Merge merges map[string]interface{} to destination struct
func Merge(dst interface{}, mp map[string]interface{}, opts ...Option) error {
options := Options{}
@ -59,6 +90,11 @@ func Merge(dst interface{}, mp map[string]interface{}, opts ...Option) error {
return err
}
if mapper, ok := dst.(map[string]interface{}); ok {
dst = mergeMap(mapper, mp, 0)
return nil
}
var err error
var sval reflect.Value
var fname string

View File

@ -4,6 +4,27 @@ import (
"testing"
)
func TestMergeMap(t *testing.T) {
src := map[string]interface{}{
"skey1": "sval1",
"skey2": map[string]interface{}{
"skey3": "sval3",
},
}
dst := map[string]interface{}{
"skey1": "dval1",
"skey2": map[string]interface{}{
"skey3": "dval3",
},
}
if err := Merge(src, dst); err != nil {
t.Fatal(err)
}
t.Logf("%#+v", src)
}
func TestFieldName(t *testing.T) {
src := "SomeVar"
chk := "some_var"

View File

@ -1,4 +1,4 @@
package register // import "go.unistack.org/micro/v3/util/register"
package register
import (
"context"

View File

@ -1,5 +1,5 @@
// Package ring provides a simple ring buffer for storing local data
package ring // import "go.unistack.org/micro/v3/util/ring"
package ring
import (
"sync"

View File

@ -1,5 +1,5 @@
// Package socket provides a pseudo socket
package socket // import "go.unistack.org/micro/v3/util/socket"
package socket
import (
"io"

util/sort/sort_test.go Normal file
View File

@ -0,0 +1,60 @@
package sort
import (
"reflect"
"testing"
)
func TestUniq(t *testing.T) {
type args struct {
labels []interface{}
}
tests := []struct {
name string
args args
want []interface{}
}{
{
name: "test#1",
args: args{
labels: append(make([]interface{}, 0), "test-1", 1, "test-2", 2),
},
want: append(make([]interface{}, 0), "test-1", 1, "test-2", 2),
},
{
name: "test#2",
args: args{
labels: append(make([]interface{}, 0), "test-1", 1, "test-2", 2, "test-2", 2),
},
want: append(make([]interface{}, 0), "test-1", 1, "test-2", 2),
},
{
name: "test#3",
args: args{
labels: append(make([]interface{}, 0), "test-1", 1, "test-2", 2, "test-2", 3),
},
want: append(make([]interface{}, 0), "test-1", 1, "test-2", 3),
},
{
name: "test#4",
args: args{
labels: append(make([]interface{}, 0),
"test-1", 1, "test-1", 2,
"test-2", 3, "test-2", 2,
"test-3", 5, "test-3", 3,
"test-1", 4, "test-1", 1),
},
want: append(make([]interface{}, 0), "test-1", 1, "test-2", 2, "test-3", 3),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var got []interface{}
if got = Uniq(tt.args.labels); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Uniq() = %v, want %v", got, tt.want)
}
t.Logf("got-%#v", got)
})
}
}

View File

@ -1,5 +1,5 @@
// Package stream encapsulates streams within streams
package stream // import "go.unistack.org/micro/v3/util/stream"
package stream
import (
"context"

View File

@ -6,6 +6,8 @@ import (
"fmt"
"strconv"
"time"
"gopkg.in/yaml.v3"
)
type Duration int64
@ -53,6 +55,31 @@ loop:
return time.ParseDuration(fmt.Sprintf("%dh%s", hours, s[p:]))
}
func (d Duration) MarshalYAML() (interface{}, error) {
return time.Duration(d).String(), nil
}
func (d *Duration) UnmarshalYAML(n *yaml.Node) error {
var v interface{}
if err := yaml.Unmarshal([]byte(n.Value), &v); err != nil {
return err
}
switch value := v.(type) {
case float64:
*d = Duration(time.Duration(value))
return nil
case string:
dv, err := ParseDuration(value)
if err != nil {
return err
}
*d = Duration(dv)
return nil
default:
return fmt.Errorf("invalid duration")
}
}
func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Duration(d).String())
}

View File

@ -5,8 +5,44 @@ import (
"encoding/json"
"testing"
"time"
"gopkg.in/yaml.v3"
)
func TestMarshalYAML(t *testing.T) {
d := Duration(10000000)
buf, err := yaml.Marshal(d)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, []byte(`10ms
`)) {
t.Fatalf("invalid duration: %s != %s", buf, `10ms`)
}
}
func TestUnmarshalYAML(t *testing.T) {
type str struct {
TTL Duration `yaml:"ttl"`
}
v := &str{}
var err error
err = yaml.Unmarshal([]byte(`{"ttl":"10ms"}`), v)
if err != nil {
t.Fatal(err)
} else if v.TTL != 10000000 {
t.Fatalf("invalid duration %v != 10000000", v.TTL)
}
err = yaml.Unmarshal([]byte(`{"ttl":"1y"}`), v)
if err != nil {
t.Fatal(err)
} else if v.TTL != 31622400000000000 {
t.Fatalf("invalid duration %v != 31622400000000000", v.TTL)
}
}
func TestMarshalJSON(t *testing.T) {
d := Duration(10000000)
buf, err := json.Marshal(d)

View File

@ -1,11 +1,78 @@
package pool
import "sync"
import (
"bytes"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/semconv"
)
var (
pools = make([]Statser, 0)
poolsMu sync.Mutex
)
// Stats struct
type Stats struct {
Get uint64
Put uint64
Mis uint64
Ret uint64
}
// Statser provides buffer pool stats
type Statser interface {
Stats() Stats
Cap() int
}
func init() {
go newStatsMeter()
}
func newStatsMeter() {
ticker := time.NewTicker(meter.DefaultMeterStatsInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
poolsMu.Lock()
for _, st := range pools {
stats := st.Stats()
meter.DefaultMeter.Counter(semconv.PoolGetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Get)
meter.DefaultMeter.Counter(semconv.PoolPutTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Put)
meter.DefaultMeter.Counter(semconv.PoolMisTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Mis)
meter.DefaultMeter.Counter(semconv.PoolRetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Ret)
}
poolsMu.Unlock()
}
}
}
var (
_ Statser = (*BytePool)(nil)
_ Statser = (*BytesPool)(nil)
_ Statser = (*StringsPool)(nil)
)
type Pool[T any] struct {
p *sync.Pool
}
func (p Pool[T]) Put(t T) {
p.p.Put(t)
}
func (p Pool[T]) Get() T {
return p.p.Get().(T)
}
func NewPool[T any](fn func() T) Pool[T] {
return Pool[T]{
p: &sync.Pool{
@ -16,10 +83,155 @@ func NewPool[T any](fn func() T) Pool[T] {
}
}
func (p Pool[T]) Get() T {
return p.p.Get().(T)
type BytePool struct {
p *sync.Pool
get uint64
put uint64
mis uint64
ret uint64
c int
}
func (p Pool[T]) Put(t T) {
p.p.Put(t)
func NewBytePool(size int) *BytePool {
p := &BytePool{c: size}
p.p = &sync.Pool{
New: func() interface{} {
atomic.AddUint64(&p.mis, 1)
b := make([]byte, 0, size)
return &b
},
}
poolsMu.Lock()
pools = append(pools, p)
poolsMu.Unlock()
return p
}
func (p *BytePool) Cap() int {
return p.c
}
func (p *BytePool) Stats() Stats {
return Stats{
Put: atomic.LoadUint64(&p.put),
Get: atomic.LoadUint64(&p.get),
Mis: atomic.LoadUint64(&p.mis),
Ret: atomic.LoadUint64(&p.ret),
}
}
func (p *BytePool) Get() *[]byte {
atomic.AddUint64(&p.get, 1)
return p.p.Get().(*[]byte)
}
func (p *BytePool) Put(b *[]byte) {
atomic.AddUint64(&p.put, 1)
if cap(*b) > p.c {
atomic.AddUint64(&p.ret, 1)
return
}
*b = (*b)[:0]
p.p.Put(b)
}
type BytesPool struct {
p *sync.Pool
get uint64
put uint64
mis uint64
ret uint64
c int
}
func NewBytesPool(size int) *BytesPool {
p := &BytesPool{c: size}
p.p = &sync.Pool{
New: func() interface{} {
atomic.AddUint64(&p.mis, 1)
b := bytes.NewBuffer(make([]byte, 0, size))
return b
},
}
poolsMu.Lock()
pools = append(pools, p)
poolsMu.Unlock()
return p
}
func (p *BytesPool) Cap() int {
return p.c
}
func (p *BytesPool) Stats() Stats {
return Stats{
Put: atomic.LoadUint64(&p.put),
Get: atomic.LoadUint64(&p.get),
Mis: atomic.LoadUint64(&p.mis),
Ret: atomic.LoadUint64(&p.ret),
}
}
func (p *BytesPool) Get() *bytes.Buffer {
atomic.AddUint64(&p.get, 1)
return p.p.Get().(*bytes.Buffer)
}
func (p *BytesPool) Put(b *bytes.Buffer) {
atomic.AddUint64(&p.put, 1)
if (*b).Cap() > p.c {
atomic.AddUint64(&p.ret, 1)
return
}
b.Reset()
p.p.Put(b)
}
type StringsPool struct {
p *sync.Pool
get uint64
put uint64
mis uint64
ret uint64
c int
}
func NewStringsPool(size int) *StringsPool {
p := &StringsPool{c: size}
p.p = &sync.Pool{
New: func() interface{} {
atomic.AddUint64(&p.mis, 1)
return &strings.Builder{}
},
}
poolsMu.Lock()
pools = append(pools, p)
poolsMu.Unlock()
return p
}
func (p *StringsPool) Cap() int {
return p.c
}
func (p *StringsPool) Stats() Stats {
return Stats{
Put: atomic.LoadUint64(&p.put),
Get: atomic.LoadUint64(&p.get),
Mis: atomic.LoadUint64(&p.mis),
Ret: atomic.LoadUint64(&p.ret),
}
}
func (p *StringsPool) Get() *strings.Builder {
atomic.AddUint64(&p.get, 1)
return p.p.Get().(*strings.Builder)
}
func (p *StringsPool) Put(b *strings.Builder) {
atomic.AddUint64(&p.put, 1)
if b.Cap() > p.c {
atomic.AddUint64(&p.ret, 1)
return
}
b.Reset()
p.p.Put(b)
}
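For completeness, a short usage sketch of the pools above; the function and variable names are illustrative only:

func examplePools() {
    // generic pool: the factory supplies new values on a miss
    sp := NewPool(func() *strings.Builder { return &strings.Builder{} })
    sb := sp.Get()
    sb.WriteString("hello")
    sb.Reset()
    sp.Put(sb)

    // sized bytes pool: buffers that grow past the configured cap are dropped
    // (counted in Stats().Ret) instead of being returned to the pool
    bp := NewBytesPool(1024)
    buf := bp.Get()
    buf.WriteString("payload")
    bp.Put(buf)
    _ = bp.Stats() // Get/Put/Mis/Ret counters, also exported periodically via meter.DefaultMeter
}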

View File

@ -2,12 +2,30 @@ package pool
import (
"bytes"
"strings"
"testing"
)
func TestByte(t *testing.T) {
p := NewBytePool(1024)
b := p.Get()
copy(*b, []byte(`test`))
if bytes.Equal(*b, []byte("test")) {
t.Fatal("pool not works")
}
p.Put(b)
b = p.Get()
for i := 0; i < 1500; i++ {
*b = append(*b, []byte(`test`)...)
}
p.Put(b)
st := p.Stats()
if st.Get != 2 && st.Put != 2 && st.Mis != 1 && st.Ret != 1 {
t.Fatalf("pool stats error %#+v", st)
}
}
func TestBytes(t *testing.T) {
p := NewPool(func() *bytes.Buffer { return bytes.NewBuffer(nil) })
p := NewBytesPool(1024)
b := p.Get()
b.Write([]byte(`test`))
if b.String() != "test" {
@ -17,7 +35,7 @@ func TestBytes(t *testing.T) {
}
func TestStrings(t *testing.T) {
p := NewPool(func() *strings.Builder { return &strings.Builder{} })
p := NewStringsPool(20)
b := p.Get()
b.Write([]byte(`test`))
if b.String() != "test" {