Compare commits

...

69 Commits
master...deps

Author SHA1 Message Date
89cad06121 fixup deps
Some checks failed
pr / test (pull_request) Failing after 1m35s
lint / lint (pull_request) Successful in 10m44s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-06 16:45:19 +03:00
0bebf3d59f Merge pull request 'tracer and logger improvements' (#312) from tracer-logger into v3
Reviewed-on: #312
2024-03-06 00:57:01 +03:00
01e05e8df6 tracer and logger improvements
Some checks failed
pr / test (pull_request) Failing after 1m27s
lint / lint (pull_request) Successful in 10m33s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-06 00:53:20 +03:00
2b69a4f51c Merge pull request 'logger/slog: backport default logger keys from master' (#311) from v3-logger into v3
Reviewed-on: #311
2024-03-05 01:54:17 +03:00
4af2b077dd logger/slog: backport default logger keys from master
All checks were successful
pr / test (pull_request) Successful in 1m45s
lint / lint (pull_request) Successful in 10m43s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-04 23:58:31 +03:00
de4418189d Merge pull request 'add missing option' (#309) from logger-stacktrace into v3
Reviewed-on: #309
2024-03-04 23:04:50 +03:00
2c44550897 add missing option
All checks were successful
pr / test (pull_request) Successful in 1m46s
lint / lint (pull_request) Successful in 10m49s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-04 23:03:55 +03:00
99b8a3c950 Merge pull request 'logger/slog: add stacktrace support' (#308) from logger-stacktrace into v3
Reviewed-on: #308
2024-03-04 23:00:35 +03:00
4c7e1607d4 logger/slog: add stacktrace support
Some checks failed
pr / test (pull_request) Failing after 1m28s
lint / lint (pull_request) Successful in 10m40s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-04 22:54:11 +03:00
897be419b4 Merge pull request 'broker noop implementation' (#307) from noops into v3
Reviewed-on: #307
2024-03-04 01:15:16 +03:00
81b9a4341f logger: extend interface, fix tests
All checks were successful
pr / test (pull_request) Successful in 1m35s
lint / lint (pull_request) Successful in 10m40s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-04 01:09:08 +03:00
d3bb2f7236 broker/noop: add initial implementation
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-04 01:05:40 +03:00
97fd62cb21 Merge pull request 'register/noop: add noop register' (#306) from register-noop into v3
Reviewed-on: #306
2024-03-01 21:40:01 +03:00
3cd8bc33d6 fixup test
Some checks failed
pr / test (pull_request) Failing after 1m31s
lint / lint (pull_request) Successful in 10m44s
2024-03-01 21:39:31 +03:00
f6f67af8d0 register/noop: add noop register
Some checks failed
pr / test (pull_request) Failing after 1m34s
lint / lint (pull_request) Successful in 11m0s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-29 23:58:11 +03:00
2d5acaca2f Merge pull request 'server: add GracefulTimeout option' (#304) from graceful into v3
Reviewed-on: #304
2024-02-29 23:24:43 +03:00
0674df3d9f update workflow
Some checks failed
pr / test (pull_request) Failing after 1m40s
lint / lint (pull_request) Successful in 11m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-29 23:23:51 +03:00
2c282825ce fixup
Some checks failed
pr / test (pull_request) Failing after 1m38s
lint / lint (pull_request) Failing after 1m47s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-29 23:17:36 +03:00
e87ff942bb bump gomod
Some checks failed
lint / lint (pull_request) Failing after 1m40s
pr / test (pull_request) Failing after 1m44s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-29 23:03:39 +03:00
0459ea0613 fixup
Some checks failed
lint / lint (pull_request) Failing after 1m38s
pr / test (pull_request) Failing after 1m38s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-29 22:40:05 +03:00
d44a75d074 add gracefultimeout in server 2024-02-29 22:35:55 +03:00
ccf92eb84d As for interface casting
Co-authored-by: Gorbunov Kirill Andreevich <kgorbunov@mtsbank.ru>
Reviewed-on: #299
Co-authored-by: Кирилл Горбунов <kirya_gorbunov_2015@mail.ru>
Co-committed-by: Кирилл Горбунов <kirya_gorbunov_2015@mail.ru>
2024-02-27 23:35:49 +03:00
6baf1f2744 Merge pull request 'logger/slog: fixup race condition' (#292) from log into v3
Reviewed-on: #292
2024-02-22 08:58:40 +03:00
8e2eafde9c logger/slog: fixup race condition
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-22 08:57:21 +03:00
c2b97b0f20 fixup logger/slog
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-15 10:01:41 +03:00
1db017d966 Merge pull request 'logger/slog: fixup old format' (#291) from fixupslog into v3
Reviewed-on: #291
2024-02-08 08:44:23 +03:00
debf8cb03d logger/slog: fixup old format
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-08 08:43:53 +03:00
1dc9c1891f Merge pull request 'logger/slog: initial import' (#290) from slog into v3
Reviewed-on: #290
2024-02-08 08:18:57 +03:00
930859a537 logger/slog: initial import
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-08 08:17:53 +03:00
3141f1ed8b Merge pull request 'config: add conditions' (#286) from cond-config into v3
Reviewed-on: #286
2024-01-15 00:46:37 +03:00
47943cfb05 config: add conditions
Some checks failed
lint / lint (pull_request) Successful in 1m28s
pr / test (pull_request) Failing after 1m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-01-15 00:46:00 +03:00
ed4e9d54b1 Merge pull request 'client/noop: fixup md' (#285) from noopfix into v3
Reviewed-on: #285
2023-12-21 00:14:54 +03:00
b4b8583594 client/noop: fixup md
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m45s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-21 00:13:08 +03:00
fb43e8c58c Merge pull request 'client/noop: fix metadata overwrite' (#284) from noopfix into v3
Reviewed-on: #284
2023-12-21 00:07:22 +03:00
8863c10ef4 client/noop: fix metadata overwrite
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-21 00:06:56 +03:00
8058095bcc Merge pull request 'copy incoming content-type' (#283) from ct into v3
Reviewed-on: #283
2023-12-20 09:35:33 +03:00
092f5d96b1 copy incoming content-type
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m33s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-20 09:35:01 +03:00
84552513f7 Merge pull request 'fixup multiple client handling' (#280) from multiple into v3
Reviewed-on: #280
2023-11-13 08:20:52 +03:00
80a2db264e fixup multiple client handling
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m35s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-13 08:19:44 +03:00
0be09c8b3e Merge pull request 'database: add FormatDSN' (#278) from database-newv3 into v3
Reviewed-on: #278
2023-11-02 01:35:25 +03:00
047f479e1b database: add FormatDSN
Some checks failed
lint / lint (pull_request) Failing after 1m27s
pr / test (pull_request) Failing after 2m39s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-02 01:32:26 +03:00
8f757c953e Merge pull request 'database: initial import for dsn parsing' (#276) from databasev3 into v3
Reviewed-on: #276
2023-11-01 23:44:17 +03:00
5f1c673a24 database: initial import for dsn parsing
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-01 23:42:48 +03:00
6794ea9871 Merge pull request 'client/noop: fix MessageMetadata option' (#274) from client-noop-metadata into v3
Reviewed-on: #274
2023-10-26 03:07:12 +03:00
089e7b6812 client/noop: fix MessageMetadata option
All checks were successful
lint / lint (pull_request) Successful in 1m18s
pr / test (pull_request) Successful in 1m1s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-26 03:06:39 +03:00
1c703f0f0c Merge pull request 'errors: add IsRetrayable func' (#273) from errors into v3
Reviewed-on: #273
2023-10-25 10:24:58 +03:00
d167c8c67c cleanup
All checks were successful
lint / lint (pull_request) Successful in 1m7s
pr / test (pull_request) Successful in 1m2s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-25 02:36:52 +03:00
df4f96a2d8 errors: add IsRetrayable func
All checks were successful
lint / lint (pull_request) Successful in 1m18s
pr / test (pull_request) Successful in 1m3s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-23 02:35:10 +03:00
fac3b20bd4 Merge pull request 'util/reflect: add Equal func with ability to skip some fields' (#244) from util-reflect into v3
Reviewed-on: #244
2023-09-12 11:45:26 +03:00
7c6bd98498 util/reflect: add Equal func with ability to skip some fields
All checks were successful
pr / test (pull_request) Successful in 1m4s
lint / lint (pull_request) Successful in 1m10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-12 10:31:45 +03:00
23e1174f25 Merge pull request 'tracer: improve' (#241) from tracing into v3
Reviewed-on: #241
2023-09-08 13:40:51 +03:00
52bed214cf tracer: improve
Some checks failed
lint / lint (pull_request) Failing after 1m31s
pr / test (pull_request) Failing after 2m44s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-08 13:40:01 +03:00
64c4f5f47e Merge pull request 'tracer: tweaks for span tags and naming' (#239) from tracing into v3
Reviewed-on: #239
2023-09-01 14:58:15 +03:00
036c612137 tracer: tweaks for span tags and naming
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 14:58:15 +03:00
ca80e3ecf2 Merge pull request 'tracer: improve tracing info' (#238) from tracing into v3
Reviewed-on: #238
2023-09-01 08:41:46 +03:00
18e7bb41ca tracer: improve tracing info
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m37s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 08:41:23 +03:00
8e72fb1c35 Merge pull request 'add util/test' (#235) from util-test into v3
Reviewed-on: #235
2023-08-07 18:35:31 +03:00
17f21a03f4 add util/test
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m35s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-08-07 18:33:23 +03:00
a076d43a26 add util/test
Some checks failed
lint / lint (pull_request) Failing after 1m31s
pr / test (pull_request) Failing after 2m33s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-08-07 18:32:29 +03:00
de6efaee0b Merge pull request 'config/default: add micro:generate uuid/id' (#232) from config-default-gen into v3
Reviewed-on: #232
2023-07-13 20:27:13 +03:00
9e0e657003 config/default: add micro:generate uuid/id
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m35s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-07-13 20:26:47 +03:00
be5f9ab77f Merge pull request 'tracer: add Flush method' (#225) from traceimp into v3
Reviewed-on: #225
2023-07-04 00:26:33 +03:00
144dca0cae tracer: add Flush method
Some checks failed
pr / test (pull_request) Failing after 2m42s
lint / lint (pull_request) Failing after 1m29s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-07-04 00:25:41 +03:00
75173560e3 Merge pull request 'util/time: ParseDuration fix' (#222) from timefix into v3
Reviewed-on: #222
2023-05-29 14:04:41 +03:00
9b3bccd1f1 util/time: ParseDuration fix
All checks were successful
lint / lint (pull_request) Successful in 1m0s
pr / test (pull_request) Successful in 58s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-29 14:02:06 +03:00
ce125b77c1 Merge pull request 'util/time: fix duration parsing' (#219) from timefeature into v3
Reviewed-on: #219
2023-05-27 23:55:51 +03:00
2ee8d4ed46 util/time: fix duration parsing
Some checks failed
lint / lint (pull_request) Successful in 59s
pr / test (pull_request) Failing after 1m0s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-27 23:55:08 +03:00
f58781d076 Merge pull request 'server/noop: fix graceful unsubscribe' (#218) from unsubfix into v3
Reviewed-on: #218
2023-05-25 23:19:26 +03:00
e1af4aa3a4 server/noop: fix graceful unsubscribe
All checks were successful
pr / test (pull_request) Successful in 1m2s
lint / lint (pull_request) Successful in 59s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-25 23:18:47 +03:00
54 changed files with 3718 additions and 725 deletions

View File

@@ -10,11 +10,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: setup-go
-        uses: https://gitea.com/actions/setup-go@v3
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.18
+          go-version: 1.21
       - name: checkout
-        uses: https://gitea.com/actions/checkout@v3
+        uses: actions/checkout@v3
      - name: deps
        run: go get -v -d ./...
      - name: lint

View File

@@ -10,11 +10,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: checkout
-        uses: https://gitea.com/actions/checkout@v3
+        uses: actions/checkout@v3
       - name: setup-go
-        uses: https://gitea.com/actions/setup-go@v3
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.18
+          go-version: 1.21
      - name: deps
        run: go get -v -t -d ./...
      - name: test

View File

@@ -4,6 +4,7 @@ import (
     "context"
     "sync"

+    "go.unistack.org/micro/v3/broker"
     "go.unistack.org/micro/v3/logger"
     "go.unistack.org/micro/v3/metadata"
     maddr "go.unistack.org/micro/v3/util/addr"
@@ -15,7 +16,7 @@ import (
 type memoryBroker struct {
     subscribers map[string][]*memorySubscriber
     addr        string
-    opts        Options
+    opts        broker.Options
     sync.RWMutex
     connected bool
 }
@@ -24,20 +25,20 @@ type memoryEvent struct {
     err     error
     message interface{}
     topic   string
-    opts    Options
+    opts    broker.Options
 }

 type memorySubscriber struct {
     ctx          context.Context
     exit         chan bool
-    handler      Handler
-    batchhandler BatchHandler
+    handler      broker.Handler
+    batchhandler broker.BatchHandler
     id           string
     topic        string
-    opts         SubscribeOptions
+    opts         broker.SubscribeOptions
 }

-func (m *memoryBroker) Options() Options {
+func (m *memoryBroker) Options() broker.Options {
     return m.opts
 }
@@ -46,6 +47,12 @@ func (m *memoryBroker) Address() string {
 }

 func (m *memoryBroker) Connect(ctx context.Context) error {
+    select {
+    case <-ctx.Done():
+        return ctx.Err()
+    default:
+    }
+
     m.Lock()
     defer m.Unlock()
@@ -70,6 +77,12 @@ func (m *memoryBroker) Connect(ctx context.Context) error {
 }

 func (m *memoryBroker) Disconnect(ctx context.Context) error {
+    select {
+    case <-ctx.Done():
+        return ctx.Err()
+    default:
+    }
+
     m.Lock()
     defer m.Unlock()
@@ -81,27 +94,27 @@ func (m *memoryBroker) Disconnect(ctx context.Context) error {
     return nil
 }

-func (m *memoryBroker) Init(opts ...Option) error {
+func (m *memoryBroker) Init(opts ...broker.Option) error {
     for _, o := range opts {
         o(&m.opts)
     }
     return nil
 }

-func (m *memoryBroker) Publish(ctx context.Context, topic string, msg *Message, opts ...PublishOption) error {
+func (m *memoryBroker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
     msg.Header.Set(metadata.HeaderTopic, topic)
-    return m.publish(ctx, []*Message{msg}, opts...)
+    return m.publish(ctx, []*broker.Message{msg}, opts...)
 }

-func (m *memoryBroker) BatchPublish(ctx context.Context, msgs []*Message, opts ...PublishOption) error {
+func (m *memoryBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
     return m.publish(ctx, msgs, opts...)
 }

-func (m *memoryBroker) publish(ctx context.Context, msgs []*Message, opts ...PublishOption) error {
+func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
     m.RLock()
     if !m.connected {
         m.RUnlock()
-        return ErrNotConnected
+        return broker.ErrNotConnected
     }
     m.RUnlock()
@@ -111,9 +124,9 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*Message, opts ...Pub
     case <-ctx.Done():
         return ctx.Err()
     default:
-        options := NewPublishOptions(opts...)
+        options := broker.NewPublishOptions(opts...)

-        msgTopicMap := make(map[string]Events)
+        msgTopicMap := make(map[string]broker.Events)
         for _, v := range msgs {
             p := &memoryEvent{opts: m.opts}
@@ -188,11 +201,11 @@ func (m *memoryBroker) publish(ctx context.Context, msgs []*Message, opts ...Pub
     return nil
 }

-func (m *memoryBroker) BatchSubscribe(ctx context.Context, topic string, handler BatchHandler, opts ...SubscribeOption) (Subscriber, error) {
+func (m *memoryBroker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
     m.RLock()
     if !m.connected {
         m.RUnlock()
-        return nil, ErrNotConnected
+        return nil, broker.ErrNotConnected
     }
     m.RUnlock()
@@ -201,7 +214,7 @@ func (m *memoryBroker) BatchSubscribe(ctx context.Context, topic string, handler
         return nil, err
     }

-    options := NewSubscribeOptions(opts...)
+    options := broker.NewSubscribeOptions(opts...)

     sub := &memorySubscriber{
         exit: make(chan bool, 1),
@@ -233,11 +246,11 @@ func (m *memoryBroker) BatchSubscribe(ctx context.Context, topic string, handler
     return sub, nil
 }

-func (m *memoryBroker) Subscribe(ctx context.Context, topic string, handler Handler, opts ...SubscribeOption) (Subscriber, error) {
+func (m *memoryBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
     m.RLock()
     if !m.connected {
         m.RUnlock()
-        return nil, ErrNotConnected
+        return nil, broker.ErrNotConnected
     }
     m.RUnlock()
@@ -246,7 +259,7 @@ func (m *memoryBroker) Subscribe(ctx context.Context, topic string, handler Hand
         return nil, err
     }

-    options := NewSubscribeOptions(opts...)
+    options := broker.NewSubscribeOptions(opts...)

     sub := &memorySubscriber{
         exit: make(chan bool, 1),
@@ -290,12 +303,12 @@ func (m *memoryEvent) Topic() string {
     return m.topic
 }

-func (m *memoryEvent) Message() *Message {
+func (m *memoryEvent) Message() *broker.Message {
     switch v := m.message.(type) {
-    case *Message:
+    case *broker.Message:
         return v
     case []byte:
-        msg := &Message{}
+        msg := &broker.Message{}
         if err := m.opts.Codec.Unmarshal(v, msg); err != nil {
             if m.opts.Logger.V(logger.ErrorLevel) {
                 m.opts.Logger.Error(m.opts.Context, "[memory]: failed to unmarshal: %v", err)
@@ -320,7 +333,7 @@ func (m *memoryEvent) SetError(err error) {
     m.err = err
 }

-func (m *memorySubscriber) Options() SubscribeOptions {
+func (m *memorySubscriber) Options() broker.SubscribeOptions {
     return m.opts
 }
@@ -334,9 +347,9 @@ func (m *memorySubscriber) Unsubscribe(ctx context.Context) error {
 }

 // NewBroker return new memory broker
-func NewBroker(opts ...Option) Broker {
+func NewBroker(opts ...broker.Option) broker.Broker {
     return &memoryBroker{
-        opts:        NewOptions(opts...),
+        opts:        broker.NewOptions(opts...),
         subscribers: make(map[string][]*memorySubscriber),
     }
 }

View File

@@ -5,6 +5,7 @@ import (
     "fmt"
     "testing"

+    "go.unistack.org/micro/v3/broker"
     "go.unistack.org/micro/v3/metadata"
 )
@@ -19,7 +20,7 @@ func TestMemoryBatchBroker(t *testing.T) {
     topic := "test"
     count := 10

-    fn := func(evts Events) error {
+    fn := func(evts broker.Events) error {
         return evts.Ack()
     }
@@ -28,9 +29,9 @@ func TestMemoryBatchBroker(t *testing.T) {
         t.Fatalf("Unexpected error subscribing %v", err)
     }

-    msgs := make([]*Message, 0, count)
+    msgs := make([]*broker.Message, 0, count)
     for i := 0; i < count; i++ {
-        message := &Message{
+        message := &broker.Message{
             Header: map[string]string{
                 metadata.HeaderTopic: topic,
                 "foo":                "bar",
@@ -65,7 +66,7 @@ func TestMemoryBroker(t *testing.T) {
     topic := "test"
     count := 10

-    fn := func(p Event) error {
+    fn := func(p broker.Event) error {
         return nil
     }
@@ -74,9 +75,9 @@ func TestMemoryBroker(t *testing.T) {
         t.Fatalf("Unexpected error subscribing %v", err)
     }

-    msgs := make([]*Message, 0, count)
+    msgs := make([]*broker.Message, 0, count)
     for i := 0; i < count; i++ {
-        message := &Message{
+        message := &broker.Message{
             Header: map[string]string{
                 metadata.HeaderTopic: topic,
                 "foo":                "bar",

82
broker/noop.go Normal file
View File

@@ -0,0 +1,82 @@
package broker
import (
"context"
"strings"
)
type NoopBroker struct {
opts Options
}
func NewBroker(opts ...Option) *NoopBroker {
b := &NoopBroker{opts: NewOptions(opts...)}
return b
}
func (b *NoopBroker) Name() string {
return b.opts.Name
}
func (b *NoopBroker) String() string {
return "noop"
}
func (b *NoopBroker) Options() Options {
return b.opts
}
func (b *NoopBroker) Init(opts ...Option) error {
for _, opt := range opts {
opt(&b.opts)
}
return nil
}
func (b *NoopBroker) Connect(_ context.Context) error {
return nil
}
func (b *NoopBroker) Disconnect(_ context.Context) error {
return nil
}
func (b *NoopBroker) Address() string {
return strings.Join(b.opts.Addrs, ",")
}
func (b *NoopBroker) BatchPublish(_ context.Context, _ []*Message, _ ...PublishOption) error {
return nil
}
func (b *NoopBroker) Publish(_ context.Context, _ string, _ *Message, _ ...PublishOption) error {
return nil
}
type NoopSubscriber struct {
ctx context.Context
topic string
handler Handler
batchHandler BatchHandler
opts SubscribeOptions
}
func (b *NoopBroker) BatchSubscribe(ctx context.Context, topic string, handler BatchHandler, opts ...SubscribeOption) (Subscriber, error) {
return &NoopSubscriber{ctx: ctx, topic: topic, opts: NewSubscribeOptions(opts...), batchHandler: handler}, nil
}
func (b *NoopBroker) Subscribe(ctx context.Context, topic string, handler Handler, opts ...SubscribeOption) (Subscriber, error) {
return &NoopSubscriber{ctx: ctx, topic: topic, opts: NewSubscribeOptions(opts...), handler: handler}, nil
}
func (s *NoopSubscriber) Options() SubscribeOptions {
return s.opts
}
func (s *NoopSubscriber) Topic() string {
return s.topic
}
func (s *NoopSubscriber) Unsubscribe(ctx context.Context) error {
return nil
}
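Note: a minimal usage sketch of the new noop broker above; the broker.Event handler shape and the Message header literal follow the memory broker tests elsewhere in this changeset, and the rest is illustrative rather than part of the diff.

package main

import (
    "context"

    "go.unistack.org/micro/v3/broker"
)

func main() {
    ctx := context.Background()

    b := broker.NewBroker() // returns *NoopBroker
    if err := b.Connect(ctx); err != nil {
        panic(err) // Connect is a no-op and always returns nil
    }

    // handler signature mirrors the one used in the memory broker tests
    sub, err := b.Subscribe(ctx, "test", func(e broker.Event) error { return nil })
    if err != nil {
        panic(err)
    }
    defer sub.Unsubscribe(ctx)

    // the noop implementation accepts and silently drops the message
    _ = b.Publish(ctx, "test", &broker.Message{Header: map[string]string{"foo": "bar"}})
}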

View File

@@ -3,6 +3,7 @@ package client
 import (
     "context"
     "fmt"
+    "os"
     "time"

     "go.unistack.org/micro/v3/broker"
@@ -485,21 +486,34 @@ func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishO
     msgs := make([]*broker.Message, 0, len(ps))

-    for _, p := range ps {
-        md, ok := metadata.FromOutgoingContext(ctx)
-        if !ok {
-            md = metadata.New(0)
-        }
-        md[metadata.HeaderContentType] = p.ContentType()
-
-        topic := p.Topic()
+    // get proxy
+    exchange := ""
+    if v, ok := os.LookupEnv("MICRO_PROXY"); ok {
+        exchange = v
+    }
+
     // get the exchange
     if len(options.Exchange) > 0 {
-        topic = options.Exchange
+        exchange = options.Exchange
     }

+    omd, ok := metadata.FromOutgoingContext(ctx)
+    if !ok {
+        omd = metadata.New(0)
+    }
+
+    for _, p := range ps {
+        md := metadata.Copy(omd)
+        md[metadata.HeaderContentType] = p.ContentType()
+
+        topic := p.Topic()
+        if len(exchange) > 0 {
+            topic = exchange
+        }
         md[metadata.HeaderTopic] = topic

+        iter := p.Metadata().Iterator()
+        var k, v string
+        for iter.Next(&k, &v) {
+            md.Set(k, v)
+        }
+
         var body []byte

View File

@@ -7,7 +7,9 @@ import (
     "strings"
     "time"

-    "github.com/imdario/mergo"
+    "dario.cat/mergo"
+    "github.com/google/uuid"
+    mid "go.unistack.org/micro/v3/util/id"
     rutil "go.unistack.org/micro/v3/util/reflect"
     mtime "go.unistack.org/micro/v3/util/time"
 )
@@ -37,6 +39,10 @@ func (c *defaultConfig) Init(opts ...Option) error {
 }

 func (c *defaultConfig) Load(ctx context.Context, opts ...LoadOption) error {
+    if c.opts.SkipLoad != nil && c.opts.SkipLoad(ctx, c) {
+        return nil
+    }
+
     if err := DefaultBeforeLoad(ctx, c); err != nil && !c.opts.AllowFail {
         return err
     }
@@ -124,6 +130,20 @@ func fillValue(value reflect.Value, val string) error {
         }
         value.Set(reflect.ValueOf(v))
     case reflect.String:
+        switch val {
+        case "micro:generate uuid":
+            uid, err := uuid.NewRandom()
+            if err != nil {
+                return err
+            }
+            val = uid.String()
+        case "micro:generate id":
+            uid, err := mid.New()
+            if err != nil {
+                return err
+            }
+            val = uid
+        }
         value.Set(reflect.ValueOf(val))
     case reflect.Float32:
         v, err := strconv.ParseFloat(val, 32)
@@ -275,7 +295,11 @@ func fillValues(valueOf reflect.Value, tname string) error {
     return nil
 }

-func (c *defaultConfig) Save(ctx context.Context, opts ...SaveOption) error {
+func (c *defaultConfig) Save(ctx context.Context, _ ...SaveOption) error {
+    if c.opts.SkipSave != nil && c.opts.SkipSave(ctx, c) {
+        return nil
+    }
+
     if err := DefaultBeforeSave(ctx, c); err != nil {
         return err
     }

View File

@@ -7,6 +7,7 @@ import (
     "time"

     "go.unistack.org/micro/v3/config"
+    mid "go.unistack.org/micro/v3/util/id"
     mtime "go.unistack.org/micro/v3/util/time"
 )
@@ -17,6 +18,9 @@ type cfg struct {
     IntValue       int             `default:"99"`
     DurationValue  time.Duration   `default:"10s"`
     MDurationValue mtime.Duration  `default:"10s"`
+    MapValue       map[string]bool `default:"key1=true,key2=false"`
+    UUIDValue      string          `default:"micro:generate uuid"`
+    IDValue        string          `default:"micro:generate id"`
 }

 type cfgStructValue struct {
@@ -67,6 +71,21 @@ func TestDefault(t *testing.T) {
     if conf.StringValue != "after_load" {
         t.Fatal("AfterLoad option not working")
     }
+
+    if len(conf.MapValue) != 2 {
+        t.Fatalf("map value invalid: %#+v\n", conf.MapValue)
+    }
+    if conf.UUIDValue == "" {
+        t.Fatalf("uuid value empty")
+    } else if len(conf.UUIDValue) != 36 {
+        t.Fatalf("uuid value invalid: %s", conf.UUIDValue)
+    }
+    if conf.IDValue == "" {
+        t.Fatalf("id value empty")
+    } else if len(conf.IDValue) != mid.DefaultSize {
+        t.Fatalf("id value invalid: %s", conf.IDValue)
+    }
+
     _ = conf
     // t.Logf("%#+v\n", conf)
 }
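Note: a minimal sketch of how the new generate defaults might be consumed from application code; the AppConfig type and field names are illustrative, and the config.NewConfig/config.Struct option usage is an assumption that is not shown in this diff.

package main

import (
    "context"
    "fmt"

    "go.unistack.org/micro/v3/config"
)

type AppConfig struct {
    // hypothetical fields, filled on Load when left empty
    InstanceUUID string `default:"micro:generate uuid"`
    InstanceID   string `default:"micro:generate id"`
}

func main() {
    conf := &AppConfig{}
    // assumption: the default config source is created with NewConfig and a Struct option
    c := config.NewConfig(config.Struct(conf))
    if err := c.Init(); err != nil {
        panic(err)
    }
    if err := c.Load(context.Background()); err != nil {
        panic(err)
    }
    fmt.Println(conf.InstanceUUID, conf.InstanceID)
}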

View File

@@ -42,6 +42,10 @@ type Options struct {
     AfterInit []func(context.Context, Config) error
     // AllowFail flag to allow fail in config source
     AllowFail bool
+    // SkipLoad runs only if condition returns true
+    SkipLoad func(context.Context, Config) bool
+    // SkipSave runs only if condition returns true
+    SkipSave func(context.Context, Config) bool
 }

 // Option function signature
@@ -68,9 +72,9 @@ type LoadOption func(o *LoadOptions)
 // LoadOptions struct
 type LoadOptions struct {
     Struct   interface{}
+    Context  context.Context
     Override bool
     Append   bool
-    Context  context.Context
 }
// NewLoadOptions create LoadOptions struct with provided opts // NewLoadOptions create LoadOptions struct with provided opts

157
database/dsn.go Normal file
View File

@@ -0,0 +1,157 @@
package database
import (
"crypto/tls"
"errors"
"fmt"
"net/url"
"strings"
)
var (
ErrInvalidDSNAddr = errors.New("invalid dsn addr")
ErrInvalidDSNUnescaped = errors.New("dsn must be escaped")
ErrInvalidDSNNoSlash = errors.New("dsn must contains slash")
)
type Config struct {
TLSConfig *tls.Config
Username string
Password string
Scheme string
Host string
Port string
Database string
Params []string
}
func (cfg *Config) FormatDSN() string {
var s strings.Builder
if len(cfg.Scheme) > 0 {
s.WriteString(cfg.Scheme + "://")
}
// [username[:password]@]
if len(cfg.Username) > 0 {
s.WriteString(cfg.Username)
if len(cfg.Password) > 0 {
s.WriteByte(':')
s.WriteString(url.PathEscape(cfg.Password))
}
s.WriteByte('@')
}
// [host:port]
if len(cfg.Host) > 0 {
s.WriteString(cfg.Host)
if len(cfg.Port) > 0 {
s.WriteByte(':')
s.WriteString(cfg.Port)
}
}
// /dbname
s.WriteByte('/')
s.WriteString(url.PathEscape(cfg.Database))
for i := 0; i < len(cfg.Params); i += 2 {
if i == 0 {
s.WriteString("?")
} else {
s.WriteString("&")
}
s.WriteString(cfg.Params[i])
s.WriteString("=")
s.WriteString(cfg.Params[i+1])
}
return s.String()
}
func ParseDSN(dsn string) (*Config, error) {
cfg := &Config{}
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find last '/' that goes before dbname
foundSlash := false
for i := len(dsn) - 1; i >= 0; i-- {
if dsn[i] == '/' {
foundSlash = true
var j, k int
// left part is empty if i <= 0
if i > 0 {
// Find the first ':' in dsn
for j = i; j >= 0; j-- {
if dsn[j] == ':' {
cfg.Scheme = dsn[0:j]
}
}
// [username[:password]@][host]
// Find the last '@' in dsn[:i]
for j = i; j >= 0; j-- {
if dsn[j] == '@' {
// username[:password]
// Find the second ':' in dsn[:j]
for k = 0; k < j; k++ {
if dsn[k] == ':' {
if cfg.Scheme == dsn[:k] {
continue
}
var err error
cfg.Password, err = url.PathUnescape(dsn[k+1 : j])
if err != nil {
return nil, err
}
break
}
}
cfg.Username = dsn[len(cfg.Scheme)+3 : k]
break
}
}
for k = j + 1; k < i; k++ {
if dsn[k] == ':' {
cfg.Host = dsn[j+1 : k]
cfg.Port = dsn[k+1 : i]
break
}
}
}
// dbname[?param1=value1&...&paramN=valueN]
// Find the first '?' in dsn[i+1:]
for j = i + 1; j < len(dsn); j++ {
if dsn[j] == '?' {
parts := strings.Split(dsn[j+1:], "&")
cfg.Params = make([]string, 0, len(parts)*2)
for _, p := range parts {
k, v, found := strings.Cut(p, "=")
if !found {
continue
}
cfg.Params = append(cfg.Params, k, v)
}
break
}
}
var err error
dbname := dsn[i+1 : j]
if cfg.Database, err = url.PathUnescape(dbname); err != nil {
return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
}
break
}
}
if !foundSlash && len(dsn) > 0 {
return nil, ErrInvalidDSNNoSlash
}
return cfg, nil
}

31
database/dsn_test.go Normal file
View File

@@ -0,0 +1,31 @@
package database
import (
"net/url"
"testing"
)
func TestParseDSN(t *testing.T) {
cfg, err := ParseDSN("postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2")
if err != nil {
t.Fatal(err)
}
if cfg.Password != "p@ssword#" {
t.Fatalf("parsing error")
}
}
func TestFormatDSN(t *testing.T) {
src := "postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2"
cfg, err := ParseDSN(src)
if err != nil {
t.Fatal(err)
}
dst, err := url.PathUnescape(cfg.FormatDSN())
if err != nil {
t.Fatal(err)
}
if src != dst {
t.Fatalf("\n%s\n%s", src, dst)
}
}
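Note: a short sketch of assembling a DSN directly from a Config, complementing the ParseDSN round-trip test above; the field values are illustrative.

package main

import (
    "fmt"

    "go.unistack.org/micro/v3/database"
)

func main() {
    cfg := &database.Config{
        Scheme:   "postgres",
        Username: "username",
        Password: "p@ssword#", // escaped via url.PathEscape by FormatDSN
        Host:     "host",
        Port:     "12345",
        Database: "dbname",
        // Params is a flat key/value slice: key1, val1, key2, val2, ...
        Params: []string{"sslmode", "disable"},
    }
    fmt.Println(cfg.FormatDSN()) // prints the assembled DSN
}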

View File

@@ -4,11 +4,17 @@ package errors // import "go.unistack.org/micro/v3/errors"
 import (
     "bytes"
+    "context"
     "encoding/json"
+    "errors"
     "fmt"
+    "io"
     "net/http"
     "strconv"
     "strings"
+
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
 )

 var (
@@ -340,3 +346,135 @@ func addslashes(str string) string {
     }
     return buf.String()
 }
type retryableError struct {
err error
}
// Retryable returns error that can be retried later
func Retryable(err error) error {
return &retryableError{err: err}
}
type IsRetryableFunc func(error) bool
var (
RetrayableOracleErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `ORA-`):
return true
case strings.Contains(errmsg, `can not assign`):
return true
case strings.Contains(errmsg, `can't assign`):
return true
}
return false
},
}
RetrayablePostgresErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `number of field descriptions must equal number of`):
return true
case strings.Contains(errmsg, `not a pointer`):
return true
case strings.Contains(errmsg, `values, but dst struct has only`):
return true
case strings.Contains(errmsg, `struct doesn't have corresponding row field`):
return true
case strings.Contains(errmsg, `cannot find field`):
return true
case strings.Contains(errmsg, `cannot scan`) || strings.Contains(errmsg, `cannot convert`):
return true
case strings.Contains(errmsg, `failed to connect to`):
return true
}
return false
},
}
RetryableMicroErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case *Error:
switch verr.Code {
case 401, 403, 408, 500, 501, 502, 503, 504:
return true
default:
return false
}
case *retryableError:
return true
}
return false
},
}
RetryableGoErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case interface{ SafeToRetry() bool }:
return verr.SafeToRetry()
case interface{ Timeout() bool }:
return verr.Timeout()
}
switch {
case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF):
return true
case errors.Is(err, context.DeadlineExceeded):
return true
case errors.Is(err, io.ErrClosedPipe), errors.Is(err, io.ErrShortBuffer), errors.Is(err, io.ErrShortWrite):
return true
}
return false
},
}
RetryableGrpcErrors = []IsRetryableFunc{
func(err error) bool {
st, ok := status.FromError(err)
if !ok {
return false
}
switch st.Code() {
case codes.Unavailable, codes.ResourceExhausted:
return true
case codes.DeadlineExceeded:
return true
case codes.Internal:
switch {
case strings.Contains(st.Message(), `transport: received the unexpected content-type "text/html; charset=UTF-8"`):
return true
case strings.Contains(st.Message(), io.ErrUnexpectedEOF.Error()):
return true
case strings.Contains(st.Message(), `stream terminated by RST_STREAM with error code: INTERNAL_ERROR`):
return true
}
}
return false
},
}
)
// Unwrap provides error wrapping
func (e *retryableError) Unwrap() error {
return e.err
}
// Error returns the error string
func (e *retryableError) Error() string {
if e.err == nil {
return ""
}
return e.err.Error()
}
// IsRetryable checks error for ability to retry later
func IsRetryable(err error, fns ...IsRetryableFunc) bool {
for _, fn := range fns {
if ok := fn(err); ok {
return true
}
}
return false
}
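Note: a minimal sketch of using the retryable error helpers added above; error values here are illustrative.

package main

import (
    "fmt"
    "io"

    "go.unistack.org/micro/v3/errors"
)

func main() {
    // mark an arbitrary error as retryable
    err := errors.Retryable(fmt.Errorf("temporary failure"))
    // matched by the *retryableError case in RetryableMicroErrors
    fmt.Println(errors.IsRetryable(err, errors.RetryableMicroErrors...)) // true

    // classify a plain Go error with the predefined matchers
    fmt.Println(errors.IsRetryable(io.ErrUnexpectedEOF, errors.RetryableGoErrors...)) // true
}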

View File

@@ -8,6 +8,13 @@ import (
     "testing"
 )

+func TestIsRetrayable(t *testing.T) {
+    err := fmt.Errorf("ORA-")
+    if !IsRetryable(err, RetrayableOracleErrors...) {
+        t.Fatalf("IsRetrayable not works")
+    }
+}
+
 func TestMarshalJSON(t *testing.T) {
     e := InternalServerError("id", "err: %v", fmt.Errorf("err: %v", `xxx: "UNIX_TIMESTAMP": invalid identifier`))
     _, err := json.Marshal(e)

17
go.mod
View File

@@ -1,9 +1,20 @@
 module go.unistack.org/micro/v3

-go 1.19
+go 1.20

 require (
-    github.com/imdario/mergo v0.3.14
+    dario.cat/mergo v1.0.0
+    github.com/DATA-DOG/go-sqlmock v1.5.0
+    github.com/google/uuid v1.3.0
     github.com/patrickmn/go-cache v2.1.0+incompatible
-    github.com/silas/dag v0.0.0-20211117232152-9d50aa809f35
+    github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
+    golang.org/x/sync v0.3.0
+    google.golang.org/grpc v1.57.0
+    google.golang.org/protobuf v1.31.0
+)
+
+require (
+    github.com/golang/protobuf v1.5.3 // indirect
+    golang.org/x/net v0.14.0 // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e // indirect
 )

32
go.sum
View File

@@ -1,9 +1,33 @@
-github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo=
-github.com/imdario/mergo v0.3.14/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/silas/dag v0.0.0-20211117232152-9d50aa809f35 h1:4mohWoM/UGg1BvFFiqSPRl5uwJY3rVV0HQX0ETqauqQ=
-github.com/silas/dag v0.0.0-20211117232152-9d50aa809f35/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
+github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
+github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e h1:NumxXLPfHSndr3wBBdeKiVHjGVFzi9RX2HwwQke94iY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,230 +0,0 @@
package logger
import (
"context"
"encoding/json"
"fmt"
"os"
"runtime"
"strings"
"sync"
"time"
)
type defaultLogger struct {
enc *json.Encoder
opts Options
sync.RWMutex
}
// Init(opts...) should only overwrite provided options
func (l *defaultLogger) Init(opts ...Option) error {
l.Lock()
for _, o := range opts {
o(&l.opts)
}
l.enc = json.NewEncoder(l.opts.Out)
// wrap the Log func
l.Unlock()
return nil
}
func (l *defaultLogger) String() string {
return "micro"
}
func (l *defaultLogger) Clone(opts ...Option) Logger {
newopts := NewOptions(opts...)
oldopts := l.opts
for _, o := range opts {
o(&newopts)
o(&oldopts)
}
l.Lock()
cl := &defaultLogger{opts: oldopts, enc: json.NewEncoder(l.opts.Out)}
l.Unlock()
return cl
}
func (l *defaultLogger) V(level Level) bool {
l.RLock()
ok := l.opts.Level.Enabled(level)
l.RUnlock()
return ok
}
func (l *defaultLogger) Level(level Level) {
l.Lock()
l.opts.Level = level
l.Unlock()
}
func (l *defaultLogger) Fields(fields ...interface{}) Logger {
l.RLock()
nl := &defaultLogger{opts: l.opts, enc: l.enc}
if len(fields) == 0 {
l.RUnlock()
return nl
} else if len(fields)%2 != 0 {
fields = fields[:len(fields)-1]
}
nl.opts.Fields = copyFields(l.opts.Fields)
nl.opts.Fields = append(nl.opts.Fields, fields...)
l.RUnlock()
return nl
}
func copyFields(src []interface{}) []interface{} {
dst := make([]interface{}, len(src))
copy(dst, src)
return dst
}
// logCallerfilePath returns a package/file:line description of the caller,
// preserving only the leaf directory name and file name.
func logCallerfilePath(loggingFilePath string) string {
// To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
idx := strings.LastIndexByte(loggingFilePath, '/')
if idx == -1 {
return loggingFilePath
}
idx = strings.LastIndexByte(loggingFilePath[:idx], '/')
if idx == -1 {
return loggingFilePath
}
return loggingFilePath[idx+1:]
}
func (l *defaultLogger) Info(ctx context.Context, args ...interface{}) {
l.Log(ctx, InfoLevel, args...)
}
func (l *defaultLogger) Error(ctx context.Context, args ...interface{}) {
l.Log(ctx, ErrorLevel, args...)
}
func (l *defaultLogger) Debug(ctx context.Context, args ...interface{}) {
l.Log(ctx, DebugLevel, args...)
}
func (l *defaultLogger) Warn(ctx context.Context, args ...interface{}) {
l.Log(ctx, WarnLevel, args...)
}
func (l *defaultLogger) Trace(ctx context.Context, args ...interface{}) {
l.Log(ctx, TraceLevel, args...)
}
func (l *defaultLogger) Fatal(ctx context.Context, args ...interface{}) {
l.Log(ctx, FatalLevel, args...)
os.Exit(1)
}
func (l *defaultLogger) Infof(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, InfoLevel, msg, args...)
}
func (l *defaultLogger) Errorf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, ErrorLevel, msg, args...)
}
func (l *defaultLogger) Debugf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, DebugLevel, msg, args...)
}
func (l *defaultLogger) Warnf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, WarnLevel, msg, args...)
}
func (l *defaultLogger) Tracef(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, TraceLevel, msg, args...)
}
func (l *defaultLogger) Fatalf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, FatalLevel, msg, args...)
os.Exit(1)
}
func (l *defaultLogger) Log(ctx context.Context, level Level, args ...interface{}) {
if !l.V(level) {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields = append(fields, "level", level.String())
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields = append(fields, "caller", fmt.Sprintf("%s:%d", logCallerfilePath(file), line))
}
fields = append(fields, "timestamp", time.Now().Format("2006-01-02 15:04:05"))
if len(args) > 0 {
fields = append(fields, "msg", fmt.Sprint(args...))
}
out := make(map[string]interface{}, len(fields)/2)
for i := 0; i < len(fields); i += 2 {
out[fields[i].(string)] = fields[i+1]
}
l.RLock()
_ = l.enc.Encode(out)
l.RUnlock()
}
func (l *defaultLogger) Logf(ctx context.Context, level Level, msg string, args ...interface{}) {
if !l.V(level) {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields = append(fields, "level", level.String())
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields = append(fields, "caller", fmt.Sprintf("%s:%d", logCallerfilePath(file), line))
}
fields = append(fields, "timestamp", time.Now().Format("2006-01-02 15:04:05"))
if len(args) > 0 {
fields = append(fields, "msg", fmt.Sprintf(msg, args...))
} else if msg != "" {
fields = append(fields, "msg", msg)
}
out := make(map[string]interface{}, len(fields)/2)
for i := 0; i < len(fields); i += 2 {
out[fields[i].(string)] = fields[i+1]
}
l.RLock()
_ = l.enc.Encode(out)
l.RUnlock()
}
func (l *defaultLogger) Options() Options {
return l.opts
}
// NewLogger builds a new logger based on options
func NewLogger(opts ...Option) Logger {
l := &defaultLogger{
opts: NewOptions(opts...),
}
l.enc = json.NewEncoder(l.opts.Out)
return l
}

View File

@@ -6,6 +6,10 @@ import (
     "os"
 )

+type ContextAttrFunc func(ctx context.Context) []interface{}
+
+var DefaultContextAttrFuncs []ContextAttrFunc
+
 var (
     // DefaultLogger variable
     DefaultLogger = NewLogger(WithLevel(ParseLevel(os.Getenv("MICRO_LOG_LEVEL"))))
@@ -57,7 +61,9 @@ type Logger interface {
     Log(ctx context.Context, level Level, args ...interface{})
     // Logf logs message with needed level
     Logf(ctx context.Context, level Level, msg string, args ...interface{})
-    // String returns the name of logger
+    // Name returns broker instance name
+    Name() string
+    // String returns the type of logger
     String() string
 }
@@ -65,76 +71,106 @@ type Logger interface {
 type Field interface{}

 // Info writes msg to default logger on info level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Info(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Info(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Info(ctx, args...)
 }

 // Error writes msg to default logger on error level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Error(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Error(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Error(ctx, args...)
 }

 // Debug writes msg to default logger on debug level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Debug(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Debug(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debug(ctx, args...)
 }

 // Warn writes msg to default logger on warn level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Warn(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Warn(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warn(ctx, args...)
 }

 // Trace writes msg to default logger on trace level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Trace(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Trace(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Trace(ctx, args...)
 }

 // Fatal writes msg to default logger on fatal level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Fatal(ctx context.Context, args ...interface{}) {
-    DefaultLogger.Fatal(ctx, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatal(ctx, args...)
 }

 // Infof writes formatted msg to default logger on info level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Infof(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Infof(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Infof(ctx, msg, args...)
 }

 // Errorf writes formatted msg to default logger on error level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Errorf(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Errorf(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Errorf(ctx, msg, args...)
 }

 // Debugf writes formatted msg to default logger on debug level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Debugf(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Debugf(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Debugf(ctx, msg, args...)
 }

 // Warnf writes formatted msg to default logger on warn level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Warnf(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Warnf(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Warnf(ctx, msg, args...)
 }

 // Tracef writes formatted msg to default logger on trace level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Tracef(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Tracef(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Tracef(ctx, msg, args...)
 }

 // Fatalf writes formatted msg to default logger on fatal level
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Fatalf(ctx context.Context, msg string, args ...interface{}) {
-    DefaultLogger.Fatalf(ctx, msg, args...)
+    DefaultLogger.Clone(WithCallerSkipCount(DefaultCallerSkipCount+1)).Fatalf(ctx, msg, args...)
 }

 // V returns true if passed level enabled in default logger
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func V(level Level) bool {
     return DefaultLogger.V(level)
 }

 // Init initialize logger
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Init(opts ...Option) error {
     return DefaultLogger.Init(opts...)
 }

 // Fields create logger with specific fields
+//
+// Deprecated: Dont use logger methods directly, use instance of logger to avoid additional allocations
 func Fields(fields ...interface{}) Logger {
     return DefaultLogger.Fields(fields...)
 }
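Note: since the package-level helpers are now deprecated in favor of logger instances, a minimal sketch of the recommended pattern follows; the option names match those used in the existing logger tests, and the rest is illustrative.

package main

import (
    "context"
    "os"

    "go.unistack.org/micro/v3/logger"
)

func main() {
    ctx := context.Background()

    // create and keep a logger instance instead of calling the deprecated package-level helpers
    l := logger.NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(os.Stdout))
    if err := l.Init(); err != nil {
        panic(err)
    }

    l.Info(ctx, "service started")
    l.Fields("svc", "example").Errorf(ctx, "request failed: %v", context.DeadlineExceeded)
}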

View File

@@ -1,138 +0,0 @@
package logger
import (
"bytes"
"context"
"log"
"testing"
)
func TestContext(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl, ok := FromContext(NewContext(ctx, l.Fields("key", "val")))
if !ok {
t.Fatal("context without logger")
}
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFromContextWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
var ok bool
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
ctx = NewContext(ctx, nl)
l, ok = FromContext(ctx)
if !ok {
t.Fatalf("context does not have logger")
}
l.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Clone(WithLevel(ErrorLevel))
if err := nl.Init(); err != nil {
t.Fatal(err)
}
nl.Info(ctx, "info message")
if len(buf.Bytes()) != 0 {
t.Fatal("message must not be logged")
}
l.Info(ctx, "info message")
if len(buf.Bytes()) == 0 {
t.Fatal("message must be logged")
}
}
func TestRedirectStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
fn := RedirectStdLogger(l, ErrorLevel)
defer fn()
log.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test","timestamp"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
lg := NewStdLogger(l, ErrorLevel)
lg.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test","timestamp"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestLogger(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
l.Warn(ctx, "warn_msg1")
l.Fields("error", "test").Info(ctx, "error message")
l.Warn(ctx, "first", " ", "second")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"trace","msg":"trace_msg1"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"warn","msg":"warn_msg1"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"test","level":"info","msg":"error message"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"level":"warn","msg":"first second"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}

logger/noop.go Normal file

@ -0,0 +1,94 @@
package logger
import (
"context"
)
type noopLogger struct {
opts Options
}
func NewLogger(opts ...Option) Logger {
options := NewOptions(opts...)
return &noopLogger{opts: options}
}
func (l *noopLogger) V(_ Level) bool {
return false
}
func (l *noopLogger) Level(_ Level) {
}
func (l *noopLogger) Name() string {
return l.opts.Name
}
func (l *noopLogger) Init(opts ...Option) error {
for _, o := range opts {
o(&l.opts)
}
return nil
}
func (l *noopLogger) Clone(opts ...Option) Logger {
nl := &noopLogger{opts: l.opts}
for _, o := range opts {
o(&nl.opts)
}
return nl
}
func (l *noopLogger) Fields(_ ...interface{}) Logger {
return l
}
func (l *noopLogger) Options() Options {
return l.opts
}
func (l *noopLogger) String() string {
return "noop"
}
func (l *noopLogger) Log(ctx context.Context, lvl Level, attrs ...interface{}) {
}
func (l *noopLogger) Info(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Debug(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Error(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Trace(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Warn(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Fatal(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Logf(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
}
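The noop logger implements the full Logger interface while discarding every record, which makes it usable as a safe default or a test double. A minimal sketch, assuming the NewLogger constructor above is the one exported from the base logger package:

package main

import (
	"context"

	"go.unistack.org/micro/v3/logger"
)

func main() {
	ctx := context.Background()

	// NewLogger in the base package now returns the noop implementation.
	l := logger.NewLogger(logger.WithName("noop-example"))
	if err := l.Init(); err != nil {
		panic(err)
	}

	l.Info(ctx, "this record is silently dropped")
	_ = l.V(logger.DebugLevel) // always false for the noop logger
}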


@ -3,10 +3,11 @@ package logger
import ( import (
"context" "context"
"io" "io"
"log/slog"
"os" "os"
) )
// Option func // Option func signature
type Option func(*Options) type Option func(*Options)
// Options holds logger options // Options holds logger options
@ -15,14 +16,33 @@ type Options struct {
Out io.Writer Out io.Writer
// Context holds external options // Context holds external options
Context context.Context Context context.Context
// Fields holds additional metadata
Fields []interface{}
// Name holds the logger name // Name holds the logger name
Name string Name string
// The logging level the logger should log // Fields holds additional metadata
Level Level Fields []interface{}
// CallerSkipCount number of frames to skip // CallerSkipCount number of frames to skip
CallerSkipCount int CallerSkipCount int
// ContextAttrFuncs holds funcs that are executed on the context before each log call
ContextAttrFuncs []ContextAttrFunc
// TimeKey is the key used for the time of the log call
TimeKey string
// LevelKey is the key used for the level of the log call
LevelKey string
// ErrorKey is the key used for the error of the log call
ErrorKey string
// MessageKey is the key used for the message of the log call
MessageKey string
// SourceKey is the key used for the source file and line of the log call
SourceKey string
// StacktraceKey is the key used for the stacktrace
StacktraceKey string
// AddStacktrace controls writing of stacktraces on error
AddStacktrace bool
// AddSource enables writing the source file and position in the log
AddSource bool
// The logging level the logger should log
Level Level
} }
// NewOptions creates new options struct // NewOptions creates new options struct
@ -33,13 +53,26 @@ func NewOptions(opts ...Option) Options {
Out: os.Stderr, Out: os.Stderr,
CallerSkipCount: DefaultCallerSkipCount, CallerSkipCount: DefaultCallerSkipCount,
Context: context.Background(), Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
AddSource: true,
} }
WithMicroKeys()(&options)
for _, o := range opts { for _, o := range opts {
o(&options) o(&options)
} }
return options return options
} }
// WithContextAttrFuncs appends funcs used to fill attrs from the context
func WithContextAttrFuncs(fncs ...ContextAttrFunc) Option {
return func(o *Options) {
o.ContextAttrFuncs = append(o.ContextAttrFuncs, fncs...)
}
}
// WithFields set default fields for the logger // WithFields set default fields for the logger
func WithFields(fields ...interface{}) Option { func WithFields(fields ...interface{}) Option {
return func(o *Options) { return func(o *Options) {
@ -61,6 +94,20 @@ func WithOutput(out io.Writer) Option {
} }
} }
// WithAddStacktrace controls writing a stacktrace on error
func WithAddStacktrace(v bool) Option {
return func(o *Options) {
o.AddStacktrace = v
}
}
// WithAddSource controls writing the source file and position in the log
func WithAddSource(v bool) Option {
return func(o *Options) {
o.AddSource = v
}
}
// WithCallerSkipCount set frame count to skip // WithCallerSkipCount set frame count to skip
func WithCallerSkipCount(c int) Option { func WithCallerSkipCount(c int) Option {
return func(o *Options) { return func(o *Options) {
@ -81,3 +128,47 @@ func WithName(n string) Option {
o.Name = n o.Name = n
} }
} }
func WithZapKeys() Option {
return func(o *Options) {
o.TimeKey = "@timestamp"
o.LevelKey = "level"
o.MessageKey = "msg"
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithZerologKeys() Option {
return func(o *Options) {
o.TimeKey = "time"
o.LevelKey = "level"
o.MessageKey = "message"
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithSlogKeys() Option {
return func(o *Options) {
o.TimeKey = slog.TimeKey
o.LevelKey = slog.LevelKey
o.MessageKey = slog.MessageKey
o.SourceKey = slog.SourceKey
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithMicroKeys() Option {
return func(o *Options) {
o.TimeKey = "timestamp"
o.LevelKey = "level"
o.MessageKey = "msg"
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
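The key helpers above only rewrite fields on Options; a backend such as logger/slog reads them when renaming attributes. Below is a hedged sketch combining WithZapKeys with WithContextAttrFuncs, assuming ContextAttrFunc is func(context.Context) []interface{} as its use in logger/slog below suggests; the request-id extraction func and its context key are hypothetical and exist only for this example.

package main

import (
	"context"
	"os"

	"go.unistack.org/micro/v3/logger"
)

// requestIDKey is a hypothetical context key set by some upstream middleware.
type requestIDKey struct{}

// requestIDAttrs is a ContextAttrFunc: it returns extra attrs pulled from the context.
func requestIDAttrs(ctx context.Context) []interface{} {
	if id, ok := ctx.Value(requestIDKey{}).(string); ok {
		return []interface{}{"x_request_id", id}
	}
	return nil
}

func main() {
	opts := logger.NewOptions(
		logger.WithOutput(os.Stderr),
		logger.WithLevel(logger.InfoLevel),
		logger.WithZapKeys(),                        // "@timestamp", "msg", "caller", "stacktrace", ...
		logger.WithContextAttrFuncs(requestIDAttrs), // appended to DefaultContextAttrFuncs
	)
	_ = opts // pass these options to a concrete implementation, e.g. logger/slog
}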

logger/slog/slog.go Normal file

@ -0,0 +1,572 @@
package slog
import (
"context"
"fmt"
"log/slog"
"os"
"regexp"
"runtime"
"strconv"
"sync"
"time"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/tracer"
)
var reTrace = regexp.MustCompile(`.*/slog/logger\.go.*\n`)
var (
traceValue = slog.StringValue("trace")
debugValue = slog.StringValue("debug")
infoValue = slog.StringValue("info")
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
)
func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.SourceKey:
source := a.Value.Any().(*slog.Source)
a.Value = slog.StringValue(source.File + ":" + strconv.Itoa(source.Line))
a.Key = s.opts.SourceKey
case slog.TimeKey:
a.Key = s.opts.TimeKey
case slog.MessageKey:
a.Key = s.opts.MessageKey
case slog.LevelKey:
level := a.Value.Any().(slog.Level)
lvl := slogToLoggerLevel(level)
a.Key = s.opts.LevelKey
switch {
case lvl < logger.DebugLevel:
a.Value = traceValue
case lvl < logger.InfoLevel:
a.Value = debugValue
case lvl < logger.WarnLevel:
a.Value = infoValue
case lvl < logger.ErrorLevel:
a.Value = warnValue
case lvl < logger.FatalLevel:
a.Value = errorValue
case lvl >= logger.FatalLevel:
a.Value = fatalValue
default:
a.Value = infoValue
}
}
return a
}
type slogLogger struct {
slog *slog.Logger
leveler *slog.LevelVar
opts logger.Options
mu sync.RWMutex
}
func (s *slogLogger) Clone(opts ...logger.Option) logger.Logger {
s.mu.RLock()
options := s.opts
for _, o := range opts {
o(&options)
}
l := &slogLogger{
opts: options,
}
l.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: l.renameAttr,
Level: l.leveler,
AddSource: l.opts.AddSource,
}
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
handler := slog.NewJSONHandler(options.Out, handleOpt)
l.slog = slog.New(handler).With(options.Fields...)
s.mu.RUnlock()
return l
}
func (s *slogLogger) V(level logger.Level) bool {
return s.opts.Level.Enabled(level)
}
func (s *slogLogger) Level(level logger.Level) {
s.leveler.Set(loggerToSlogLevel(level))
}
func (s *slogLogger) Options() logger.Options {
return s.opts
}
func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
s.mu.RLock()
l := &slogLogger{opts: s.opts}
l.leveler = new(slog.LevelVar)
l.leveler.Set(s.leveler.Level())
handleOpt := &slog.HandlerOptions{
ReplaceAttr: l.renameAttr,
Level: l.leveler,
AddSource: l.opts.AddSource,
}
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
l.slog = slog.New(handler).With(attrs...)
s.mu.RUnlock()
return l
}
func (s *slogLogger) Init(opts ...logger.Option) error {
s.mu.Lock()
if len(s.opts.ContextAttrFuncs) == 0 {
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
for _, o := range opts {
o(&s.opts)
}
s.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: s.renameAttr,
Level: s.leveler,
AddSource: s.opts.AddSource,
}
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
s.slog = slog.New(handler).With(s.opts.Fields...)
s.mu.Unlock()
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, attrs ...interface{}) {
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), loggerToSlogLevel(lvl), fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Logf(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), loggerToSlogLevel(lvl), msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace && lvl == logger.ErrorLevel {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, (slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1])))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Info(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelInfo, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelInfo, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debug(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Trace(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug-1, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug-1, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Error(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
if s.opts.AddStacktrace {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String("stacktrace", traceLines[len(traceLines)-1]))
}
}
}
r.Add(attrs...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == s.opts.ErrorKey {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Fatal(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError+1, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError+1, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelWarn, fmt.Sprintf("%s", attrs[0]), pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelWarn, msg, pcs[0])
for _, fn := range s.opts.ContextAttrFuncs {
attrs = append(attrs, fn(ctx)...)
}
for idx, attr := range attrs {
if ve, ok := attr.(error); ok && ve != nil {
attrs[idx] = slog.String(s.opts.ErrorKey, ve.Error())
break
}
}
r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Name() string {
return s.opts.Name
}
func (s *slogLogger) String() string {
return "slog"
}
func NewLogger(opts ...logger.Option) logger.Logger {
s := &slogLogger{
opts: logger.NewOptions(opts...),
}
return s
}
func loggerToSlogLevel(level logger.Level) slog.Level {
switch level {
case logger.DebugLevel:
return slog.LevelDebug
case logger.WarnLevel:
return slog.LevelWarn
case logger.ErrorLevel:
return slog.LevelError
case logger.TraceLevel:
return slog.LevelDebug - 1
case logger.FatalLevel:
return slog.LevelError + 1
default:
return slog.LevelInfo
}
}
func slogToLoggerLevel(level slog.Level) logger.Level {
switch level {
case slog.LevelDebug:
return logger.DebugLevel
case slog.LevelWarn:
return logger.WarnLevel
case slog.LevelError:
return logger.ErrorLevel
case slog.LevelDebug - 1:
return logger.TraceLevel
case slog.LevelError + 1:
return logger.FatalLevel
default:
return logger.InfoLevel
}
}
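A hedged usage sketch of the slog backend with the new stacktrace support; the option names are the ones added in this change set (WithAddStacktrace from the options diff above), and the output keys follow the default micro keys.

package main

import (
	"context"
	"errors"
	"os"

	"go.unistack.org/micro/v3/logger"
	slogger "go.unistack.org/micro/v3/logger/slog"
)

func main() {
	ctx := context.Background()

	l := slogger.NewLogger(
		logger.WithLevel(logger.DebugLevel),
		logger.WithOutput(os.Stderr),
		logger.WithAddStacktrace(true), // attach a stacktrace to error-level records
	)
	if err := l.Init(); err != nil {
		panic(err)
	}

	// An error value among the attrs is rewritten into the configured ErrorKey;
	// with AddStacktrace enabled the record also carries a "stacktrace" attribute.
	l.Error(ctx, "request failed", errors.New("connection refused"))

	// Fields returns a derived logger whose attrs are applied to every record.
	l.Fields("component", "billing").Infof(ctx, "processed %d items", 42)
}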

logger/slog/slog_test.go Normal file

@ -0,0 +1,176 @@
package slog
import (
"bytes"
"context"
"fmt"
"log"
"testing"
"go.unistack.org/micro/v3/logger"
)
func TestError(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithStacktrace(true))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Error(ctx, "message", fmt.Errorf("error message"))
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestErrorf(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithStacktrace(true))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Errorf(ctx, "message", fmt.Errorf("error message"))
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestContext(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl, ok := logger.FromContext(logger.NewContext(ctx, l.Fields("key", "val")))
if !ok {
t.Fatal("context without logger")
}
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFromContextWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
var ok bool
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
ctx = logger.NewContext(ctx, nl)
l, ok = logger.FromContext(ctx)
if !ok {
t.Fatalf("context does not have logger")
}
l.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Clone(logger.WithLevel(logger.ErrorLevel))
if err := nl.Init(); err != nil {
t.Fatal(err)
}
nl.Info(ctx, "info message")
if len(buf.Bytes()) != 0 {
t.Fatal("message must not be logged")
}
l.Info(ctx, "info message")
if len(buf.Bytes()) == 0 {
t.Fatal("message must be logged")
}
}
func TestRedirectStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
fn := logger.RedirectStdLogger(l, logger.ErrorLevel)
defer fn()
log.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
lg := logger.NewStdLogger(l, logger.ErrorLevel)
lg.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestLogger(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
l.Warn(ctx, "warn_msg1")
l.Fields("error", "test").Info(ctx, "error message")
l.Warn(ctx, "first second")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"trace"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"trace_msg1"`))) {
t.Fatalf("logger tracer, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"warn_msg1"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"error message","error":"test"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"first second"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
}


@ -39,8 +39,6 @@ func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
// FromContext returns metadata from the given context // FromContext returns metadata from the given context
// the returned metadata should not be modified, otherwise a race condition can happen // the returned metadata should not be modified, otherwise a race condition can happen
//
// Deprecated: use FromIncomingContext or FromOutgoingContext
func FromContext(ctx context.Context) (Metadata, bool) { func FromContext(ctx context.Context) (Metadata, bool) {
if ctx == nil { if ctx == nil {
return nil, false return nil, false
@ -53,8 +51,6 @@ func FromContext(ctx context.Context) (Metadata, bool) {
} }
// NewContext creates a new context with the given metadata // NewContext creates a new context with the given metadata
//
// Deprecated: use NewIncomingContext or NewOutgoingContext
func NewContext(ctx context.Context, md Metadata) context.Context { func NewContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil { if ctx == nil {
ctx = context.Background() ctx = context.Background()


@ -19,6 +19,8 @@ var (
HeaderTimeout = "Micro-Timeout" HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header // HeaderAuthorization specifies Authorization header
HeaderAuthorization = "Authorization" HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id
HeaderXRequestID = "X-Request-Id"
) )
// Metadata is our way of representing request headers internally. // Metadata is our way of representing request headers internally.
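FromContext and NewContext are un-deprecated here and a request-id header constant is added; the sketch below shows them together, assuming Metadata remains the plain string-map type the comment above describes.

package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v3/metadata"
)

func main() {
	// Assumption: metadata.Metadata is a map of header name to value.
	md := metadata.Metadata{metadata.HeaderXRequestID: "req-123"}

	ctx := metadata.NewContext(context.Background(), md)

	if got, ok := metadata.FromContext(ctx); ok {
		// The returned metadata should be treated as read-only.
		fmt.Println(got[metadata.HeaderXRequestID]) // req-123
	}
}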

micro.go Normal file

@ -0,0 +1,94 @@
package micro
import (
"reflect"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/flow"
"go.unistack.org/micro/v3/fsm"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/resolver"
"go.unistack.org/micro/v3/router"
"go.unistack.org/micro/v3/selector"
"go.unistack.org/micro/v3/server"
"go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v3/sync"
"go.unistack.org/micro/v3/tracer"
)
func As(b any, target any) bool {
if b == nil {
return false
}
if target == nil {
return false
}
val := reflect.ValueOf(target)
typ := val.Type()
if typ.Kind() != reflect.Ptr || val.IsNil() {
return false
}
targetType := typ.Elem()
if targetType.Kind() != reflect.Interface {
switch {
case targetType.Implements(brokerType):
break
case targetType.Implements(loggerType):
break
case targetType.Implements(clientType):
break
case targetType.Implements(serverType):
break
case targetType.Implements(codecType):
break
case targetType.Implements(flowType):
break
case targetType.Implements(fsmType):
break
case targetType.Implements(meterType):
break
case targetType.Implements(registerType):
break
case targetType.Implements(resolverType):
break
case targetType.Implements(selectorType):
break
case targetType.Implements(storeType):
break
case targetType.Implements(syncType):
break
case targetType.Implements(serviceType):
break
case targetType.Implements(routerType):
break
default:
return false
}
}
if reflect.TypeOf(b).AssignableTo(targetType) {
val.Elem().Set(reflect.ValueOf(b))
return true
}
return false
}
var brokerType = reflect.TypeOf((*broker.Broker)(nil)).Elem()
var loggerType = reflect.TypeOf((*logger.Logger)(nil)).Elem()
var clientType = reflect.TypeOf((*client.Client)(nil)).Elem()
var serverType = reflect.TypeOf((*server.Server)(nil)).Elem()
var codecType = reflect.TypeOf((*codec.Codec)(nil)).Elem()
var flowType = reflect.TypeOf((*flow.Flow)(nil)).Elem()
var fsmType = reflect.TypeOf((*fsm.FSM)(nil)).Elem()
var meterType = reflect.TypeOf((*meter.Meter)(nil)).Elem()
var registerType = reflect.TypeOf((*register.Register)(nil)).Elem()
var resolverType = reflect.TypeOf((*resolver.Resolver)(nil)).Elem()
var routerType = reflect.TypeOf((*router.Router)(nil)).Elem()
var selectorType = reflect.TypeOf((*selector.Selector)(nil)).Elem()
var storeType = reflect.TypeOf((*store.Store)(nil)).Elem()
var syncType = reflect.TypeOf((*sync.Sync)(nil)).Elem()
var tracerType = reflect.TypeOf((*tracer.Tracer)(nil)).Elem()
var serviceType = reflect.TypeOf((*Service)(nil)).Elem()
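micro.As mirrors the shape of errors.As: the target must be a non-nil pointer, and when it points at an interface (or at a type implementing one of the component interfaces listed above) the value is assigned if it is assignable. A small sketch using the logger interface; treat it as illustrative only.

package main

import (
	"fmt"

	micro "go.unistack.org/micro/v3"
	"go.unistack.org/micro/v3/logger"
)

func main() {
	// A component held as `any`, e.g. pulled out of a generic container.
	var component any = logger.NewLogger(logger.WithName("example"))

	var l logger.Logger
	if micro.As(component, &l) {
		fmt.Println("extracted logger:", l.Name()) // extracted logger: example
	} else {
		fmt.Println("component is not a logger")
	}
}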

micro_test.go Normal file

@ -0,0 +1,115 @@
package micro
import (
"context"
"fmt"
"reflect"
"testing"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/fsm"
)
func TestAs(t *testing.T) {
var b *bro
broTarget := &bro{name: "kafka"}
fsmTarget := &fsmT{name: "fsm"}
testCases := []struct {
b any
target any
match bool
want any
}{
{
broTarget,
&b,
true,
broTarget,
},
{
nil,
&b,
false,
nil,
},
{
fsmTarget,
&b,
false,
nil,
},
}
for i, tc := range testCases {
name := fmt.Sprintf("%d:As(Errorf(..., %v), %v)", i, tc.b, tc.target)
// Clear the target pointer, in case it was set in a previous test.
rtarget := reflect.ValueOf(tc.target)
rtarget.Elem().Set(reflect.Zero(reflect.TypeOf(tc.target).Elem()))
t.Run(name, func(t *testing.T) {
match := As(tc.b, tc.target)
if match != tc.match {
t.Fatalf("match: got %v; want %v", match, tc.match)
}
if !match {
return
}
if got := rtarget.Elem().Interface(); got != tc.want {
t.Fatalf("got %#v, want %#v", got, tc.want)
}
})
}
}
type bro struct {
name string
}
func (p *bro) Name() string { return p.name }
func (p *bro) Init(opts ...broker.Option) error { return nil }
// Options returns broker options
func (p *bro) Options() broker.Options { return broker.Options{} }
// Address return configured address
func (p *bro) Address() string { return "" }
// Connect connects to broker
func (p *bro) Connect(ctx context.Context) error { return nil }
// Disconnect disconnect from broker
func (p *bro) Disconnect(ctx context.Context) error { return nil }
// Publish message, msg can be single broker.Message or []broker.Message
func (p *bro) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
return nil
}
// BatchPublish messages to broker with multiple topics
func (p *bro) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
return nil
}
// BatchSubscribe subscribes to topic messages via handler
func (p *bro) BatchSubscribe(ctx context.Context, topic string, h broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}
// Subscribe subscribes to topic message via handler
func (p *bro) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}
// String type of broker
func (p *bro) String() string { return p.name }
type fsmT struct {
name string
}
func (f *fsmT) Start(ctx context.Context, a interface{}, o ...Option) (interface{}, error) {
return nil, nil
}
func (f *fsmT) Current() string { return f.name }
func (f *fsmT) Reset() {}
func (f *fsmT) State(s string, sf fsm.StateFunc) {}


@ -6,6 +6,7 @@ import (
"time" "time"
"go.unistack.org/micro/v3/logger" "go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/util/id" "go.unistack.org/micro/v3/util/id"
) )
@ -16,7 +17,7 @@ var (
type node struct { type node struct {
LastSeen time.Time LastSeen time.Time
*Node *register.Node
TTL time.Duration TTL time.Duration
} }
@ -25,23 +26,23 @@ type record struct {
Version string Version string
Metadata map[string]string Metadata map[string]string
Nodes map[string]*node Nodes map[string]*node
Endpoints []*Endpoint Endpoints []*register.Endpoint
} }
type memory struct { type memory struct {
sync.RWMutex sync.RWMutex
records map[string]services records map[string]services
watchers map[string]*watcher watchers map[string]*watcher
opts Options opts register.Options
} }
// services is a KV map with service name as the key and a map of records as the value // services is a KV map with service name as the key and a map of records as the value
type services map[string]map[string]*record type services map[string]map[string]*record
// NewRegister returns an initialized in-memory register // NewRegister returns an initialized in-memory register
func NewRegister(opts ...Option) Register { func NewRegister(opts ...register.Option) register.Register {
r := &memory{ r := &memory{
opts: NewOptions(opts...), opts: register.NewOptions(opts...),
records: make(map[string]services), records: make(map[string]services),
watchers: make(map[string]*watcher), watchers: make(map[string]*watcher),
} }
@ -75,7 +76,7 @@ func (m *memory) ttlPrune() {
} }
} }
func (m *memory) sendEvent(r *Result) { func (m *memory) sendEvent(r *register.Result) {
m.RLock() m.RLock()
watchers := make([]*watcher, 0, len(m.watchers)) watchers := make([]*watcher, 0, len(m.watchers))
for _, w := range m.watchers { for _, w := range m.watchers {
@ -106,7 +107,7 @@ func (m *memory) Disconnect(ctx context.Context) error {
return nil return nil
} }
func (m *memory) Init(opts ...Option) error { func (m *memory) Init(opts ...register.Option) error {
for _, o := range opts { for _, o := range opts {
o(&m.opts) o(&m.opts)
} }
@ -118,15 +119,15 @@ func (m *memory) Init(opts ...Option) error {
return nil return nil
} }
func (m *memory) Options() Options { func (m *memory) Options() register.Options {
return m.opts return m.opts
} }
func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOption) error { func (m *memory) Register(ctx context.Context, s *register.Service, opts ...register.RegisterOption) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
options := NewRegisterOptions(opts...) options := register.NewRegisterOptions(opts...)
// get the services for this domain from the register // get the services for this domain from the register
srvs, ok := m.records[options.Domain] srvs, ok := m.records[options.Domain]
@ -153,7 +154,7 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version) m.opts.Logger.Debugf(m.opts.Context, "Register added new service: %s, version: %s", s.Name, s.Version)
} }
m.records[options.Domain] = srvs m.records[options.Domain] = srvs
go m.sendEvent(&Result{Action: "create", Service: s}) go m.sendEvent(&register.Result{Action: "create", Service: s})
} }
var addedNodes bool var addedNodes bool
@ -176,7 +177,7 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
// add the node // add the node
srvs[s.Name][s.Version].Nodes[n.ID] = &node{ srvs[s.Name][s.Version].Nodes[n.ID] = &node{
Node: &Node{ Node: &register.Node{
ID: n.ID, ID: n.ID,
Address: n.Address, Address: n.Address,
Metadata: metadata, Metadata: metadata,
@ -192,7 +193,7 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
if m.opts.Logger.V(logger.DebugLevel) { if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version) m.opts.Logger.Debugf(m.opts.Context, "Register added new node to service: %s, version: %s", s.Name, s.Version)
} }
go m.sendEvent(&Result{Action: "update", Service: s}) go m.sendEvent(&register.Result{Action: "update", Service: s})
} else { } else {
// refresh TTL and timestamp // refresh TTL and timestamp
for _, n := range s.Nodes { for _, n := range s.Nodes {
@ -208,11 +209,11 @@ func (m *memory) Register(ctx context.Context, s *Service, opts ...RegisterOptio
return nil return nil
} }
func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterOption) error { func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
options := NewDeregisterOptions(opts...) options := register.NewDeregisterOptions(opts...)
// domain is set in metadata so it can be passed to watchers // domain is set in metadata so it can be passed to watchers
if s.Metadata == nil { if s.Metadata == nil {
@ -252,7 +253,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
// is cleanup // is cleanup
if len(version.Nodes) > 0 { if len(version.Nodes) > 0 {
m.records[options.Domain][s.Name][s.Version] = version m.records[options.Domain][s.Name][s.Version] = version
go m.sendEvent(&Result{Action: "update", Service: s}) go m.sendEvent(&register.Result{Action: "update", Service: s})
return nil return nil
} }
@ -260,7 +261,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
// register and exit // register and exit
if len(versions) == 1 { if len(versions) == 1 {
delete(m.records[options.Domain], s.Name) delete(m.records[options.Domain], s.Name)
go m.sendEvent(&Result{Action: "delete", Service: s}) go m.sendEvent(&register.Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) { if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name) m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s", s.Name)
@ -270,7 +271,7 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
// there are other versions of the service running, so only remove this version of it // there are other versions of the service running, so only remove this version of it
delete(m.records[options.Domain][s.Name], s.Version) delete(m.records[options.Domain][s.Name], s.Version)
go m.sendEvent(&Result{Action: "delete", Service: s}) go m.sendEvent(&register.Result{Action: "delete", Service: s})
if m.opts.Logger.V(logger.DebugLevel) { if m.opts.Logger.V(logger.DebugLevel) {
m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version) m.opts.Logger.Debugf(m.opts.Context, "Register removed service: %s, version: %s", s.Name, s.Version)
} }
@ -278,20 +279,20 @@ func (m *memory) Deregister(ctx context.Context, s *Service, opts ...DeregisterO
return nil return nil
} }
func (m *memory) LookupService(ctx context.Context, name string, opts ...LookupOption) ([]*Service, error) { func (m *memory) LookupService(ctx context.Context, name string, opts ...register.LookupOption) ([]*register.Service, error) {
options := NewLookupOptions(opts...) options := register.NewLookupOptions(opts...)
// if it's a wildcard domain, return from all domains // if it's a wildcard domain, return from all domains
if options.Domain == WildcardDomain { if options.Domain == register.WildcardDomain {
m.RLock() m.RLock()
recs := m.records recs := m.records
m.RUnlock() m.RUnlock()
var services []*Service var services []*register.Service
for domain := range recs { for domain := range recs {
srvs, err := m.LookupService(ctx, name, append(opts, LookupDomain(domain))...) srvs, err := m.LookupService(ctx, name, append(opts, register.LookupDomain(domain))...)
if err == ErrNotFound { if err == register.ErrNotFound {
continue continue
} else if err != nil { } else if err != nil {
return nil, err return nil, err
@ -300,7 +301,7 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...LookupO
} }
if len(services) == 0 { if len(services) == 0 {
return nil, ErrNotFound return nil, register.ErrNotFound
} }
return services, nil return services, nil
} }
@ -311,17 +312,17 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...LookupO
// check the domain exists // check the domain exists
services, ok := m.records[options.Domain] services, ok := m.records[options.Domain]
if !ok { if !ok {
return nil, ErrNotFound return nil, register.ErrNotFound
} }
// check the service exists // check the service exists
versions, ok := services[name] versions, ok := services[name]
if !ok || len(versions) == 0 { if !ok || len(versions) == 0 {
return nil, ErrNotFound return nil, register.ErrNotFound
} }
// serialize the response // serialize the response
result := make([]*Service, len(versions)) result := make([]*register.Service, len(versions))
var i int var i int
@ -333,19 +334,19 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...LookupO
return result, nil return result, nil
} }
func (m *memory) ListServices(ctx context.Context, opts ...ListOption) ([]*Service, error) { func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption) ([]*register.Service, error) {
options := NewListOptions(opts...) options := register.NewListOptions(opts...)
// if it's a wildcard domain, list from all domains // if it's a wildcard domain, list from all domains
if options.Domain == WildcardDomain { if options.Domain == register.WildcardDomain {
m.RLock() m.RLock()
recs := m.records recs := m.records
m.RUnlock() m.RUnlock()
var services []*Service var services []*register.Service
for domain := range recs { for domain := range recs {
srvs, err := m.ListServices(ctx, append(opts, ListDomain(domain))...) srvs, err := m.ListServices(ctx, append(opts, register.ListDomain(domain))...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -361,11 +362,11 @@ func (m *memory) ListServices(ctx context.Context, opts ...ListOption) ([]*Servi
// ensure the domain exists // ensure the domain exists
services, ok := m.records[options.Domain] services, ok := m.records[options.Domain]
if !ok { if !ok {
return make([]*Service, 0), nil return make([]*register.Service, 0), nil
} }
// serialize the result, each version counts as an individual service // serialize the result, each version counts as an individual service
var result []*Service var result []*register.Service
for _, service := range services { for _, service := range services {
for _, version := range service { for _, version := range service {
@ -376,16 +377,16 @@ func (m *memory) ListServices(ctx context.Context, opts ...ListOption) ([]*Servi
return result, nil return result, nil
} }
func (m *memory) Watch(ctx context.Context, opts ...WatchOption) (Watcher, error) { func (m *memory) Watch(ctx context.Context, opts ...register.WatchOption) (register.Watcher, error) {
id, err := id.New() id, err := id.New()
if err != nil { if err != nil {
return nil, err return nil, err
} }
wo := NewWatchOptions(opts...) wo := register.NewWatchOptions(opts...)
// construct the watcher // construct the watcher
w := &watcher{ w := &watcher{
exit: make(chan bool), exit: make(chan bool),
res: make(chan *Result), res: make(chan *register.Result),
id: id, id: id,
wo: wo, wo: wo,
} }
@ -406,13 +407,13 @@ func (m *memory) String() string {
} }
type watcher struct { type watcher struct {
res chan *Result res chan *register.Result
exit chan bool exit chan bool
wo WatchOptions wo register.WatchOptions
id string id string
} }
func (m *watcher) Next() (*Result, error) { func (m *watcher) Next() (*register.Result, error) {
for { for {
select { select {
case r := <-m.res: case r := <-m.res:
@ -429,15 +430,15 @@ func (m *watcher) Next() (*Result, error) {
if r.Service.Metadata != nil && len(r.Service.Metadata["domain"]) > 0 { if r.Service.Metadata != nil && len(r.Service.Metadata["domain"]) > 0 {
domain = r.Service.Metadata["domain"] domain = r.Service.Metadata["domain"]
} else { } else {
domain = DefaultDomain domain = register.DefaultDomain
} }
// only send the event if watching the wildcard or this specific domain // only send the event if watching the wildcard or this specific domain
if m.wo.Domain == WildcardDomain || m.wo.Domain == domain { if m.wo.Domain == register.WildcardDomain || m.wo.Domain == domain {
return r, nil return r, nil
} }
case <-m.exit: case <-m.exit:
return nil, ErrWatcherStopped return nil, register.ErrWatcherStopped
} }
} }
} }
@ -451,7 +452,7 @@ func (m *watcher) Stop() {
} }
} }
func serviceToRecord(s *Service, ttl time.Duration) *record { func serviceToRecord(s *register.Service, ttl time.Duration) *record {
metadata := make(map[string]string, len(s.Metadata)) metadata := make(map[string]string, len(s.Metadata))
for k, v := range s.Metadata { for k, v := range s.Metadata {
metadata[k] = v metadata[k] = v
@ -466,7 +467,7 @@ func serviceToRecord(s *Service, ttl time.Duration) *record {
} }
} }
endpoints := make([]*Endpoint, len(s.Endpoints)) endpoints := make([]*register.Endpoint, len(s.Endpoints))
for i, e := range s.Endpoints { for i, e := range s.Endpoints {
endpoints[i] = e endpoints[i] = e
} }
@ -480,7 +481,7 @@ func serviceToRecord(s *Service, ttl time.Duration) *record {
} }
} }
func recordToService(r *record, domain string) *Service { func recordToService(r *record, domain string) *register.Service {
metadata := make(map[string]string, len(r.Metadata)) metadata := make(map[string]string, len(r.Metadata))
for k, v := range r.Metadata { for k, v := range r.Metadata {
metadata[k] = v metadata[k] = v
@ -489,14 +490,14 @@ func recordToService(r *record, domain string) *Service {
// set the domain in metadata so it can be determined when a wildcard query is performed // set the domain in metadata so it can be determined when a wildcard query is performed
metadata["domain"] = domain metadata["domain"] = domain
endpoints := make([]*Endpoint, len(r.Endpoints)) endpoints := make([]*register.Endpoint, len(r.Endpoints))
for i, e := range r.Endpoints { for i, e := range r.Endpoints {
md := make(map[string]string, len(e.Metadata)) md := make(map[string]string, len(e.Metadata))
for k, v := range e.Metadata { for k, v := range e.Metadata {
md[k] = v md[k] = v
} }
endpoints[i] = &Endpoint{ endpoints[i] = &register.Endpoint{
Name: e.Name, Name: e.Name,
Request: e.Request, Request: e.Request,
Response: e.Response, Response: e.Response,
@ -504,7 +505,7 @@ func recordToService(r *record, domain string) *Service {
} }
} }
nodes := make([]*Node, len(r.Nodes)) nodes := make([]*register.Node, len(r.Nodes))
i := 0 i := 0
for _, n := range r.Nodes { for _, n := range r.Nodes {
md := make(map[string]string, len(n.Metadata)) md := make(map[string]string, len(n.Metadata))
@ -512,7 +513,7 @@ func recordToService(r *record, domain string) *Service {
md[k] = v md[k] = v
} }
nodes[i] = &Node{ nodes[i] = &register.Node{
ID: n.ID, ID: n.ID,
Address: n.Address, Address: n.Address,
Metadata: md, Metadata: md,
@ -520,7 +521,7 @@ func recordToService(r *record, domain string) *Service {
i++ i++
} }
return &Service{ return &register.Service{
Name: r.Name, Name: r.Name,
Version: r.Version, Version: r.Version,
Metadata: metadata, Metadata: metadata,


@ -6,14 +6,16 @@ import (
"sync" "sync"
"testing" "testing"
"time" "time"
"go.unistack.org/micro/v3/register"
) )
var testData = map[string][]*Service{ var testData = map[string][]*register.Service{
"foo": { "foo": {
{ {
Name: "foo", Name: "foo",
Version: "1.0.0", Version: "1.0.0",
Nodes: []*Node{ Nodes: []*register.Node{
{ {
ID: "foo-1.0.0-123", ID: "foo-1.0.0-123",
Address: "localhost:9999", Address: "localhost:9999",
@ -27,7 +29,7 @@ var testData = map[string][]*Service{
{ {
Name: "foo", Name: "foo",
Version: "1.0.1", Version: "1.0.1",
Nodes: []*Node{ Nodes: []*register.Node{
{ {
ID: "foo-1.0.1-321", ID: "foo-1.0.1-321",
Address: "localhost:6666", Address: "localhost:6666",
@ -37,7 +39,7 @@ var testData = map[string][]*Service{
{ {
Name: "foo", Name: "foo",
Version: "1.0.3", Version: "1.0.3",
Nodes: []*Node{ Nodes: []*register.Node{
{ {
ID: "foo-1.0.3-345", ID: "foo-1.0.3-345",
Address: "localhost:8888", Address: "localhost:8888",
@ -49,7 +51,7 @@ var testData = map[string][]*Service{
{ {
Name: "bar", Name: "bar",
Version: "default", Version: "default",
Nodes: []*Node{ Nodes: []*register.Node{
{ {
ID: "bar-1.0.0-123", ID: "bar-1.0.0-123",
Address: "localhost:9999", Address: "localhost:9999",
@ -63,7 +65,7 @@ var testData = map[string][]*Service{
{ {
Name: "bar", Name: "bar",
Version: "latest", Version: "latest",
Nodes: []*Node{ Nodes: []*register.Node{
{ {
ID: "bar-1.0.1-321", ID: "bar-1.0.1-321",
Address: "localhost:6666", Address: "localhost:6666",
@ -78,7 +80,7 @@ func TestMemoryRegistry(t *testing.T) {
ctx := context.TODO() ctx := context.TODO()
m := NewRegister() m := NewRegister()
fn := func(k string, v []*Service) { fn := func(k string, v []*register.Service) {
services, err := m.LookupService(ctx, k) services, err := m.LookupService(ctx, k)
if err != nil { if err != nil {
t.Errorf("Unexpected error getting service %s: %v", k, err) t.Errorf("Unexpected error getting service %s: %v", k, err)
@ -155,8 +157,8 @@ func TestMemoryRegistry(t *testing.T) {
for _, v := range testData { for _, v := range testData {
for _, service := range v { for _, service := range v {
services, err := m.LookupService(ctx, service.Name) services, err := m.LookupService(ctx, service.Name)
if err != ErrNotFound { if err != register.ErrNotFound {
t.Errorf("Expected error: %v, got: %v", ErrNotFound, err) t.Errorf("Expected error: %v, got: %v", register.ErrNotFound, err)
} }
if len(services) != 0 { if len(services) != 0 {
t.Errorf("Expected %d services for %s, got %d", 0, service.Name, len(services)) t.Errorf("Expected %d services for %s, got %d", 0, service.Name, len(services))
@ -171,7 +173,7 @@ func TestMemoryRegistryTTL(t *testing.T) {
for _, v := range testData { for _, v := range testData {
for _, service := range v { for _, service := range v {
if err := m.Register(ctx, service, RegisterTTL(time.Millisecond)); err != nil { if err := m.Register(ctx, service, register.RegisterTTL(time.Millisecond)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -200,7 +202,7 @@ func TestMemoryRegistryTTLConcurrent(t *testing.T) {
ctx := context.TODO() ctx := context.TODO()
for _, v := range testData { for _, v := range testData {
for _, service := range v { for _, service := range v {
if err := m.Register(ctx, service, RegisterTTL(waitTime/2)); err != nil { if err := m.Register(ctx, service, register.RegisterTTL(waitTime/2)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -249,34 +251,34 @@ func TestMemoryWildcard(t *testing.T) {
m := NewRegister() m := NewRegister()
ctx := context.TODO() ctx := context.TODO()
testSrv := &Service{Name: "foo", Version: "1.0.0"} testSrv := &register.Service{Name: "foo", Version: "1.0.0"}
if err := m.Register(ctx, testSrv, RegisterDomain("one")); err != nil { if err := m.Register(ctx, testSrv, register.RegisterDomain("one")); err != nil {
t.Fatalf("Register err: %v", err) t.Fatalf("Register err: %v", err)
} }
if err := m.Register(ctx, testSrv, RegisterDomain("two")); err != nil { if err := m.Register(ctx, testSrv, register.RegisterDomain("two")); err != nil {
t.Fatalf("Register err: %v", err) t.Fatalf("Register err: %v", err)
} }
if recs, err := m.ListServices(ctx, ListDomain("one")); err != nil { if recs, err := m.ListServices(ctx, register.ListDomain("one")); err != nil {
t.Errorf("List err: %v", err) t.Errorf("List err: %v", err)
} else if len(recs) != 1 { } else if len(recs) != 1 {
t.Errorf("Expected 1 record, got %v", len(recs)) t.Errorf("Expected 1 record, got %v", len(recs))
} }
if recs, err := m.ListServices(ctx, ListDomain("*")); err != nil { if recs, err := m.ListServices(ctx, register.ListDomain("*")); err != nil {
t.Errorf("List err: %v", err) t.Errorf("List err: %v", err)
} else if len(recs) != 2 { } else if len(recs) != 2 {
t.Errorf("Expected 2 records, got %v", len(recs)) t.Errorf("Expected 2 records, got %v", len(recs))
} }
if recs, err := m.LookupService(ctx, testSrv.Name, LookupDomain("one")); err != nil { if recs, err := m.LookupService(ctx, testSrv.Name, register.LookupDomain("one")); err != nil {
t.Errorf("Lookup err: %v", err) t.Errorf("Lookup err: %v", err)
} else if len(recs) != 1 { } else if len(recs) != 1 {
t.Errorf("Expected 1 record, got %v", len(recs)) t.Errorf("Expected 1 record, got %v", len(recs))
} }
if recs, err := m.LookupService(ctx, testSrv.Name, LookupDomain("*")); err != nil { if recs, err := m.LookupService(ctx, testSrv.Name, register.LookupDomain("*")); err != nil {
t.Errorf("Lookup err: %v", err) t.Errorf("Lookup err: %v", err)
} else if len(recs) != 2 { } else if len(recs) != 2 {
t.Errorf("Expected 2 records, got %v", len(recs)) t.Errorf("Expected 2 records, got %v", len(recs))
@ -284,7 +286,7 @@ func TestMemoryWildcard(t *testing.T) {
} }
func TestWatcher(t *testing.T) { func TestWatcher(t *testing.T) {
testSrv := &Service{Name: "foo", Version: "1.0.0"} testSrv := &register.Service{Name: "foo", Version: "1.0.0"}
ctx := context.TODO() ctx := context.TODO()
m := NewRegister() m := NewRegister()

register/noop.go Normal file

@ -0,0 +1,72 @@
package register
import "context"
type noop struct {
opts Options
}
func NewRegister(opts ...Option) Register {
return &noop{
opts: NewOptions(opts...),
}
}
func (n *noop) Name() string {
return n.opts.Name
}
func (n *noop) Init(opts ...Option) error {
for _, o := range opts {
o(&n.opts)
}
return nil
}
func (n *noop) Options() Options {
return n.opts
}
func (n *noop) Connect(ctx context.Context) error {
return nil
}
func (n *noop) Disconnect(ctx context.Context) error {
return nil
}
func (n *noop) Register(ctx context.Context, service *Service, option ...RegisterOption) error {
return nil
}
func (n *noop) Deregister(ctx context.Context, service *Service, option ...DeregisterOption) error {
return nil
}
func (n *noop) LookupService(ctx context.Context, s string, option ...LookupOption) ([]*Service, error) {
return nil, nil
}
func (n *noop) ListServices(ctx context.Context, option ...ListOption) ([]*Service, error) {
return nil, nil
}
func (n *noop) Watch(ctx context.Context, opts ...WatchOption) (Watcher, error) {
wOpts := NewWatchOptions(opts...)
return &watcher{wo: wOpts}, nil
}
func (n *noop) String() string {
return "noop"
}
type watcher struct {
wo WatchOptions
}
func (m *watcher) Next() (*Result, error) {
return nil, nil
}
func (m *watcher) Stop() {}
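A hedged sketch of the noop register: it accepts every call and returns empty results, so it can stand in where service discovery is handled elsewhere. The types and constructors used are the ones visible in this diff.

package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v3/register"
)

func main() {
	ctx := context.Background()

	r := register.NewRegister() // base package constructor now returns the noop register
	if err := r.Init(); err != nil {
		panic(err)
	}
	if err := r.Connect(ctx); err != nil {
		panic(err)
	}

	svc := &register.Service{Name: "helloworld", Version: "1.0.0"}
	if err := r.Register(ctx, svc); err != nil {
		panic(err)
	}

	// Lookups against the noop register return no services and no error.
	svcs, err := r.LookupService(ctx, "helloworld")
	fmt.Println(len(svcs), err) // 0 <nil>
}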


@ -202,39 +202,6 @@ func (n *noopServer) Register() error {
n.Lock()
defer n.Unlock()
- cx := config.Context
- var sub broker.Subscriber
- for sb := range n.subscribers {
- if sb.Options().Context != nil {
- cx = sb.Options().Context
- }
- opts := []broker.SubscribeOption{broker.SubscribeContext(cx), broker.SubscribeAutoAck(sb.Options().AutoAck)}
- if queue := sb.Options().Queue; len(queue) > 0 {
- opts = append(opts, broker.SubscribeGroup(queue))
- }
- if sb.Options().Batch {
- // batch processing handler
- sub, err = config.Broker.BatchSubscribe(cx, sb.Topic(), n.newBatchSubHandler(sb, config), opts...)
- } else {
- // single processing handler
- sub, err = config.Broker.Subscribe(cx, sb.Topic(), n.newSubHandler(sb, config), opts...)
- }
- if err != nil {
- return err
- }
- if config.Logger.V(logger.InfoLevel) {
- config.Logger.Infof(n.opts.Context, "subscribing to topic: %s", sb.Topic())
- }
- n.subscribers[sb] = []broker.Subscriber{sub}
- }
n.registered = true
if cacheService {
n.rsvc = service
@ -366,6 +333,10 @@ func (n *noopServer) Start() error {
}
}
+ if err := n.subscribe(); err != nil {
+ return err
+ }
go func() {
t := new(time.Ticker)
@ -449,6 +420,45 @@ func (n *noopServer) Start() error {
return nil
}
func (n *noopServer) subscribe() error {
config := n.Options()
cx := config.Context
var err error
var sub broker.Subscriber
for sb := range n.subscribers {
if sb.Options().Context != nil {
cx = sb.Options().Context
}
opts := []broker.SubscribeOption{broker.SubscribeContext(cx), broker.SubscribeAutoAck(sb.Options().AutoAck)}
if queue := sb.Options().Queue; len(queue) > 0 {
opts = append(opts, broker.SubscribeGroup(queue))
}
if sb.Options().Batch {
// batch processing handler
sub, err = config.Broker.BatchSubscribe(cx, sb.Topic(), n.createBatchSubHandler(sb, config), opts...)
} else {
// single processing handler
sub, err = config.Broker.Subscribe(cx, sb.Topic(), n.createSubHandler(sb, config), opts...)
}
if err != nil {
return err
}
if config.Logger.V(logger.InfoLevel) {
config.Logger.Infof(n.opts.Context, "subscribing to topic: %s", sb.Topic())
}
n.subscribers[sb] = []broker.Subscriber{sub}
}
return nil
}
func (n *noopServer) Stop() error {
n.RLock()
if !n.started {

View File

@ -86,6 +86,8 @@ type Options struct {
DeregisterAttempts int
// Hooks may contains SubscriberWrapper, HandlerWrapper or Server func wrapper
Hooks options.Hooks
+ // GracefulTimeout timeout for graceful stop server
+ GracefulTimeout time.Duration
}
// NewOptions returns new options struct with default or passed values
@ -108,6 +110,7 @@ func NewOptions(opts ...Option) Options {
Version: DefaultVersion,
ID: id.Must(),
Namespace: DefaultNamespace,
+ GracefulTimeout: DefaultGracefulTimeout,
}
for _, o := range opts {
@ -321,6 +324,14 @@ func Listener(l net.Listener) Option {
// HandlerOption func
type HandlerOption func(*HandlerOptions)
+ // GracefulTimeout duration
+ func GracefulTimeout(td time.Duration) Option {
+ return func(o *Options) {
+ o.GracefulTimeout = td
+ }
+ }
// HandlerOptions struct
type HandlerOptions struct {
// Context holds external options

View File

@ -34,6 +34,8 @@ var (
DefaultMaxMsgRecvSize = 1024 * 1024 * 4 // 4Mb
// DefaultMaxMsgSendSize holds default max send size
DefaultMaxMsgSendSize = 1024 * 1024 * 4 // 4Mb
+ // DefaultGracefulTimeout default time for graceful stop
+ DefaultGracefulTimeout = 5 * time.Second
)
// Server is a simple micro server abstraction
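Taken together, the two hunks above add a configurable graceful-stop window. A small sketch of how it might be set (the import path is inferred from the module layout; GracefulTimeout and DefaultGracefulTimeout are exactly the additions shown above):

package main

import (
	"fmt"
	"time"

	"go.unistack.org/micro/v3/server"
)

func main() {
	// Explicit value; when the option is omitted, NewOptions falls back to
	// DefaultGracefulTimeout (5 * time.Second) as wired in above.
	opts := server.NewOptions(server.GracefulTimeout(30 * time.Second))
	fmt.Println(opts.GracefulTimeout) // 30s
}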

View File

@ -191,7 +191,7 @@ func newSubscriber(topic string, sub interface{}, opts ...SubscriberOption) Subs
}
//nolint:gocyclo
- func (n *noopServer) newBatchSubHandler(sb *subscriber, opts Options) broker.BatchHandler {
+ func (n *noopServer) createBatchSubHandler(sb *subscriber, opts Options) broker.BatchHandler {
return func(ps broker.Events) (err error) {
defer func() {
if r := recover(); r != nil {
@ -309,7 +309,7 @@ func (n *noopServer) newBatchSubHandler(sb *subscriber, opts Options) broker.Bat
}
//nolint:gocyclo
- func (n *noopServer) newSubHandler(sb *subscriber, opts Options) broker.Handler {
+ func (n *noopServer) createSubHandler(sb *subscriber, opts Options) broker.Handler {
return func(p broker.Event) (err error) {
defer func() {
if r := recover(); r != nil {
@ -342,9 +342,6 @@ func (n *noopServer) newSubHandler(sb *subscriber, opts Options) broker.Handler
hdr := metadata.New(len(msg.Header))
for k, v := range msg.Header {
- if k == "Content-Type" {
- continue
- }
hdr.Set(k, v)
}

View File

@ -72,8 +72,8 @@ func RegisterSubscriber(topic string, s server.Server, h interface{}, opts ...se
}
type service struct {
- sync.RWMutex
opts Options
+ sync.RWMutex
}
// NewService creates and returns a new Service based on the packages within.
@ -88,6 +88,7 @@ func (s *service) Name() string {
// Init initialises options. Additionally it calls cmd.Init
// which parses command line flags. cmd.Init is only called
// on first Init.
+ //
//nolint:gocyclo
func (s *service) Init(opts ...Option) error {
var err error
@ -375,17 +376,13 @@ func (s *service) Run() error {
return s.Stop()
}
- type nameIface interface {
+ type Namer interface {
Name() string
}
- func getNameIndex(n string, ifaces interface{}) int {
- values, ok := ifaces.([]interface{})
- if !ok {
- return 0
- }
- for idx, iface := range values {
- if ifc, ok := iface.(nameIface); ok && ifc.Name() == n {
+ func getNameIndex[T Namer](n string, ifaces []T) int {
+ for idx, iface := range ifaces {
+ if iface.Name() == n {
return idx
}
}

View File

@ -17,6 +17,22 @@ import (
"go.unistack.org/micro/v3/tracer" "go.unistack.org/micro/v3/tracer"
) )
func TestClient(t *testing.T) {
c1 := client.NewClient(client.Name("test1"))
c2 := client.NewClient(client.Name("test2"))
svc := NewService(Client(c1, c2))
if err := svc.Init(); err != nil {
t.Fatal(err)
}
x1 := svc.Client("test2")
if x1.Name() != "test2" {
t.Fatalf("invalid client %#+v", svc.Options().Clients)
}
}
type testItem struct {
name string
}
@ -25,12 +41,11 @@ func (ti *testItem) Name() string {
return ti.name
}
- func TestGetNameIndex(t *testing.T) {
- item1 := &testItem{name: "first"}
- item2 := &testItem{name: "second"}
- items := []interface{}{item1, item2}
- if idx := getNameIndex("second", items); idx != 1 {
- t.Fatalf("getNameIndex func error, item not found")
+ func Test_getNameIndex(t *testing.T) {
+ items := []*testItem{{name: "test1"}, {name: "test2"}}
+ idx := getNameIndex("test2", items)
+ if items[idx].Name() != "test2" {
+ t.Fatal("getNameIndex wrong")
}
}

tracer/memory/memory.go (new file, 139 lines)

@ -0,0 +1,139 @@
package memory
import (
"context"
"time"
"go.unistack.org/micro/v3/tracer"
"go.unistack.org/micro/v3/util/id"
)
var _ tracer.Tracer = (*Tracer)(nil)
type Tracer struct {
opts tracer.Options
spans []tracer.Span
}
func (t *Tracer) Spans() []tracer.Span {
return t.spans
}
func (t *Tracer) Start(ctx context.Context, name string, opts ...tracer.SpanOption) (context.Context, tracer.Span) {
options := tracer.NewSpanOptions(opts...)
span := &Span{
name: name,
ctx: ctx,
tracer: t,
kind: options.Kind,
startTime: time.Now(),
}
span.spanID.s, _ = id.New()
span.traceID.s, _ = id.New()
if span.ctx == nil {
span.ctx = context.Background()
}
t.spans = append(t.spans, span)
return tracer.NewSpanContext(ctx, span), span
}
func (t *Tracer) Flush(_ context.Context) error {
return nil
}
func (t *Tracer) Init(opts ...tracer.Option) error {
for _, o := range opts {
o(&t.opts)
}
return nil
}
func (t *Tracer) Name() string {
return t.opts.Name
}
type noopStringer struct {
s string
}
func (s noopStringer) String() string {
return s.s
}
type Span struct {
ctx context.Context
tracer tracer.Tracer
name string
statusMsg string
startTime time.Time
finishTime time.Time
traceID noopStringer
spanID noopStringer
events []*Event
labels []interface{}
logs []interface{}
kind tracer.SpanKind
status tracer.SpanStatus
}
func (s *Span) Finish(_ ...tracer.SpanOption) {
s.finishTime = time.Now()
}
func (s *Span) Context() context.Context {
return s.ctx
}
func (s *Span) Tracer() tracer.Tracer {
return s.tracer
}
type Event struct {
name string
labels []interface{}
}
func (s *Span) AddEvent(name string, opts ...tracer.EventOption) {
options := tracer.NewEventOptions(opts...)
s.events = append(s.events, &Event{name: name, labels: options.Labels})
}
func (s *Span) SetName(name string) {
s.name = name
}
func (s *Span) AddLogs(kv ...interface{}) {
s.logs = append(s.logs, kv...)
}
func (s *Span) AddLabels(kv ...interface{}) {
s.labels = append(s.labels, kv...)
}
func (s *Span) Kind() tracer.SpanKind {
return s.kind
}
func (s *Span) TraceID() string {
return s.traceID.String()
}
func (s *Span) SpanID() string {
return s.spanID.String()
}
func (s *Span) Status() (tracer.SpanStatus, string) {
return s.status, s.statusMsg
}
func (s *Span) SetStatus(st tracer.SpanStatus, msg string) {
s.status = st
s.statusMsg = msg
}
// NewTracer returns new memory tracer
func NewTracer(opts ...tracer.Option) *Tracer {
return &Tracer{
opts: tracer.NewOptions(opts...),
}
}
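A short sketch of the new in-memory tracer in a test-style setting (the import path is inferred from the file location; only methods defined above are used):

package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v3/tracer"
	"go.unistack.org/micro/v3/tracer/memory"
)

func main() {
	tr := memory.NewTracer()

	_, sp := tr.Start(context.TODO(), "checkout")
	sp.AddLabels("order", "42")
	sp.SetStatus(tracer.SpanStatusError, "payment declined")
	sp.Finish()

	// Unlike the noop tracer, every started span is retained, so tests can
	// assert on what was recorded.
	for _, s := range tr.Spans() {
		st, msg := s.Status()
		fmt.Println(s.TraceID(), st, msg)
	}
}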

View File

@ -0,0 +1,38 @@
package memory
import (
"bytes"
"context"
"fmt"
"strings"
"testing"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/logger/slog"
"go.unistack.org/micro/v3/tracer"
)
func TestLoggerWithTracer(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
logger.DefaultLogger = slog.NewLogger(logger.WithOutput(buf))
if err := logger.Init(); err != nil {
t.Fatal(err)
}
var span tracer.Span
tr := NewTracer()
ctx, span = tr.Start(ctx, "test1")
logger.Error(ctx, "my test error", fmt.Errorf("error"))
if !strings.Contains(buf.String(), span.TraceID()) {
t.Fatalf("log does not contains trace id: %s", buf.Bytes())
}
_, _ = tr.Start(ctx, "test2")
for _, s := range tr.Spans() {
_ = s
}
}

View File

@ -2,25 +2,51 @@ package tracer
import (
"context"
+ "go.unistack.org/micro/v3/util/id"
)
+ var _ Tracer = (*noopTracer)(nil)
type noopTracer struct {
opts Options
+ spans []Span
}
+ func (t *noopTracer) Spans() []Span {
+ return t.spans
+ }
func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ options := NewSpanOptions(opts...)
span := &noopSpan{
name: name,
ctx: ctx,
tracer: t,
- opts: NewSpanOptions(opts...),
+ labels: options.Labels,
+ kind: options.Kind,
}
+ span.spanID.s, _ = id.New()
+ span.traceID.s, _ = id.New()
if span.ctx == nil {
span.ctx = context.Background()
}
+ t.spans = append(t.spans, span)
return NewSpanContext(ctx, span), span
}
+ type noopStringer struct {
+ s string
+ }
+ func (s noopStringer) String() string {
+ return s.s
+ }
+ func (t *noopTracer) Flush(ctx context.Context) error {
+ return nil
+ }
func (t *noopTracer) Init(opts ...Option) error {
for _, o := range opts {
o(&t.opts)
@ -32,16 +58,34 @@ func (t *noopTracer) Name() string {
return t.opts.Name
}
+ type noopEvent struct {
+ name string
+ labels []interface{}
+ }
type noopSpan struct {
ctx context.Context
tracer Tracer
name string
- opts SpanOptions
- status SpanStatus
statusMsg string
+ traceID noopStringer
+ spanID noopStringer
+ events []*noopEvent
+ labels []interface{}
+ logs []interface{}
+ kind SpanKind
+ status SpanStatus
}
- func (s *noopSpan) Finish(opts ...SpanOption) {
+ func (s *noopSpan) TraceID() string {
+ return s.traceID.String()
+ }
+ func (s *noopSpan) SpanID() string {
+ return s.spanID.String()
+ }
+ func (s *noopSpan) Finish(_ ...SpanOption) {
}
func (s *noopSpan) Context() context.Context {
@ -53,22 +97,24 @@ func (s *noopSpan) Tracer() Tracer {
}
func (s *noopSpan) AddEvent(name string, opts ...EventOption) {
+ options := NewEventOptions(opts...)
+ s.events = append(s.events, &noopEvent{name: name, labels: options.Labels})
}
func (s *noopSpan) SetName(name string) {
s.name = name
}
- func (s *noopSpan) SetLabels(labels ...interface{}) {
- s.opts.Labels = labels
+ func (s *noopSpan) AddLogs(kv ...interface{}) {
+ s.logs = append(s.logs, kv...)
}
- func (s *noopSpan) AddLabels(labels ...interface{}) {
- s.opts.Labels = append(s.opts.Labels, labels...)
+ func (s *noopSpan) AddLabels(kv ...interface{}) {
+ s.labels = append(s.labels, kv...)
}
func (s *noopSpan) Kind() SpanKind {
- return s.opts.Kind
+ return s.kind
}
func (s *noopSpan) Status() (SpanStatus, string) {

View File

@ -91,14 +91,22 @@ type SpanOptions struct {
type SpanOption func(o *SpanOptions)
// EventOptions contains event options
- type EventOptions struct{}
+ type EventOptions struct {
+ Labels []interface{}
+ }
// EventOption func signature
type EventOption func(o *EventOptions)
- func WithSpanLabels(labels ...interface{}) SpanOption {
+ func WithEventLabels(kv ...interface{}) EventOption {
+ return func(o *EventOptions) {
+ o.Labels = kv
+ }
+ }
+ func WithSpanLabels(kv ...interface{}) SpanOption {
return func(o *SpanOptions) {
- o.Labels = labels
+ o.Labels = kv
}
}
@ -128,6 +136,15 @@ func Logger(l logger.Logger) Option {
}
}
+ // NewEventOptions returns default EventOptions
+ func NewEventOptions(opts ...EventOption) EventOptions {
+ options := EventOptions{}
+ for _, o := range opts {
+ o(&options)
+ }
+ return options
+ }
// NewSpanOptions returns default SpanOptions
func NewSpanOptions(opts ...SpanOption) SpanOptions {
options := SpanOptions{

View File

@ -3,11 +3,35 @@ package tracer // import "go.unistack.org/micro/v3/tracer"
import (
"context"
+ "fmt"
+ "sort"
+ "go.unistack.org/micro/v3/logger"
)
// DefaultTracer is the global default tracer
var DefaultTracer = NewTracer()
+ var (
+ // TraceIDKey is the key used for the trace id in the log call
+ TraceIDKey = "trace-id"
+ // SpanIDKey is the key used for the span id in the log call
+ SpanIDKey = "span-id"
+ )
+ func init() {
+ logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs,
+ func(ctx context.Context) []interface{} {
+ if span, ok := SpanFromContext(ctx); ok {
+ return []interface{}{
+ TraceIDKey, span.TraceID(),
+ SpanIDKey, span.SpanID(),
+ }
+ }
+ return nil
+ })
+ }
// Tracer is an interface for distributed tracing
type Tracer interface {
// Name return tracer name
@ -16,6 +40,8 @@ type Tracer interface {
Init(...Option) error
// Start a trace
Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span)
+ // Flush flushes spans
+ Flush(ctx context.Context) error
}
type Span interface {
@ -23,8 +49,6 @@ type Span interface {
Tracer() Tracer
// Finish complete and send span
Finish(opts ...SpanOption)
- // AddEvent add event to span
- AddEvent(name string, opts ...EventOption)
// Context return context with span
Context() context.Context
// SetName set the span name
@ -33,10 +57,50 @@ type Span interface {
SetStatus(status SpanStatus, msg string)
// Status returns span status and msg
Status() (SpanStatus, string)
- // SetLabels set the span labels
- SetLabels(labels ...interface{})
- // AddLabels append the span labels
- AddLabels(labels ...interface{})
+ // AddLabels append labels to span
+ AddLabels(kv ...interface{})
+ // AddEvent append event to span
+ AddEvent(name string, opts ...EventOption)
+ // AddEvent append event to span
+ AddLogs(kv ...interface{})
// Kind returns span kind
Kind() SpanKind
// TraceID returns trace id
TraceID() string
// SpanID returns span id
SpanID() string
}
// sort labels alphabeticaly by label name
type byKey []interface{}
func (k byKey) Len() int { return len(k) / 2 }
func (k byKey) Less(i, j int) bool { return fmt.Sprintf("%s", k[i*2]) < fmt.Sprintf("%s", k[j*2]) }
func (k byKey) Swap(i, j int) {
k[i*2], k[j*2] = k[j*2], k[i*2]
k[i*2+1], k[j*2+1] = k[j*2+1], k[i*2+1]
}
func UniqLabels(labels []interface{}) []interface{} {
if len(labels)%2 == 1 {
labels = labels[:len(labels)-1]
}
if len(labels) > 2 {
sort.Sort(byKey(labels))
idx := 0
for {
if labels[idx] == labels[idx+2] {
copy(labels[idx:], labels[idx+2:])
labels = labels[:len(labels)-2]
} else {
idx += 2
}
if idx+2 >= len(labels) {
break
}
}
}
return labels
} }

tracer/tracer_test.go (new file, 13 lines)

@ -0,0 +1,13 @@
package tracer
import (
"testing"
)
func TestUniqLabels(t *testing.T) {
labels := []interface{}{"key1", "val1", "key1", "val2"}
labels = UniqLabels(labels)
if labels[1] != "val2" {
t.Fatalf("UniqLabels not works")
}
}

View File

@ -4,6 +4,7 @@ package wrapper // import "go.unistack.org/micro/v3/tracer/wrapper"
import (
"context"
"fmt"
+ "strings"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/metadata"
@ -11,107 +12,87 @@ import (
"go.unistack.org/micro/v3/tracer"
)
var DefaultHeadersExctract = []string{metadata.HeaderXRequestID}
func ExtractDefaultLabels(md metadata.Metadata) []interface{} {
labels := make([]interface{}, 0, len(DefaultHeadersExctract))
for _, k := range DefaultHeadersExctract {
if v, ok := md.Get(k); ok {
labels = append(labels, strings.ToLower(k), v)
}
}
return labels
}
var (
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []client.CallOption, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Call %s.%s", req.Service(), req.Method()))
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md)+1)
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []client.CallOption, stream client.Stream, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Stream %s.%s", req.Service(), req.Method()))
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md))
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultClientPublishObserver = func(ctx context.Context, msg client.Message, opts []client.PublishOption, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Publish %s", msg.Topic()))
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md))
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
+ labels = append(labels, ExtractDefaultLabels(msg.Metadata())...)
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Handler %s.%s", req.Service(), req.Method()))
var labels []interface{}
if md, ok := metadata.FromIncomingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md))
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultServerSubscriberObserver = func(ctx context.Context, msg server.Message, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Subscriber %s", msg.Topic()))
var labels []interface{}
if md, ok := metadata.FromIncomingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md))
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
+ labels = append(labels, ExtractDefaultLabels(msg.Header())...)
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, sp tracer.Span, err error) {
- sp.SetName(fmt.Sprintf("Call %s.%s", req.Service(), req.Method()))
+ sp.SetName(fmt.Sprintf("%s.%s call", req.Service(), req.Method()))
var labels []interface{}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
- labels = make([]interface{}, 0, len(md))
- for k, v := range md {
- labels = append(labels, k, v)
- }
+ labels = append(labels, ExtractDefaultLabels(md)...)
}
if err != nil {
- labels = append(labels, "error", err.Error())
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
- labels = append(labels, "kind", sp.Kind())
- sp.SetLabels(labels...)
+ sp.AddLabels(labels...)
}
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
@ -241,16 +222,22 @@ func (ot *tWrapper) Call(ctx context.Context, req client.Request, rsp interface{
}
}
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindClient))
- }
+ nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
+ tracer.WithSpanKind(tracer.SpanKindClient),
+ tracer.WithSpanLabels(
+ "rpc.service", req.Service(),
+ "rpc.method", req.Method(),
+ "rpc.flavor", "rpc",
+ "rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
+ "rpc.call_type", "unary",
+ ),
+ )
defer sp.Finish()
- err := ot.Client.Call(ctx, req, rsp, opts...)
+ err := ot.Client.Call(nctx, req, rsp, opts...)
for _, o := range ot.opts.ClientCallObservers {
- o(ctx, req, rsp, opts, sp, err)
+ o(nctx, req, rsp, opts, sp, err)
}
return err
@ -264,32 +251,36 @@ func (ot *tWrapper) Stream(ctx context.Context, req client.Request, opts ...clie
}
}
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindClient))
- }
+ nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
+ tracer.WithSpanKind(tracer.SpanKindClient),
+ tracer.WithSpanLabels(
+ "rpc.service", req.Service(),
+ "rpc.method", req.Method(),
+ "rpc.flavor", "rpc",
+ "rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
+ "rpc.call_type", "stream",
+ ),
+ )
defer sp.Finish()
- stream, err := ot.Client.Stream(ctx, req, opts...)
+ stream, err := ot.Client.Stream(nctx, req, opts...)
for _, o := range ot.opts.ClientStreamObservers {
- o(ctx, req, opts, stream, sp, err)
+ o(nctx, req, opts, stream, sp, err)
}
return stream, err
}
func (ot *tWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error {
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindProducer))
- }
+ nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" publish", tracer.WithSpanKind(tracer.SpanKindProducer))
defer sp.Finish()
- err := ot.Client.Publish(ctx, msg, opts...)
+ sp.AddLabels("messaging.destination.name", msg.Topic())
+ sp.AddLabels("messaging.operation", "publish")
+ err := ot.Client.Publish(nctx, msg, opts...)
for _, o := range ot.opts.ClientPublishObservers {
- o(ctx, msg, opts, sp, err)
+ o(nctx, msg, opts, sp, err)
}
return err
@ -303,32 +294,41 @@ func (ot *tWrapper) ServerHandler(ctx context.Context, req server.Request, rsp i
}
}
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindServer))
+ callType := "unary"
+ if req.Stream() {
+ callType = "stream"
}
+ nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-server", req.Service(), req.Method()),
+ tracer.WithSpanKind(tracer.SpanKindServer),
+ tracer.WithSpanLabels(
+ "rpc.service", req.Service(),
+ "rpc.method", req.Method(),
+ "rpc.flavor", "rpc",
+ "rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
+ "rpc.call_type", callType,
+ ),
+ )
defer sp.Finish()
- err := ot.serverHandler(ctx, req, rsp)
+ err := ot.serverHandler(nctx, req, rsp)
for _, o := range ot.opts.ServerHandlerObservers {
- o(ctx, req, rsp, sp, err)
+ o(nctx, req, rsp, sp, err)
}
return err
}
func (ot *tWrapper) ServerSubscriber(ctx context.Context, msg server.Message) error {
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindConsumer))
- }
+ nctx, sp := ot.opts.Tracer.Start(ctx, msg.Topic()+" process", tracer.WithSpanKind(tracer.SpanKindConsumer))
defer sp.Finish()
- err := ot.serverSubscriber(ctx, msg)
+ sp.AddLabels("messaging.operation", "process")
+ sp.AddLabels("messaging.source.name", msg.Topic())
+ err := ot.serverSubscriber(nctx, msg)
for _, o := range ot.opts.ServerSubscriberObservers {
- o(ctx, msg, sp, err)
+ o(nctx, msg, sp, err)
}
return err
@ -366,16 +366,23 @@ func (ot *tWrapper) ClientCallFunc(ctx context.Context, addr string, req client.
}
}
- sp, ok := tracer.SpanFromContext(ctx)
- if !ok {
- ctx, sp = ot.opts.Tracer.Start(ctx, "", tracer.WithSpanKind(tracer.SpanKindClient))
- }
+ nctx, sp := ot.opts.Tracer.Start(ctx, fmt.Sprintf("%s.%s rpc-client", req.Service(), req.Method()),
+ tracer.WithSpanKind(tracer.SpanKindClient),
+ tracer.WithSpanLabels(
+ "rpc.service", req.Service(),
+ "rpc.method", req.Method(),
+ "rpc.flavor", "rpc",
+ "rpc.call", "/"+req.Service()+"/"+req.Endpoint(),
+ "rpc.call_type", "unary",
+ ),
+ )
defer sp.Finish()
- err := ot.clientCallFunc(ctx, addr, req, rsp, opts)
+ err := ot.clientCallFunc(nctx, addr, req, rsp, opts)
for _, o := range ot.opts.ClientCallFuncObservers {
- o(ctx, addr, req, rsp, opts, sp, err)
+ o(nctx, addr, req, rsp, opts, sp, err)
}
return err

util/grpc/tracer.go (new file, 271 lines)

@ -0,0 +1,271 @@
package grpc_util
import (
"context"
"net"
"strconv"
"strings"
"sync/atomic"
"time"
"go.unistack.org/micro/v3/tracer"
grpc_codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
type gRPCContextKey struct{}
type gRPCContext struct {
messagesReceived int64
messagesSent int64
}
type Options struct {
Tracer tracer.Tracer
}
type Option func(*Options)
func Tracer(tr tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = tr
}
}
// NewServerHandler creates a stats.Handler for gRPC server.
func NewServerHandler(opts ...Option) stats.Handler {
options := Options{Tracer: tracer.DefaultTracer}
for _, o := range opts {
o(&options)
}
h := &serverHandler{
opts: options,
}
return h
}
type serverHandler struct {
opts Options
}
// TagRPC can attach some information to the given context.
func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
name, attrs := parseFullMethod(info.FullMethodName)
attrs = append(attrs, "rpc.system", "grpc")
ctx, _ = h.opts.Tracer.Start(
ctx,
name,
tracer.WithSpanKind(tracer.SpanKindServer),
tracer.WithSpanLabels(attrs...),
)
gctx := gRPCContext{}
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
// HandleRPC processes the RPC stats.
func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
handleRPC(ctx, rs)
}
// TagConn can attach some information to the given context.
func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
if span, ok := tracer.SpanFromContext(ctx); ok {
attrs := peerAttr(peerFromCtx(ctx))
span.AddLabels(attrs...)
}
return ctx
}
// HandleConn processes the Conn stats.
func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
}
type clientHandler struct {
opts Options
}
// NewClientHandler creates a stats.Handler for gRPC client.
func NewClientHandler(opts ...Option) stats.Handler {
options := Options{Tracer: tracer.DefaultTracer}
for _, o := range opts {
o(&options)
}
h := &clientHandler{
opts: options,
}
return h
}
// TagRPC can attach some information to the given context.
func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
name, attrs := parseFullMethod(info.FullMethodName)
attrs = append(attrs, "rpc.system", "grpc", "rpc.flavor", "grpc", "rpc.call", info.FullMethodName)
ctx, _ = h.opts.Tracer.Start(
ctx,
name,
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels(attrs...),
)
gctx := gRPCContext{}
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
// HandleRPC processes the RPC stats.
func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
handleRPC(ctx, rs)
}
// TagConn can attach some information to the given context.
func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
// TODO
if span, ok := tracer.SpanFromContext(ctx); ok {
attrs := peerAttr(cti.RemoteAddr.String())
span.AddLabels(attrs...)
}
return ctx
}
// HandleConn processes the Conn stats.
func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
// no-op
}
func handleRPC(ctx context.Context, rs stats.RPCStats) {
span, ok := tracer.SpanFromContext(ctx)
gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
var messageID int64
if rs.IsClient() {
span.AddLabels("span.kind", "client")
} else {
span.AddLabels("span.kind", "server")
}
switch rs := rs.(type) {
case *stats.Begin:
if rs.IsClientStream || rs.IsServerStream {
span.AddLabels("rpc.call_type", "stream")
} else {
span.AddLabels("rpc.call_type", "unary")
}
span.AddEvent("message",
tracer.WithEventLabels(
"message.begin_time", rs.BeginTime.Format(time.RFC3339),
),
)
case *stats.InPayload:
if gctx != nil {
messageID = atomic.AddInt64(&gctx.messagesReceived, 1)
}
if ok {
span.AddEvent("message",
tracer.WithEventLabels(
"message.recv_time", rs.RecvTime.Format(time.RFC3339),
"message.type", "RECEIVED",
"message.id", messageID,
"message.compressed_size", rs.CompressedLength,
"message.uncompressed_size", rs.Length,
),
)
}
case *stats.OutPayload:
if gctx != nil {
messageID = atomic.AddInt64(&gctx.messagesSent, 1)
}
if ok {
span.AddEvent("message",
tracer.WithEventLabels(
"message.sent_time", rs.SentTime.Format(time.RFC3339),
"message.type", "SENT",
"message.id", messageID,
"message.compressed_size", rs.CompressedLength,
"message.uncompressed_size", rs.Length,
),
)
}
case *stats.End:
if ok {
span.AddEvent("message",
tracer.WithEventLabels(
"message.begin_time", rs.BeginTime.Format(time.RFC3339),
"message.end_time", rs.EndTime.Format(time.RFC3339),
),
)
if rs.Error != nil {
s, _ := status.FromError(rs.Error)
span.SetStatus(tracer.SpanStatusError, s.Message())
span.AddLabels("rpc.grpc.status_code", s.Code())
} else {
span.AddLabels("rpc.grpc.status_code", grpc_codes.OK)
}
span.Finish()
}
default:
return
}
}
func parseFullMethod(fullMethod string) (string, []interface{}) {
if !strings.HasPrefix(fullMethod, "/") {
// Invalid format, does not follow `/package.service/method`.
return fullMethod, nil
}
name := fullMethod[1:]
pos := strings.LastIndex(name, "/")
if pos < 0 {
// Invalid format, does not follow `/package.service/method`.
return name, nil
}
service, method := name[:pos], name[pos+1:]
var attrs []interface{}
if service != "" {
attrs = append(attrs, "rpc.service", service)
}
if method != "" {
attrs = append(attrs, "rpc.method", method)
}
return name, attrs
}
func peerAttr(addr string) []interface{} {
host, p, err := net.SplitHostPort(addr)
if err != nil {
return nil
}
if host == "" {
host = "127.0.0.1"
}
port, err := strconv.Atoi(p)
if err != nil {
return nil
}
var attr []interface{}
if ip := net.ParseIP(host); ip != nil {
attr = []interface{}{
"net.sock.peer.addr", host,
"net.sock.peer.port", port,
}
} else {
attr = []interface{}{
"net.peer.name", host,
"net.peer.port", port,
}
}
return attr
}
func peerFromCtx(ctx context.Context) string {
p, ok := peer.FromContext(ctx)
if !ok {
return ""
}
return p.Addr.String()
}
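A wiring sketch for the new stats handlers. grpc.StatsHandler and grpc.WithStatsHandler are standard grpc-go options; the grpcutil import path is inferred from the file location:

package main

import (
	"log"

	grpcutil "go.unistack.org/micro/v3/util/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Server side: TagRPC opens a server-kind span per RPC and HandleRPC
	// attaches message events to it.
	srv := grpc.NewServer(grpc.StatsHandler(grpcutil.NewServerHandler()))
	defer srv.Stop()

	// Client side: the same idea with client-kind spans.
	conn, err := grpc.Dial("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(grpcutil.NewClientHandler()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}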

util/http/clienttracer.go (new file, 253 lines)

@ -0,0 +1,253 @@
//
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"context"
"crypto/tls"
"net/http/httptrace"
"net/textproto"
"strings"
"sync"
"go.unistack.org/micro/v3/tracer"
)
const (
httpStatus = "http.status"
httpHeaderMIME = "http.mime"
httpRemoteAddr = "http.remote"
httpLocalAddr = "http.local"
httpHost = "http.host"
)
var hookMap = map[string]string{
"http.dns": "http.getconn",
"http.connect": "http.getconn",
"http.tls": "http.getconn",
}
func parentHook(hook string) string {
if strings.HasPrefix(hook, "http.connect") {
return hookMap["http.connect"]
}
return hookMap[hook]
}
type clientTracer struct {
context.Context
tr tracer.Tracer
activeHooks map[string]context.Context
root tracer.Span
mtx sync.Mutex
}
func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace {
ct := &clientTracer{
Context: ctx,
activeHooks: make(map[string]context.Context),
tr: tr,
}
return &httptrace.ClientTrace{
GetConn: ct.getConn,
GotConn: ct.gotConn,
PutIdleConn: ct.putIdleConn,
GotFirstResponseByte: ct.gotFirstResponseByte,
Got100Continue: ct.got100Continue,
Got1xxResponse: ct.got1xxResponse,
DNSStart: ct.dnsStart,
DNSDone: ct.dnsDone,
ConnectStart: ct.connectStart,
ConnectDone: ct.connectDone,
TLSHandshakeStart: ct.tlsHandshakeStart,
TLSHandshakeDone: ct.tlsHandshakeDone,
WroteHeaderField: ct.wroteHeaderField,
WroteHeaders: ct.wroteHeaders,
Wait100Continue: ct.wait100Continue,
WroteRequest: ct.wroteRequest,
}
}
func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
if hookCtx, found := ct.activeHooks[hook]; !found {
var sp tracer.Span
ct.activeHooks[hook], sp = ct.tr.Start(ct.getParentContext(hook), spanName, tracer.WithSpanLabels(attrs...), tracer.WithSpanKind(tracer.SpanKindClient))
if ct.root == nil {
ct.root = sp
}
} else {
// end was called before start finished, add the start attributes and end the span here
if span, ok := tracer.SpanFromContext(hookCtx); ok {
span.AddLabels(attrs...)
span.Finish()
}
delete(ct.activeHooks, hook)
}
}
func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif
if span, ok := tracer.SpanFromContext(ctx); ok {
if err != nil {
span.SetStatus(tracer.SpanStatusError, err.Error())
}
span.AddLabels(attrs...)
span.Finish()
}
delete(ct.activeHooks, hook)
} else {
// start is not finished before end is called.
// Start a span here with the ending attributes that will be finished when start finishes.
// Yes, it's backwards. v0v
ctx, span := ct.tr.Start(ct.getParentContext(hook), hook, tracer.WithSpanLabels(attrs...), tracer.WithSpanKind(tracer.SpanKindClient))
if err != nil {
span.SetStatus(tracer.SpanStatusError, err.Error())
}
ct.activeHooks[hook] = ctx
}
}
func (ct *clientTracer) getParentContext(hook string) context.Context {
ctx, ok := ct.activeHooks[parentHook(hook)]
if !ok {
return ct.Context
}
return ctx
}
func (ct *clientTracer) span(hook string) (tracer.Span, bool) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
if ctx, ok := ct.activeHooks[hook]; ok {
return tracer.SpanFromContext(ctx)
}
return nil, false
}
func (ct *clientTracer) getConn(host string) {
ct.start("http.getconn", "http.getconn", httpHost, host)
}
func (ct *clientTracer) gotConn(info httptrace.GotConnInfo) {
ct.end("http.getconn",
nil,
httpRemoteAddr, info.Conn.RemoteAddr().String(),
httpLocalAddr, info.Conn.LocalAddr().String(),
)
}
func (ct *clientTracer) putIdleConn(err error) {
ct.end("http.receive", err)
}
func (ct *clientTracer) gotFirstResponseByte() {
ct.start("http.receive", "http.receive")
}
func (ct *clientTracer) dnsStart(info httptrace.DNSStartInfo) {
ct.start("http.dns", "http.dns", httpHost, info.Host)
}
func (ct *clientTracer) dnsDone(info httptrace.DNSDoneInfo) {
ct.end("http.dns", info.Err)
}
func (ct *clientTracer) connectStart(network, addr string) {
_ = network
ct.start("http.connect."+addr, "http.connect", httpRemoteAddr, addr)
}
func (ct *clientTracer) connectDone(network, addr string, err error) {
_ = network
ct.end("http.connect."+addr, err)
}
func (ct *clientTracer) tlsHandshakeStart() {
ct.start("http.tls", "http.tls")
}
func (ct *clientTracer) tlsHandshakeDone(_ tls.ConnectionState, err error) {
ct.end("http.tls", err)
}
func (ct *clientTracer) wroteHeaderField(k string, v []string) {
if sp, ok := ct.span("http.headers"); !ok || sp == nil {
ct.start("http.headers", "http.headers")
}
ct.root.AddLabels("http."+strings.ToLower(k), sliceToString(v))
}
func (ct *clientTracer) wroteHeaders() {
ct.start("http.send", "http.send")
}
func (ct *clientTracer) wroteRequest(info httptrace.WroteRequestInfo) {
if info.Err != nil {
ct.root.SetStatus(tracer.SpanStatusError, info.Err.Error())
}
ct.end("http.send", info.Err)
}
func (ct *clientTracer) got100Continue() {
if sp, ok := ct.span("http.receive"); ok && sp != nil {
sp.AddEvent("GOT 100 - Continue")
}
}
func (ct *clientTracer) wait100Continue() {
if sp, ok := ct.span("http.receive"); ok && sp != nil {
sp.AddEvent("GOT 100 - Wait")
}
}
func (ct *clientTracer) got1xxResponse(code int, header textproto.MIMEHeader) error {
if sp, ok := ct.span("http.receive"); ok && sp != nil {
sp.AddEvent("GOT 1xx",
tracer.WithEventLabels(
httpStatus, code,
httpHeaderMIME, sm2s(header),
),
)
}
return nil
}
func sliceToString(value []string) string {
if len(value) == 0 {
return "undefined"
}
return strings.Join(value, ",")
}
func sm2s(value map[string][]string) string {
var buf strings.Builder
for k, v := range value {
if buf.Len() != 0 {
buf.WriteString(",")
}
buf.WriteString(k)
buf.WriteString("=")
buf.WriteString(sliceToString(v))
}
return buf.String()
}
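A sketch of attaching the client tracer to an outgoing request via net/http/httptrace (the import path and alias for the package above are assumptions; each hook then shows up as a child span per phase, parented according to hookMap):

package main

import (
	"context"
	"log"
	"net/http"
	"net/http/httptrace"

	"go.unistack.org/micro/v3/tracer"
	microhttp "go.unistack.org/micro/v3/util/http"
)

func main() {
	ctx := context.TODO()
	ct := microhttp.NewClientTrace(ctx, tracer.DefaultTracer)

	// The dns, connect, tls, send and receive hooks each become their own span.
	req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, ct), http.MethodGet, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer rsp.Body.Close()
}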

View File

@ -5,8 +5,32 @@ import (
"testing" "testing"
) )
func TestTrieBackwards(t *testing.T) { func TestTrieRPC(t *testing.T) {
_ = &Trie{} var err error
type handler struct {
name string
}
tr := NewTrie()
if err = tr.Insert([]string{"helloworld"}, "Call", &handler{name: "helloworld.Call"}); err != nil {
t.Fatal(err)
}
if err = tr.Insert([]string{"helloworld"}, "Stream", &handler{name: "helloworld.Stream"}); err != nil {
t.Fatal(err)
}
h, _, err := tr.Search("helloworld", "Call")
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if h.(*handler).name != "helloworld.Call" {
t.Fatalf("invalid handler %v", h)
}
h, _, err = tr.Search("helloworld", "Stream")
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if h.(*handler).name != "helloworld.Stream" {
t.Fatalf("invalid handler %v", h)
}
}
func TestTrieWildcardPathPrefix(t *testing.T) {

View File

@ -508,3 +508,74 @@ func FieldName(name string) string {
return string(newstr)
}
func Equal(src interface{}, dst interface{}, excptFields ...string) bool {
srcVal := reflect.ValueOf(src)
dstVal := reflect.ValueOf(dst)
switch srcVal.Kind() {
case reflect.Array, reflect.Slice:
for i := 0; i < srcVal.Len(); i++ {
e := srcVal.Index(i).Interface()
a := dstVal.Index(i).Interface()
if !Equal(e, a, excptFields...) {
return false
}
}
return true
case reflect.Map:
for i := 0; i < len(srcVal.MapKeys()); i++ {
key := srcVal.MapKeys()[i]
keyStr := fmt.Sprintf("%v", key.Interface())
if stringContains(keyStr, excptFields) {
continue
}
s := srcVal.MapIndex(key)
d := dstVal.MapIndex(key)
if !Equal(s.Interface(), d.Interface(), excptFields...) {
return false
}
}
return true
case reflect.Struct, reflect.Interface:
for i := 0; i < srcVal.NumField(); i++ {
typeField := srcVal.Type().Field(i)
if stringContains(typeField.Name, excptFields) {
continue
}
s := srcVal.Field(i)
d := dstVal.FieldByName(typeField.Name)
if s.CanInterface() && d.CanInterface() {
if !Equal(s.Interface(), d.Interface(), excptFields...) {
return false
}
} else {
return false
}
}
return true
case reflect.Ptr:
if srcVal.IsNil() {
return dstVal.IsNil()
}
s := srcVal.Elem()
d := reflect.Indirect(dstVal)
if s.CanInterface() && d.CanInterface() {
return Equal(s.Interface(), d.Interface(), excptFields...)
}
return false
case reflect.String, reflect.Int, reflect.Int64, reflect.Float32, reflect.Float64, reflect.Bool:
return src == dst
default:
return srcVal.Interface() == dstVal.Interface()
}
}
func stringContains(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
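A small illustration of the exclusion behaviour, here on the map branch (the import alias is an assumption; the struct case is covered by the test below):

package main

import (
	"fmt"

	rutil "go.unistack.org/micro/v3/util/reflect"
)

func main() {
	src := map[string]interface{}{"id": int64(1), "updated_at": "2024-03-01"}
	dst := map[string]interface{}{"id": int64(1), "updated_at": "2024-03-06"}

	// "updated_at" is listed in excptFields, so only "id" is compared.
	fmt.Println(rutil.Equal(src, dst, "updated_at")) // true
}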

View File

@ -133,3 +133,16 @@ func TestMergeNested(t *testing.T) {
t.Fatalf("merge error: %#+v", dst.Nested) t.Fatalf("merge error: %#+v", dst.Nested)
} }
} }
func TestEqual(t *testing.T) {
type tstr struct {
Key1 string
Key2 string
}
src := &tstr{Key1: "val1", Key2: "micro:generate"}
dst := &tstr{Key1: "val1", Key2: "val2"}
if !Equal(src, dst, "Key2") {
t.Fatal("invalid Equal test")
}
}

util/test/test.go (new file, 505 lines)

@ -0,0 +1,505 @@
package test
import (
"bufio"
"bytes"
"context"
"database/sql/driver"
"encoding/csv"
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"time"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/errors"
"go.unistack.org/micro/v3/metadata"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
var ErrUnknownContentType = fmt.Errorf("unknown content type")
type Extension struct {
Ext []string
}
var (
// ExtToTypes map file extension to content type
ExtToTypes = map[string][]string{
"json": {"application/json", "application/grpc+json"},
"yaml": {"application/yaml", "application/yml", "text/yaml", "text/yml"},
"yml": {"application/yaml", "application/yml", "text/yaml", "text/yml"},
"proto": {"application/grpc", "application/grpc+proto", "application/proto"},
}
// DefaultExts specifies default file extensions to load data
DefaultExts = []string{"csv", "json", "yaml", "yml", "proto"}
// Codecs map to detect codec for test file or request content type
Codecs map[string]codec.Codec
// ResponseCompareFunc used to compare actual response with test case data
ResponseCompareFunc = func(expectRsp []byte, testRsp interface{}, expectCodec codec.Codec, testCodec codec.Codec) error {
var err error
expectMap := make(map[string]interface{})
if err = expectCodec.Unmarshal(expectRsp, &expectMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
testMap := make(map[string]interface{})
switch v := testRsp.(type) {
case *codec.Frame:
if err = testCodec.Unmarshal(v.Data, &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
case *errors.Error:
if err = expectCodec.Unmarshal([]byte(v.Error()), &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
case error:
st, ok := status.FromError(v)
if !ok {
return v
}
me := errors.Parse(st.Message())
if me.Code != 0 {
if err = expectCodec.Unmarshal([]byte(me.Error()), &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
break
}
for _, se := range st.Details() {
switch ne := se.(type) {
case proto.Message:
buf, err := testCodec.Marshal(ne)
if err != nil {
return fmt.Errorf("failed to marshal err: %w", err)
}
if err = testCodec.Unmarshal(buf, &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
default:
return st.Err()
}
}
case interface{ GRPCStatus() *status.Status }:
st := v.GRPCStatus()
me := errors.Parse(st.Message())
if me.Code != 0 {
if err = expectCodec.Unmarshal([]byte(me.Error()), &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
break
}
case *status.Status:
me := errors.Parse(v.Message())
if me.Code != 0 {
if err = expectCodec.Unmarshal([]byte(me.Error()), &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
break
}
for _, se := range v.Details() {
switch ne := se.(type) {
case proto.Message:
buf, err := testCodec.Marshal(ne)
if err != nil {
return fmt.Errorf("failed to marshal err: %w", err)
}
if err = testCodec.Unmarshal(buf, &testMap); err != nil {
return fmt.Errorf("failed to unmarshal err: %w", err)
}
default:
return v.Err()
}
}
}
if !reflect.DeepEqual(expectMap, testMap) {
return fmt.Errorf("test: %s != rsp: %s", expectMap, testMap)
}
return nil
}
)
func FromCSVString(columns []*sqlmock.Column, rows *sqlmock.Rows, s string) *sqlmock.Rows {
res := strings.NewReader(strings.TrimSpace(s))
csvReader := csv.NewReader(res)
for {
res, err := csvReader.Read()
if err != nil || res == nil {
break
}
var row []driver.Value
for i, v := range res {
item := CSVColumnParser(strings.TrimSpace(v))
if null, nullOk := columns[i].IsNullable(); null && nullOk && item == nil {
row = append(row, nil)
} else {
row = append(row, item)
}
}
rows = rows.AddRow(row...)
}
return rows
}
func CSVColumnParser(s string) []byte {
switch {
case strings.ToLower(s) == "null":
return nil
case s == "":
return nil
}
return []byte(s)
}
func NewResponseFromFile(rspfile string) (*codec.Frame, error) {
rspbuf, err := os.ReadFile(rspfile)
if err != nil {
return nil, err
}
return &codec.Frame{Data: rspbuf}, nil
}
func getCodec(codecs map[string]codec.Codec, ext string) (codec.Codec, error) {
var c codec.Codec
if cts, ok := ExtToTypes[ext]; ok {
for _, t := range cts {
if c, ok = codecs[t]; ok {
return c, nil
}
}
}
return nil, ErrUnknownContentType
}
func getContentType(codecs map[string]codec.Codec, ext string) (string, error) {
if cts, ok := ExtToTypes[ext]; ok {
for _, t := range cts {
if _, ok = codecs[t]; ok {
return t, nil
}
}
}
return "", ErrUnknownContentType
}
func getExt(name string) string {
ext := filepath.Ext(name)
if len(ext) > 0 && ext[0] == '.' {
ext = ext[1:]
}
return ext
}
func getNameWithoutExt(name string) string {
return strings.TrimSuffix(name, filepath.Ext(name))
}
func NewRequestFromFile(c client.Client, reqfile string) (client.Request, error) {
reqbuf, err := os.ReadFile(reqfile)
if err != nil {
return nil, err
}
endpoint := path.Base(path.Dir(reqfile))
if idx := strings.Index(endpoint, "_"); idx > 0 {
endpoint = endpoint[idx+1:]
}
ext := getExt(reqfile)
ct, err := getContentType(c.Options().Codecs, ext)
if err != nil {
return nil, err
}
req := c.NewRequest("test", endpoint, &codec.Frame{Data: reqbuf}, client.RequestContentType(ct))
return req, nil
}
func SQLFromFile(m sqlmock.Sqlmock, name string) error {
fp, err := os.Open(name)
if err != nil {
return err
}
defer fp.Close()
return SQLFromReader(m, fp)
}
func SQLFromBytes(m sqlmock.Sqlmock, buf []byte) error {
return SQLFromReader(m, bytes.NewReader(buf))
}
func SQLFromString(m sqlmock.Sqlmock, buf string) error {
return SQLFromReader(m, strings.NewReader(buf))
}
func SQLFromReader(m sqlmock.Sqlmock, r io.Reader) error {
var rows *sqlmock.Rows
var exp *sqlmock.ExpectedQuery
var columns []*sqlmock.Column
br := bufio.NewReader(r)
for {
s, err := br.ReadString('\n')
if err != nil && err != io.EOF {
return err
} else if err == io.EOF && len(s) == 0 {
if rows != nil && exp != nil {
exp.WillReturnRows(rows)
}
return nil
}
if s[0] != '#' {
r := csv.NewReader(strings.NewReader(s))
r.Comma = ','
var records [][]string
records, err = r.ReadAll()
if err != nil {
return err
}
if rows == nil && len(columns) > 0 {
rows = m.NewRowsWithColumnDefinition(columns...)
} else {
for idx := 0; idx < len(records); idx++ {
if len(columns) == 0 {
return fmt.Errorf("csv file not valid, does not have %q line", "# columns ")
}
rows = FromCSVString(columns, rows, strings.Join(records[idx], ","))
}
}
continue
}
if rows != nil {
exp.WillReturnRows(rows)
rows = nil
}
switch {
case strings.HasPrefix(strings.ToLower(s[2:]), "columns"):
for _, field := range strings.Split(s[2+len("columns")+1:], ",") {
args := strings.Split(field, "|")
column := sqlmock.NewColumn(args[0]).Nullable(false)
if len(args) > 1 {
for _, arg := range args {
switch arg {
case "BOOLEAN", "BOOL":
column = column.OfType("BOOL", false)
case "NUMBER", "DECIMAL":
column = column.OfType("DECIMAL", float64(0.0)).WithPrecisionAndScale(10, 4)
case "VARCHAR":
column = column.OfType("VARCHAR", nil)
case "NULL":
column = column.Nullable(true)
}
}
}
columns = append(columns, column)
}
case strings.HasPrefix(strings.ToLower(s[2:]), "begin"):
m.ExpectBegin()
case strings.HasPrefix(strings.ToLower(s[2:]), "commit"):
m.ExpectCommit()
case strings.HasPrefix(strings.ToLower(s[2:]), "rollback"):
m.ExpectRollback()
case strings.HasPrefix(strings.ToLower(s[2:]), "exec "):
m.ExpectExec(s[2+len("exec "):])
case strings.HasPrefix(strings.ToLower(s[2:]), "query "):
exp = m.ExpectQuery(s[2+len("query "):])
}
}
}
func Run(ctx context.Context, c client.Client, m sqlmock.Sqlmock, dir string, exts []string) error {
tcases, err := GetCases(dir, exts)
if err != nil {
return err
}
g, gctx := errgroup.WithContext(ctx)
if !strings.Contains(dir, "parallel") {
g.SetLimit(1)
}
for _, tcase := range tcases {
for _, dbfile := range tcase.dbfiles {
if err = SQLFromFile(m, dbfile); err != nil {
return err
}
}
tc := tcase
g.Go(func() error {
var xrid string
var gerr error
treq, err := NewRequestFromFile(c, tc.reqfile)
if err != nil {
gerr = fmt.Errorf("failed to read request from file %s err: %w", tc.reqfile, err)
return gerr
}
xrid = fmt.Sprintf("%s-%d", treq.Endpoint(), time.Now().Unix())
defer func() {
if gerr == nil {
fmt.Printf("test %s xrid: %s status: success\n", filepath.Dir(tc.reqfile), xrid)
} else {
fmt.Printf("test %s xrid: %s status: failure error: %v\n", filepath.Dir(tc.reqfile), xrid, err)
}
}()
data := &codec.Frame{}
md := metadata.New(1)
md.Set("X-Request-Id", xrid)
cerr := c.Call(metadata.NewOutgoingContext(gctx, md), treq, data, client.WithContentType(treq.ContentType()))
var rspfile string
if tc.errfile != "" {
rspfile = tc.errfile
} else if tc.rspfile != "" {
rspfile = tc.rspfile
} else {
gerr = fmt.Errorf("errfile and rspfile is empty")
return gerr
}
expectRsp, err := NewResponseFromFile(rspfile)
if err != nil {
gerr = fmt.Errorf("failed to read response from file %s err: %w", rspfile, err)
return gerr
}
testCodec, err := getCodec(Codecs, getExt(tc.reqfile))
if err != nil {
gerr = fmt.Errorf("failed to get response file codec err: %w", err)
return gerr
}
expectCodec, err := getCodec(Codecs, getExt(rspfile))
if err != nil {
gerr = fmt.Errorf("failed to get response file codec err: %w", err)
return gerr
}
if cerr == nil && tc.errfile != "" {
gerr = fmt.Errorf("expected err %s not happened", expectRsp.Data)
return gerr
} else if cerr != nil && tc.errfile != "" {
if err = ResponseCompareFunc(expectRsp.Data, cerr, expectCodec, testCodec); err != nil {
gerr = err
return gerr
}
} else if cerr != nil && tc.errfile == "" {
gerr = cerr
return gerr
} else if cerr == nil && tc.errfile == "" {
if err = ResponseCompareFunc(expectRsp.Data, data, expectCodec, testCodec); err != nil {
gerr = err
return gerr
}
}
/*
cf, err := getCodec(c.Options().Codecs, getExt(tc.rspfile))
if err != nil {
return err
}
*/
return nil
})
}
return g.Wait()
}
type Case struct {
dbfiles []string
reqfile string
rspfile string
errfile string
}
func GetCases(dir string, exts []string) ([]Case, error) {
var tcases []Case
entries, err := os.ReadDir(dir)
if len(entries) == 0 && err != nil {
return tcases, err
}
if exts == nil {
exts = DefaultExts
}
var dirs []string
var dbfiles []string
var reqfile, rspfile, errfile string
for _, entry := range entries {
if entry.IsDir() {
dirs = append(dirs, filepath.Join(dir, entry.Name()))
continue
}
if info, err := entry.Info(); err != nil {
return tcases, err
} else if !info.Mode().IsRegular() {
continue
}
for _, ext := range exts {
if getExt(entry.Name()) == ext {
name := getNameWithoutExt(entry.Name())
switch {
case strings.HasSuffix(name, "_db"):
dbfiles = append(dbfiles, filepath.Join(dir, entry.Name()))
case strings.HasSuffix(name, "_req"):
reqfile = filepath.Join(dir, entry.Name())
case strings.HasSuffix(name, "_rsp"):
rspfile = filepath.Join(dir, entry.Name())
case strings.HasSuffix(name, "_err"):
errfile = filepath.Join(dir, entry.Name())
}
}
}
}
if reqfile != "" && (rspfile != "" || errfile != "") {
tcases = append(tcases, Case{dbfiles: dbfiles, reqfile: reqfile, rspfile: rspfile, errfile: errfile})
}
for _, dir = range dirs {
ntcases, err := GetCases(dir, exts)
if len(ntcases) == 0 && err != nil {
return tcases, err
} else if len(ntcases) == 0 {
continue
}
tcases = append(tcases, ntcases...)
}
return tcases, nil
}
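GetCases walks a directory tree and, in each directory that contains a *_req file plus a *_rsp or *_err file, builds one Case; any *_db files in the same directory are collected as SQL fixtures. A layout like the one below would therefore yield a single case. Call_db.csv is the fixture used by the tests in this change; the other Call_* names and extensions are only illustrative:

testdata/
  result/
    01_firstcase/
      Call_db.csv    <- *_db: SQL fixture replayed into sqlmock before the call
      Call_req.json  <- *_req: request body sent to the endpoint
      Call_rsp.json  <- *_rsp: expected response (use a *_err file instead to expect an error)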

util/test/test_test.go

@ -0,0 +1,72 @@
package test
import (
"context"
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
func Test_SQLFromFile(t *testing.T) {
ctx := context.TODO()
db, c, err := sqlmock.New()
if err != nil {
t.Fatal(err)
}
defer db.Close()
if err = SQLFromFile(c, "testdata/result/01_firstcase/Call_db.csv"); err != nil {
t.Fatal(err)
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
t.Fatal(err)
}
rows, err := tx.QueryContext(ctx, "select * from test;")
if err != nil {
t.Fatal(err)
}
for rows.Next() {
var id int64
var name string
err = rows.Scan(&id, &name)
if err != nil {
t.Fatal(err)
}
if id != 1 || name != "test" {
t.Fatalf("invalid rows %v %v", id, name)
}
}
if err = rows.Close(); err != nil {
t.Fatal(err)
}
if err = rows.Err(); err != nil {
t.Fatal(err)
}
if err = tx.Commit(); err != nil {
t.Fatal(err)
}
if err = c.ExpectationsWereMet(); err != nil {
t.Fatal(err)
}
}
func Test_GetCases(t *testing.T) {
files, err := GetCases("testdata/", nil)
if err != nil {
t.Fatal(err)
}
if len(files) == 0 {
t.Fatalf("no files matching")
}
if n := len(files); n != 1 {
t.Fatalf("invalid number of test cases %d", n)
}
}
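For reference, a minimal sketch of how Run could be driven from a service's own Go test. The sqlmock wiring and the Run signature come from this package; the import paths and the way the client is obtained are assumptions and should be adapted to the service under test.

package example

import (
    "context"
    "testing"

    "github.com/DATA-DOG/go-sqlmock"

    "go.unistack.org/micro/v3/client"
    "go.unistack.org/micro/v3/util/test"
)

func TestService(t *testing.T) {
    // sqlmock stands in for the real database; Run replays the *_db fixtures into it.
    db, mock, err := sqlmock.New()
    if err != nil {
        t.Fatal(err)
    }
    defer db.Close()

    // Assumption: c is a client.Client already wired to the service under test
    // (for example via the service's test harness); the nil value here is only a placeholder.
    var c client.Client

    // nil exts falls back to DefaultExts; a directory name containing "parallel"
    // lifts the single-goroutine limit in Run.
    if err := test.Run(context.TODO(), c, mock, "testdata/", nil); err != nil {
        t.Fatal(err)
    }
}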

util/test/testdata/result/01_firstcase/Call_db.csv

@ -0,0 +1,6 @@
# begin
# query select \* from test;
# columns id|VARCHAR,name|VARCHAR
id,name
1,test
# commit
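Judging from Test_SQLFromFile above, a *_db fixture like this is replayed into sqlmock as a transaction expectation. A hand-written equivalent would look roughly like the sketch below; SQLFromFile's parser is not part of this diff, so the exact mapping of the # directives is an assumption.

package example

import "github.com/DATA-DOG/go-sqlmock"

// expectFixture sets up by hand what the fixture above appears to describe:
// "# begin" / "# commit" become transaction expectations, "# query" the expected
// statement, and "# columns" plus the data rows the returned result set.
func expectFixture(mock sqlmock.Sqlmock) {
    mock.ExpectBegin()
    mock.ExpectQuery(`select \* from test;`).
        WillReturnRows(sqlmock.NewRows([]string{"id", "name"}).AddRow(1, "test"))
    mock.ExpectCommit()
}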


@ -0,0 +1 @@
{}


@ -0,0 +1 @@
{}

util/time/time.go

@ -2,7 +2,9 @@ package time

 import (
     "encoding/json"
+    "errors"
     "fmt"
+    "strconv"
     "time"
 )

@ -13,39 +15,42 @@ func ParseDuration(s string) (time.Duration, error) {
         return 0, fmt.Errorf(`time: invalid duration "` + s + `"`)
     }
-    //var sb strings.Builder
-    /*
-    for i, r := range s {
-        switch r {
-        case 'd':
-            n, err := strconv.Atoi(s[idx:i])
-            if err != nil {
-                return 0, errors.New("time: invalid duration " + s)
-            }
-            s[idx:i] = fmt.Sprintf("%d", n*24)
-        default:
-            sb.WriteRune(r)
-        }
-    }
-    */
-    var td time.Duration
-    var err error
-    switch s[len(s)-1] {
-    case 's', 'm', 'h':
-        td, err = time.ParseDuration(s)
-    case 'd':
-        if td, err = time.ParseDuration(s[:len(s)-1] + "h"); err == nil {
-            td *= 24
-        }
-    case 'y':
-        if td, err = time.ParseDuration(s[:len(s)-1] + "h"); err == nil {
-            year := time.Date(time.Now().Year(), time.December, 31, 0, 0, 0, 0, time.Local)
-            days := year.YearDay()
-            td *= 24 * time.Duration(days)
-        }
-    }
-    return td, err
+    var p int
+    var hours int
+loop:
+    for i, r := range s {
+        switch r {
+        case 's', 'm':
+            break loop
+        case 'h':
+            d, err := strconv.Atoi(s[p:i])
+            if err != nil {
+                return 0, errors.New("time: invalid duration " + s)
+            }
+            hours += d
+            p = i + 1
+        case 'd':
+            d, err := strconv.Atoi(s[p:i])
+            if err != nil {
+                return 0, errors.New("time: invalid duration " + s)
+            }
+            hours += d * 24
+            p = i + 1
+        case 'y':
+            n, err := strconv.Atoi(s[p:i])
+            if err != nil {
+                return 0, errors.New("time: invalid duration " + s)
+            }
+            var d int
+            for j := n - 1; j >= 0; j-- {
+                d += time.Date(time.Now().Year()+j, time.December, 31, 0, 0, 0, 0, time.Local).YearDay()
+            }
+            hours += d * 24
+            p = i + 1
+        }
+    }
+    return time.ParseDuration(fmt.Sprintf("%dh%s", hours, s[p:]))
 }

 func (d Duration) MarshalJSON() ([]byte, error) {

@ -62,7 +67,7 @@ func (d *Duration) UnmarshalJSON(b []byte) error {
         *d = Duration(time.Duration(value))
         return nil
     case string:
-        dv, err := time.ParseDuration(value)
+        dv, err := ParseDuration(value)
         if err != nil {
             return err
         }
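With this change ParseDuration no longer special-cases only the final unit: it scans the string, accumulates h, d (×24) and y (calendar days of each year counted from the current one) segments as whole hours, stops at the first s or m, and delegates the remainder to time.ParseDuration. A small sketch of the resulting behaviour, assuming the package's import path:

package main

import (
    "fmt"

    // Assumed import path for this util package.
    mtime "go.unistack.org/micro/v3/util/time"
)

func main() {
    // 14 days + 4 hours -> 14*24h + 4h = 340h.
    d, _ := mtime.ParseDuration("14d4h")
    fmt.Println(d) // 340h0m0s

    // A year is counted from the real calendar, so in a leap year such as
    // 2024 this yields 8784h rather than a fixed 8760h.
    d, _ = mtime.ParseDuration("1y")
    fmt.Println(d) // 8784h0m0s when the current year is a leap year
}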

util/time/time_test.go

@ -23,32 +23,39 @@ func TestUnmarshalJSON(t *testing.T) {
         TTL Duration `json:"ttl"`
     }
     v := &str{}
-    err := json.Unmarshal([]byte(`{"ttl":"10ms"}`), v)
+    var err error
+    err = json.Unmarshal([]byte(`{"ttl":"10ms"}`), v)
     if err != nil {
         t.Fatal(err)
     } else if v.TTL != 10000000 {
         t.Fatalf("invalid duration %v != 10000000", v.TTL)
     }
+    err = json.Unmarshal([]byte(`{"ttl":"1y"}`), v)
+    if err != nil {
+        t.Fatal(err)
+    } else if v.TTL != 31622400000000000 {
+        t.Fatalf("invalid duration %v != 31622400000000000", v.TTL)
+    }
 }

 func TestParseDuration(t *testing.T) {
     var td time.Duration
     var err error
-    t.Skip()
     td, err = ParseDuration("14d4h")
     if err != nil {
         t.Fatalf("ParseDuration error: %v", err)
     }
-    if td.String() != "336h0m0s" {
-        t.Fatalf("ParseDuration 14d != 336h0m0s : %s", td.String())
+    if td.String() != "340h0m0s" {
+        t.Fatalf("ParseDuration 14d != 340h0m0s : %s", td.String())
     }
     td, err = ParseDuration("1y")
     if err != nil {
         t.Fatalf("ParseDuration error: %v", err)
     }
-    if td.String() != "8760h0m0s" {
-        t.Fatalf("ParseDuration 1y != 8760h0m0s : %s", td.String())
+    if td.String() != "8784h0m0s" {
+        t.Fatalf("ParseDuration 1y != 8784h0m0s : %s", td.String())
     }
 }