Compare commits

25 Commits

Author SHA1 Message Date
1db017d966 Merge pull request 'logger/slog: fixup old format' (#291) from fixupslog into v3
Reviewed-on: #291
2024-02-08 08:44:23 +03:00
debf8cb03d logger/slog: fixup old format
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-08 08:43:53 +03:00
1dc9c1891f Merge pull request 'logger/slog: initial import' (#290) from slog into v3
Reviewed-on: #290
2024-02-08 08:18:57 +03:00
930859a537 logger/slog: initial import
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-02-08 08:17:53 +03:00
3141f1ed8b Merge pull request 'config: add conditions' (#286) from cond-config into v3
Reviewed-on: #286
2024-01-15 00:46:37 +03:00
47943cfb05 config: add conditions
Some checks failed
lint / lint (pull_request) Successful in 1m28s
pr / test (pull_request) Failing after 1m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-01-15 00:46:00 +03:00
ed4e9d54b1 Merge pull request 'client/noop: fixup md' (#285) from noopfix into v3
Reviewed-on: #285
2023-12-21 00:14:54 +03:00
b4b8583594 client/noop: fixup md
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m45s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-21 00:13:08 +03:00
fb43e8c58c Merge pull request 'client/noop: fix metadata overwrite' (#284) from noopfix into v3
Reviewed-on: #284
2023-12-21 00:07:22 +03:00
8863c10ef4 client/noop: fix metadata overwrite
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-21 00:06:56 +03:00
8058095bcc Merge pull request 'copy incoming content-type' (#283) from ct into v3
Reviewed-on: #283
2023-12-20 09:35:33 +03:00
092f5d96b1 copy incoming content-type
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m33s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-20 09:35:01 +03:00
84552513f7 Merge pull request 'fixup multiple client handling' (#280) from multiple into v3
Reviewed-on: #280
2023-11-13 08:20:52 +03:00
80a2db264e fixup multiple client handling
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m35s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-13 08:19:44 +03:00
0be09c8b3e Merge pull request 'database: add FormatDSN' (#278) from database-newv3 into v3
Reviewed-on: #278
2023-11-02 01:35:25 +03:00
047f479e1b database: add FormatDSN
Some checks failed
lint / lint (pull_request) Failing after 1m27s
pr / test (pull_request) Failing after 2m39s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-02 01:32:26 +03:00
8f757c953e Merge pull request 'database: initial import for dsn parsing' (#276) from databasev3 into v3
Reviewed-on: #276
2023-11-01 23:44:17 +03:00
5f1c673a24 database: initial import for dsn parsing
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-11-01 23:42:48 +03:00
6794ea9871 Merge pull request 'client/noop: fix MessageMetadata option' (#274) from client-noop-metadata into v3
Reviewed-on: #274
2023-10-26 03:07:12 +03:00
089e7b6812 client/noop: fix MessageMetadata option
All checks were successful
lint / lint (pull_request) Successful in 1m18s
pr / test (pull_request) Successful in 1m1s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-26 03:06:39 +03:00
1c703f0f0c Merge pull request 'errors: add IsRetrayable func' (#273) from errors into v3
Reviewed-on: #273
2023-10-25 10:24:58 +03:00
d167c8c67c cleanup
All checks were successful
lint / lint (pull_request) Successful in 1m7s
pr / test (pull_request) Successful in 1m2s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-25 02:36:52 +03:00
df4f96a2d8 errors: add IsRetrayable func
All checks were successful
lint / lint (pull_request) Successful in 1m18s
pr / test (pull_request) Successful in 1m3s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-23 02:35:10 +03:00
fac3b20bd4 Merge pull request 'util/reflect: add Equal func with ability to skip some fields' (#244) from util-reflect into v3
Reviewed-on: #244
2023-09-12 11:45:26 +03:00
7c6bd98498 util/reflect: add Equal func with ability to skip some fields
All checks were successful
pr / test (pull_request) Successful in 1m4s
lint / lint (pull_request) Successful in 1m10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-12 10:31:45 +03:00
17 changed files with 1211 additions and 256 deletions

View File

@@ -3,6 +3,7 @@ package client
import (
"context"
"fmt"
"os"
"time"
"go.unistack.org/micro/v3/broker"
@@ -485,21 +486,34 @@ func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishO
msgs := make([]*broker.Message, 0, len(ps))
// get proxy
exchange := ""
if v, ok := os.LookupEnv("MICRO_PROXY"); ok {
exchange = v
}
// get the exchange
if len(options.Exchange) > 0 {
exchange = options.Exchange
}
omd, ok := metadata.FromOutgoingContext(ctx)
if !ok {
omd = metadata.New(0)
}
for _, p := range ps {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
md = metadata.New(0)
}
md := metadata.Copy(omd)
md[metadata.HeaderContentType] = p.ContentType()
topic := p.Topic()
// get the exchange
if len(options.Exchange) > 0 {
topic = options.Exchange
if len(exchange) > 0 {
topic = exchange
}
md[metadata.HeaderTopic] = topic
iter := p.Metadata().Iterator()
var k, v string
for iter.Next(&k, &v) {
md.Set(k, v)
}
var body []byte
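
The hunk above looks up the outgoing metadata once and takes a metadata.Copy per message, so the topic and content-type set for one message no longer overwrite the shared map used by the next. A minimal sketch of that copy-per-message pattern in plain Go maps (editorial example, not part of the diff):

```go
package main

import "fmt"

// copyHeaders returns a shallow copy so per-message mutations stay local.
func copyHeaders(src map[string]string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}

func main() {
	shared := map[string]string{"Authorization": "token"}
	topics := []string{"topic-a", "topic-b"}

	perMessage := make([]map[string]string, 0, len(topics))
	for _, topic := range topics {
		md := copyHeaders(shared) // fresh copy per message, mirroring metadata.Copy(omd) above
		md["Topic"] = topic
		perMessage = append(perMessage, md)
	}
	fmt.Println(perMessage[0]["Topic"], perMessage[1]["Topic"]) // topic-a topic-b
}
```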

View File

@@ -39,6 +39,10 @@ func (c *defaultConfig) Init(opts ...Option) error {
}
func (c *defaultConfig) Load(ctx context.Context, opts ...LoadOption) error {
if c.opts.SkipLoad != nil && c.opts.SkipLoad(ctx, c) {
return nil
}
if err := DefaultBeforeLoad(ctx, c); err != nil && !c.opts.AllowFail {
return err
}
@@ -291,7 +295,11 @@ func fillValues(valueOf reflect.Value, tname string) error {
return nil
}
func (c *defaultConfig) Save(ctx context.Context, opts ...SaveOption) error {
func (c *defaultConfig) Save(ctx context.Context, _ ...SaveOption) error {
if c.opts.SkipSave != nil && c.opts.SkipSave(ctx, c) {
return nil
}
if err := DefaultBeforeSave(ctx, c); err != nil {
return err
}

View File

@@ -42,6 +42,10 @@ type Options struct {
AfterInit []func(context.Context, Config) error
// AllowFail flag to allow fail in config source
AllowFail bool
// SkipLoad skips the load step when the condition returns true
SkipLoad func(context.Context, Config) bool
// SkipSave skips the save step when the condition returns true
SkipSave func(context.Context, Config) bool
}
// Option function signature
@@ -68,9 +72,9 @@ type LoadOption func(o *LoadOptions)
// LoadOptions struct
type LoadOptions struct {
Struct interface{}
Context context.Context
Override bool
Append bool
Context context.Context
}
// NewLoadOptions create LoadOptions struct with provided opts
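
The new SkipLoad and SkipSave hooks let a config short-circuit Load or Save at runtime. A hedged usage sketch (not part of the diff): WithSkipLoad is a hypothetical helper, and the config.NewConfig constructor and Option signature are assumed from the surrounding code.

```go
package main

import (
	"context"
	"os"

	"go.unistack.org/micro/v3/config"
)

// WithSkipLoad is a hypothetical option helper; any config.Option that fills
// the new SkipLoad field behaves the same way.
func WithSkipLoad(fn func(context.Context, config.Config) bool) config.Option {
	return func(o *config.Options) {
		o.SkipLoad = fn
	}
}

func main() {
	c := config.NewConfig(
		WithSkipLoad(func(_ context.Context, _ config.Config) bool {
			// skip loading entirely when explicitly requested via the environment
			return os.Getenv("SKIP_CONFIG_LOAD") != ""
		}),
	)
	if err := c.Load(context.TODO()); err != nil {
		panic(err)
	}
}
```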

database/dsn.go (new file, 157 lines)
View File

@@ -0,0 +1,157 @@
package database
import (
"crypto/tls"
"errors"
"fmt"
"net/url"
"strings"
)
var (
ErrInvalidDSNAddr = errors.New("invalid dsn addr")
ErrInvalidDSNUnescaped = errors.New("dsn must be escaped")
ErrInvalidDSNNoSlash = errors.New("dsn must contain slash")
)
type Config struct {
TLSConfig *tls.Config
Username string
Password string
Scheme string
Host string
Port string
Database string
Params []string
}
func (cfg *Config) FormatDSN() string {
var s strings.Builder
if len(cfg.Scheme) > 0 {
s.WriteString(cfg.Scheme + "://")
}
// [username[:password]@]
if len(cfg.Username) > 0 {
s.WriteString(cfg.Username)
if len(cfg.Password) > 0 {
s.WriteByte(':')
s.WriteString(url.PathEscape(cfg.Password))
}
s.WriteByte('@')
}
// [host:port]
if len(cfg.Host) > 0 {
s.WriteString(cfg.Host)
if len(cfg.Port) > 0 {
s.WriteByte(':')
s.WriteString(cfg.Port)
}
}
// /dbname
s.WriteByte('/')
s.WriteString(url.PathEscape(cfg.Database))
for i := 0; i < len(cfg.Params); i += 2 {
if i == 0 {
s.WriteString("?")
} else {
s.WriteString("&")
}
s.WriteString(cfg.Params[i])
s.WriteString("=")
s.WriteString(cfg.Params[i+1])
}
return s.String()
}
func ParseDSN(dsn string) (*Config, error) {
cfg := &Config{}
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find last '/' that goes before dbname
foundSlash := false
for i := len(dsn) - 1; i >= 0; i-- {
if dsn[i] == '/' {
foundSlash = true
var j, k int
// left part is empty if i <= 0
if i > 0 {
// Find the first ':' in dsn
for j = i; j >= 0; j-- {
if dsn[j] == ':' {
cfg.Scheme = dsn[0:j]
}
}
// [username[:password]@][host]
// Find the last '@' in dsn[:i]
for j = i; j >= 0; j-- {
if dsn[j] == '@' {
// username[:password]
// Find the second ':' in dsn[:j]
for k = 0; k < j; k++ {
if dsn[k] == ':' {
if cfg.Scheme == dsn[:k] {
continue
}
var err error
cfg.Password, err = url.PathUnescape(dsn[k+1 : j])
if err != nil {
return nil, err
}
break
}
}
cfg.Username = dsn[len(cfg.Scheme)+3 : k]
break
}
}
for k = j + 1; k < i; k++ {
if dsn[k] == ':' {
cfg.Host = dsn[j+1 : k]
cfg.Port = dsn[k+1 : i]
break
}
}
}
// dbname[?param1=value1&...&paramN=valueN]
// Find the first '?' in dsn[i+1:]
for j = i + 1; j < len(dsn); j++ {
if dsn[j] == '?' {
parts := strings.Split(dsn[j+1:], "&")
cfg.Params = make([]string, 0, len(parts)*2)
for _, p := range parts {
k, v, found := strings.Cut(p, "=")
if !found {
continue
}
cfg.Params = append(cfg.Params, k, v)
}
break
}
}
var err error
dbname := dsn[i+1 : j]
if cfg.Database, err = url.PathUnescape(dbname); err != nil {
return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
}
break
}
}
if !foundSlash && len(dsn) > 0 {
return nil, ErrInvalidDSNNoSlash
}
return cfg, nil
}

database/dsn_test.go (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
package database
import (
"net/url"
"testing"
)
func TestParseDSN(t *testing.T) {
cfg, err := ParseDSN("postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2")
if err != nil {
t.Fatal(err)
}
if cfg.Password != "p@ssword#" {
t.Fatalf("parsing error")
}
}
func TestFormatDSN(t *testing.T) {
src := "postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2"
cfg, err := ParseDSN(src)
if err != nil {
t.Fatal(err)
}
dst, err := url.PathUnescape(cfg.FormatDSN())
if err != nil {
t.Fatal(err)
}
if src != dst {
t.Fatalf("\n%s\n%s", src, dst)
}
}
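
A round-trip usage sketch for the new database package (editorial example; the import path is assumed from the file location in this module):

```go
package main

import (
	"fmt"

	"go.unistack.org/micro/v3/database"
)

func main() {
	cfg := &database.Config{
		Scheme:   "postgres",
		Username: "user",
		Password: "secret",
		Host:     "localhost",
		Port:     "5432",
		Database: "app",
		Params:   []string{"sslmode", "disable"}, // flat key/value pairs, as FormatDSN expects
	}

	dsn := cfg.FormatDSN()
	fmt.Println(dsn) // postgres://user:secret@localhost:5432/app?sslmode=disable

	back, err := database.ParseDSN(dsn)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Host, back.Database) // localhost app
}
```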

View File

@@ -4,11 +4,17 @@ package errors // import "go.unistack.org/micro/v3/errors"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
@@ -340,3 +346,135 @@ func addslashes(str string) string {
}
return buf.String()
}
type retryableError struct {
err error
}
// Retryable returns error that can be retried later
func Retryable(err error) error {
return &retryableError{err: err}
}
type IsRetryableFunc func(error) bool
var (
RetrayableOracleErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `ORA-`):
return true
case strings.Contains(errmsg, `can not assign`):
return true
case strings.Contains(errmsg, `can't assign`):
return true
}
return false
},
}
RetrayablePostgresErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `number of field descriptions must equal number of`):
return true
case strings.Contains(errmsg, `not a pointer`):
return true
case strings.Contains(errmsg, `values, but dst struct has only`):
return true
case strings.Contains(errmsg, `struct doesn't have corresponding row field`):
return true
case strings.Contains(errmsg, `cannot find field`):
return true
case strings.Contains(errmsg, `cannot scan`) || strings.Contains(errmsg, `cannot convert`):
return true
case strings.Contains(errmsg, `failed to connect to`):
return true
}
return false
},
}
RetryableMicroErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case *Error:
switch verr.Code {
case 401, 403, 408, 500, 501, 502, 503, 504:
return true
default:
return false
}
case *retryableError:
return true
}
return false
},
}
RetryableGoErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case interface{ SafeToRetry() bool }:
return verr.SafeToRetry()
case interface{ Timeout() bool }:
return verr.Timeout()
}
switch {
case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF):
return true
case errors.Is(err, context.DeadlineExceeded):
return true
case errors.Is(err, io.ErrClosedPipe), errors.Is(err, io.ErrShortBuffer), errors.Is(err, io.ErrShortWrite):
return true
}
return false
},
}
RetryableGrpcErrors = []IsRetryableFunc{
func(err error) bool {
st, ok := status.FromError(err)
if !ok {
return false
}
switch st.Code() {
case codes.Unavailable, codes.ResourceExhausted:
return true
case codes.DeadlineExceeded:
return true
case codes.Internal:
switch {
case strings.Contains(st.Message(), `transport: received the unexpected content-type "text/html; charset=UTF-8"`):
return true
case strings.Contains(st.Message(), io.ErrUnexpectedEOF.Error()):
return true
case strings.Contains(st.Message(), `stream terminated by RST_STREAM with error code: INTERNAL_ERROR`):
return true
}
}
return false
},
}
)
// Unwrap provides error wrapping
func (e *retryableError) Unwrap() error {
return e.err
}
// Error returns the error string
func (e *retryableError) Error() string {
if e.err == nil {
return ""
}
return e.err.Error()
}
// IsRetryable checks error for ability to retry later
func IsRetryable(err error, fns ...IsRetryableFunc) bool {
for _, fn := range fns {
if ok := fn(err); ok {
return true
}
}
return false
}
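
A short usage sketch for the new retry helpers (editorial example; all identifiers come from the diff above):

```go
package main

import (
	"fmt"

	"go.unistack.org/micro/v3/errors"
)

func main() {
	// Wrap a transient failure so callers can decide to retry it later.
	err := errors.Retryable(fmt.Errorf("temporary outage"))

	// RetryableMicroErrors matches *errors.Error status codes and values wrapped with Retryable.
	if errors.IsRetryable(err, errors.RetryableMicroErrors...) {
		fmt.Println("worth retrying")
	}

	// Matcher sets can be combined when several error sources are in play.
	fns := append(errors.RetryableMicroErrors, errors.RetryableGoErrors...)
	_ = errors.IsRetryable(err, fns...)
}
```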

View File

@@ -8,6 +8,13 @@ import (
"testing"
)
func TestIsRetrayable(t *testing.T) {
err := fmt.Errorf("ORA-")
if !IsRetryable(err, RetrayableOracleErrors...) {
t.Fatalf("IsRetrayable not works")
}
}
func TestMarshalJSON(t *testing.T) {
e := InternalServerError("id", "err: %v", fmt.Errorf("err: %v", `xxx: "UNIX_TIMESTAMP": invalid identifier`))
_, err := json.Marshal(e)

View File

@@ -1,230 +0,0 @@
package logger
import (
"context"
"encoding/json"
"fmt"
"os"
"runtime"
"strings"
"sync"
"time"
)
type defaultLogger struct {
enc *json.Encoder
opts Options
sync.RWMutex
}
// Init(opts...) should only overwrite provided options
func (l *defaultLogger) Init(opts ...Option) error {
l.Lock()
for _, o := range opts {
o(&l.opts)
}
l.enc = json.NewEncoder(l.opts.Out)
// wrap the Log func
l.Unlock()
return nil
}
func (l *defaultLogger) String() string {
return "micro"
}
func (l *defaultLogger) Clone(opts ...Option) Logger {
newopts := NewOptions(opts...)
oldopts := l.opts
for _, o := range opts {
o(&newopts)
o(&oldopts)
}
l.Lock()
cl := &defaultLogger{opts: oldopts, enc: json.NewEncoder(l.opts.Out)}
l.Unlock()
return cl
}
func (l *defaultLogger) V(level Level) bool {
l.RLock()
ok := l.opts.Level.Enabled(level)
l.RUnlock()
return ok
}
func (l *defaultLogger) Level(level Level) {
l.Lock()
l.opts.Level = level
l.Unlock()
}
func (l *defaultLogger) Fields(fields ...interface{}) Logger {
l.RLock()
nl := &defaultLogger{opts: l.opts, enc: l.enc}
if len(fields) == 0 {
l.RUnlock()
return nl
} else if len(fields)%2 != 0 {
fields = fields[:len(fields)-1]
}
nl.opts.Fields = copyFields(l.opts.Fields)
nl.opts.Fields = append(nl.opts.Fields, fields...)
l.RUnlock()
return nl
}
func copyFields(src []interface{}) []interface{} {
dst := make([]interface{}, len(src))
copy(dst, src)
return dst
}
// logCallerfilePath returns a package/file:line description of the caller,
// preserving only the leaf directory name and file name.
func logCallerfilePath(loggingFilePath string) string {
// To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
idx := strings.LastIndexByte(loggingFilePath, '/')
if idx == -1 {
return loggingFilePath
}
idx = strings.LastIndexByte(loggingFilePath[:idx], '/')
if idx == -1 {
return loggingFilePath
}
return loggingFilePath[idx+1:]
}
func (l *defaultLogger) Info(ctx context.Context, args ...interface{}) {
l.Log(ctx, InfoLevel, args...)
}
func (l *defaultLogger) Error(ctx context.Context, args ...interface{}) {
l.Log(ctx, ErrorLevel, args...)
}
func (l *defaultLogger) Debug(ctx context.Context, args ...interface{}) {
l.Log(ctx, DebugLevel, args...)
}
func (l *defaultLogger) Warn(ctx context.Context, args ...interface{}) {
l.Log(ctx, WarnLevel, args...)
}
func (l *defaultLogger) Trace(ctx context.Context, args ...interface{}) {
l.Log(ctx, TraceLevel, args...)
}
func (l *defaultLogger) Fatal(ctx context.Context, args ...interface{}) {
l.Log(ctx, FatalLevel, args...)
os.Exit(1)
}
func (l *defaultLogger) Infof(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, InfoLevel, msg, args...)
}
func (l *defaultLogger) Errorf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, ErrorLevel, msg, args...)
}
func (l *defaultLogger) Debugf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, DebugLevel, msg, args...)
}
func (l *defaultLogger) Warnf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, WarnLevel, msg, args...)
}
func (l *defaultLogger) Tracef(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, TraceLevel, msg, args...)
}
func (l *defaultLogger) Fatalf(ctx context.Context, msg string, args ...interface{}) {
l.Logf(ctx, FatalLevel, msg, args...)
os.Exit(1)
}
func (l *defaultLogger) Log(ctx context.Context, level Level, args ...interface{}) {
if !l.V(level) {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields = append(fields, "level", level.String())
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields = append(fields, "caller", fmt.Sprintf("%s:%d", logCallerfilePath(file), line))
}
fields = append(fields, "timestamp", time.Now().Format("2006-01-02 15:04:05"))
if len(args) > 0 {
fields = append(fields, "msg", fmt.Sprint(args...))
}
out := make(map[string]interface{}, len(fields)/2)
for i := 0; i < len(fields); i += 2 {
out[fields[i].(string)] = fields[i+1]
}
l.RLock()
_ = l.enc.Encode(out)
l.RUnlock()
}
func (l *defaultLogger) Logf(ctx context.Context, level Level, msg string, args ...interface{}) {
if !l.V(level) {
return
}
l.RLock()
fields := copyFields(l.opts.Fields)
l.RUnlock()
fields = append(fields, "level", level.String())
if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok {
fields = append(fields, "caller", fmt.Sprintf("%s:%d", logCallerfilePath(file), line))
}
fields = append(fields, "timestamp", time.Now().Format("2006-01-02 15:04:05"))
if len(args) > 0 {
fields = append(fields, "msg", fmt.Sprintf(msg, args...))
} else if msg != "" {
fields = append(fields, "msg", msg)
}
out := make(map[string]interface{}, len(fields)/2)
for i := 0; i < len(fields); i += 2 {
out[fields[i].(string)] = fields[i+1]
}
l.RLock()
_ = l.enc.Encode(out)
l.RUnlock()
}
func (l *defaultLogger) Options() Options {
return l.opts
}
// NewLogger builds a new logger based on options
func NewLogger(opts ...Option) Logger {
l := &defaultLogger{
opts: NewOptions(opts...),
}
l.enc = json.NewEncoder(l.opts.Out)
return l
}

logger/noop.go (new file, 90 lines)
View File

@@ -0,0 +1,90 @@
package logger
import (
"context"
)
type noopLogger struct {
opts Options
}
func NewLogger(opts ...Option) Logger {
options := NewOptions(opts...)
return &noopLogger{opts: options}
}
func (l *noopLogger) V(lvl Level) bool {
return false
}
func (l *noopLogger) Level(lvl Level) {
}
func (l *noopLogger) Init(opts ...Option) error {
for _, o := range opts {
o(&l.opts)
}
return nil
}
func (l *noopLogger) Clone(opts ...Option) Logger {
nl := &noopLogger{opts: l.opts}
for _, o := range opts {
o(&nl.opts)
}
return nl
}
func (l *noopLogger) Fields(attrs ...interface{}) Logger {
return l
}
func (l *noopLogger) Options() Options {
return l.opts
}
func (l *noopLogger) String() string {
return "noop"
}
func (l *noopLogger) Log(ctx context.Context, lvl Level, attrs ...interface{}) {
}
func (l *noopLogger) Info(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Debug(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Error(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Trace(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Warn(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Fatal(ctx context.Context, attrs ...interface{}) {
}
func (l *noopLogger) Logf(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
}
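
With the default JSON logger removed above, the base logger package now ships this no-op implementation behind its NewLogger constructor: every level is disabled and nothing is written. A one-line editorial sketch:

```go
package main

import (
	"context"

	"go.unistack.org/micro/v3/logger"
)

func main() {
	l := logger.NewLogger() // no-op backend introduced by this diff
	l.Info(context.TODO(), "this message is discarded")
}
```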

logger/slog/options.go (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
package slog
import "go.unistack.org/micro/v3/logger"
type sourceKey struct{}
func WithSourceKey(v string) logger.Option {
return logger.SetOption(sourceKey{}, v)
}
type timeKey struct{}
func WithTimeKey(v string) logger.Option {
return logger.SetOption(timeKey{}, v)
}
type messageKey struct{}
func WithMessageKey(v string) logger.Option {
return logger.SetOption(messageKey{}, v)
}
type levelKey struct{}
func WithLevelKey(v string) logger.Option {
return logger.SetOption(levelKey{}, v)
}
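
These options rename the standard slog attribute keys in the emitted JSON. A hedged sketch (editorial example) that combines them with the slog backend added in the next file; the replacement key names are arbitrary:

```go
package main

import (
	"context"
	"os"

	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/logger/slog"
)

func main() {
	l := slog.NewLogger(
		logger.WithLevel(logger.InfoLevel),
		logger.WithOutput(os.Stdout),
		slog.WithMessageKey("message"), // rename the default "msg" key
		slog.WithLevelKey("severity"),  // rename the default "level" key
	)
	if err := l.Init(); err != nil {
		panic(err)
	}
	l.Info(context.TODO(), "service started")
}
```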

logger/slog/slog.go (new file, 419 lines)
View File

@@ -0,0 +1,419 @@
package slog
import (
"context"
"fmt"
"log/slog"
"os"
"runtime"
"strconv"
"time"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/tracer"
)
var (
DefaultSourceKey string = slog.SourceKey
DefaultTimeKey string = slog.TimeKey
DefaultMessageKey string = slog.MessageKey
DefaultLevelKey string = slog.LevelKey
)
var (
traceValue = slog.StringValue("trace")
debugValue = slog.StringValue("debug")
infoValue = slog.StringValue("info")
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
)
func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.SourceKey:
source := a.Value.Any().(*slog.Source)
a.Value = slog.StringValue(source.File + ":" + strconv.Itoa(source.Line))
a.Key = s.sourceKey
case slog.TimeKey:
a.Key = s.timeKey
case slog.MessageKey:
a.Key = s.messageKey
case slog.LevelKey:
level := a.Value.Any().(slog.Level)
lvl := slogToLoggerLevel(level)
a.Key = s.levelKey
switch {
case lvl < logger.DebugLevel:
a.Value = traceValue
case lvl < logger.InfoLevel:
a.Value = debugValue
case lvl < logger.WarnLevel:
a.Value = infoValue
case lvl < logger.ErrorLevel:
a.Value = warnValue
case lvl < logger.FatalLevel:
a.Value = errorValue
case lvl >= logger.FatalLevel:
a.Value = fatalValue
default:
a.Value = infoValue
}
}
return a
}
type slogLogger struct {
slog *slog.Logger
leveler *slog.LevelVar
levelKey string
messageKey string
sourceKey string
timeKey string
opts logger.Options
}
func (s *slogLogger) Clone(opts ...logger.Option) logger.Logger {
options := s.opts
for _, o := range opts {
o(&options)
}
l := &slogLogger{
opts: options,
levelKey: s.levelKey,
messageKey: s.messageKey,
sourceKey: s.sourceKey,
timeKey: s.timeKey,
}
if v, ok := l.opts.Context.Value(levelKey{}).(string); ok && v != "" {
l.levelKey = v
}
if v, ok := l.opts.Context.Value(messageKey{}).(string); ok && v != "" {
l.messageKey = v
}
if v, ok := l.opts.Context.Value(sourceKey{}).(string); ok && v != "" {
l.sourceKey = v
}
if v, ok := l.opts.Context.Value(timeKey{}).(string); ok && v != "" {
l.timeKey = v
}
l.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: s.renameAttr,
Level: l.leveler,
AddSource: true,
}
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
handler := slog.NewJSONHandler(options.Out, handleOpt)
l.slog = slog.New(handler).With(options.Fields...)
return l
}
func (s *slogLogger) V(level logger.Level) bool {
return s.opts.Level.Enabled(level)
}
func (s *slogLogger) Level(level logger.Level) {
s.leveler.Set(loggerToSlogLevel(level))
}
func (s *slogLogger) Options() logger.Options {
return s.opts
}
func (s *slogLogger) Fields(attrs ...interface{}) logger.Logger {
nl := &slogLogger{
opts: s.opts,
levelKey: s.levelKey,
messageKey: s.messageKey,
sourceKey: s.sourceKey,
timeKey: s.timeKey,
}
nl.leveler = new(slog.LevelVar)
nl.leveler.Set(s.leveler.Level())
handleOpt := &slog.HandlerOptions{
ReplaceAttr: nl.renameAttr,
Level: s.leveler,
AddSource: true,
}
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
nl.slog = slog.New(handler).With(attrs...)
return nl
}
func (s *slogLogger) Init(opts ...logger.Option) error {
for _, o := range opts {
o(&s.opts)
}
if v, ok := s.opts.Context.Value(levelKey{}).(string); ok && v != "" {
s.levelKey = v
}
if v, ok := s.opts.Context.Value(messageKey{}).(string); ok && v != "" {
s.messageKey = v
}
if v, ok := s.opts.Context.Value(sourceKey{}).(string); ok && v != "" {
s.sourceKey = v
}
if v, ok := s.opts.Context.Value(timeKey{}).(string); ok && v != "" {
s.timeKey = v
}
s.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: s.renameAttr,
Level: s.leveler,
AddSource: true,
}
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
s.slog = slog.New(handler).With(s.opts.Fields...)
slog.SetDefault(s.slog)
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, attrs ...interface{}) {
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), loggerToSlogLevel(lvl), fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Logf(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
if !s.V(lvl) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), loggerToSlogLevel(lvl), fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Info(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelInfo, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Infof(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.InfoLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelInfo, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debug(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Debugf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.DebugLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Trace(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug-1, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Tracef(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.TraceLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelDebug-1, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Error(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == "error" {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Errorf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.ErrorLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
r.Attrs(func(a slog.Attr) bool {
if a.Key == "error" {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, a.Value.String())
return false
}
}
return true
})
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Fatal(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError+1, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Fatalf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.FatalLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelError+1, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, attrs ...interface{}) {
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelWarn, fmt.Sprintf("%s", attrs[0]), pcs[0])
// r.Add(attrs[1:]...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) Warnf(ctx context.Context, msg string, attrs ...interface{}) {
if !s.V(logger.WarnLevel) {
return
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, Infof]
r := slog.NewRecord(time.Now(), slog.LevelWarn, fmt.Sprintf(msg, attrs...), pcs[0])
// r.Add(attrs...)
_ = s.slog.Handler().Handle(ctx, r)
}
func (s *slogLogger) String() string {
return "slog"
}
func NewLogger(opts ...logger.Option) logger.Logger {
s := &slogLogger{
opts: logger.NewOptions(opts...),
sourceKey: DefaultSourceKey,
timeKey: DefaultTimeKey,
messageKey: DefaultMessageKey,
levelKey: DefaultLevelKey,
}
if v, ok := s.opts.Context.Value(levelKey{}).(string); ok && v != "" {
s.levelKey = v
}
if v, ok := s.opts.Context.Value(messageKey{}).(string); ok && v != "" {
s.messageKey = v
}
if v, ok := s.opts.Context.Value(sourceKey{}).(string); ok && v != "" {
s.sourceKey = v
}
if v, ok := s.opts.Context.Value(timeKey{}).(string); ok && v != "" {
s.timeKey = v
}
return s
}
func loggerToSlogLevel(level logger.Level) slog.Level {
switch level {
case logger.DebugLevel:
return slog.LevelDebug
case logger.WarnLevel:
return slog.LevelWarn
case logger.ErrorLevel:
return slog.LevelError
case logger.TraceLevel:
return slog.LevelDebug - 1
case logger.FatalLevel:
return slog.LevelError + 1
default:
return slog.LevelInfo
}
}
func slogToLoggerLevel(level slog.Level) logger.Level {
switch level {
case slog.LevelDebug:
return logger.DebugLevel
case slog.LevelWarn:
return logger.WarnLevel
case slog.LevelError:
return logger.ErrorLevel
case slog.LevelDebug - 1:
return logger.TraceLevel
case slog.LevelError + 1:
return logger.FatalLevel
default:
return logger.InfoLevel
}
}

logger/slog/slog_test.go (new file, 141 lines)
View File

@@ -0,0 +1,141 @@
package slog
import (
"bytes"
"context"
"log"
"testing"
"go.unistack.org/micro/v3/logger"
)
func TestContext(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl, ok := logger.FromContext(logger.NewContext(ctx, l.Fields("key", "val")))
if !ok {
t.Fatal("context without logger")
}
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFromContextWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
var ok bool
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
ctx = logger.NewContext(ctx, nl)
l, ok = logger.FromContext(ctx)
if !ok {
t.Fatalf("context does not have logger")
}
l.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Clone(logger.WithLevel(logger.ErrorLevel))
if err := nl.Init(); err != nil {
t.Fatal(err)
}
nl.Info(ctx, "info message")
if len(buf.Bytes()) != 0 {
t.Fatal("message must not be logged")
}
l.Info(ctx, "info message")
if len(buf.Bytes()) == 0 {
t.Fatal("message must be logged")
}
}
func TestRedirectStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
fn := logger.RedirectStdLogger(l, logger.ErrorLevel)
defer fn()
log.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
lg := logger.NewStdLogger(l, logger.ErrorLevel)
lg.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestLogger(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
l.Warn(ctx, "warn_msg1")
l.Fields("error", "test").Info(ctx, "error message")
l.Warn(ctx, "first second")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"trace"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"trace_msg1"`))) {
t.Fatalf("logger tracer, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"warn_msg1"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"error message","error":"test"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"first second"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
}

View File

@@ -342,9 +342,6 @@ func (n *noopServer) createSubHandler(sb *subscriber, opts Options) broker.Handl
hdr := metadata.New(len(msg.Header))
for k, v := range msg.Header {
if k == "Content-Type" {
continue
}
hdr.Set(k, v)
}

View File

@@ -88,6 +88,7 @@ func (s *service) Name() string {
// Init initialises options. Additionally it calls cmd.Init
// which parses command line flags. cmd.Init is only called
// on first Init.
//
//nolint:gocyclo
func (s *service) Init(opts ...Option) error {
var err error
@@ -375,19 +376,71 @@ func (s *service) Run() error {
return s.Stop()
}
type nameIface interface {
Name() string
}
func getNameIndex(n string, ifaces interface{}) int {
values, ok := ifaces.([]interface{})
if !ok {
return 0
}
for idx, iface := range values {
if ifc, ok := iface.(nameIface); ok && ifc.Name() == n {
return idx
switch values := ifaces.(type) {
case []router.Router:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []register.Register:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []store.Store:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []tracer.Tracer:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []server.Server:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []config.Config:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []meter.Meter:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []broker.Broker:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
case []client.Client:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
/*
case []logger.Logger:
for idx, iface := range values {
if iface.Name() == n {
return idx
}
}
*/
}
return 0
}

View File

@@ -17,6 +17,21 @@ import (
"go.unistack.org/micro/v3/tracer"
)
func TestClient(t *testing.T) {
c1 := client.NewClient(client.Name("test1"))
c2 := client.NewClient(client.Name("test2"))
svc := NewService(Client(c1, c2))
if err := svc.Init(); err != nil {
t.Fatal(err)
}
x1 := svc.Client("test2")
if x1.Name() != "test2" {
t.Fatal("invalid client")
}
}
type testItem struct {
name string
}

View File

@@ -508,3 +508,74 @@ func FieldName(name string) string {
return string(newstr)
}
func Equal(src interface{}, dst interface{}, excptFields ...string) bool {
srcVal := reflect.ValueOf(src)
dstVal := reflect.ValueOf(dst)
switch srcVal.Kind() {
case reflect.Array, reflect.Slice:
for i := 0; i < srcVal.Len(); i++ {
e := srcVal.Index(i).Interface()
a := dstVal.Index(i).Interface()
if !Equal(e, a, excptFields...) {
return false
}
}
return true
case reflect.Map:
for i := 0; i < len(srcVal.MapKeys()); i++ {
key := srcVal.MapKeys()[i]
keyStr := fmt.Sprintf("%v", key.Interface())
if stringContains(keyStr, excptFields) {
continue
}
s := srcVal.MapIndex(key)
d := dstVal.MapIndex(key)
if !Equal(s.Interface(), d.Interface(), excptFields...) {
return false
}
}
return true
case reflect.Struct, reflect.Interface:
for i := 0; i < srcVal.NumField(); i++ {
typeField := srcVal.Type().Field(i)
if stringContains(typeField.Name, excptFields) {
continue
}
s := srcVal.Field(i)
d := dstVal.FieldByName(typeField.Name)
if s.CanInterface() && d.CanInterface() {
if !Equal(s.Interface(), d.Interface(), excptFields...) {
return false
}
} else {
return false
}
}
return true
case reflect.Ptr:
if srcVal.IsNil() {
return dstVal.IsNil()
}
s := srcVal.Elem()
d := reflect.Indirect(dstVal)
if s.CanInterface() && d.CanInterface() {
return Equal(s.Interface(), d.Interface(), excptFields...)
}
return false
case reflect.String, reflect.Int, reflect.Int64, reflect.Float32, reflect.Float64, reflect.Bool:
return src == dst
default:
return srcVal.Interface() == dstVal.Interface()
}
}
func stringContains(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}

View File

@@ -133,3 +133,16 @@ func TestMergeNested(t *testing.T) {
t.Fatalf("merge error: %#+v", dst.Nested)
}
}
func TestEqual(t *testing.T) {
type tstr struct {
Key1 string
Key2 string
}
src := &tstr{Key1: "val1", Key2: "micro:generate"}
dst := &tstr{Key1: "val1", Key2: "val2"}
if !Equal(src, dst, "Key2") {
t.Fatal("invalid Equal test")
}
}
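
Besides the struct case exercised by TestEqual above, Equal also walks maps and slices. A small editorial sketch, assuming the go.unistack.org/micro/v3/util/reflect import path (the rutil alias is only for readability):

```go
package main

import (
	"fmt"

	rutil "go.unistack.org/micro/v3/util/reflect"
)

func main() {
	src := map[string]string{"id": "1", "updated": "2023-09-12"}
	dst := map[string]string{"id": "1", "updated": "2023-09-13"}

	// "updated" is passed as an excluded field, so only the "id" entry is compared.
	fmt.Println(rutil.Equal(src, dst, "updated")) // true
	fmt.Println(rutil.Equal(src, dst))            // false
}
```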