Compare commits


3 Commits

518cc1db73 fixup duplicate nested redis span
Some checks failed
build / lint (push) Successful in 22s
build / test (push) Failing after 1m29s
codeql / analyze (go) (push) Failing after 1m52s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 12:59:05 +03:00
4484cd34ec unify span names
Some checks failed
build / lint (push) Successful in 23s
build / test (push) Failing after 1m29s
codeql / analyze (go) (push) Failing after 1m55s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 09:16:08 +03:00
7bceeee6bf unify span names
Some checks failed
build / lint (push) Successful in 23s
build / test (push) Failing after 1m31s
codeql / analyze (go) (push) Failing after 1m52s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 08:57:29 +03:00
2 changed files with 46 additions and 90 deletions

View File

@@ -10,7 +10,6 @@ import (
 	redis "github.com/redis/go-redis/v9"
 	"go.unistack.org/micro/v3/semconv"
 	"go.unistack.org/micro/v3/store"
-	"go.unistack.org/micro/v3/tracer"
 	pool "go.unistack.org/micro/v3/util/xpool"
 )
@@ -56,10 +55,13 @@ type wrappedClient struct {
 }

 func (r *Store) Connect(ctx context.Context) error {
+	var err error
 	if r.cli.Client != nil {
-		return r.cli.Client.Ping(ctx).Err()
+		err = r.cli.Client.Ping(ctx).Err()
 	}
-	return r.cli.ClusterClient.Ping(ctx).Err()
+	err = r.cli.ClusterClient.Ping(ctx).Err()
+	setSpanError(ctx, err)
+	return err
 }

 func (r *Store) Init(opts ...store.Option) error {
@@ -103,8 +105,6 @@ func (r *Store) Disconnect(ctx context.Context) error {
 		close(r.done)
 		return err
 	}
-
-	return err
 }

 func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
@@ -122,8 +122,6 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
 	}

 	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
-	ctx, sp := r.opts.Tracer.Start(ctx, "cache exists "+rkey)
-	defer sp.Finish()

 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
@@ -134,6 +132,7 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
 	} else {
 		val, err = r.cli.ClusterClient.Exists(ctx, rkey).Result()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -144,7 +143,6 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -167,8 +165,6 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 	}

 	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
-	ctx, sp := r.opts.Tracer.Start(ctx, "cache read "+rkey)
-	defer sp.Finish()

 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
@@ -179,6 +175,7 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 	} else {
 		buf, err = r.cli.ClusterClient.Get(ctx, rkey).Bytes()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -189,7 +186,6 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -201,7 +197,7 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 		*b = string(buf)
 	default:
 		if err = r.opts.Codec.Unmarshal(buf, val); err != nil {
-			sp.SetStatus(tracer.SpanStatusError, err.Error())
+			setSpanError(ctx, err)
 		}
 	}
@@ -228,9 +224,6 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 		}
 	}

-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mread %v", keys))
-	defer sp.Finish()
-
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
 	var rvals []interface{}
@@ -240,6 +233,7 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 	} else {
 		rvals, err = r.cli.ClusterClient.MGet(ctx, keys...).Result()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -250,7 +244,6 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -292,7 +285,7 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 			itm.Set(reflect.New(vt.Elem()))
 			if err = r.opts.Codec.Unmarshal(buf, itm.Interface()); err != nil {
-				sp.SetStatus(tracer.SpanStatusError, err.Error())
+				setSpanError(ctx, err)
 				return err
 			}
 		}
@@ -321,9 +314,6 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
 		}
 	}

-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mdelete %v", keys))
-	defer sp.Finish()
-
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
 	var err error
@@ -332,6 +322,7 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
 	} else {
 		err = r.cli.ClusterClient.Del(ctx, keys...).Err()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -342,7 +333,6 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -364,9 +354,6 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
 		defer cancel()
 	}

-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache delete %v", key))
-	defer sp.Finish()
-
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
 	var err error
@@ -375,6 +362,7 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
 	} else {
 		err = r.cli.ClusterClient.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -385,7 +373,6 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -407,9 +394,6 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 		defer cancel()
 	}

-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mwrite %v", keys))
-	defer sp.Finish()
-
 	kvs := make([]string, 0, len(keys)*2)
 	for idx, key := range keys {
@@ -423,7 +407,6 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 		default:
 			buf, err := r.opts.Codec.Marshal(vt)
 			if err != nil {
-				sp.SetStatus(tracer.SpanStatusError, err.Error())
 				return err
 			}
 			kvs = append(kvs, string(buf))
@@ -436,6 +419,7 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 	pipeliner := func(pipe redis.Pipeliner) error {
 		for idx := 0; idx < len(kvs); idx += 2 {
 			if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
+				setSpanError(ctx, err)
 				return err
 			}
 		}
@@ -450,7 +434,7 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 	} else {
 		cmds, err = r.cli.ClusterClient.Pipelined(ctx, pipeliner)
 	}
-
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -461,7 +445,6 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -472,7 +455,7 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 			r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 			return store.ErrNotFound
 		}
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		setSpanError(ctx, err)
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -496,8 +479,6 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 	}

 	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache write %v", rkey))
-	defer sp.Finish()

 	var buf []byte
 	switch vt := val.(type) {
@@ -509,7 +490,6 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 		var err error
 		buf, err = r.opts.Codec.Marshal(val)
 		if err != nil {
-			sp.SetStatus(tracer.SpanStatusError, err.Error())
 			return err
 		}
 	}
@@ -522,6 +502,7 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 	} else {
 		err = r.cli.ClusterClient.Set(ctx, rkey, buf, options.TTL).Err()
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -532,7 +513,6 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -562,9 +542,6 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
 		defer cancel()
 	}

-	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache list %v", rkey))
-	defer sp.Finish()
-
 	// TODO: add support for prefix/suffix/limit
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
@@ -583,6 +560,7 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
 			return nil
 		})
 	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
@@ -593,7 +571,6 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
 	} else if err == nil {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		sp.SetStatus(tracer.SpanStatusError, err.Error())
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return nil, err
 	}
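
Taken together, the changes in this file remove the per-method "cache ..." spans (each ctx, sp := r.opts.Tracer.Start(...) / defer sp.Finish() pair) and replace the scattered sp.SetStatus(tracer.SpanStatusError, ...) calls with a single setSpanError(ctx, err) after each Redis call. A minimal sketch of the shape every store method converges on; exampleOp and the "example" metric name are hypothetical, while the helpers and fields are the ones visible in the diff:

func (r *Store) exampleOp(ctx context.Context, rkey string) error {
	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", "example").Inc()
	ts := time.Now()

	// One call per client kind, exactly as in Exists/Read/Write above.
	var err error
	if r.cli.Client != nil {
		err = r.cli.Client.Exists(ctx, rkey).Err()
	} else {
		err = r.cli.ClusterClient.Exists(ctx, rkey).Err()
	}
	// Single error-recording point: the helper (added in the second file
	// below) skips nil and redis.Nil before marking the span carried in ctx.
	setSpanError(ctx, err)

	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", "example").Dec()
	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", "example").Update(time.Since(ts).Seconds())
	return err
}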

View File

@@ -16,23 +16,20 @@ func setTracing(rdb redis.UniversalClient, tr tracer.Tracer, opts ...tracer.SpanOption) {
 	case *redis.Client:
 		opt := rdb.Options()
 		connString := formatDBConnString(opt.Network, opt.Addr)
-		opts = addServerAttributes(opts, opt.Addr)
-		rdb.AddHook(newTracingHook(connString, tr, opts...))
+		rdb.AddHook(newTracingHook(connString, tr))
 	case *redis.ClusterClient:
 		rdb.AddHook(newTracingHook("", tr, opts...))
 		rdb.OnNewNode(func(rdb *redis.Client) {
 			opt := rdb.Options()
-			opts = addServerAttributes(opts, opt.Addr)
 			connString := formatDBConnString(opt.Network, opt.Addr)
-			rdb.AddHook(newTracingHook(connString, tr, opts...))
+			rdb.AddHook(newTracingHook(connString, tr))
 		})
 	case *redis.Ring:
 		rdb.AddHook(newTracingHook("", tr, opts...))
 		rdb.OnNewNode(func(rdb *redis.Client) {
 			opt := rdb.Options()
-			opts = addServerAttributes(opts, opt.Addr)
 			connString := formatDBConnString(opt.Network, opt.Addr)
-			rdb.AddHook(newTracingHook(connString, tr, opts...))
+			rdb.AddHook(newTracingHook(connString, tr))
 		})
 	}
 }
@@ -58,15 +55,13 @@ func newTracingHook(connString string, tr tracer.Tracer, opts ...tracer.SpanOption) *tracingHook {
 func (h *tracingHook) DialHook(hook redis.DialHook) redis.DialHook {
 	return func(ctx context.Context, network, addr string) (net.Conn, error) {
-		ctx, span := h.tr.Start(ctx, "redis.dial", h.opts...)
+		_, span := h.tr.Start(ctx, "redis.dial", h.opts...)
 		defer span.Finish()

 		conn, err := hook(ctx, network, addr)
-		if err != nil {
-			recordError(span, err)
-			return nil, err
-		}
-		return conn, nil
+		recordError(span, err)
+		return conn, err
 	}
 }
@@ -74,41 +69,46 @@ func (h *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
 	return func(ctx context.Context, cmd redis.Cmder) error {
 		cmdString := rediscmd.CmdString(cmd)

-		ctx, span := h.tr.Start(ctx, cmd.FullName(), append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...)
+		_, span := h.tr.Start(ctx, "redis.process", append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...)
 		defer span.Finish()

-		if err := hook(ctx, cmd); err != nil {
-			recordError(span, err)
-			return err
-		}
-		return nil
+		err := hook(ctx, cmd)
+		recordError(span, err)
+		return err
 	}
 }
-func (h *tracingHook) ProcessPipelineHook(
-	hook redis.ProcessPipelineHook,
-) redis.ProcessPipelineHook {
+func (h *tracingHook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
 	return func(ctx context.Context, cmds []redis.Cmder) error {
-		summary, cmdsString := rediscmd.CmdsString(cmds)
+		_, cmdsString := rediscmd.CmdsString(cmds)
 		opts := append(h.opts, tracer.WithSpanLabels(
 			"db.redis.num_cmd", strconv.Itoa(len(cmds)),
 			"db.statement", cmdsString,
 		))

-		ctx, span := h.tr.Start(ctx, "redis.pipeline "+summary, opts...)
+		_, span := h.tr.Start(ctx, "redis.process_pipeline", opts...)
 		defer span.Finish()

-		if err := hook(ctx, cmds); err != nil {
-			recordError(span, err)
-			return err
-		}
-		return nil
+		err := hook(ctx, cmds)
+		recordError(span, err)
+		return err
 	}
 }

+func setSpanError(ctx context.Context, err error) {
+	if err == nil || err == redis.Nil {
+		return
+	}
+	if sp, ok := tracer.SpanFromContext(ctx); !ok && sp != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+	}
+}
+
 func recordError(span tracer.Span, err error) {
-	if err != redis.Nil {
+	if err != nil && err != redis.Nil {
 		span.SetStatus(tracer.SpanStatusError, err.Error())
 	}
 }
@@ -119,24 +119,3 @@ func formatDBConnString(network, addr string) string {
 	}
 	return fmt.Sprintf("%s://%s", network, addr)
 }
-
-// Database span attributes semantic conventions recommended server address and port
-// https://opentelemetry.io/docs/specs/semconv/database/database-spans/#connection-level-attributes
-func addServerAttributes(opts []tracer.SpanOption, addr string) []tracer.SpanOption {
-	host, portString, err := net.SplitHostPort(addr)
-	if err != nil {
-		return opts
-	}
-
-	opts = append(opts, tracer.WithSpanLabels("server.address", host))
-
-	// Parse the port string to an integer
-	port, err := strconv.Atoi(portString)
-	if err != nil {
-		return opts
-	}
-
-	opts = append(opts, tracer.WithSpanLabels("server.port", port))
-
-	return opts
-}
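
Net effect of the three commits, for orientation: the store methods no longer start their own spans, the go-redis hooks use the fixed span names redis.dial, redis.process, and redis.process_pipeline instead of per-command names, and the hooks no longer rebind ctx when starting a span, which is what produced the duplicate nested redis spans. A minimal usage sketch under assumptions: the module import path and the Addrs option follow the usual micro v3 store API and are not shown in this compare.

package main

import (
	"context"

	redisstore "go.unistack.org/micro-store-redis/v3" // assumed import path
	"go.unistack.org/micro/v3/store"
)

func main() {
	// NewStore/Addrs are assumed to follow the usual micro v3 store API.
	s := redisstore.NewStore(store.Addrs("redis://127.0.0.1:6379"))
	if err := s.Init(); err != nil {
		panic(err)
	}
	ctx := context.Background()
	// Connect pings the configured client; a failure is also handed to
	// setSpanError before being returned.
	if err := s.Connect(ctx); err != nil {
		panic(err)
	}
	// Each call now yields one hook-level "redis.process" span rather than
	// a per-key "cache write <key>" span with a duplicate span nested inside.
	if err := s.Write(ctx, "mykey", "myval"); err != nil {
		panic(err)
	}
	var val string
	if err := s.Read(ctx, "mykey", &val); err != nil {
		panic(err)
	}
}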