Compare commits

...

9 Commits

SHA1        Message                                         Author                                     Date
2f3951773f  cleanup trace spans                             Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-05 23:11:12 +03:00
b263e14032  cleanup trace spans from cluster slots command  Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-05 13:44:29 +03:00
518cc1db73  fixup duplicate nested redis span               Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-05 12:59:05 +03:00
4484cd34ec  unify span names                                Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-05 09:16:08 +03:00
7bceeee6bf  unify span names                                Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-05 08:57:29 +03:00
aed9512b93  improve metrics and tracing                     Vasiliy Tolstov <v.tolstov@unistack.org>  2024-07-04 15:13:19 +03:00
de72a10973  rework cluster mode                             Vasiliy Tolstov <v.tolstov@unistack.org>  2024-04-23 09:30:35 +03:00
62c2de51d4  fixup strings pool                              Vasiliy Tolstov <v.tolstov@unistack.org>  2024-04-15 08:41:58 +03:00
741b2310ec  add tracer support                              Vasiliy Tolstov <v.tolstov@unistack.org>  2024-04-14 22:40:30 +03:00
7 changed files with 488 additions and 108 deletions

go.mod (10 changed lines)

@@ -1,14 +1,16 @@
 module go.unistack.org/micro-store-redis/v3
 
-go 1.20
+go 1.21
 
+toolchain go1.22.4
+
 require (
-	github.com/redis/go-redis/v9 v9.2.1
-	go.unistack.org/micro/v3 v3.10.62
+	github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3
+	github.com/redis/go-redis/v9 v9.5.3
+	go.unistack.org/micro/v3 v3.10.80
 )
 
 require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
 )
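
For reference, a dependency and toolchain bump like the one above is typically produced with the standard module tooling (commands assumed, run at the module root):

go get github.com/redis/go-redis/v9@v9.5.3
go get github.com/redis/go-redis/extra/rediscmd/v9@v9.5.3
go get go.unistack.org/micro/v3@v3.10.80
go mod tidy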

go.sum (14 changed lines)

@@ -1,12 +1,14 @@
 github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
-github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg=
-github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
-go.unistack.org/micro/v3 v3.10.62 h1:PCwLSt3W53UGosH/5qU3kU0iJxK8jlKOm9p4v/Zti5o=
-go.unistack.org/micro/v3 v3.10.62/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3 h1:1/BDligzCa40GTllkDnY3Y5DTHuKCONbB2JcRyIfl20=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3/go.mod h1:3dZmcLn3Qw6FLlWASn1g4y+YO9ycEFUOM+bhBmzLVKQ=
+github.com/redis/go-redis/v9 v9.5.3 h1:fOAp1/uJG+ZtcITgZOfYFmTKPE7n4Vclj1wZFgRciUU=
+github.com/redis/go-redis/v9 v9.5.3/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+go.unistack.org/micro/v3 v3.10.80 h1:A0zWNoM9MOcMg9gdFFgVkgbT3uSYVIINhuvumX9nP2o=
+go.unistack.org/micro/v3 v3.10.80/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=

options.go

@@ -1,8 +1,13 @@
 package redis
 
 import (
+	"time"
+
 	"github.com/redis/go-redis/v9"
+	"go.unistack.org/micro/v3/logger"
+	"go.unistack.org/micro/v3/meter"
 	"go.unistack.org/micro/v3/store"
+	"go.unistack.org/micro/v3/tracer"
 )
 
 type configKey struct{}
@@ -16,3 +21,59 @@ type clusterConfigKey struct{}
 func ClusterConfig(c *redis.ClusterOptions) store.Option {
 	return store.SetOption(clusterConfigKey{}, c)
 }
+
+var (
+	// DefaultMeterStatsInterval holds default stats interval
+	DefaultMeterStatsInterval = 5 * time.Second
+	// DefaultMeterMetricPrefix holds default metric prefix
+	DefaultMeterMetricPrefix = "micro_store_"
+)
+
+// Options struct holds wrapper options
+type Options struct {
+	Logger             logger.Logger
+	Meter              meter.Meter
+	Tracer             tracer.Tracer
+	MeterMetricPrefix  string
+	MeterStatsInterval time.Duration
+}
+
+// Option func signature
+type Option func(*Options)
+
+// NewOptions create new Options struct from provided option slice
+func NewOptions(opts ...Option) Options {
+	options := Options{
+		Logger:             logger.DefaultLogger,
+		Meter:              meter.DefaultMeter,
+		Tracer:             tracer.DefaultTracer,
+		MeterStatsInterval: DefaultMeterStatsInterval,
+		MeterMetricPrefix:  DefaultMeterMetricPrefix,
+	}
+
+	for _, o := range opts {
+		o(&options)
+	}
+
+	options.Meter = options.Meter.Clone(
+		meter.MetricPrefix(options.MeterMetricPrefix),
+	)
+
+	options.Logger = options.Logger.Clone(logger.WithCallerSkipCount(1))
+
+	return options
+}
+
+// MetricInterval specifies the stats collection interval
+func MetricInterval(td time.Duration) Option {
+	return func(o *Options) {
+		o.MeterStatsInterval = td
+	}
+}
+
+// MetricPrefix specifies prefix for each metric
+func MetricPrefix(pref string) Option {
+	return func(o *Options) {
+		o.MeterMetricPrefix = pref
+	}
+}
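
The new options are plain functional options collapsing into a single Options value; a minimal usage sketch (hypothetical caller code, import alias assumed because the package itself is named redis):

package main

import (
	"time"

	redisstore "go.unistack.org/micro-store-redis/v3"
)

func main() {
	// Build wrapper options with a custom metric prefix and stats interval.
	// Logger/Meter/Tracer fall back to the micro defaults, and NewOptions
	// clones the meter with the configured prefix.
	opts := redisstore.NewOptions(
		redisstore.MetricPrefix("myapp_store_"),
		redisstore.MetricInterval(10*time.Second),
	)
	_ = opts
}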

redis.go (288 changed lines)

@@ -44,26 +44,24 @@ var (
 type Store struct {
 	opts store.Options
-	cli  redisClient
-	pool pool.Pool[strings.Builder]
+	cli  *wrappedClient
+	done chan struct{}
+	pool pool.Pool[*strings.Builder]
 }
 
-type redisClient interface {
-	Get(ctx context.Context, key string) *redis.StringCmd
-	Del(ctx context.Context, keys ...string) *redis.IntCmd
-	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd
-	Keys(ctx context.Context, pattern string) *redis.StringSliceCmd
-	MGet(ctx context.Context, keys ...string) *redis.SliceCmd
-	MSet(ctx context.Context, kv ...interface{}) *redis.StatusCmd
-	Exists(ctx context.Context, keys ...string) *redis.IntCmd
-	Ping(ctx context.Context) *redis.StatusCmd
-	Pipeline() redis.Pipeliner
-	Pipelined(ctx context.Context, fn func(redis.Pipeliner) error) ([]redis.Cmder, error)
-	Close() error
+type wrappedClient struct {
+	*redis.Client
+	*redis.ClusterClient
 }
 
 func (r *Store) Connect(ctx context.Context) error {
-	return r.cli.Ping(ctx).Err()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Ping(ctx).Err()
+	} else {
+		err = r.cli.ClusterClient.Ping(ctx).Err()
+	}
+	setSpanError(ctx, err)
+	return err
 }
 
 func (r *Store) Init(opts ...store.Option) error {
@@ -71,15 +69,42 @@ func (r *Store) Init(opts ...store.Option) error {
 		o(&r.opts)
 	}
 
-	return r.configure()
+	err := r.configure()
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
-func (r *Store) Redis() *redis.Client {
-	return r.cli.(*redis.Client)
+func (r *Store) Client() *redis.Client {
+	if r.cli.Client != nil {
+		return r.cli.Client
+	}
+	return nil
+}
+
+func (r *Store) ClusterClient() *redis.ClusterClient {
+	if r.cli.ClusterClient != nil {
+		return r.cli.ClusterClient
+	}
+	return nil
 }
 
 func (r *Store) Disconnect(ctx context.Context) error {
-	return r.cli.Close()
+	var err error
+	select {
+	case <-r.done:
+		return err
+	default:
+		if r.cli.Client != nil {
+			err = r.cli.Client.Close()
+		} else if r.cli.ClusterClient != nil {
+			err = r.cli.ClusterClient.Close()
+		}
+		close(r.done)
+		return err
+	}
 }
 
 func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
@@ -97,23 +122,28 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
 	}
 
 	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+	ctx, sp := r.opts.Tracer.Start(ctx, "cache read "+rkey)
+	defer sp.Finish()
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	val, err := r.cli.Exists(ctx, rkey).Result()
+	var err error
+	var val int64
+	if r.cli.Client != nil {
+		val, err = r.cli.Client.Exists(ctx, rkey).Result()
+	} else {
+		val, err = r.cli.ClusterClient.Exists(ctx, rkey).Result()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil || (err == nil && val == 0) {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -134,20 +164,29 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 		defer cancel()
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	buf, err := r.cli.Get(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Bytes()
+	var buf []byte
+	var err error
+	if r.cli.Client != nil {
+		buf, err = r.cli.Client.Get(ctx, rkey).Bytes()
+	} else {
+		buf, err = r.cli.ClusterClient.Get(ctx, rkey).Bytes()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil || (err == nil && buf == nil) {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -157,7 +196,9 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
 	case *string:
 		*b = string(buf)
 	default:
-		err = r.opts.Codec.Unmarshal(buf, val)
+		if err = r.opts.Codec.Unmarshal(buf, val); err != nil {
+			setSpanError(ctx, err)
+		}
 	}
 
 	return err
@@ -183,20 +224,27 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	rvals, err := r.cli.MGet(ctx, keys...).Result()
+	var rvals []interface{}
+	var err error
+	if r.cli.Client != nil {
+		rvals, err = r.cli.Client.MGet(ctx, keys...).Result()
+	} else {
+		rvals, err = r.cli.ClusterClient.MGet(ctx, keys...).Result()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil || (len(rvals) == 0) {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -237,10 +285,12 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
 		itm.Set(reflect.New(vt.Elem()))
 		if err = r.opts.Codec.Unmarshal(buf, itm.Interface()); err != nil {
+			setSpanError(ctx, err)
 			return err
 		}
 	}
 	vv.Set(nvv)
 
 	return nil
 }
@@ -264,20 +314,26 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	err := r.cli.Del(ctx, keys...).Err()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Del(ctx, keys...).Err()
+	} else {
+		err = r.cli.ClusterClient.Del(ctx, keys...).Err()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -298,20 +354,26 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
 		defer cancel()
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	err := r.cli.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	} else {
+		err = r.cli.ClusterClient.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -351,39 +413,50 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	cmds, err := r.cli.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+	pipeliner := func(pipe redis.Pipeliner) error {
 		for idx := 0; idx < len(kvs); idx += 2 {
 			if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
+				setSpanError(ctx, err)
 				return err
 			}
 		}
 		return nil
-	})
+	}
+
+	var err error
+	var cmds []redis.Cmder
+	if r.cli.Client != nil {
+		cmds, err = r.cli.Client.Pipelined(ctx, pipeliner)
+	} else {
+		cmds, err = r.cli.ClusterClient.Pipelined(ctx, pipeliner)
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
 	for _, cmd := range cmds {
 		if err = cmd.Err(); err != nil {
 			if err == redis.Nil {
-				r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+				r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 				return store.ErrNotFound
 			}
-			r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+			setSpanError(ctx, err)
+			r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 			return err
 		}
 	}
@@ -405,6 +478,8 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 		defer cancel()
 	}
 
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+
 	var buf []byte
 	switch vt := val.(type) {
 	case string:
@@ -419,20 +494,26 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	err := r.cli.Set(ctx, r.getKey(r.opts.Namespace, options.Namespace, key), buf, options.TTL).Err()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Set(ctx, rkey, buf, options.TTL).Err()
+	} else {
+		err = r.cli.ClusterClient.Set(ctx, rkey, buf, options.TTL).Err()
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
@@ -462,20 +543,35 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
 	}
 
 	// TODO: add support for prefix/suffix/limit
 
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	keys, err := r.cli.Keys(ctx, rkey).Result()
+	var keys []string
+	var err error
+	if r.cli.Client != nil {
+		keys, err = r.cli.Client.Keys(ctx, rkey).Result()
+	} else {
+		err = r.cli.ClusterClient.ForEachMaster(ctx, func(nctx context.Context, cli *redis.Client) error {
+			nkeys, nerr := cli.Keys(nctx, rkey).Result()
+			if nerr != nil {
+				return nerr
+			}
+			keys = append(keys, nkeys...)
+			return nil
+		})
+	}
+	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
-	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
 	if err == redis.Nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return nil, store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return nil, err
 	}
 
@@ -507,7 +603,7 @@ func (r *Store) String() string {
 }
 
 func NewStore(opts ...store.Option) *Store {
-	return &Store{opts: store.NewOptions(opts...)}
+	return &Store{done: make(chan struct{}), opts: store.NewOptions(opts...)}
 }
 
 func (r *Store) configure() error {
@@ -563,11 +659,19 @@ func (r *Store) configure() error {
 	}
 
 	if redisOptions != nil {
-		r.cli = redis.NewClient(redisOptions)
+		c := redis.NewClient(redisOptions)
+		setTracing(c, r.opts.Tracer)
+		r.cli = &wrappedClient{Client: c}
 	} else if redisClusterOptions != nil {
-		r.cli = redis.NewClusterClient(redisClusterOptions)
+		c := redis.NewClusterClient(redisClusterOptions)
+		setTracing(c, r.opts.Tracer)
+		r.cli = &wrappedClient{ClusterClient: c}
 	}
 
+	r.pool = pool.NewPool(func() *strings.Builder { return &strings.Builder{} })
+
+	r.statsMeter()
+
 	return nil
 }
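
After configure(), the wrappedClient holds exactly one non-nil concrete client, which the if/else branches above rely on. A hedged construction sketch (addresses are placeholders):

package main

import (
	"context"

	goredis "github.com/redis/go-redis/v9"
	redisstore "go.unistack.org/micro-store-redis/v3"
	"go.unistack.org/micro/v3/store"
)

func main() {
	ctx := context.Background()

	// Single-node mode: configure() builds a *redis.Client, attaches the
	// tracing hook and wraps it; Client() is non-nil, ClusterClient() is nil.
	single := redisstore.NewStore(store.Addrs("redis://127.0.0.1:6379"))
	if err := single.Init(); err != nil {
		panic(err)
	}
	if err := single.Connect(ctx); err != nil {
		panic(err)
	}

	// Cluster mode: passing ClusterConfig makes configure() build a
	// *redis.ClusterClient instead, and List() fans out via ForEachMaster.
	cluster := redisstore.NewStore(redisstore.ClusterConfig(&goredis.ClusterOptions{
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001"},
	}))
	if err := cluster.Init(); err != nil {
		panic(err)
	}
	_ = cluster
}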

redis_test.go

@@ -7,14 +7,41 @@ import (
 	"testing"
 	"time"
 
 	"github.com/redis/go-redis/v9"
 	"go.unistack.org/micro/v3/store"
+	"go.unistack.org/micro/v3/tracer"
 )
 
+func TestKeepTTL(t *testing.T) {
+	ctx := context.Background()
+
+	if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
+		t.Skip()
+	}
+
+	r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))
+	if err := r.Init(); err != nil {
+		t.Fatal(err)
+	}
+	if err := r.Connect(ctx); err != nil {
+		t.Fatal(err)
+	}
+
+	key := "key"
+
+	err := r.Write(ctx, key, "val1", store.WriteTTL(15*time.Second))
+	if err != nil {
+		t.Fatalf("Write error: %v", err)
+	}
+
+	time.Sleep(3 * time.Second)
+
+	err = r.Write(ctx, key, "val2", store.WriteTTL(-1))
+	if err != nil {
+		t.Fatalf("Write error: %v", err)
+	}
+}
+
 func Test_rkv_configure(t *testing.T) {
 	type fields struct {
 		options store.Options
-		Client  *redis.Client
+		Client  *wrappedClient
 	}
 	type wantValues struct {
 		username string
@@ -37,7 +64,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "legacy Url", fields: fields{options: store.Options{Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
+			name: "legacy Url", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "",
@@ -45,7 +72,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "New Url", fields: fields{options: store.Options{Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
+			name: "New Url", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "",
@@ -53,7 +80,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "Url with Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
+			name: "Url with Pwd", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "password",
@@ -61,7 +88,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "Url with username and Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
+			name: "Url with username and Pwd", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "username",
 				password: "password",
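
TestKeepTTL depends on go-redis treating a negative expiration as redis.KeepTTL (defined as -1): the second Write replaces the value but keeps the TTL set by the first. The same behavior with go-redis directly (local address assumed):

package main

import (
	"context"
	"time"

	goredis "github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	cli := goredis.NewClient(&goredis.Options{Addr: "127.0.0.1:6379"})

	// First SET establishes a 15s TTL.
	if err := cli.Set(ctx, "key", "val1", 15*time.Second).Err(); err != nil {
		panic(err)
	}
	// KeepTTL rewrites the value while preserving the remaining TTL.
	if err := cli.Set(ctx, "key", "val2", goredis.KeepTTL).Err(); err != nil {
		panic(err)
	}
}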

stats.go (new file, 57 lines)

@@ -0,0 +1,57 @@
+package redis
+
+import (
+	"time"
+
+	"github.com/redis/go-redis/v9"
+)
+
+var (
+	PoolHitsTotal        = "pool_hits_total"
+	PoolMissesTotal      = "pool_misses_total"
+	PoolTimeoutTotal     = "pool_timeout_total"
+	PoolConnTotalCurrent = "pool_conn_total_current"
+	PoolConnIdleCurrent  = "pool_conn_idle_current"
+	PoolConnStaleTotal   = "pool_conn_stale_total"
+
+	meterRequestTotal               = "request_total"
+	meterRequestLatencyMicroseconds = "latency_microseconds"
+	meterRequestDurationSeconds     = "request_duration_seconds"
+)
+
+type Statser interface {
+	PoolStats() *redis.PoolStats
+}
+
+func (r *Store) statsMeter() {
+	var st Statser
+
+	if r.cli.Client != nil {
+		st = r.cli.Client
+	} else if r.cli.ClusterClient != nil {
+		st = r.cli.ClusterClient
+	} else {
+		return
+	}
+
+	go func() {
+		ticker := time.NewTicker(DefaultMeterStatsInterval)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-ticker.C:
+				if st == nil {
+					return
+				}
+				stats := st.PoolStats()
+				r.opts.Meter.Counter(PoolHitsTotal).Set(uint64(stats.Hits))
+				r.opts.Meter.Counter(PoolMissesTotal).Set(uint64(stats.Misses))
+				r.opts.Meter.Counter(PoolTimeoutTotal).Set(uint64(stats.Timeouts))
+				r.opts.Meter.Counter(PoolConnTotalCurrent).Set(uint64(stats.TotalConns))
+				r.opts.Meter.Counter(PoolConnIdleCurrent).Set(uint64(stats.IdleConns))
+				r.opts.Meter.Counter(PoolConnStaleTotal).Set(uint64(stats.StaleConns))
+			}
+		}
+	}()
+}
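
statsMeter only needs PoolStats, and both concrete go-redis clients provide it; a compile-time sketch of that assumption (illustrative, in-package):

package redis

import "github.com/redis/go-redis/v9"

// Compile-time checks: both go-redis client flavors implement Statser,
// so statsMeter can poll pool statistics without caring which one is set.
var (
	_ Statser = (*redis.Client)(nil)
	_ Statser = (*redis.ClusterClient)(nil)
)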

tracer.go (new file, 127 lines)

@@ -0,0 +1,127 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+
+	"github.com/redis/go-redis/extra/rediscmd/v9"
+	"github.com/redis/go-redis/v9"
+	"go.unistack.org/micro/v3/tracer"
+)
+
+func setTracing(rdb redis.UniversalClient, tr tracer.Tracer, opts ...tracer.SpanOption) {
+	switch rdb := rdb.(type) {
+	case *redis.Client:
+		opt := rdb.Options()
+		connString := formatDBConnString(opt.Network, opt.Addr)
+		rdb.AddHook(newTracingHook(connString, tr))
+	case *redis.ClusterClient:
+		rdb.OnNewNode(func(rdb *redis.Client) {
+			opt := rdb.Options()
+			connString := formatDBConnString(opt.Network, opt.Addr)
+			rdb.AddHook(newTracingHook(connString, tr))
+		})
+	case *redis.Ring:
+		rdb.OnNewNode(func(rdb *redis.Client) {
+			opt := rdb.Options()
+			connString := formatDBConnString(opt.Network, opt.Addr)
+			rdb.AddHook(newTracingHook(connString, tr))
+		})
+	}
+}
+
+type tracingHook struct {
+	tr   tracer.Tracer
+	opts []tracer.SpanOption
+}
+
+var _ redis.Hook = (*tracingHook)(nil)
+
+func newTracingHook(connString string, tr tracer.Tracer, opts ...tracer.SpanOption) *tracingHook {
+	opts = append(opts, tracer.WithSpanKind(tracer.SpanKindClient))
+	if connString != "" {
+		opts = append(opts, tracer.WithSpanLabels("db.connection_string", connString))
+	}
+
+	return &tracingHook{
+		tr:   tr,
+		opts: opts,
+	}
+}
+
+func (h *tracingHook) DialHook(hook redis.DialHook) redis.DialHook {
+	return func(ctx context.Context, network, addr string) (net.Conn, error) {
+		_, span := h.tr.Start(ctx, "redis.dial", h.opts...)
+		defer span.Finish()
+
+		conn, err := hook(ctx, network, addr)
+		recordError(span, err)
+
+		return conn, err
+	}
+}
+
+func (h *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+	return func(ctx context.Context, cmd redis.Cmder) error {
+		cmdString := rediscmd.CmdString(cmd)
+		var err error
+
+		switch cmdString {
+		case "cluster slots":
+			break
+		default:
+			_, span := h.tr.Start(ctx, "redis.process", append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...)
+			defer func() {
+				recordError(span, err)
+				span.Finish()
+			}()
+		}
+
+		err = hook(ctx, cmd)
+
+		return err
+	}
+}
+
+func (h *tracingHook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+	return func(ctx context.Context, cmds []redis.Cmder) error {
+		_, cmdsString := rediscmd.CmdsString(cmds)
+
+		opts := append(h.opts, tracer.WithSpanLabels(
+			"db.redis.num_cmd", strconv.Itoa(len(cmds)),
+			"db.statement", cmdsString,
+		))
+
+		_, span := h.tr.Start(ctx, "redis.process_pipeline", opts...)
+		defer span.Finish()
+
+		err := hook(ctx, cmds)
+		recordError(span, err)
+
+		return err
+	}
+}
+
+func setSpanError(ctx context.Context, err error) {
+	if err == nil || err == redis.Nil {
+		return
+	}
+	if sp, ok := tracer.SpanFromContext(ctx); ok && sp != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+	}
+}
+
+func recordError(span tracer.Span, err error) {
+	if err != nil && err != redis.Nil {
+		span.SetStatus(tracer.SpanStatusError, err.Error())
+	}
+}
+
+func formatDBConnString(network, addr string) string {
+	if network == "tcp" {
+		network = "redis"
+	}
+	return fmt.Sprintf("%s://%s", network, addr)
+}
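
Since setTracing is unexported, the hooks are attached only from configure(). An in-package sketch of the single-node path it takes (illustrative, not part of the diff):

package redis

import (
	"github.com/redis/go-redis/v9"
	"go.unistack.org/micro/v3/tracer"
)

// exampleAttach mirrors what setTracing does for a plain *redis.Client:
// derive a connection string from the client options and register the
// tracing hook, after which every dial, command and pipeline gets a span.
func exampleAttach(tr tracer.Tracer) *redis.Client {
	cli := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	opt := cli.Options()
	cli.AddHook(newTracingHook(formatDBConnString(opt.Network, opt.Addr), tr))
	return cli
}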