Compare commits

..

5 Commits

Author SHA1 Message Date
62c2de51d4 fixup strings pool
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-15 08:41:58 +03:00
741b2310ec add tracer support
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-14 22:40:30 +03:00
1e8a44b088 add meter support
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-14 22:27:28 +03:00
2245314c2f Merge pull request 'add ability to get *redis.Client' (#110) from redis into v3
Reviewed-on: #110
2023-12-12 13:49:14 +03:00
db770c3fe7 add ability to get *redis.Client
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-12 13:48:47 +03:00
5 changed files with 372 additions and 174 deletions

go.mod (4 changed lines)

@@ -1,10 +1,10 @@
module go.unistack.org/micro-store-redis/v4
module go.unistack.org/micro-store-redis/v3
go 1.20
require (
github.com/redis/go-redis/v9 v9.2.1
go.unistack.org/micro/v4 v4.0.7
go.unistack.org/micro/v3 v3.10.62
)
require (

go.sum (4 changed lines)

@@ -8,5 +8,5 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg=
github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
go.unistack.org/micro/v4 v4.0.7 h1:2lwtZlHcSwgkahhFbkI4x1lOS79lw8uLHtcEhlFF+AM=
go.unistack.org/micro/v4 v4.0.7/go.mod h1:bVEYTlPi0EsdgZZt311bIroDg9ict7ky3C87dSCCAGk=
go.unistack.org/micro/v3 v3.10.62 h1:PCwLSt3W53UGosH/5qU3kU0iJxK8jlKOm9p4v/Zti5o=
go.unistack.org/micro/v3 v3.10.62/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=


@@ -2,17 +2,17 @@ package redis
import (
"github.com/redis/go-redis/v9"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v3/store"
)
type configKey struct{}
func Config(c *redis.Options) options.Option {
return options.ContextOption(configKey{}, c)
func Config(c *redis.Options) store.Option {
return store.SetOption(configKey{}, c)
}
type clusterConfigKey struct{}
func ClusterConfig(c *redis.ClusterOptions) options.Option {
return options.ContextOption(clusterConfigKey{}, c)
func ClusterConfig(c *redis.ClusterOptions) store.Option {
return store.SetOption(clusterConfigKey{}, c)
}
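
For reference, a minimal sketch of how these option helpers would typically be passed to the store constructor; the import aliases (goredis, redisstore) and the Redis address are illustrative, and the example assumes the package is imported from the v3 module path shown in go.mod above.

```go
package main

import (
	"context"

	goredis "github.com/redis/go-redis/v9"
	redisstore "go.unistack.org/micro-store-redis/v3"
)

func main() {
	// Config wraps a prebuilt *redis.Options as a store.Option
	// (see the Config helper above); the address is illustrative.
	s := redisstore.NewStore(redisstore.Config(&goredis.Options{
		Addr: "127.0.0.1:6379",
	}))
	if err := s.Init(); err != nil {
		panic(err)
	}
	if err := s.Connect(context.TODO()); err != nil {
		panic(err)
	}
}
```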

redis.go (507 changed lines)

@@ -7,14 +7,46 @@ import (
"strings"
"time"
"github.com/redis/go-redis/v9"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/store"
redis "github.com/redis/go-redis/v9"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v3/tracer"
pool "go.unistack.org/micro/v3/util/xpool"
)
var (
DefaultPathSeparator = "/"
DefaultClusterOptions = &redis.ClusterOptions{
Username: "",
Password: "", // no password set
MaxRetries: 2,
MaxRetryBackoff: 256 * time.Millisecond,
DialTimeout: 1 * time.Second,
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
PoolTimeout: 1 * time.Second,
MinIdleConns: 10,
}
DefaultOptions = &redis.Options{
Username: "",
Password: "", // no password set
DB: 0, // use default DB
MaxRetries: 2,
MaxRetryBackoff: 256 * time.Millisecond,
DialTimeout: 1 * time.Second,
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
PoolTimeout: 1 * time.Second,
MinIdleConns: 10,
}
)
type Store struct {
opts store.Options
cli redisClient
pool pool.Pool[*strings.Builder]
}
type redisClient interface {
@@ -35,7 +67,7 @@ func (r *Store) Connect(ctx context.Context) error {
return r.cli.Ping(ctx).Err()
}
func (r *Store) Init(opts ...options.Option) error {
func (r *Store) Init(opts ...store.Option) error {
for _, o := range opts {
o(&r.opts)
}
@@ -43,94 +75,143 @@ func (r *Store) Init(opts ...options.Option) error {
return r.configure()
}
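// Redis exposes the underlying go-redis client for commands the store does
// not wrap. The type assertion below is unchecked, so calling it on a store
// configured with cluster options (backed by *redis.ClusterClient) panics.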
func (r *Store) Redis() *redis.Client {
return r.cli.(*redis.Client)
}
func (r *Store) Disconnect(ctx context.Context) error {
return r.cli.Close()
}
func (r *Store) Exists(ctx context.Context, key string, opts ...options.Option) error {
if r.opts.Timeout > 0 {
func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
options := store.NewExistsOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
options := store.NewExistsOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
if options.Namespace != "" {
key = fmt.Sprintf("%s%s", r.opts.Namespace, key)
}
val, err := r.cli.Exists(ctx, key).Result()
if err != nil {
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
ctx, sp := r.opts.Tracer.Start(ctx, "cache exists "+rkey)
defer sp.Finish()
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
val, err := r.cli.Exists(ctx, rkey).Result()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil || (err == nil && val == 0) {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
if val == 0 {
return store.ErrNotFound
}
return nil
}
func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...options.Option) error {
if r.opts.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
defer cancel()
}
func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
options := store.NewReadOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
if options.Namespace != "" {
key = fmt.Sprintf("%s%s", options.Namespace, key)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
buf, err := r.cli.Get(ctx, key).Bytes()
if err != nil && err == redis.Nil {
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
ctx, sp := r.opts.Tracer.Start(ctx, "cache read "+rkey)
defer sp.Finish()
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
buf, err := r.cli.Get(ctx, rkey).Bytes()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil || (err == nil && buf == nil) {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
if buf == nil {
return store.ErrNotFound
switch b := val.(type) {
case *[]byte:
*b = buf
case *string:
*b = string(buf)
default:
if err = r.opts.Codec.Unmarshal(buf, val); err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
return r.opts.Codec.Unmarshal(buf, val)
}
return err
}
func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...options.Option) error {
if len(keys) == 1 {
vt := reflect.ValueOf(vals)
if vt.Kind() == reflect.Ptr {
vt = reflect.Indirect(vt)
return r.Read(ctx, keys[0], vt.Index(0).Interface(), opts...)
func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
options := store.NewReadOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
}
if r.opts.Timeout > 0 {
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
options := store.NewReadOptions(opts...)
rkeys := make([]string, 0, len(keys))
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
for _, key := range keys {
if options.Namespace != "" {
rkeys = append(rkeys, fmt.Sprintf("%s%s", options.Namespace, key))
} else {
rkeys = append(rkeys, key)
if r.opts.Namespace != "" || options.Namespace != "" {
for idx, key := range keys {
keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
}
}
rvals, err := r.cli.MGet(ctx, rkeys...).Result()
if err != nil && err == redis.Nil {
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mread %v", keys))
defer sp.Finish()
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
rvals, err := r.cli.MGet(ctx, keys...).Result()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil || (len(rvals) == 0) {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
if len(rvals) == 0 {
return store.ErrNotFound
}
vv := reflect.ValueOf(vals)
vt := reflect.TypeOf(vals)
@@ -160,77 +241,127 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
// special case for raw data
if vt.Kind() == reflect.Slice && vt.Elem().Kind() == reflect.Uint8 {
itm.Set(reflect.MakeSlice(itm.Type(), len(buf), len(buf)))
} else {
itm.Set(reflect.New(vt.Elem()))
itm.SetBytes(buf)
continue
} else if vt.Kind() == reflect.String {
itm.SetString(string(buf))
continue
}
itm.Set(reflect.New(vt.Elem()))
if err = r.opts.Codec.Unmarshal(buf, itm.Interface()); err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
return err
}
}
vv.Set(nvv)
return nil
}
func (r *Store) MDelete(ctx context.Context, keys []string, opts ...options.Option) error {
if len(keys) == 1 {
return r.Delete(ctx, keys[0], opts...)
func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
options := store.NewDeleteOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if r.opts.Timeout > 0 {
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
options := store.NewDeleteOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
if r.opts.Namespace != "" || options.Namespace != "" {
for idx, key := range keys {
keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
}
if options.Namespace == "" {
return r.cli.Del(ctx, keys...).Err()
}
for idx := range keys {
keys[idx] = options.Namespace + keys[idx]
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mdelete %v", keys))
defer sp.Finish()
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
err := r.cli.Del(ctx, keys...).Err()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
return r.cli.Del(ctx, keys...).Err()
return nil
}
func (r *Store) Delete(ctx context.Context, key string, opts ...options.Option) error {
if r.opts.Timeout > 0 {
func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
options := store.NewDeleteOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
options := store.NewDeleteOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache delete %v", key))
defer sp.Finish()
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
err := r.cli.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
if options.Namespace == "" {
return r.cli.Del(ctx, key).Err()
}
return r.cli.Del(ctx, fmt.Sprintf("%s%s", options.Namespace, key)).Err()
return nil
}
func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...options.Option) error {
if len(keys) == 1 {
return r.Write(ctx, keys[0], vals[0], opts...)
}
if r.opts.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
defer cancel()
}
func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
options := store.NewWriteOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mwrite %v", keys))
defer sp.Finish()
kvs := make([]string, 0, len(keys)*2)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
for idx, key := range keys {
if options.Namespace != "" {
kvs = append(kvs, fmt.Sprintf("%s%s", options.Namespace, key))
} else {
kvs = append(kvs, key)
}
kvs = append(kvs, r.getKey(r.opts.Namespace, options.Namespace, key))
switch vt := vals[idx].(type) {
case string:
@@ -240,24 +371,48 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
default:
buf, err := r.opts.Codec.Marshal(vt)
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
return err
}
kvs = append(kvs, string(buf))
}
}
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
cmds, err := r.cli.Pipelined(ctx, func(pipe redis.Pipeliner) error {
for idx := 0; idx < len(kvs); idx += 2 {
pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result()
if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
return err
}
}
return nil
})
if err != nil {
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
for _, cmd := range cmds {
if err = cmd.Err(); err != nil {
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
}
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
}
@@ -265,17 +420,23 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
return nil
}
func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...options.Option) error {
if r.opts.Timeout > 0 {
func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
options := store.NewWriteOptions(opts...)
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
options := store.NewWriteOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache write %v", rkey))
defer sp.Finish()
var buf []byte
switch vt := val.(type) {
@@ -287,46 +448,89 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
var err error
buf, err = r.opts.Codec.Marshal(val)
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
return err
}
}
if options.Namespace != "" {
key = fmt.Sprintf("%s%s", options.Namespace, key)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
err := r.cli.Set(ctx, rkey, buf, options.TTL).Err()
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return err
}
return r.cli.Set(ctx, key, buf, options.TTL).Err()
return err
}
func (r *Store) List(ctx context.Context, opts ...options.Option) ([]string, error) {
func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
options := store.NewListOptions(opts...)
if len(options.Namespace) == 0 {
options.Namespace = r.opts.Namespace
}
rkey := fmt.Sprintf("%s%s*", options.Namespace, options.Prefix)
rkey := r.getKey(options.Namespace, "", options.Prefix+"*")
if options.Suffix != "" {
rkey += options.Suffix
}
if r.opts.Timeout > 0 {
timeout := r.opts.Timeout
if options.Timeout > 0 {
timeout = options.Timeout
}
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache list %v", rkey))
defer sp.Finish()
// TODO: add support for prefix/suffix/limit
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
ts := time.Now()
keys, err := r.cli.Keys(ctx, rkey).Result()
if err != nil {
te := time.Since(ts)
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
if err == redis.Nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
return nil, store.ErrNotFound
} else if err == nil {
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
} else if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
return nil, err
}
if options.Namespace == "" {
prefix := r.opts.Namespace
if options.Namespace != "" {
prefix = options.Namespace
}
if prefix == "" {
return keys, nil
}
nkeys := make([]string, 0, len(keys))
for _, key := range keys {
nkeys = append(nkeys, strings.TrimPrefix(key, options.Namespace))
for idx, key := range keys {
keys[idx] = strings.TrimPrefix(key, prefix)
}
return nkeys, nil
return keys, nil
}
func (r *Store) Options() store.Options {
@@ -341,7 +545,7 @@ func (r *Store) String() string {
return "redis"
}
func NewStore(opts ...options.Option) *Store {
func NewStore(opts ...store.Option) *Store {
return &Store{opts: store.NewOptions(opts...)}
}
@@ -350,7 +554,7 @@ func (r *Store) configure() error {
var redisClusterOptions *redis.ClusterOptions
var err error
nodes := r.opts.Address
nodes := r.opts.Addrs
if len(nodes) == 0 {
nodes = []string{"redis://127.0.0.1:6379"}
@@ -387,36 +591,14 @@ func (r *Store) configure() error {
if redisOptions == nil && redisClusterOptions == nil && len(nodes) == 1 {
redisOptions, err = redis.ParseURL(nodes[0])
if err != nil {
// Backwards compatibility
redisOptions = &redis.Options{
Addr: nodes[0],
Username: "",
Password: "", // no password set
DB: 0, // use default DB
MaxRetries: 2,
MaxRetryBackoff: 256 * time.Millisecond,
DialTimeout: 1 * time.Second,
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
PoolTimeout: 1 * time.Second,
MinIdleConns: 10,
TLSConfig: r.opts.TLSConfig,
}
redisOptions = DefaultOptions
redisOptions.Addr = r.opts.Addrs[0]
redisOptions.TLSConfig = r.opts.TLSConfig
}
} else if redisOptions == nil && redisClusterOptions == nil && len(nodes) > 1 {
redisClusterOptions = &redis.ClusterOptions{
Addrs: nodes,
Username: "",
Password: "", // no password set
MaxRetries: 2,
MaxRetryBackoff: 256 * time.Millisecond,
DialTimeout: 1 * time.Second,
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
PoolTimeout: 1 * time.Second,
MinIdleConns: 10,
TLSConfig: r.opts.TLSConfig,
}
redisClusterOptions = DefaultClusterOptions
redisClusterOptions.Addrs = r.opts.Addrs
redisClusterOptions.TLSConfig = r.opts.TLSConfig
}
if redisOptions != nil {
@@ -425,5 +607,22 @@ func (r *Store) configure() error {
r.cli = redis.NewClusterClient(redisClusterOptions)
}
r.pool = pool.NewPool(func() *strings.Builder { return &strings.Builder{} })
return nil
}
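// getKey builds the final storage key as "<namespace>/<key>" using
// DefaultPathSeparator and a pooled strings.Builder; the per-operation
// namespace, when set, takes precedence over the store-wide one.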
func (r *Store) getKey(mainNamespace string, opNamespace string, key string) string {
b := r.pool.Get()
defer r.pool.Put(b)
b.Reset()
if opNamespace == "" {
opNamespace = mainNamespace
}
if opNamespace != "" {
b.WriteString(opNamespace)
b.WriteString(DefaultPathSeparator)
}
b.WriteString(key)
return b.String()
}
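
Taken together, the new redis.go keeps the v3 store surface (Init, Connect, Read/Write and their M* variants) while wrapping each call with tracing spans and meter counters. A minimal usage sketch under those assumptions; the address, the key "greeting", and the stored value are illustrative:

```go
package main

import (
	"context"
	"fmt"

	redisstore "go.unistack.org/micro-store-redis/v3"
	"go.unistack.org/micro/v3/store"
)

func main() {
	ctx := context.TODO()

	// store.Addrs mirrors the option used in the test changes below;
	// the address is illustrative.
	s := redisstore.NewStore(store.Addrs("redis://127.0.0.1:6379"))
	if err := s.Init(); err != nil {
		panic(err)
	}
	if err := s.Connect(ctx); err != nil {
		panic(err)
	}

	// Raw strings and byte slices are stored as-is (per the type switches
	// above); other values are marshaled with the configured codec.
	if err := s.Write(ctx, "greeting", "hello"); err != nil {
		panic(err)
	}
	var out string
	if err := s.Read(ctx, "greeting", &out); err != nil {
		panic(err)
	}
	fmt.Println(out)

	// Accessor added in db770c3fe7: the underlying client for direct
	// go-redis calls (single-node configuration only, see the note on
	// Redis above).
	_ = s.Redis()
}
```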


@@ -8,8 +8,7 @@ import (
"time"
"github.com/redis/go-redis/v9"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v3/store"
)
func Test_rkv_configure(t *testing.T) {
@@ -38,7 +37,7 @@ func Test_rkv_configure(t *testing.T) {
},
},
{
name: "legacy Url", fields: fields{options: store.Options{Address: []string{"127.0.0.1:6379"}}, Client: nil},
name: "legacy Url", fields: fields{options: store.Options{Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
wantErr: false, want: wantValues{
username: "",
password: "",
@@ -46,7 +45,7 @@ func Test_rkv_configure(t *testing.T) {
},
},
{
name: "New Url", fields: fields{options: store.Options{Address: []string{"redis://127.0.0.1:6379"}}, Client: nil},
name: "New Url", fields: fields{options: store.Options{Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
wantErr: false, want: wantValues{
username: "",
password: "",
@@ -54,7 +53,7 @@ func Test_rkv_configure(t *testing.T) {
},
},
{
name: "Url with Pwd", fields: fields{options: store.Options{Address: []string{"redis://:password@redis:6379"}}, Client: nil},
name: "Url with Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
wantErr: false, want: wantValues{
username: "",
password: "password",
@@ -62,7 +61,7 @@ func Test_rkv_configure(t *testing.T) {
},
},
{
name: "Url with username and Pwd", fields: fields{options: store.Options{Address: []string{"redis://username:password@redis:6379"}}, Client: nil},
name: "Url with username and Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
wantErr: false, want: wantValues{
username: "username",
password: "password",
@@ -72,11 +71,11 @@ func Test_rkv_configure(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := &Store{
rc := &Store{
opts: tt.fields.options,
cli: tt.fields.Client,
}
err := r.configure()
err := rc.configure()
if (err != nil) != tt.wantErr {
t.Errorf("configure() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -91,7 +90,7 @@ func Test_Store(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
r := NewStore(options.Address(os.Getenv("STORE_NODES")))
r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))
if err := r.Init(); err != nil {
t.Fatal(err)
@@ -131,7 +130,7 @@ func Test_MRead(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
r := NewStore(options.Address(os.Getenv("STORE_NODES")))
r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))
if err = r.Init(); err != nil {
t.Fatal(err)