Compare commits

6 Commits

Author | SHA1 | Date
---|---|---
 | de72a10973 | 
 | 62c2de51d4 | 
 | 741b2310ec | 
 | 1e8a44b088 | 
 | 2245314c2f | 
 | db770c3fe7 | 
go.mod (4 changed lines)
```diff
@@ -1,10 +1,10 @@
-module go.unistack.org/micro-store-redis/v4
+module go.unistack.org/micro-store-redis/v3

 go 1.20

 require (
 	github.com/redis/go-redis/v9 v9.2.1
-	go.unistack.org/micro/v4 v4.0.7
+	go.unistack.org/micro/v3 v3.10.62
 )

 require (
```
go.sum (4 changed lines)
```diff
@@ -8,5 +8,5 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg=
 github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
-go.unistack.org/micro/v4 v4.0.7 h1:2lwtZlHcSwgkahhFbkI4x1lOS79lw8uLHtcEhlFF+AM=
-go.unistack.org/micro/v4 v4.0.7/go.mod h1:bVEYTlPi0EsdgZZt311bIroDg9ict7ky3C87dSCCAGk=
+go.unistack.org/micro/v3 v3.10.62 h1:PCwLSt3W53UGosH/5qU3kU0iJxK8jlKOm9p4v/Zti5o=
+go.unistack.org/micro/v3 v3.10.62/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
```
options.go (10 changed lines)
```diff
@@ -2,17 +2,17 @@ package redis

 import (
 	"github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/options"
+	"go.unistack.org/micro/v3/store"
 )

 type configKey struct{}

-func Config(c *redis.Options) options.Option {
-	return options.ContextOption(configKey{}, c)
+func Config(c *redis.Options) store.Option {
+	return store.SetOption(configKey{}, c)
 }

 type clusterConfigKey struct{}

-func ClusterConfig(c *redis.ClusterOptions) options.Option {
-	return options.ContextOption(clusterConfigKey{}, c)
+func ClusterConfig(c *redis.ClusterOptions) store.Option {
+	return store.SetOption(clusterConfigKey{}, c)
 }
```
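For orientation, this is how the migrated helpers are meant to be consumed: Config wraps a prebuilt go-redis config into a store.Option that the store's configure() step picks up later. A minimal usage sketch, assuming the micro/v3 API shown above and a hypothetical local address:

```go
package main

import (
	goredis "github.com/redis/go-redis/v9"
	redisstore "go.unistack.org/micro-store-redis/v3"
)

func main() {
	// Config stores the *redis.Options in the store options so that
	// configure() uses it instead of parsing the address list.
	s := redisstore.NewStore(redisstore.Config(&goredis.Options{
		Addr: "127.0.0.1:6379", // hypothetical local node
	}))
	_ = s
}
```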
redis.go (636 changed lines)
```diff
@@ -7,35 +7,61 @@ import (
 	"strings"
 	"time"

-	"github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/options"
-	"go.unistack.org/micro/v4/store"
+	redis "github.com/redis/go-redis/v9"
+	"go.unistack.org/micro/v3/semconv"
+	"go.unistack.org/micro/v3/store"
+	"go.unistack.org/micro/v3/tracer"
+	pool "go.unistack.org/micro/v3/util/xpool"
 )

+var (
+	DefaultPathSeparator = "/"
+
+	DefaultClusterOptions = &redis.ClusterOptions{
+		Username:        "",
+		Password:        "", // no password set
+		MaxRetries:      2,
+		MaxRetryBackoff: 256 * time.Millisecond,
+		DialTimeout:     1 * time.Second,
+		ReadTimeout:     1 * time.Second,
+		WriteTimeout:    1 * time.Second,
+		PoolTimeout:     1 * time.Second,
+		MinIdleConns:    10,
+	}
+
+	DefaultOptions = &redis.Options{
+		Username:        "",
+		Password:        "", // no password set
+		DB:              0,  // use default DB
+		MaxRetries:      2,
+		MaxRetryBackoff: 256 * time.Millisecond,
+		DialTimeout:     1 * time.Second,
+		ReadTimeout:     1 * time.Second,
+		WriteTimeout:    1 * time.Second,
+		PoolTimeout:     1 * time.Second,
+		MinIdleConns:    10,
+	}
+)
+
 type Store struct {
 	opts store.Options
-	cli  redisClient
+	cli  *wrappedClient
+	pool pool.Pool[*strings.Builder]
 }

-type redisClient interface {
-	Get(ctx context.Context, key string) *redis.StringCmd
-	Del(ctx context.Context, keys ...string) *redis.IntCmd
-	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd
-	Keys(ctx context.Context, pattern string) *redis.StringSliceCmd
-	MGet(ctx context.Context, keys ...string) *redis.SliceCmd
-	MSet(ctx context.Context, kv ...interface{}) *redis.StatusCmd
-	Exists(ctx context.Context, keys ...string) *redis.IntCmd
-	Ping(ctx context.Context) *redis.StatusCmd
-	Pipeline() redis.Pipeliner
-	Pipelined(ctx context.Context, fn func(redis.Pipeliner) error) ([]redis.Cmder, error)
-	Close() error
+type wrappedClient struct {
+	*redis.Client
+	*redis.ClusterClient
 }

 func (r *Store) Connect(ctx context.Context) error {
-	return r.cli.Ping(ctx).Err()
+	if r.cli.Client != nil {
+		return r.cli.Client.Ping(ctx).Err()
+	}
+	return r.cli.ClusterClient.Ping(ctx).Err()
 }

-func (r *Store) Init(opts ...options.Option) error {
+func (r *Store) Init(opts ...store.Option) error {
 	for _, o := range opts {
 		o(&r.opts)
 	}
```
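The new wrappedClient replaces the old redisClient interface with two embedded concrete clients, exactly one of which is non-nil, and every method branches on which one is set. A self-contained sketch of that dispatch pattern with stand-in types (not the go-redis ones):

```go
package main

import "fmt"

// Stand-ins for *redis.Client and *redis.ClusterClient.
type nodeClient struct{}
type clusterClient struct{}

func (*nodeClient) Ping() error    { return nil }
func (*clusterClient) Ping() error { return nil }

// Same shape as wrappedClient: two embedded pointers, one of them nil.
type wrapped struct {
	*nodeClient
	*clusterClient
}

func (w *wrapped) ping() error {
	// Branch on whichever client was configured, as Connect, Disconnect
	// and every store method do in the diff above.
	if w.nodeClient != nil {
		return w.nodeClient.Ping()
	}
	return w.clusterClient.Ping()
}

func main() {
	w := &wrapped{nodeClient: &nodeClient{}}
	fmt.Println(w.ping()) // <nil>
}
```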
```diff
@@ -43,93 +69,173 @@ func (r *Store) Init(opts ...options.Option) error {
 	return r.configure()
 }

+func (r *Store) Client() *redis.Client {
+	if r.cli.Client != nil {
+		return r.cli.Client
+	}
+	return nil
+}
+
+func (r *Store) ClusterClient() *redis.ClusterClient {
+	if r.cli.ClusterClient != nil {
+		return r.cli.ClusterClient
+	}
+	return nil
+}
+
 func (r *Store) Disconnect(ctx context.Context) error {
-	return r.cli.Close()
+	if r.cli.Client != nil {
+		return r.cli.Client.Close()
+	}
+	return r.cli.ClusterClient.Close()
 }

-func (r *Store) Exists(ctx context.Context, key string, opts ...options.Option) error {
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewExistsOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
-	if options.Namespace != "" {
-		key = fmt.Sprintf("%s%s", r.opts.Namespace, key)
-	}
-	val, err := r.cli.Exists(ctx, key).Result()
-	if err != nil {
-		return err
-	}
-	if val == 0 {
-		return store.ErrNotFound
-	}
-	return nil
-}
+func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
+	options := store.NewExistsOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+	ctx, sp := r.opts.Tracer.Start(ctx, "cache exists "+rkey)
+	defer sp.Finish()
+
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var err error
+	var val int64
+	if r.cli.Client != nil {
+		val, err = r.cli.Client.Exists(ctx, rkey).Result()
+	} else {
+		val, err = r.cli.ClusterClient.Exists(ctx, rkey).Result()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil || (err == nil && val == 0) {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}
+
+	return nil
+}

-func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...options.Option) error {
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewReadOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
-	if options.Namespace != "" {
-		key = fmt.Sprintf("%s%s", options.Namespace, key)
-	}
-
-	buf, err := r.cli.Get(ctx, key).Bytes()
-	if err != nil && err == redis.Nil {
-		return store.ErrNotFound
-	} else if err != nil {
-		return err
-	}
-	if buf == nil {
-		return store.ErrNotFound
-	}
-	return r.opts.Codec.Unmarshal(buf, val)
-}
+func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
+	options := store.NewReadOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+	ctx, sp := r.opts.Tracer.Start(ctx, "cache read "+rkey)
+	defer sp.Finish()
+
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var buf []byte
+	var err error
+	if r.cli.Client != nil {
+		buf, err = r.cli.Client.Get(ctx, rkey).Bytes()
+	} else {
+		buf, err = r.cli.ClusterClient.Get(ctx, rkey).Bytes()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil || (err == nil && buf == nil) {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}
+
+	switch b := val.(type) {
+	case *[]byte:
+		*b = buf
+	case *string:
+		*b = string(buf)
+	default:
+		if err = r.opts.Codec.Unmarshal(buf, val); err != nil {
+			sp.SetStatus(tracer.SpanStatusError, err.Error())
+		}
+	}
+
+	return err
+}

-func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...options.Option) error {
-	if len(keys) == 1 {
-		vt := reflect.ValueOf(vals)
-		if vt.Kind() == reflect.Ptr {
-			vt = reflect.Indirect(vt)
-			return r.Read(ctx, keys[0], vt.Index(0).Interface(), opts...)
-		}
-	}
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewReadOptions(opts...)
-	rkeys := make([]string, 0, len(keys))
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
-	for _, key := range keys {
-		if options.Namespace != "" {
-			rkeys = append(rkeys, fmt.Sprintf("%s%s", options.Namespace, key))
-		} else {
-			rkeys = append(rkeys, key)
-		}
-	}
-
-	rvals, err := r.cli.MGet(ctx, rkeys...).Result()
-	if err != nil && err == redis.Nil {
-		return store.ErrNotFound
-	} else if err != nil {
-		return err
-	}
-	if len(rvals) == 0 {
-		return store.ErrNotFound
-	}
+func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...store.ReadOption) error {
+	options := store.NewReadOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	if r.opts.Namespace != "" || options.Namespace != "" {
+		for idx, key := range keys {
+			keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
+		}
+	}
+
+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mread %v", keys))
+	defer sp.Finish()
+
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var rvals []interface{}
+	var err error
+	if r.cli.Client != nil {
+		rvals, err = r.cli.Client.MGet(ctx, keys...).Result()
+	} else {
+		rvals, err = r.cli.ClusterClient.MGet(ctx, keys...).Result()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil || (len(rvals) == 0) {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}

 	vv := reflect.ValueOf(vals)
```
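Every rewritten method repeats the same instrumentation frame: an inflight counter around the call, one timestamp feeding a latency summary and a duration histogram, and a total counter labelled miss, hit, or failure. Condensed into a single hypothetical helper (a sketch in the context of this file and its imports; the change itself inlines this bookkeeping in every method rather than factoring it out):

```go
// observe wraps one Redis call with the metric pattern used above.
func (r *Store) observe(name string, op func() error) error {
	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", name).Inc()
	ts := time.Now()
	err := op() // the actual Redis command
	te := time.Since(ts)
	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", name).Dec()
	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", name).Update(te.Seconds())
	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", name).Update(te.Seconds())
	switch {
	case err == redis.Nil:
		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", name, "status", "miss").Inc()
		return store.ErrNotFound
	case err == nil:
		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", name, "status", "hit").Inc()
		return nil
	default:
		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", name, "status", "failure").Inc()
		return err
	}
}
```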
```diff
@@ -160,77 +266,137 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts ...options.Option) error {
 		// special case for raw data
 		if vt.Kind() == reflect.Slice && vt.Elem().Kind() == reflect.Uint8 {
 			itm.Set(reflect.MakeSlice(itm.Type(), len(buf), len(buf)))
-		} else {
-			itm.Set(reflect.New(vt.Elem()))
+			itm.SetBytes(buf)
+			continue
+		} else if vt.Kind() == reflect.String {
+			itm.SetString(string(buf))
+			continue
 		}
+
+		itm.Set(reflect.New(vt.Elem()))
 		if err = r.opts.Codec.Unmarshal(buf, itm.Interface()); err != nil {
+			sp.SetStatus(tracer.SpanStatusError, err.Error())
 			return err
 		}
 	}
 	vv.Set(nvv)

 	return nil
 }

-func (r *Store) MDelete(ctx context.Context, keys []string, opts ...options.Option) error {
-	if len(keys) == 1 {
-		return r.Delete(ctx, keys[0], opts...)
-	}
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewDeleteOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
-	if options.Namespace == "" {
-		return r.cli.Del(ctx, keys...).Err()
-	}
-	for idx := range keys {
-		keys[idx] = options.Namespace + keys[idx]
-	}
-	return r.cli.Del(ctx, keys...).Err()
-}
+func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
+	options := store.NewDeleteOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	if r.opts.Namespace != "" || options.Namespace != "" {
+		for idx, key := range keys {
+			keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
+		}
+	}
+
+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mdelete %v", keys))
+	defer sp.Finish()
+
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Del(ctx, keys...).Err()
+	} else {
+		err = r.cli.ClusterClient.Del(ctx, keys...).Err()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}
+
+	return nil
+}

-func (r *Store) Delete(ctx context.Context, key string, opts ...options.Option) error {
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewDeleteOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
-	if options.Namespace == "" {
-		return r.cli.Del(ctx, key).Err()
-	}
-	return r.cli.Del(ctx, fmt.Sprintf("%s%s", options.Namespace, key)).Err()
-}
+func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
+	options := store.NewDeleteOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache delete %v", key))
+	defer sp.Finish()
+
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	} else {
+		err = r.cli.ClusterClient.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}
+
+	return nil
+}

-func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...options.Option) error {
-	if len(keys) == 1 {
-		return r.Write(ctx, keys[0], vals[0], opts...)
-	}
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-	options := store.NewWriteOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
+func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
+	options := store.NewWriteOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mwrite %v", keys))
+	defer sp.Finish()

 	kvs := make([]string, 0, len(keys)*2)

 	for idx, key := range keys {
-		if options.Namespace != "" {
-			kvs = append(kvs, fmt.Sprintf("%s%s", options.Namespace, key))
-		} else {
-			kvs = append(kvs, key)
-		}
+		kvs = append(kvs, r.getKey(r.opts.Namespace, options.Namespace, key))

 		switch vt := vals[idx].(type) {
 		case string:
```
```diff
@@ -240,24 +406,57 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...options.Option) error {
 		default:
 			buf, err := r.opts.Codec.Marshal(vt)
 			if err != nil {
+				sp.SetStatus(tracer.SpanStatusError, err.Error())
 				return err
 			}
 			kvs = append(kvs, string(buf))
 		}
 	}

-	cmds, err := r.cli.Pipelined(ctx, func(pipe redis.Pipeliner) error {
-		for idx := 0; idx < len(kvs); idx += 2 {
-			pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result()
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+
+	pipeliner := func(pipe redis.Pipeliner) error {
+		for idx := 0; idx < len(kvs); idx += 2 {
+			if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	var err error
+	var cmds []redis.Cmder
+
+	if r.cli.Client != nil {
+		cmds, err = r.cli.Client.Pipelined(ctx, pipeliner)
+	} else {
+		cmds, err = r.cli.ClusterClient.Pipelined(ctx, pipeliner)
+	}
+
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}

 	for _, cmd := range cmds {
 		if err = cmd.Err(); err != nil {
+			if err == redis.Nil {
+				r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+				return store.ErrNotFound
+			}
+			sp.SetStatus(tracer.SpanStatusError, err.Error())
+			r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
 			return err
 		}
 	}
```
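MWrite now builds the pipeline closure once and hands it to whichever client is configured. For reference, the same go-redis v9 Pipelined call in a standalone program (hypothetical keys and node address; assumes a reachable server):

```go
package main

import (
	"context"
	"fmt"
	"time"

	redis "github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	cli := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // hypothetical node

	// One round trip for several SETs; errors surface both from Pipelined
	// itself and from each queued command, as MWrite checks above.
	cmds, err := cli.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		for _, kv := range [][2]string{{"k1", "v1"}, {"k2", "v2"}} {
			if _, err := pipe.Set(ctx, kv[0], kv[1], time.Minute).Result(); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	for _, cmd := range cmds {
		fmt.Println(cmd.Err())
	}
}
```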
```diff
@@ -265,17 +464,23 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...options.Option) error {
 	return nil
 }

-func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...options.Option) error {
-	if r.opts.Timeout > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
-		defer cancel()
-	}
-
-	options := store.NewWriteOptions(opts...)
-	if len(options.Namespace) == 0 {
-		options.Namespace = r.opts.Namespace
-	}
+func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
+	options := store.NewWriteOptions(opts...)
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache write %v", rkey))
+	defer sp.Finish()

 	var buf []byte
 	switch vt := val.(type) {
```
```diff
@@ -287,46 +492,108 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...options.Option) error {
 		var err error
 		buf, err = r.opts.Codec.Marshal(val)
 		if err != nil {
+			sp.SetStatus(tracer.SpanStatusError, err.Error())
 			return err
 		}
 	}

-	if options.Namespace != "" {
-		key = fmt.Sprintf("%s%s", options.Namespace, key)
-	}
-
-	return r.cli.Set(ctx, key, buf, options.TTL).Err()
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Set(ctx, rkey, buf, options.TTL).Err()
+	} else {
+		err = r.cli.ClusterClient.Set(ctx, rkey, buf, options.TTL).Err()
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return err
+	}
+
+	return err
 }

-func (r *Store) List(ctx context.Context, opts ...options.Option) ([]string, error) {
+func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
 	options := store.NewListOptions(opts...)
 	if len(options.Namespace) == 0 {
 		options.Namespace = r.opts.Namespace
 	}

-	rkey := fmt.Sprintf("%s%s*", options.Namespace, options.Prefix)
+	rkey := r.getKey(options.Namespace, "", options.Prefix+"*")
 	if options.Suffix != "" {
 		rkey += options.Suffix
 	}
-	if r.opts.Timeout > 0 {
+
+	timeout := r.opts.Timeout
+	if options.Timeout > 0 {
+		timeout = options.Timeout
+	}
+
+	if timeout > 0 {
 		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, r.opts.Timeout)
+		ctx, cancel = context.WithTimeout(ctx, timeout)
 		defer cancel()
 	}

+	ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache list %v", rkey))
+	defer sp.Finish()
+
 	// TODO: add support for prefix/suffix/limit
-	keys, err := r.cli.Keys(ctx, rkey).Result()
-	if err != nil {
-		return nil, err
-	}
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
+	var keys []string
+	var err error
+
+	if r.cli.Client != nil {
+		keys, err = r.cli.Client.Keys(ctx, rkey).Result()
+	} else {
+		err = r.cli.ClusterClient.ForEachMaster(ctx, func(nctx context.Context, cli *redis.Client) error {
+			nkeys, nerr := cli.Keys(nctx, rkey).Result()
+			if nerr != nil {
+				return nerr
+			}
+			keys = append(keys, nkeys...)
+			return nil
+		})
+	}
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
+		return nil, store.ErrNotFound
+	} else if err == nil {
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		sp.SetStatus(tracer.SpanStatusError, err.Error())
+		r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
+		return nil, err
+	}

-	if options.Namespace == "" {
+	prefix := r.opts.Namespace
+	if options.Namespace != "" {
+		prefix = options.Namespace
+	}
+	if prefix == "" {
 		return keys, nil
 	}

-	nkeys := make([]string, 0, len(keys))
-	for _, key := range keys {
-		nkeys = append(nkeys, strings.TrimPrefix(key, options.Namespace))
+	for idx, key := range keys {
+		keys[idx] = strings.TrimPrefix(key, prefix)
 	}
-	return nkeys, nil
+
+	return keys, nil
 }

 func (r *Store) Options() store.Options {
```
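Worth noting on the List change: KEYS only answers for the node it lands on, so the cluster path fans out over the masters and merges the results. A standalone sketch of that fan-out (hypothetical addresses); unlike the diff, it takes a lock, since go-redis may invoke the ForEachMaster callback concurrently:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	redis "github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	cc := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001"}, // hypothetical masters
	})

	var mu sync.Mutex
	var keys []string
	// Collect KEYS from every master and merge, mirroring List above.
	err := cc.ForEachMaster(ctx, func(nctx context.Context, cli *redis.Client) error {
		nkeys, nerr := cli.Keys(nctx, "app/*").Result()
		if nerr != nil {
			return nerr
		}
		mu.Lock()
		keys = append(keys, nkeys...)
		mu.Unlock()
		return nil
	})
	fmt.Println(keys, err)
}
```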
```diff
@@ -341,7 +608,7 @@ func (r *Store) String() string {
 	return "redis"
 }

-func NewStore(opts ...options.Option) *Store {
+func NewStore(opts ...store.Option) *Store {
 	return &Store{opts: store.NewOptions(opts...)}
 }
```
```diff
@@ -350,7 +617,7 @@ func (r *Store) configure() error {
 	var redisClusterOptions *redis.ClusterOptions
 	var err error

-	nodes := r.opts.Address
+	nodes := r.opts.Addrs

 	if len(nodes) == 0 {
 		nodes = []string{"redis://127.0.0.1:6379"}
```
```diff
@@ -387,43 +654,38 @@ func (r *Store) configure() error {
 	if redisOptions == nil && redisClusterOptions == nil && len(nodes) == 1 {
 		redisOptions, err = redis.ParseURL(nodes[0])
 		if err != nil {
 			// Backwards compatibility
-			redisOptions = &redis.Options{
-				Addr:            nodes[0],
-				Username:        "",
-				Password:        "", // no password set
-				DB:              0,  // use default DB
-				MaxRetries:      2,
-				MaxRetryBackoff: 256 * time.Millisecond,
-				DialTimeout:     1 * time.Second,
-				ReadTimeout:     1 * time.Second,
-				WriteTimeout:    1 * time.Second,
-				PoolTimeout:     1 * time.Second,
-				MinIdleConns:    10,
-				TLSConfig:       r.opts.TLSConfig,
-			}
+			redisOptions = DefaultOptions
+			redisOptions.Addr = r.opts.Addrs[0]
+			redisOptions.TLSConfig = r.opts.TLSConfig
 		}
 	} else if redisOptions == nil && redisClusterOptions == nil && len(nodes) > 1 {
-		redisClusterOptions = &redis.ClusterOptions{
-			Addrs:           nodes,
-			Username:        "",
-			Password:        "", // no password set
-			MaxRetries:      2,
-			MaxRetryBackoff: 256 * time.Millisecond,
-			DialTimeout:     1 * time.Second,
-			ReadTimeout:     1 * time.Second,
-			WriteTimeout:    1 * time.Second,
-			PoolTimeout:     1 * time.Second,
-			MinIdleConns:    10,
-			TLSConfig:       r.opts.TLSConfig,
-		}
+		redisClusterOptions = DefaultClusterOptions
+		redisClusterOptions.Addrs = r.opts.Addrs
+		redisClusterOptions.TLSConfig = r.opts.TLSConfig
 	}

 	if redisOptions != nil {
-		r.cli = redis.NewClient(redisOptions)
+		r.cli = &wrappedClient{Client: redis.NewClient(redisOptions)}
 	} else if redisClusterOptions != nil {
-		r.cli = redis.NewClusterClient(redisClusterOptions)
+		r.cli = &wrappedClient{ClusterClient: redis.NewClusterClient(redisClusterOptions)}
 	}

+	r.pool = pool.NewPool(func() *strings.Builder { return &strings.Builder{} })
 	return nil
 }
+
+func (r *Store) getKey(mainNamespace string, opNamespace string, key string) string {
+	b := r.pool.Get()
+	defer r.pool.Put(b)
+	b.Reset()
+
+	if opNamespace == "" {
+		opNamespace = mainNamespace
+	}
+	if opNamespace != "" {
+		b.WriteString(opNamespace)
+		b.WriteString(DefaultPathSeparator)
+	}
+	b.WriteString(key)
+	return b.String()
+}
```
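getKey resolves the effective namespace (the per-call namespace wins over the store-wide one) and joins it to the key with DefaultPathSeparator, reusing builders from the xpool pool to avoid allocations. The same resolution rules in a standalone sketch, with the pooling left out:

```go
package main

import (
	"fmt"
	"strings"
)

const defaultPathSeparator = "/"

// Per-operation namespace overrides the store-wide one; an empty result
// namespace means the key is used as-is.
func getKey(mainNamespace, opNamespace, key string) string {
	b := &strings.Builder{}
	if opNamespace == "" {
		opNamespace = mainNamespace
	}
	if opNamespace != "" {
		b.WriteString(opNamespace)
		b.WriteString(defaultPathSeparator)
	}
	b.WriteString(key)
	return b.String()
}

func main() {
	fmt.Println(getKey("app", "", "user:1"))       // app/user:1
	fmt.Println(getKey("app", "tenant", "user:1")) // tenant/user:1
	fmt.Println(getKey("", "", "user:1"))          // user:1
}
```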
```diff
@@ -7,15 +7,13 @@ import (
 	"testing"
 	"time"

 	"github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/options"
-	"go.unistack.org/micro/v4/store"
+	"go.unistack.org/micro/v3/store"
 )

 func Test_rkv_configure(t *testing.T) {
 	type fields struct {
 		options store.Options
-		Client  *redis.Client
+		Client  *wrappedClient
 	}
 	type wantValues struct {
 		username string
@@ -38,7 +36,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "legacy Url", fields: fields{options: store.Options{Address: []string{"127.0.0.1:6379"}}, Client: nil},
+			name: "legacy Url", fields: fields{options: store.Options{Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "",
@@ -46,7 +44,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "New Url", fields: fields{options: store.Options{Address: []string{"redis://127.0.0.1:6379"}}, Client: nil},
+			name: "New Url", fields: fields{options: store.Options{Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "",
@@ -54,7 +52,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "Url with Pwd", fields: fields{options: store.Options{Address: []string{"redis://:password@redis:6379"}}, Client: nil},
+			name: "Url with Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "",
 				password: "password",
@@ -62,7 +60,7 @@ func Test_rkv_configure(t *testing.T) {
 			},
 		},
 		{
-			name: "Url with username and Pwd", fields: fields{options: store.Options{Address: []string{"redis://username:password@redis:6379"}}, Client: nil},
+			name: "Url with username and Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
 			wantErr: false, want: wantValues{
 				username: "username",
 				password: "password",
@@ -72,11 +70,11 @@ func Test_rkv_configure(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			r := &Store{
+			rc := &Store{
 				opts: tt.fields.options,
 				cli:  tt.fields.Client,
 			}
-			err := r.configure()
+			err := rc.configure()
 			if (err != nil) != tt.wantErr {
 				t.Errorf("configure() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -91,7 +89,7 @@ func Test_Store(t *testing.T) {
 	if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
 		t.Skip()
 	}
-	r := NewStore(options.Address(os.Getenv("STORE_NODES")))
+	r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))

 	if err := r.Init(); err != nil {
 		t.Fatal(err)
@@ -131,7 +129,7 @@ func Test_MRead(t *testing.T) {
 	if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
 		t.Skip()
 	}
-	r := NewStore(options.Address(os.Getenv("STORE_NODES")))
+	r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))

 	if err = r.Init(); err != nil {
 		t.Fatal(err)
```
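Taken together, a round trip through the migrated store looks roughly like the following sketch. It assumes a reachable local node; store.Addrs is the option the tests above use, and the string value appears to bypass the codec, per the type switches in the diff for Read and MWrite:

```go
package main

import (
	"context"
	"fmt"

	redisstore "go.unistack.org/micro-store-redis/v3"
	"go.unistack.org/micro/v3/store"
)

func main() {
	ctx := context.Background()

	// Hypothetical local node; store.Addrs matches the tests above.
	s := redisstore.NewStore(store.Addrs("redis://127.0.0.1:6379"))
	if err := s.Init(); err != nil {
		panic(err)
	}
	if err := s.Connect(ctx); err != nil {
		panic(err)
	}
	defer s.Disconnect(ctx)

	if err := s.Write(ctx, "hello", "world"); err != nil {
		panic(err)
	}
	var val string
	if err := s.Read(ctx, "hello", &val); err != nil {
		panic(err)
	}
	fmt.Println(val) // world
}
```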