Compare commits
No commits in common. "v3" and "v3.10.6" have entirely different histories.
@@ -1,29 +0,0 @@
name: lint

on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches:
      - master
      - v3
      - v4

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup deps
        run: go get -v ./...
      - name: run lint
        uses: https://github.com/golangci/golangci-lint-action@v6
        with:
          version: 'latest'
@@ -1,34 +0,0 @@
name: test

on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches:
      - master
      - v3
      - v4
  push:
    branches:
      - master
      - v3
      - v4

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup deps
        run: go get -v ./...
      - name: run test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...
@@ -1,53 +0,0 @@
name: test

on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches:
      - master
      - v3
      - v4
  push:
    branches:
      - master
      - v3
      - v4

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: checkout tests
        uses: actions/checkout@v4
        with:
          ref: master
          filter: 'blob:none'
          repository: unistack-org/micro-tests
          path: micro-tests
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup go work
        env:
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: |
          go work init
          go work use .
          go work use micro-tests
      - name: setup deps
        env:
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: go get -v ./...
      - name: run tests
        env:
          INTEGRATION_TESTS: yes
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: |
          cd micro-tests
          go test -mod readonly -v ./... || true
.github/dependabot.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:

  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"

  # Maintain dependencies for Golang
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/autoapprove.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: "autoapprove"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  autoapprove:
    runs-on: ubuntu-latest
    steps:
      - name: approve
        uses: hmarr/auto-approve-action@v3
        if: github.actor == 'vtolstov' || github.actor == 'dependabot[bot]'
        id: approve
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/automerge.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
name: "automerge"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  automerge:
    runs-on: ubuntu-latest
    if: github.actor == 'vtolstov'
    steps:
      - name: merge
        id: merge
        run: gh pr merge --auto --merge "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.TOKEN}}
.github/workflows/build.yml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
name: build
on:
  push:
    branches:
      - master
      - v3
jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: setup
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
      - name: checkout
        uses: actions/checkout@v3
      - name: cache
        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-go-
      - name: deps
        run: go get -v -t -d ./...
      - name: test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...
  lint:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: lint
        uses: golangci/golangci-lint-action@v3.4.0
        continue-on-error: true
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.30
          # Optional: working directory, useful for monorepos
          # working-directory: somedir
          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true
.github/workflows/codeql-analysis.yml (vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "codeql"

on:
  workflow_run:
    workflows: ["prbuild"]
    types:
      - completed
  push:
    branches: [ master, v3 ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master, v3 ]
  schedule:
    - cron: '34 1 * * 0'

jobs:
  analyze:
    name: analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: setup
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
      # Initializes the CodeQL tools for scanning.
      - name: init
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      # and modify them (or add more) to build your code if your project
      # uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: analyze
        uses: github/codeql-action/analyze@v2
.github/workflows/dependabot-automerge.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: "dependabot-automerge"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  automerge:
    runs-on: ubuntu-latest
    if: github.actor == 'dependabot[bot]'
    steps:
      - name: metadata
        id: metadata
        uses: dependabot/fetch-metadata@v1.3.6
        with:
          github-token: "${{ secrets.TOKEN }}"
      - name: merge
        id: merge
        if: ${{contains(steps.metadata.outputs.dependency-names, 'go.unistack.org')}}
        run: gh pr merge --auto --merge "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.TOKEN}}
.github/workflows/pr.yml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
name: prbuild
on:
  pull_request:
    branches:
      - master
      - v3
jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: setup
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
      - name: checkout
        uses: actions/checkout@v3
      - name: cache
        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-go-
      - name: deps
        run: go get -v -t -d ./...
      - name: test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...
  lint:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: lint
        uses: golangci/golangci-lint-action@v3.4.0
        continue-on-error: true
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.30
          # Optional: working directory, useful for monorepos
          # working-directory: somedir
          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true
.gitignore (vendored, 1 line changed)
@@ -1 +0,0 @@
.idea
@@ -1,5 +0,0 @@
run:
  concurrency: 8
  deadline: 5m
  issues-exit-code: 1
  tests: true
event.go (86 lines changed)
@@ -1,86 +0,0 @@
package redis

import (
	"context"
	"errors"
	"net"
	"time"

	goredis "github.com/redis/go-redis/v9"
	"go.unistack.org/micro/v3/store"
)

type eventHook struct {
	s *Store
}

var _ goredis.Hook = (*eventHook)(nil)

func newEventHook(s *Store) *eventHook {
	return &eventHook{s: s}
}

func (h *eventHook) DialHook(hook goredis.DialHook) goredis.DialHook {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		conn, err := hook(ctx, network, addr)
		if err != nil {
			if !isRedisError(err) {
				if h.s.connected.CompareAndSwap(1, 0) {
					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
				}
			} else {
				h.s.connected.Store(1)
			}
		} else {
			if h.s.connected.CompareAndSwap(0, 1) {
				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
			}
		}
		return conn, err
	}
}

func (h *eventHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook {
	return func(ctx context.Context, cmd goredis.Cmder) error {
		err := hook(ctx, cmd)
		if err != nil {
			if !isRedisError(err) {
				if h.s.connected.CompareAndSwap(1, 0) {
					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
				}
			} else {
				h.s.connected.Store(1)
			}
		} else {
			if h.s.connected.CompareAndSwap(0, 1) {
				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
			}
		}
		return err
	}
}

func (h *eventHook) ProcessPipelineHook(hook goredis.ProcessPipelineHook) goredis.ProcessPipelineHook {
	return func(ctx context.Context, cmds []goredis.Cmder) error {
		err := hook(ctx, cmds)
		if err != nil {
			if !isRedisError(err) {
				if h.s.connected.CompareAndSwap(1, 0) {
					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
				}
			} else {
				h.s.connected.Store(1)
			}
		} else {
			if h.s.connected.CompareAndSwap(0, 1) {
				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
			}
		}
		return err
	}
}

func isRedisError(err error) bool {
	var rerr goredis.Error
	return errors.As(err, &rerr)
}
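Aside: the deleted event.go above is a go-redis v9 Hook that maps dial and command failures onto store connect/disconnect events. Below is a minimal, self-contained sketch of the same pattern using only the public go-redis v9 API; the connHook name and the bare atomic flag are illustrative, not part of this repository.

package main

import (
	"context"
	"net"
	"sync/atomic"

	goredis "github.com/redis/go-redis/v9"
)

// connHook (hypothetical name) tracks connectivity in an atomic flag
// instead of publishing store events as the deleted eventHook does.
type connHook struct {
	connected atomic.Uint32
}

var _ goredis.Hook = (*connHook)(nil)

// DialHook wraps the client's dialer and records success or failure.
func (h *connHook) DialHook(next goredis.DialHook) goredis.DialHook {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		conn, err := next(ctx, network, addr)
		if err != nil {
			h.connected.Store(0) // dial failed: mark disconnected
		} else {
			h.connected.Store(1) // dial succeeded: mark connected
		}
		return conn, err
	}
}

// Command and pipeline hooks pass through untouched in this sketch.
func (h *connHook) ProcessHook(next goredis.ProcessHook) goredis.ProcessHook {
	return next
}

func (h *connHook) ProcessPipelineHook(next goredis.ProcessPipelineHook) goredis.ProcessPipelineHook {
	return next
}

func main() {
	cli := goredis.NewClient(&goredis.Options{Addr: "127.0.0.1:6379"})
	h := &connHook{}
	cli.AddHook(h) // same attachment point configure() used for newEventHook
	_ = cli.Ping(context.Background()).Err()
	println("connected:", h.connected.Load() == 1)
}

The real hook additionally distinguishes protocol errors from transport errors via isRedisError, so a command that fails with a server-side error still counts as connected.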
go.mod (15 lines changed)
@@ -1,19 +1,14 @@
module go.unistack.org/micro-store-redis/v3

go 1.22

toolchain go1.22.4
go 1.20

require (
	github.com/redis/go-redis/extra/rediscmd/v9 v9.7.0
	github.com/redis/go-redis/v9 v9.7.0
	go.unistack.org/micro/v3 v3.10.108
	github.com/redis/go-redis/v9 v9.2.1
	go.unistack.org/micro/v3 v3.10.62
)

require (
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	go.unistack.org/micro-proto/v3 v3.4.1 // indirect
	google.golang.org/protobuf v1.35.2 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
)
go.sum (24 lines changed)
@@ -1,20 +1,12 @@
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/redis/go-redis/extra/rediscmd/v9 v9.7.0 h1:BIx9TNZH/Jsr4l1i7VVxnV0JPiwYj8qyrHyuL0fGZrk=
github.com/redis/go-redis/extra/rediscmd/v9 v9.7.0/go.mod h1:eTg/YQtGYAZD5r3DlGlJptJ45AHA+/G+2NPn30PKzik=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
go.unistack.org/micro/v3 v3.10.108 h1:3L7SkilMVLtH8y3pQIPtr3jjQYrf0AMv1oAkoL3nFkE=
go.unistack.org/micro/v3 v3.10.108/go.mod h1:YzMldzHN9Ei+zy5t/Psu7RUWDZwUfrNYiStSQtTz90g=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg=
github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
go.unistack.org/micro/v3 v3.10.62 h1:PCwLSt3W53UGosH/5qU3kU0iJxK8jlKOm9p4v/Zti5o=
go.unistack.org/micro/v3 v3.10.62/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
options.go (55 lines changed)
@@ -1,67 +1,18 @@
package redis

import (
	goredis "github.com/redis/go-redis/v9"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/meter"
	"github.com/redis/go-redis/v9"
	"go.unistack.org/micro/v3/store"
	"go.unistack.org/micro/v3/tracer"
)

type configKey struct{}

func Config(c *goredis.Options) store.Option {
func Config(c *redis.Options) store.Option {
	return store.SetOption(configKey{}, c)
}

type clusterConfigKey struct{}

func ClusterConfig(c *goredis.ClusterOptions) store.Option {
func ClusterConfig(c *redis.ClusterOptions) store.Option {
	return store.SetOption(clusterConfigKey{}, c)
}

type universalConfigKey struct{}

func UniversalConfig(c *goredis.UniversalOptions) store.Option {
	return store.SetOption(universalConfigKey{}, c)
}

var (
	labelHost = "redis_host"
	labelName = "redis_name"
)

// Options struct holds wrapper options
type Options struct {
	Logger logger.Logger
	Meter meter.Meter
	Tracer tracer.Tracer
	RedisHost string
	RedisName string
}

// Option func signature
type Option func(*Options)

// NewOptions create new Options struct from provided option slice
func NewOptions(opts ...Option) Options {
	options := Options{
		Logger: logger.DefaultLogger,
		Meter: meter.DefaultMeter,
		Tracer: tracer.DefaultTracer,
	}

	for _, o := range opts {
		o(&options)
	}

	options.Meter = options.Meter.Clone(
		meter.Labels(
			labelHost, options.RedisHost,
			labelName, options.RedisName),
	)

	options.Logger = options.Logger.Clone(logger.WithAddCallerSkipCount(1))

	return options
}
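Aside: options.go wires the functional-options pattern from micro/v3: each exported option stores a prebuilt go-redis config in the store's options context under a private key, and configure() in redis.go below pulls it back out. A minimal usage sketch against the v3 API shown in this diff (the address is a placeholder):

package main

import (
	"context"

	goredis "github.com/redis/go-redis/v9"
	redisstore "go.unistack.org/micro-store-redis/v3"
)

func main() {
	ctx := context.Background()

	// Config carries a *goredis.Options through the store options context;
	// configure() finds it under configKey and copies the fields into the
	// UniversalOptions it builds the client from.
	s := redisstore.NewStore(
		redisstore.Config(&goredis.Options{Addr: "127.0.0.1:6379"}),
	)

	if err := s.Init(); err != nil {
		panic(err)
	}
	if err := s.Connect(ctx); err != nil {
		panic(err)
	}
	defer func() { _ = s.Disconnect(ctx) }()
}

ClusterConfig and UniversalConfig slot in the same way; on the v3.10.6 side, configure() rejects Config and ClusterConfig passed together.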
redis.go (641 lines changed)
@ -2,25 +2,22 @@ package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
goredis "github.com/redis/go-redis/v9"
|
||||
redis "github.com/redis/go-redis/v9"
|
||||
"go.unistack.org/micro/v3/semconv"
|
||||
"go.unistack.org/micro/v3/store"
|
||||
"go.unistack.org/micro/v3/util/id"
|
||||
"go.unistack.org/micro/v3/tracer"
|
||||
pool "go.unistack.org/micro/v3/util/xpool"
|
||||
)
|
||||
|
||||
var (
|
||||
_ store.Store = (*Store)(nil)
|
||||
_ store.Event = (*event)(nil)
|
||||
sendEventTime = 10 * time.Millisecond
|
||||
DefaultUniversalOptions = &goredis.UniversalOptions{
|
||||
DefaultPathSeparator = "/"
|
||||
|
||||
DefaultClusterOptions = &redis.ClusterOptions{
|
||||
Username: "",
|
||||
Password: "", // no password set
|
||||
MaxRetries: 2,
|
||||
@ -32,19 +29,7 @@ var (
|
||||
MinIdleConns: 10,
|
||||
}
|
||||
|
||||
DefaultClusterOptions = &goredis.ClusterOptions{
|
||||
Username: "",
|
||||
Password: "", // no password set
|
||||
MaxRetries: 2,
|
||||
MaxRetryBackoff: 256 * time.Millisecond,
|
||||
DialTimeout: 1 * time.Second,
|
||||
ReadTimeout: 1 * time.Second,
|
||||
WriteTimeout: 1 * time.Second,
|
||||
PoolTimeout: 1 * time.Second,
|
||||
MinIdleConns: 10,
|
||||
}
|
||||
|
||||
DefaultOptions = &goredis.Options{
|
||||
DefaultOptions = &redis.Options{
|
||||
Username: "",
|
||||
Password: "", // no password set
|
||||
DB: 0, // use default DB
|
||||
@ -59,92 +44,47 @@ var (
|
||||
)
|
||||
|
||||
type Store struct {
|
||||
cli goredis.UniversalClient
|
||||
pool *pool.StringsPool
|
||||
connected *atomic.Uint32
|
||||
opts store.Options
|
||||
watchers map[string]*watcher
|
||||
mu sync.RWMutex
|
||||
cli redisClient
|
||||
pool pool.Pool[strings.Builder]
|
||||
}
|
||||
|
||||
func (r *Store) Live() bool {
|
||||
return r.connected.Load() == 1
|
||||
}
|
||||
|
||||
func (r *Store) Ready() bool {
|
||||
return r.connected.Load() == 1
|
||||
}
|
||||
|
||||
func (r *Store) Health() bool {
|
||||
return r.connected.Load() == 1
|
||||
type redisClient interface {
|
||||
Get(ctx context.Context, key string) *redis.StringCmd
|
||||
Del(ctx context.Context, keys ...string) *redis.IntCmd
|
||||
Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd
|
||||
Keys(ctx context.Context, pattern string) *redis.StringSliceCmd
|
||||
MGet(ctx context.Context, keys ...string) *redis.SliceCmd
|
||||
MSet(ctx context.Context, kv ...interface{}) *redis.StatusCmd
|
||||
Exists(ctx context.Context, keys ...string) *redis.IntCmd
|
||||
Ping(ctx context.Context) *redis.StatusCmd
|
||||
Pipeline() redis.Pipeliner
|
||||
Pipelined(ctx context.Context, fn func(redis.Pipeliner) error) ([]redis.Cmder, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
func (r *Store) Connect(ctx context.Context) error {
|
||||
if r.connected.Load() == 1 {
|
||||
return nil
|
||||
}
|
||||
if r.cli == nil {
|
||||
return store.ErrNotConnected
|
||||
}
|
||||
if r.opts.LazyConnect {
|
||||
return nil
|
||||
}
|
||||
if err := r.cli.Ping(ctx).Err(); err != nil {
|
||||
setSpanError(ctx, err)
|
||||
return err
|
||||
}
|
||||
r.connected.Store(1)
|
||||
return nil
|
||||
return r.cli.Ping(ctx).Err()
|
||||
}
|
||||
|
||||
func (r *Store) Init(opts ...store.Option) error {
|
||||
err := r.configure(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
for _, o := range opts {
|
||||
o(&r.opts)
|
||||
}
|
||||
|
||||
return nil
|
||||
return r.configure()
|
||||
}
|
||||
|
||||
func (r *Store) Client() *goredis.Client {
|
||||
if c, ok := r.cli.(*goredis.Client); ok {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Store) UniversalClient() goredis.UniversalClient {
|
||||
return r.cli
|
||||
}
|
||||
|
||||
func (r *Store) ClusterClient() *goredis.ClusterClient {
|
||||
if c, ok := r.cli.(*goredis.ClusterClient); ok {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
func (r *Store) Redis() *redis.Client {
|
||||
return r.cli.(*redis.Client)
|
||||
}
|
||||
|
||||
func (r *Store) Disconnect(ctx context.Context) error {
|
||||
if r.connected.Load() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.cli != nil {
|
||||
if err := r.cli.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r.connected.Store(1)
|
||||
return nil
|
||||
return r.cli.Close()
|
||||
}
|
||||
|
||||
func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
options := store.NewExistsOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "exists")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -157,24 +97,25 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOpti
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
|
||||
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, "cache exists "+rkey)
|
||||
defer sp.Finish()
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
val, err := r.cli.Exists(ctx, rkey).Result()
|
||||
setSpanError(ctx, err)
|
||||
te := time.Since(ts)
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if errors.Is(err, goredis.Nil) || (err == nil && val == 0) {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil || (err == nil && val == 0) {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
} else {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -182,12 +123,7 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOpti
|
||||
}
|
||||
|
||||
func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
|
||||
options := store.NewReadOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "read")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -200,23 +136,25 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...s
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
|
||||
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, "cache read "+rkey)
|
||||
defer sp.Finish()
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
buf, err := r.cli.Get(ctx, rkey).Bytes()
|
||||
setSpanError(ctx, err)
|
||||
te := time.Since(ts)
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if errors.Is(err, goredis.Nil) || (err == nil && buf == nil) {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil || (err == nil && buf == nil) {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -227,7 +165,7 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...s
|
||||
*b = string(buf)
|
||||
default:
|
||||
if err = r.opts.Codec.Unmarshal(buf, val); err != nil {
|
||||
setSpanError(ctx, err)
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
@ -248,42 +186,30 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
var rkeys []string
|
||||
var pools []*strings.Builder
|
||||
if r.opts.Namespace != "" || options.Namespace != "" {
|
||||
rkeys = make([]string, len(keys))
|
||||
pools = make([]*strings.Builder, len(keys))
|
||||
for idx, key := range keys {
|
||||
b := r.pool.Get()
|
||||
pools[idx] = b
|
||||
rkeys[idx] = r.getKey(b, r.opts.Namespace, options.Namespace, key)
|
||||
keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
|
||||
}
|
||||
}
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mread %v", keys))
|
||||
defer sp.Finish()
|
||||
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
var rvals []interface{}
|
||||
var err error
|
||||
if r.opts.Namespace != "" || options.Namespace != "" {
|
||||
rvals, err = r.cli.MGet(ctx, rkeys...).Result()
|
||||
for idx := range pools {
|
||||
r.pool.Put(pools[idx])
|
||||
}
|
||||
} else {
|
||||
rvals, err = r.cli.MGet(ctx, keys...).Result()
|
||||
}
|
||||
setSpanError(ctx, err)
|
||||
rvals, err := r.cli.MGet(ctx, keys...).Result()
|
||||
te := time.Since(ts)
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == goredis.Nil || (len(rvals) == 0) {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil || (len(rvals) == 0) {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -324,7 +250,7 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
|
||||
|
||||
itm.Set(reflect.New(vt.Elem()))
|
||||
if err = r.opts.Codec.Unmarshal(buf, itm.Interface()); err != nil {
|
||||
setSpanError(ctx, err)
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -335,8 +261,6 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
|
||||
|
||||
func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
|
||||
options := store.NewDeleteOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "delete")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -349,41 +273,30 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.Delete
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
var rkeys []string
|
||||
var pools []*strings.Builder
|
||||
if r.opts.Namespace != "" || options.Namespace != "" {
|
||||
rkeys = make([]string, len(keys))
|
||||
pools = make([]*strings.Builder, len(keys))
|
||||
for idx, key := range keys {
|
||||
b := r.pool.Get()
|
||||
pools[idx] = b
|
||||
rkeys[idx] = r.getKey(b, r.opts.Namespace, options.Namespace, key)
|
||||
keys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
|
||||
}
|
||||
}
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mdelete %v", keys))
|
||||
defer sp.Finish()
|
||||
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
var err error
|
||||
if r.opts.Namespace != "" || options.Namespace != "" {
|
||||
err = r.cli.Del(ctx, rkeys...).Err()
|
||||
for idx := range pools {
|
||||
r.pool.Put(pools[idx])
|
||||
}
|
||||
} else {
|
||||
err = r.cli.Del(ctx, keys...).Err()
|
||||
}
|
||||
setSpanError(ctx, err)
|
||||
err := r.cli.Del(ctx, keys...).Err()
|
||||
te := time.Since(ts)
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if err == goredis.Nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -391,12 +304,7 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.Delete
|
||||
}
|
||||
|
||||
func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
|
||||
options := store.NewDeleteOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "delete")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -409,22 +317,24 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOpti
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
ts := time.Now()
|
||||
err := r.cli.Del(ctx, r.getKey(b, r.opts.Namespace, options.Namespace, key)).Err()
|
||||
te := time.Since(ts)
|
||||
setSpanError(ctx, err)
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache delete %v", key))
|
||||
defer sp.Finish()
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if errors.Is(err, goredis.Nil) {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
err := r.cli.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
|
||||
te := time.Since(ts)
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -433,8 +343,6 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOpti
|
||||
|
||||
func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
|
||||
options := store.NewWriteOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "write")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -447,12 +355,13 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache mwrite %v", keys))
|
||||
defer sp.Finish()
|
||||
|
||||
kvs := make([]string, 0, len(keys)*2)
|
||||
pools := make([]*strings.Builder, len(keys))
|
||||
|
||||
for idx, key := range keys {
|
||||
b := r.pool.Get()
|
||||
pools[idx] = b
|
||||
kvs = append(kvs, r.getKey(b, r.opts.Namespace, options.Namespace, key))
|
||||
kvs = append(kvs, r.getKey(r.opts.Namespace, options.Namespace, key))
|
||||
|
||||
switch vt := vals[idx].(type) {
|
||||
case string:
|
||||
@ -462,53 +371,48 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
|
||||
default:
|
||||
buf, err := r.opts.Codec.Marshal(vt)
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
return err
|
||||
}
|
||||
kvs = append(kvs, string(buf))
|
||||
}
|
||||
}
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
|
||||
pipeliner := func(pipe goredis.Pipeliner) error {
|
||||
cmds, err := r.cli.Pipelined(ctx, func(pipe redis.Pipeliner) error {
|
||||
for idx := 0; idx < len(kvs); idx += 2 {
|
||||
if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
|
||||
setSpanError(ctx, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
})
|
||||
|
||||
ts := time.Now()
|
||||
cmds, err := r.cli.Pipelined(ctx, pipeliner)
|
||||
for idx := range pools {
|
||||
r.pool.Put(pools[idx])
|
||||
}
|
||||
te := time.Since(ts)
|
||||
setSpanError(ctx, err)
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if err == goredis.Nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cmd := range cmds {
|
||||
if err = cmd.Err(); err != nil {
|
||||
if err == goredis.Nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
}
|
||||
setSpanError(ctx, err)
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -517,12 +421,7 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
|
||||
}
|
||||
|
||||
func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
|
||||
options := store.NewWriteOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "write")
|
||||
|
||||
timeout := r.opts.Timeout
|
||||
if options.Timeout > 0 {
|
||||
@ -535,7 +434,9 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
|
||||
rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache write %v", rkey))
|
||||
defer sp.Finish()
|
||||
|
||||
var buf []byte
|
||||
switch vt := val.(type) {
|
||||
@ -547,26 +448,26 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
|
||||
var err error
|
||||
buf, err = r.opts.Codec.Marshal(val)
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
err := r.cli.Set(ctx, rkey, buf, options.TTL).Err()
|
||||
te := time.Since(ts)
|
||||
setSpanError(ctx, err)
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if errors.Is(err, goredis.Nil) {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
|
||||
} else {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -574,18 +475,12 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
|
||||
}
|
||||
|
||||
func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
|
||||
options := store.NewListOptions(opts...)
|
||||
labels := make([]string, 0, 6)
|
||||
labels = append(labels, "name", options.Name, "statement", "list")
|
||||
|
||||
if len(options.Namespace) == 0 {
|
||||
options.Namespace = r.opts.Namespace
|
||||
}
|
||||
|
||||
rkey := r.getKey(b, options.Namespace, "", options.Prefix+"*")
|
||||
rkey := r.getKey(options.Namespace, "", options.Prefix+"*")
|
||||
if options.Suffix != "" {
|
||||
rkey += options.Suffix
|
||||
}
|
||||
@ -601,37 +496,25 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, e
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
ctx, sp := r.opts.Tracer.Start(ctx, fmt.Sprintf("cache list %v", rkey))
|
||||
defer sp.Finish()
|
||||
|
||||
// TODO: add support for prefix/suffix/limit
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Inc()
|
||||
ts := time.Now()
|
||||
var keys []string
|
||||
var err error
|
||||
|
||||
if c, ok := r.cli.(*goredis.ClusterClient); ok {
|
||||
err = c.ForEachMaster(ctx, func(nctx context.Context, cli *goredis.Client) error {
|
||||
nkeys, nerr := cli.Keys(nctx, rkey).Result()
|
||||
if nerr != nil {
|
||||
return nerr
|
||||
}
|
||||
keys = append(keys, nkeys...)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
keys, err = r.cli.Keys(ctx, rkey).Result()
|
||||
}
|
||||
keys, err := r.cli.Keys(ctx, rkey).Result()
|
||||
te := time.Since(ts)
|
||||
setSpanError(ctx, err)
|
||||
|
||||
r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
|
||||
r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
|
||||
if err == goredis.Nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestInflight, "name", options.Name).Dec()
|
||||
r.opts.Meter.Summary(semconv.CacheRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
|
||||
r.opts.Meter.Histogram(semconv.CacheRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
|
||||
if err == redis.Nil {
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "miss").Inc()
|
||||
return nil, store.ErrNotFound
|
||||
} else if err == nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "git")...).Inc()
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "hit").Inc()
|
||||
} else if err != nil {
|
||||
r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
r.opts.Meter.Counter(semconv.CacheRequestTotal, "name", options.Name, "status", "failure").Inc()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -663,214 +546,82 @@ func (r *Store) String() string {
|
||||
}
|
||||
|
||||
func NewStore(opts ...store.Option) *Store {
|
||||
b := atomic.Uint32{}
|
||||
return &Store{
|
||||
opts: store.NewOptions(opts...),
|
||||
connected: &b,
|
||||
watchers: make(map[string]*watcher),
|
||||
}
|
||||
return &Store{opts: store.NewOptions(opts...)}
|
||||
}
|
||||
|
||||
func (r *Store) configure(opts ...store.Option) error {
|
||||
if r.cli != nil && len(opts) == 0 {
|
||||
func (r *Store) configure() error {
|
||||
var redisOptions *redis.Options
|
||||
var redisClusterOptions *redis.ClusterOptions
|
||||
var err error
|
||||
|
||||
nodes := r.opts.Addrs
|
||||
|
||||
if len(nodes) == 0 {
|
||||
nodes = []string{"redis://127.0.0.1:6379"}
|
||||
}
|
||||
|
||||
if r.cli != nil && r.opts.Context == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(&r.opts)
|
||||
}
|
||||
|
||||
universalOptions := DefaultUniversalOptions
|
||||
|
||||
if r.opts.Context != nil {
|
||||
if o, ok := r.opts.Context.Value(configKey{}).(*goredis.Options); ok {
|
||||
universalOptions.Addrs = []string{o.Addr}
|
||||
universalOptions.Dialer = o.Dialer
|
||||
universalOptions.OnConnect = o.OnConnect
|
||||
universalOptions.Username = o.Username
|
||||
universalOptions.Password = o.Password
|
||||
|
||||
universalOptions.MaxRetries = o.MaxRetries
|
||||
universalOptions.MinRetryBackoff = o.MinRetryBackoff
|
||||
universalOptions.MaxRetryBackoff = o.MaxRetryBackoff
|
||||
|
||||
universalOptions.DialTimeout = o.DialTimeout
|
||||
universalOptions.ReadTimeout = o.ReadTimeout
|
||||
universalOptions.WriteTimeout = o.WriteTimeout
|
||||
universalOptions.ContextTimeoutEnabled = o.ContextTimeoutEnabled
|
||||
|
||||
universalOptions.PoolFIFO = o.PoolFIFO
|
||||
|
||||
universalOptions.PoolSize = o.PoolSize
|
||||
universalOptions.PoolTimeout = o.PoolTimeout
|
||||
universalOptions.MinIdleConns = o.MinIdleConns
|
||||
universalOptions.MaxIdleConns = o.MaxIdleConns
|
||||
universalOptions.ConnMaxIdleTime = o.ConnMaxIdleTime
|
||||
universalOptions.ConnMaxLifetime = o.ConnMaxLifetime
|
||||
|
||||
if o.TLSConfig != nil {
|
||||
universalOptions.TLSConfig = o.TLSConfig
|
||||
if c, ok := r.opts.Context.Value(configKey{}).(*redis.Options); ok {
|
||||
redisOptions = c
|
||||
if r.opts.TLSConfig != nil {
|
||||
redisOptions.TLSConfig = r.opts.TLSConfig
|
||||
}
|
||||
}
|
||||
|
||||
if o, ok := r.opts.Context.Value(clusterConfigKey{}).(*goredis.ClusterOptions); ok {
|
||||
universalOptions.Addrs = o.Addrs
|
||||
universalOptions.Dialer = o.Dialer
|
||||
universalOptions.OnConnect = o.OnConnect
|
||||
universalOptions.Username = o.Username
|
||||
universalOptions.Password = o.Password
|
||||
|
||||
universalOptions.MaxRedirects = o.MaxRedirects
|
||||
universalOptions.ReadOnly = o.ReadOnly
|
||||
universalOptions.RouteByLatency = o.RouteByLatency
|
||||
universalOptions.RouteRandomly = o.RouteRandomly
|
||||
|
||||
universalOptions.MaxRetries = o.MaxRetries
|
||||
universalOptions.MinRetryBackoff = o.MinRetryBackoff
|
||||
universalOptions.MaxRetryBackoff = o.MaxRetryBackoff
|
||||
|
||||
universalOptions.DialTimeout = o.DialTimeout
|
||||
universalOptions.ReadTimeout = o.ReadTimeout
|
||||
universalOptions.WriteTimeout = o.WriteTimeout
|
||||
universalOptions.ContextTimeoutEnabled = o.ContextTimeoutEnabled
|
||||
|
||||
universalOptions.PoolFIFO = o.PoolFIFO
|
||||
|
||||
universalOptions.PoolSize = o.PoolSize
|
||||
universalOptions.PoolTimeout = o.PoolTimeout
|
||||
universalOptions.MinIdleConns = o.MinIdleConns
|
||||
universalOptions.MaxIdleConns = o.MaxIdleConns
|
||||
universalOptions.ConnMaxIdleTime = o.ConnMaxIdleTime
|
||||
universalOptions.ConnMaxLifetime = o.ConnMaxLifetime
|
||||
if o.TLSConfig != nil {
|
||||
universalOptions.TLSConfig = o.TLSConfig
|
||||
}
|
||||
}
|
||||
|
||||
if o, ok := r.opts.Context.Value(universalConfigKey{}).(*goredis.UniversalOptions); ok {
|
||||
universalOptions = o
|
||||
if o.TLSConfig != nil {
|
||||
universalOptions.TLSConfig = o.TLSConfig
|
||||
if c, ok := r.opts.Context.Value(clusterConfigKey{}).(*redis.ClusterOptions); ok {
|
||||
redisClusterOptions = c
|
||||
if r.opts.TLSConfig != nil {
|
||||
redisClusterOptions.TLSConfig = r.opts.TLSConfig
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(r.opts.Addrs) > 0 {
|
||||
universalOptions.Addrs = r.opts.Addrs
|
||||
} else {
|
||||
universalOptions.Addrs = []string{"127.0.0.1:6379"}
|
||||
if redisOptions != nil && redisClusterOptions != nil {
|
||||
return fmt.Errorf("must specify only one option Config or ClusterConfig")
|
||||
}
|
||||
|
||||
r.cli = goredis.NewUniversalClient(universalOptions)
|
||||
setTracing(r.cli, r.opts.Tracer)
|
||||
r.cli.AddHook(newEventHook(r))
|
||||
if redisOptions == nil && redisClusterOptions == nil && r.cli != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.pool = pool.NewStringsPool(50)
|
||||
if redisOptions == nil && redisClusterOptions == nil && len(nodes) == 1 {
|
||||
redisOptions, err = redis.ParseURL(nodes[0])
|
||||
if err != nil {
|
||||
redisOptions = DefaultOptions
|
||||
redisOptions.Addr = r.opts.Addrs[0]
|
||||
redisOptions.TLSConfig = r.opts.TLSConfig
|
||||
}
|
||||
} else if redisOptions == nil && redisClusterOptions == nil && len(nodes) > 1 {
|
||||
redisClusterOptions = DefaultClusterOptions
|
||||
redisClusterOptions.Addrs = r.opts.Addrs
|
||||
redisClusterOptions.TLSConfig = r.opts.TLSConfig
|
||||
}
|
||||
|
||||
r.statsMeter()
|
||||
if redisOptions != nil {
|
||||
r.cli = redis.NewClient(redisOptions)
|
||||
} else if redisClusterOptions != nil {
|
||||
r.cli = redis.NewClusterClient(redisClusterOptions)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Store) getKey(b *strings.Builder, mainNamespace string, opNamespace string, key string) string {
|
||||
func (r *Store) getKey(mainNamespace string, opNamespace string, key string) string {
|
||||
b := r.pool.Get()
|
||||
defer r.pool.Put(b)
|
||||
b.Reset()
|
||||
|
||||
if opNamespace == "" {
|
||||
opNamespace = mainNamespace
|
||||
}
|
||||
if opNamespace != "" {
|
||||
b.WriteString(opNamespace)
|
||||
b.WriteString(r.opts.Separator)
|
||||
b.WriteString(DefaultPathSeparator)
|
||||
}
|
||||
b.WriteString(key)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (r *Store) Watch(ctx context.Context, opts ...store.WatchOption) (store.Watcher, error) {
    id, err := id.New()
    if err != nil {
        return nil, err
    }
    wo, err := store.NewWatchOptions(opts...)
    if err != nil {
        return nil, err
    }
    // construct the watcher
    w := &watcher{
        exit: make(chan bool),
        ch:   make(chan store.Event),
        id:   id,
        opts: wo,
    }

    r.mu.Lock()
    r.watchers[w.id] = w
    r.mu.Unlock()

    return w, nil
}

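Watch registers the watcher under a freshly generated id; events are pushed by sendEvent below and consumed through Next until Stop is called. A consumption sketch, assuming s is a connected Store from this package:

// Assumes s is a connected *Store; Next blocks until an event arrives or
// Stop is called, after which it returns store.ErrWatcherStopped.
w, err := s.Watch(ctx)
if err != nil {
    return err
}
defer w.Stop()

for {
    ev, err := w.Next()
    if err != nil { // store.ErrWatcherStopped once w.Stop() has run
        break
    }
    log.Printf("event type=%v at=%v err=%v", ev.Type(), ev.Timestamp(), ev.Error())
}
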
func (r *Store) sendEvent(e store.Event) {
    r.mu.RLock()
    watchers := make([]*watcher, 0, len(r.watchers))
    for _, w := range r.watchers {
        watchers = append(watchers, w)
    }
    r.mu.RUnlock()
    for _, w := range watchers {
        select {
        case <-w.exit:
            r.mu.Lock()
            delete(r.watchers, w.id)
            r.mu.Unlock()
        default:
            select {
            case w.ch <- e:
            case <-time.After(sendEventTime):
            }
        }
    }
}

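sendEvent snapshots the watcher set under a read lock, prunes watchers that have already stopped, and uses a bounded send so that one stalled consumer cannot block delivery to the rest. The delivery step in isolation; the 10ms timeout is an assumed stand-in for sendEventTime, which is defined elsewhere in the package:

// Bounded, non-blocking delivery: either the watcher takes the event or it
// is dropped for that watcher once the timeout fires.
select {
case w.ch <- e:
case <-time.After(10 * time.Millisecond): // stand-in for sendEventTime
}
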
type watcher struct {
    ch   chan store.Event
    exit chan bool
    opts store.WatchOptions
    id   string
}

func (w *watcher) Next() (store.Event, error) {
    for {
        select {
        case e := <-w.ch:
            return e, nil
        case <-w.exit:
            return nil, store.ErrWatcherStopped
        }
    }
}

func (w *watcher) Stop() {
    select {
    case <-w.exit:
        return
    default:
        close(w.exit)
    }
}

type event struct {
    ts  time.Time
    t   store.EventType
    err error
}

func (e *event) Error() error {
    return e.err
}

func (e *event) Timestamp() time.Time {
    return e.ts
}

func (e *event) Type() store.EventType {
    return e.t
}

@ -4,67 +4,17 @@ import (
    "bytes"
    "context"
    "os"
    "sync/atomic"
    "testing"
    "time"

    goredis "github.com/redis/go-redis/v9"
    "github.com/redis/go-redis/v9"
    "go.unistack.org/micro/v3/store"
    "go.unistack.org/micro/v3/tracer"
)

func TestLazyConnect(t *testing.T) {
    t.Skip("skipping test for manual check")
    ctx := context.Background()
    var err error

    r := NewStore()

    if err = r.Init(); err != nil {
        t.Fatal(err)
    }
    if err = r.Connect(ctx); err != nil {
        t.Logf("connect failed %v", err)
    }

    for {
        if err = r.Write(ctx, "mykey", "myval"); err != nil {
            t.Logf("failed to write %v", err)
        }
    }
}

func TestKeepTTL(t *testing.T) {
    ctx := context.Background()

    if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
        t.Skip()
    }
    r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))

    if err := r.Init(store.LazyConnect(true)); err != nil {
        t.Fatal(err)
    }
    if err := r.Connect(ctx); err != nil {
        t.Fatal(err)
    }

    key := "key"
    err := r.Write(ctx, key, "val1", store.WriteTTL(15*time.Second))
    if err != nil {
        t.Fatalf("Write error: %v", err)
    }
    time.Sleep(3 * time.Second)
    err = r.Write(ctx, key, "val2", store.WriteTTL(-1))
    if err != nil {
        t.Fatalf("Write error: %v", err)
    }
}

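TestKeepTTL writes with a 15-second TTL, waits 3 seconds, then rewrites the key with store.WriteTTL(-1); the expectation is that the negative TTL preserves the remaining expiration rather than resetting it, which matches the go-redis KeepTTL sentinel (also -1, Redis 6.0 or newer). The equivalent calls made directly against go-redis, under the assumption that the store forwards the negative TTL this way:

// Direct go-redis equivalent of the test above, assuming store.WriteTTL(-1)
// is forwarded as goredis.KeepTTL (both equal -1; requires Redis >= 6.0).
rdb := goredis.NewClient(&goredis.Options{Addr: "127.0.0.1:6379"})
if err := rdb.Set(ctx, "key", "val1", 15*time.Second).Err(); err != nil {
    t.Fatal(err)
}
// Overwrites the value but leaves the remaining TTL in place.
if err := rdb.Set(ctx, "key", "val2", goredis.KeepTTL).Err(); err != nil {
    t.Fatal(err)
}
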
func Test_rkv_configure(t *testing.T) {
    type fields struct {
        options store.Options
        Client  goredis.UniversalClient
        Client  *redis.Client
    }
    type wantValues struct {
        username string
@ -87,7 +37,7 @@ func Test_rkv_configure(t *testing.T) {
            },
        },
        {
            name: "legacy Url", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
            name: "legacy Url", fields: fields{options: store.Options{Addrs: []string{"127.0.0.1:6379"}}, Client: nil},
            wantErr: false, want: wantValues{
                username: "",
                password: "",
@ -95,7 +45,7 @@ func Test_rkv_configure(t *testing.T) {
            },
        },
        {
            name: "New Url", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
            name: "New Url", fields: fields{options: store.Options{Addrs: []string{"redis://127.0.0.1:6379"}}, Client: nil},
            wantErr: false, want: wantValues{
                username: "",
                password: "",
@ -103,7 +53,7 @@ func Test_rkv_configure(t *testing.T) {
            },
        },
        {
            name: "Url with Pwd", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
            name: "Url with Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://:password@redis:6379"}}, Client: nil},
            wantErr: false, want: wantValues{
                username: "",
                password: "password",
@ -111,7 +61,7 @@ func Test_rkv_configure(t *testing.T) {
            },
        },
        {
            name: "Url with username and Pwd", fields: fields{options: store.Options{Tracer: tracer.DefaultTracer, Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
            name: "Url with username and Pwd", fields: fields{options: store.Options{Addrs: []string{"redis://username:password@redis:6379"}}, Client: nil},
            wantErr: false, want: wantValues{
                username: "username",
                password: "password",
@ -121,11 +71,9 @@ func Test_rkv_configure(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            b := atomic.Uint32{}
            rc := &Store{
                opts:      tt.fields.options,
                cli:       tt.fields.Client,
                connected: &b,
            }
            err := rc.configure()
            if (err != nil) != tt.wantErr {

49 stats.go
@ -1,49 +0,0 @@
package redis

import (
    "time"

    goredis "github.com/redis/go-redis/v9"
    "go.unistack.org/micro/v3/meter"
)

var (
    PoolHitsTotal        = "pool_hits_total"
    PoolMissesTotal      = "pool_misses_total"
    PoolTimeoutTotal     = "pool_timeout_total"
    PoolConnTotalCurrent = "pool_conn_total_current"
    PoolConnIdleCurrent  = "pool_conn_idle_current"
    PoolConnStaleTotal   = "pool_conn_stale_total"
)

type Statser interface {
    PoolStats() *goredis.PoolStats
}

func (r *Store) statsMeter() {
    var st Statser

    if r.cli != nil {
        st = r.cli
    } else {
        return
    }

    go func() {
        ticker := time.NewTicker(meter.DefaultMeterStatsInterval)
        defer ticker.Stop()

        for range ticker.C {
            if st == nil {
                return
            }
            stats := st.PoolStats()
            r.opts.Meter.Counter(PoolHitsTotal).Set(uint64(stats.Hits))
            r.opts.Meter.Counter(PoolMissesTotal).Set(uint64(stats.Misses))
            r.opts.Meter.Counter(PoolTimeoutTotal).Set(uint64(stats.Timeouts))
            r.opts.Meter.Counter(PoolConnTotalCurrent).Set(uint64(stats.TotalConns))
            r.opts.Meter.Counter(PoolConnIdleCurrent).Set(uint64(stats.IdleConns))
            r.opts.Meter.Counter(PoolConnStaleTotal).Set(uint64(stats.StaleConns))
        }
    }()
}

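statsMeter polls the client's connection-pool statistics every meter.DefaultMeterStatsInterval and publishes them under the metric names declared above. The same counters can be read directly from go-redis; rdb here is assumed to be a connected client satisfying the Statser interface:

// Reading the pool counters without the meter loop.
stats := rdb.PoolStats()
log.Printf("hits=%d misses=%d timeouts=%d total=%d idle=%d stale=%d",
    stats.Hits, stats.Misses, stats.Timeouts,
    stats.TotalConns, stats.IdleConns, stats.StaleConns)
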
128 tracer.go
@ -1,128 +0,0 @@
package redis

import (
    "context"
    "fmt"
    "net"
    "strconv"

    rediscmd "github.com/redis/go-redis/extra/rediscmd/v9"
    goredis "github.com/redis/go-redis/v9"
    "go.unistack.org/micro/v3/tracer"
)

func setTracing(rdb goredis.UniversalClient, tr tracer.Tracer, opts ...tracer.SpanOption) {
    switch rdb := rdb.(type) {
    case *goredis.Client:
        opt := rdb.Options()
        connString := formatDBConnString(opt.Network, opt.Addr)
        rdb.AddHook(newTracingHook(connString, tr))
    case *goredis.ClusterClient:
        rdb.OnNewNode(func(rdb *goredis.Client) {
            opt := rdb.Options()
            connString := formatDBConnString(opt.Network, opt.Addr)
            rdb.AddHook(newTracingHook(connString, tr))
        })
    case *goredis.Ring:
        rdb.OnNewNode(func(rdb *goredis.Client) {
            opt := rdb.Options()
            connString := formatDBConnString(opt.Network, opt.Addr)
            rdb.AddHook(newTracingHook(connString, tr))
        })
    }
}

type tracingHook struct {
    tr   tracer.Tracer
    opts []tracer.SpanOption
}

var _ goredis.Hook = (*tracingHook)(nil)

func newTracingHook(connString string, tr tracer.Tracer, opts ...tracer.SpanOption) *tracingHook {
    opts = append(opts, tracer.WithSpanKind(tracer.SpanKindClient))
    if connString != "" {
        opts = append(opts, tracer.WithSpanLabels("db.connection_string", connString))
    }

    return &tracingHook{
        tr:   tr,
        opts: opts,
    }
}

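setTracing above attaches this hook automatically for each client type, and per node for cluster and ring clients; it can also be wired by hand. A short sketch, using the tracer.DefaultTracer that the tests elsewhere in this comparison rely on:

// Manual wiring of the tracing hook; setTracing does the same per client type.
rdb := goredis.NewClient(&goredis.Options{Addr: "127.0.0.1:6379"})
rdb.AddHook(newTracingHook("redis://127.0.0.1:6379", tracer.DefaultTracer))
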
func (h *tracingHook) DialHook(hook goredis.DialHook) goredis.DialHook {
    return func(ctx context.Context, network, addr string) (net.Conn, error) {
        /*
            _, span := h.tr.Start(ctx, "goredis.dial", h.opts...)
            defer span.Finish()
        */
        conn, err := hook(ctx, network, addr)
        // recordError(span, err)

        return conn, err
    }
}

func (h *tracingHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook {
    return func(ctx context.Context, cmd goredis.Cmder) error {
        cmdString := rediscmd.CmdString(cmd)
        var err error

        switch cmdString {
        case "cluster slots":
            break
        default:
            _, span := h.tr.Start(ctx, "sdk.database", append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...)
            defer func() {
                recordError(span, err)
                span.Finish()
            }()
        }

        err = hook(ctx, cmd)

        return err
    }
}

func (h *tracingHook) ProcessPipelineHook(hook goredis.ProcessPipelineHook) goredis.ProcessPipelineHook {
    return func(ctx context.Context, cmds []goredis.Cmder) error {
        _, cmdsString := rediscmd.CmdsString(cmds)

        opts := append(h.opts, tracer.WithSpanLabels(
            "db.database.num_cmd", strconv.Itoa(len(cmds)),
            "db.statement", cmdsString,
        ))

        _, span := h.tr.Start(ctx, "sdk.database", opts...)
        defer span.Finish()

        err := hook(ctx, cmds)
        recordError(span, err)

        return err
    }
}

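ProcessPipelineHook opens a single span per pipeline and labels it with the command count and the rendered statements. A pipeline that would pass through it once, with db.database.num_cmd set to "2"; rdb is assumed to be a client with the hook attached:

pipe := rdb.Pipeline()
pipe.Set(ctx, "a", "1", 0)
pipe.Get(ctx, "a")
if _, err := pipe.Exec(ctx); err != nil && err != goredis.Nil {
    log.Fatal(err)
}
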
func setSpanError(ctx context.Context, err error) {
    if err == nil || err == goredis.Nil {
        return
    }
    if sp, ok := tracer.SpanFromContext(ctx); ok && sp != nil {
        sp.SetStatus(tracer.SpanStatusError, err.Error())
    }
}

func recordError(span tracer.Span, err error) {
    if err != nil && err != goredis.Nil {
        span.SetStatus(tracer.SpanStatusError, err.Error())
    }
}

func formatDBConnString(network, addr string) string {
    if network == "tcp" {
        network = "redis"
    }
    return fmt.Sprintf("%s://%s", network, addr)
}