Compare commits


16 Commits
v4 ... v3.10.17

Author SHA1 Message Date
256e61a437 fixup slice override
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-09-25 15:27:53 +03:00
f9cdd41c94 Merge pull request 'changed MetricPrefix to Labels' (#112) from devstigneev/micro-store-redis:v3 into v3
Reviewed-on: #112
Reviewed-by: Василий Толстов <v.tolstov@unistack.org>
2024-09-23 18:23:27 +03:00
ecad15fe17 changed MetricPrefix to Labels
2024-09-21 14:01:04 +03:00
fa3d18b353 tracing commented redis.dial
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-18 11:28:45 +03:00
2f3951773f cleanup trace spans
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 23:11:12 +03:00
b263e14032 cleanup trace spans from cluster slots command
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 13:44:29 +03:00
518cc1db73 fixup duplicate nested redis span
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 12:59:05 +03:00
4484cd34ec unify span names
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 09:16:08 +03:00
7bceeee6bf unify span names
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-05 08:57:29 +03:00
aed9512b93 improve metrics and tracing
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-04 15:13:19 +03:00
de72a10973 rework cluster mode
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-23 09:30:35 +03:00
62c2de51d4 fixup strings pool
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-15 08:41:58 +03:00
741b2310ec add tracer support
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-14 22:40:30 +03:00
1e8a44b088 add meter support
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-14 22:27:28 +03:00
2245314c2f Merge pull request 'add ability to get *redis.Client' (#110) from redis into v3
Reviewed-on: #110
2023-12-12 13:49:14 +03:00
db770c3fe7 add ability to get *redis.Client
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-12-12 13:48:47 +03:00
26 changed files with 614 additions and 961 deletions

@@ -1,6 +1,6 @@
 ---
 name: Bug report
-about: For reporting bugs in micro
+about: For reporting bugs in go-micro
 title: "[BUG]"
 labels: ''
 assignees: ''
@@ -16,3 +16,9 @@ assignees: ''
 **How to reproduce the bug:**
 If possible, please include a minimal code snippet here.
+
+**Environment:**
+Go Version: please paste `go version` output here
+```
+please paste `go env` output here
+```

@@ -1,6 +1,6 @@
 ---
 name: Feature request / Enhancement
-about: If you have a need not served by micro
+about: If you have a need not served by go-micro
 title: "[FEATURE]"
 labels: ''
 assignees: ''
@@ -14,4 +14,4 @@ A clear and concise description of what the problem is. Ex. I'm always frustrate
 A clear and concise description of what you want to happen.
 **Additional context**
 Add any other context or screenshots about the feature request here.

@@ -1,8 +1,14 @@
 ---
 name: Question
-about: Ask a question about micro
+about: Ask a question about go-micro
 title: ''
 labels: ''
 assignees: ''
 ---
+
+Before asking, please check if your question has already been answered:
+1. Check the documentation - https://micro.mu/docs/
+2. Check the examples and plugins - https://github.com/micro/examples & https://github.com/micro/go-plugins
+3. Search existing issues

@@ -1,28 +0,0 @@
name: "autoapprove"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
workflow_run:
workflows: ["prbuild"]
types:
- completed
permissions:
pull-requests: write
contents: write
jobs:
autoapprove:
runs-on: ubuntu-latest
steps:
- name: approve
run: [ "curl -o tea https://dl.gitea.com/tea/main/tea-main-linux-amd64",
"chmod +x ./tea",
"./tea login add --name unistack --token ${{ secrets.GITHUB_TOKEN }} --url https://git.unistack.org",
"./tea pr --repo ${{ github.event.repository.name }}"
]
if: github.actor == 'vtolstov'
id: approve
with:
github-token: ${{ secrets.GITHUB_TOKEN }}

.github/dependabot.yml

@@ -0,0 +1,19 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
+  # Maintain dependencies for Golang
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "daily"

.github/workflows/autoapprove.yml

@@ -0,0 +1,20 @@
name: "autoapprove"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
autoapprove:
runs-on: ubuntu-latest
steps:
- name: approve
uses: hmarr/auto-approve-action@v3
if: github.actor == 'vtolstov' || github.actor == 'dependabot[bot]'
id: approve
with:
github-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/automerge.yml

@@ -0,0 +1,21 @@
name: "automerge"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
automerge:
runs-on: ubuntu-latest
if: github.actor == 'vtolstov'
steps:
- name: merge
id: merge
run: gh pr merge --auto --merge "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.TOKEN}}

.github/workflows/build.yml

@@ -0,0 +1,47 @@
+name: build
+on:
+  push:
+    branches:
+      - master
+      - v3
+jobs:
+  test:
+    name: test
+    runs-on: ubuntu-latest
+    steps:
+      - name: setup
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.17
+      - name: checkout
+        uses: actions/checkout@v3
+      - name: cache
+        uses: actions/cache@v3
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-go-
+      - name: deps
+        run: go get -v -t -d ./...
+      - name: test
+        env:
+          INTEGRATION_TESTS: yes
+        run: go test -mod readonly -v ./...
+  lint:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v3
+      - name: lint
+        uses: golangci/golangci-lint-action@v3.4.0
+        continue-on-error: true
+        with:
+          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
+          version: v1.30
+          # Optional: working directory, useful for monorepos
+          # working-directory: somedir
+          # Optional: golangci-lint command line arguments.
+          # args: --issues-exit-code=0
+          # Optional: show only new issues if it's a pull request. The default value is `false`.
+          # only-new-issues: true

.github/workflows/codeql-analysis.yml

@@ -0,0 +1,78 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "codeql"
+
+on:
+  workflow_run:
+    workflows: ["prbuild"]
+    types:
+      - completed
+  push:
+    branches: [ master, v3 ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master, v3 ]
+  schedule:
+    - cron: '34 1 * * 0'
+
+jobs:
+  analyze:
+    name: analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'go' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+      - name: checkout
+        uses: actions/checkout@v3
+      - name: setup
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.17
+
+      # Initializes the CodeQL tools for scanning.
+      - name: init
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+          # If you wish to specify custom queries, you can do so here or in a config file.
+          # By default, queries listed here will override any specified in a config file.
+          # Prefix the list here with "+" to use these queries and those in the config file.
+          # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      - name: autobuild
+        uses: github/codeql-action/autobuild@v2
+
+      # Command-line programs to run using the OS shell.
+      # 📚 https://git.io/JvXDl
+      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+      #    and modify them (or add more) to build your code if your project
+      #    uses a compiled language
+      #- run: |
+      #   make bootstrap
+      #   make release
+
+      - name: analyze
+        uses: github/codeql-action/analyze@v2

@@ -0,0 +1,27 @@
name: "dependabot-automerge"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
automerge:
runs-on: ubuntu-latest
if: github.actor == 'dependabot[bot]'
steps:
- name: metadata
id: metadata
uses: dependabot/fetch-metadata@v1.3.6
with:
github-token: "${{ secrets.TOKEN }}"
- name: merge
id: merge
if: ${{contains(steps.metadata.outputs.dependency-names, 'go.unistack.org')}}
run: gh pr merge --auto --merge "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.TOKEN}}

@@ -1,53 +0,0 @@
-name: coverage
-
-on:
-  push:
-    branches: [ main, v3, v4 ]
-    paths-ignore:
-      - '.github/**'
-      - '.gitea/**'
-  pull_request:
-    branches: [ main, v3, v4 ]
-
-jobs:
-  build:
-    if: github.server_url != 'https://github.com'
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout code
-        uses: actions/checkout@v4
-        with:
-          filter: 'blob:none'
-      - name: setup go
-        uses: actions/setup-go@v5
-        with:
-          cache-dependency-path: "**/*.sum"
-          go-version: 'stable'
-      - name: test coverage
-        run: |
-          go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./...
-          go tool cover -func coverage.out -o coverage.out
-      - name: coverage badge
-        uses: tj-actions/coverage-badge-go@v2
-        with:
-          green: 80
-          filename: coverage.out
-      - uses: stefanzweifel/git-auto-commit-action@v4
-        name: autocommit
-        with:
-          commit_message: Apply Code Coverage Badge
-          skip_fetch: false
-          skip_checkout: false
-          file_pattern: ./README.md
-      - name: push
-        if: steps.auto-commit-action.outputs.changes_detected == 'true'
-        uses: ad-m/github-push-action@master
-        with:
-          github_token: ${{ github.token }}
-          branch: ${{ github.ref }}

@@ -1,29 +0,0 @@
-name: lint
-
-on:
-  pull_request:
-    types: [opened, reopened, synchronize]
-    branches: [ master, v3, v4 ]
-    paths-ignore:
-      - '.github/**'
-      - '.gitea/**'
-
-jobs:
-  lint:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout code
-        uses: actions/checkout@v4
-        with:
-          filter: 'blob:none'
-      - name: setup go
-        uses: actions/setup-go@v5
-        with:
-          cache-dependency-path: "**/*.sum"
-          go-version: 'stable'
-      - name: setup deps
-        run: go get -v ./...
-      - name: run lint
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: 'latest'

@@ -1,94 +0,0 @@
-name: sync
-
-on:
-  schedule:
-    - cron: '*/5 * * * *'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-jobs:
-  sync:
-    if: github.server_url != 'https://github.com'
-    runs-on: ubuntu-latest
-    steps:
-      - name: init
-        run: |
-          git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
-          git config --global user.name "github-actions[bot]"
-          echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
-          echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
-      - name: check master
-        id: check_master
-        run: |
-          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
-          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
-          echo "src_hash=$src_hash"
-          echo "dst_hash=$dst_hash"
-          if [ "$src_hash" != "$dst_hash" ]; then
-            echo "sync_needed=true" >> $GITHUB_OUTPUT
-          else
-            echo "sync_needed=false" >> $GITHUB_OUTPUT
-          fi
-      - name: sync master
-        if: steps.check_master.outputs.sync_needed == 'true'
-        run: |
-          git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
-          cd repo
-          git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
-          git pull --rebase upstream master
-          git push upstream master --progress
-          git push origin master --progress
-          cd ../
-          rm -rf repo
-      - name: check v3
-        id: check_v3
-        run: |
-          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
-          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
-          echo "src_hash=$src_hash"
-          echo "dst_hash=$dst_hash"
-          if [ "$src_hash" != "$dst_hash" ]; then
-            echo "sync_needed=true" >> $GITHUB_OUTPUT
-          else
-            echo "sync_needed=false" >> $GITHUB_OUTPUT
-          fi
-      - name: sync v3
-        if: steps.check_v3.outputs.sync_needed == 'true'
-        run: |
-          git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
-          cd repo
-          git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
-          git pull --rebase upstream v3
-          git push upstream v3 --progress
-          git push origin v3 --progress
-          cd ../
-          rm -rf repo
-      - name: check v4
-        id: check_v4
-        run: |
-          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
-          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
-          echo "src_hash=$src_hash"
-          echo "dst_hash=$dst_hash"
-          if [ "$src_hash" != "$dst_hash" ]; then
-            echo "sync_needed=true" >> $GITHUB_OUTPUT
-          else
-            echo "sync_needed=false" >> $GITHUB_OUTPUT
-          fi
-      - name: sync v4
-        if: steps.check_v4.outputs.sync_needed == 'true'
-        run: |
-          git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
-          cd repo
-          git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
-          git pull --rebase upstream v4
-          git push upstream v4 --progress
-          git push origin v4 --progress
-          cd ../
-          rm -rf repo

@@ -1,31 +0,0 @@
-name: test
-
-on:
-  pull_request:
-    types: [opened, reopened, synchronize]
-    branches: [ master, v3, v4 ]
-  push:
-    branches: [ master, v3, v4 ]
-    paths-ignore:
-      - '.github/**'
-      - '.gitea/**'
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout code
-        uses: actions/checkout@v4
-        with:
-          filter: 'blob:none'
-      - name: setup go
-        uses: actions/setup-go@v5
-        with:
-          cache-dependency-path: "**/*.sum"
-          go-version: 'stable'
-      - name: setup deps
-        run: go get -v ./...
-      - name: run test
-        env:
-          INTEGRATION_TESTS: yes
-        run: go test -mod readonly -v ./...

@@ -1,50 +0,0 @@
-name: test
-
-on:
-  pull_request:
-    types: [opened, reopened, synchronize]
-    branches: [ master, v3, v4 ]
-  push:
-    branches: [ master, v3, v4 ]
-    paths-ignore:
-      - '.github/**'
-      - '.gitea/**'
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    steps:
-      - name: checkout code
-        uses: actions/checkout@v4
-        with:
-          filter: 'blob:none'
-      - name: checkout tests
-        uses: actions/checkout@v4
-        with:
-          ref: master
-          filter: 'blob:none'
-          repository: unistack-org/micro-tests
-          path: micro-tests
-      - name: setup go
-        uses: actions/setup-go@v5
-        with:
-          cache-dependency-path: "**/*.sum"
-          go-version: 'stable'
-      - name: setup go work
-        env:
-          GOWORK: ${{ github.workspace }}/go.work
-        run: |
-          go work init
-          go work use .
-          go work use micro-tests
-      - name: setup deps
-        env:
-          GOWORK: ${{ github.workspace }}/go.work
-        run: go get -v ./...
-      - name: run tests
-        env:
-          INTEGRATION_TESTS: yes
-          GOWORK: ${{ github.workspace }}/go.work
-        run: |
-          cd micro-tests
-          go test -mod readonly -v ./... || true

.github/workflows/pr.yml

@@ -0,0 +1,47 @@
+name: prbuild
+on:
+  pull_request:
+    branches:
+      - master
+      - v3
+jobs:
+  test:
+    name: test
+    runs-on: ubuntu-latest
+    steps:
+      - name: setup
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.17
+      - name: checkout
+        uses: actions/checkout@v3
+      - name: cache
+        uses: actions/cache@v3
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-go-
+      - name: deps
+        run: go get -v -t -d ./...
+      - name: test
+        env:
+          INTEGRATION_TESTS: yes
+        run: go test -mod readonly -v ./...
+  lint:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v3
+      - name: lint
+        uses: golangci/golangci-lint-action@v3.4.0
+        continue-on-error: true
+        with:
+          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
+          version: v1.30
+          # Optional: working directory, useful for monorepos
+          # working-directory: somedir
+          # Optional: golangci-lint command line arguments.
+          # args: --issues-exit-code=0
+          # Optional: show only new issues if it's a pull request. The default value is `false`.
+          # only-new-issues: true

@@ -1,5 +0,0 @@
-run:
-  concurrency: 8
-  timeout: 5m
-  issues-exit-code: 1
-  tests: true

@@ -1,2 +0,0 @@
-# micro-store-redis
-![Coverage](https://img.shields.io/badge/Coverage-36.6%25-yellow)

@@ -1,86 +0,0 @@
-package redis
-
-import (
-	"context"
-	"errors"
-	"net"
-	"time"
-
-	goredis "github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/store"
-)
-
-type eventHook struct {
-	s *Store
-}
-
-var _ goredis.Hook = (*eventHook)(nil)
-
-func newEventHook(s *Store) *eventHook {
-	return &eventHook{s: s}
-}
-
-func (h *eventHook) DialHook(hook goredis.DialHook) goredis.DialHook {
-	return func(ctx context.Context, network, addr string) (net.Conn, error) {
-		conn, err := hook(ctx, network, addr)
-		if err != nil {
-			if !isRedisError(err) {
-				if h.s.connected.CompareAndSwap(1, 0) {
-					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
-				}
-			} else {
-				h.s.connected.Store(1)
-			}
-		} else {
-			if h.s.connected.CompareAndSwap(0, 1) {
-				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
-			}
-		}
-		return conn, err
-	}
-}
-
-func (h *eventHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook {
-	return func(ctx context.Context, cmd goredis.Cmder) error {
-		err := hook(ctx, cmd)
-		if err != nil {
-			if !isRedisError(err) {
-				if h.s.connected.CompareAndSwap(1, 0) {
-					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
-				}
-			} else {
-				h.s.connected.Store(1)
-			}
-		} else {
-			if h.s.connected.CompareAndSwap(0, 1) {
-				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
-			}
-		}
-		return err
-	}
-}
-
-func (h *eventHook) ProcessPipelineHook(hook goredis.ProcessPipelineHook) goredis.ProcessPipelineHook {
-	return func(ctx context.Context, cmds []goredis.Cmder) error {
-		err := hook(ctx, cmds)
-		if err != nil {
-			if !isRedisError(err) {
-				if h.s.connected.CompareAndSwap(1, 0) {
-					h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeDisconnect})
-				}
-			} else {
-				h.s.connected.Store(1)
-			}
-		} else {
-			if h.s.connected.CompareAndSwap(0, 1) {
-				h.s.sendEvent(&event{ts: time.Now(), err: err, t: store.EventTypeConnect})
-			}
-		}
-		return err
-	}
-}
-
-func isRedisError(err error) bool {
-	var rerr goredis.Error
-	return errors.As(err, &rerr)
-}
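Note: the deleted file above implements go-redis's goredis.Hook interface to track connection state and emit store connect/disconnect events. As a hedged sketch of how such a hook is wired up (AddHook is standard go-redis v9 API; Store and newEventHook come from the diff above, while the client setup here is assumed for illustration, not taken from this repository):

```go
// Sketch only: attaching the event hook from the deleted event.go
// to a go-redis universal client. s is an already-constructed *Store.
cli := goredis.NewUniversalClient(&goredis.UniversalOptions{
	Addrs: []string{"127.0.0.1:6379"}, // assumed address, not from this repo
})
// Every Dial/Process/Pipeline call now flows through eventHook, which
// flips Store.connected and sends store.EventTypeConnect/Disconnect events.
cli.AddHook(newEventHook(s))
```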

go.mod

@@ -1,24 +1,16 @@
-module go.unistack.org/micro-store-redis/v4
+module go.unistack.org/micro-store-redis/v3
 
-go 1.22.0
+go 1.21
 
-toolchain go1.24.2
+toolchain go1.22.4
 
 require (
-	github.com/redis/go-redis/extra/rediscmd/v9 v9.8.0
-	github.com/redis/go-redis/v9 v9.8.0
-	go.unistack.org/micro/v4 v4.1.10
+	github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3
+	github.com/redis/go-redis/v9 v9.5.3
+	go.unistack.org/micro/v3 v3.10.80
 )
 
 require (
-	github.com/ash3in/uuidv8 v1.2.0 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/uuid v1.6.0 // indirect
-	github.com/matoous/go-nanoid v1.5.1 // indirect
-	github.com/spf13/cast v1.8.0 // indirect
-	go.unistack.org/micro-proto/v4 v4.1.0 // indirect
-	google.golang.org/protobuf v1.36.6 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
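Note: because the module path carries the major version, consumers import different paths on each side of this compare (Go semantic import versioning). A hedged sketch, referencing only DefaultOptions, which appears in redis.go below:

```go
package main

// v3 side of this compare; the v4 side imports .../micro-store-redis/v4.
import redisstore "go.unistack.org/micro-store-redis/v3"

func main() {
	_ = redisstore.DefaultOptions // exported in redis.go below
}
```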

go.sum

@@ -1,49 +1,14 @@
-github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
-github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
-github.com/ash3in/uuidv8 v1.2.0 h1:2oogGdtCPwaVtyvPPGin4TfZLtOGE5F+W++E880G6SI=
-github.com/ash3in/uuidv8 v1.2.0/go.mod h1:BnU0wJBxnzdEKmVg4xckBkD+VZuecTFTUP3M0dWgyY4=
 github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
 github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
 github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4=
-github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/redis/go-redis/extra/rediscmd/v9 v9.8.0 h1:/A+PnpT6ufTUt/6YPXiZlCRoyyfEnDag5WGrEK8Gq0I=
-github.com/redis/go-redis/extra/rediscmd/v9 v9.8.0/go.mod h1:FGO4BNjl5TfH9U771826GIW2Ul4pOEqHAN+0xjfw+dU=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3 h1:1/BDligzCa40GTllkDnY3Y5DTHuKCONbB2JcRyIfl20=
+github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3/go.mod h1:3dZmcLn3Qw6FLlWASn1g4y+YO9ycEFUOM+bhBmzLVKQ=
-github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
-github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
+github.com/redis/go-redis/v9 v9.5.3 h1:fOAp1/uJG+ZtcITgZOfYFmTKPE7n4Vclj1wZFgRciUU=
+github.com/redis/go-redis/v9 v9.5.3/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
-github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk=
-go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec=
-go.unistack.org/micro/v4 v4.1.10 h1:ElmFSEQmLlG42D7tzMHQhx3d5paU7LWLvE8mpui3V0U=
-go.unistack.org/micro/v4 v4.1.10/go.mod h1:b4dr7RFlbpSfSsKCva9UuX4zLtkYQteEK+Uuac39qJE=
+go.unistack.org/micro/v3 v3.10.80 h1:A0zWNoM9MOcMg9gdFFgVkgbT3uSYVIINhuvumX9nP2o=
+go.unistack.org/micro/v3 v3.10.80/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -1,49 +1,46 @@
 package redis
 
 import (
-	goredis "github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/logger"
-	"go.unistack.org/micro/v4/meter"
-	"go.unistack.org/micro/v4/store"
-	"go.unistack.org/micro/v4/tracer"
+	"time"
+
+	"github.com/redis/go-redis/v9"
+	"go.unistack.org/micro/v3/logger"
+	"go.unistack.org/micro/v3/meter"
+	"go.unistack.org/micro/v3/store"
+	"go.unistack.org/micro/v3/tracer"
 )
 
 type configKey struct{}
 
-func Config(c *goredis.Options) store.Option {
+func Config(c *redis.Options) store.Option {
 	return store.SetOption(configKey{}, c)
 }
 
 type clusterConfigKey struct{}
 
-func ClusterConfig(c *goredis.ClusterOptions) store.Option {
+func ClusterConfig(c *redis.ClusterOptions) store.Option {
 	return store.SetOption(clusterConfigKey{}, c)
 }
 
-type universalConfigKey struct{}
-
-func UniversalConfig(c *goredis.UniversalOptions) store.Option {
-	return store.SetOption(universalConfigKey{}, c)
-}
-
-type failoverConfigKey struct{}
-
-func FailoverConfig(c *goredis.FailoverOptions) store.Option {
-	return store.SetOption(failoverConfigKey{}, c)
-}
-
 var (
+	// DefaultMeterStatsInterval holds default stats interval
+	DefaultMeterStatsInterval = 5 * time.Second
+	// DefaultMeterMetricPrefix holds default metric prefix
+	DefaultMeterMetricPrefix = "micro_store_"
+
 	labelHost = "redis_host"
 	labelName = "redis_name"
 )
 
 // Options struct holds wrapper options
 type Options struct {
-	Logger    logger.Logger
-	Meter     meter.Meter
-	Tracer    tracer.Tracer
-	RedisHost string
-	RedisName string
+	Logger             logger.Logger
+	Meter              meter.Meter
+	Tracer             tracer.Tracer
+	MeterMetricPrefix  string
+	MeterStatsInterval time.Duration
+	RedisHost          string
+	RedisName          string
 }
 
 // Option func signature
@@ -52,9 +49,11 @@ type Option func(*Options)
 // NewOptions create new Options struct from provided option slice
 func NewOptions(opts ...Option) Options {
 	options := Options{
-		Logger: logger.DefaultLogger,
-		Meter:  meter.DefaultMeter,
-		Tracer: tracer.DefaultTracer,
+		Logger:             logger.DefaultLogger,
+		Meter:              meter.DefaultMeter,
+		Tracer:             tracer.DefaultTracer,
+		MeterStatsInterval: DefaultMeterStatsInterval,
+		MeterMetricPrefix:  DefaultMeterMetricPrefix,
 	}
 
 	for _, o := range opts {
@@ -67,7 +66,21 @@ func NewOptions(opts ...Option) Options {
 			labelName, options.RedisName),
 	)
 
-	options.Logger = options.Logger.Clone(logger.WithAddCallerSkipCount(1))
+	options.Logger = options.Logger.Clone(logger.WithCallerSkipCount(1))
 
 	return options
 }
+
+// MetricInterval specifies stats interval for *sql.DB
+func MetricInterval(td time.Duration) Option {
+	return func(o *Options) {
+		o.MeterStatsInterval = td
+	}
+}
+
+// MetricPrefix specifies prefix for each metric
+func MetricPrefix(pref string) Option {
+	return func(o *Options) {
+		o.MeterMetricPrefix = pref
+	}
+}
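Note: the v3 side adds the MetricPrefix and MetricInterval options shown above (MetricPrefix is what commit ecad15fe17 in this compare later replaces with labels). A hedged usage sketch built only from symbols present in this diff:

```go
// Sketch only: constructing Options with the v3-side helpers.
opts := NewOptions(
	MetricPrefix("micro_store_"),   // overrides DefaultMeterMetricPrefix
	MetricInterval(10*time.Second), // overrides DefaultMeterStatsInterval
)
_ = opts
```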

redis.go

@@ -2,25 +2,21 @@ package redis
 import (
 	"context"
-	"errors"
+	"fmt"
 	"reflect"
 	"strings"
-	"sync"
-	"sync/atomic"
 	"time"
 
-	goredis "github.com/redis/go-redis/v9"
-	"go.unistack.org/micro/v4/semconv"
-	"go.unistack.org/micro/v4/store"
-	"go.unistack.org/micro/v4/util/id"
-	pool "go.unistack.org/micro/v4/util/xpool"
+	redis "github.com/redis/go-redis/v9"
+	"go.unistack.org/micro/v3/semconv"
+	"go.unistack.org/micro/v3/store"
+	pool "go.unistack.org/micro/v3/util/xpool"
 )
 
 var (
-	_ store.Store = (*Store)(nil)
-	_ store.Event = (*event)(nil)
-
-	sendEventTime = 10 * time.Millisecond
+	DefaultPathSeparator = "/"
 
-	DefaultUniversalOptions = &goredis.UniversalOptions{
+	DefaultClusterOptions = &redis.ClusterOptions{
 		Username:        "",
 		Password:        "", // no password set
 		MaxRetries:      2,
@@ -32,19 +28,7 @@ var (
 		MinIdleConns:    10,
 	}
 
-	DefaultClusterOptions = &goredis.ClusterOptions{
-		Username:        "",
-		Password:        "", // no password set
-		MaxRetries:      2,
-		MaxRetryBackoff: 256 * time.Millisecond,
-		DialTimeout:     1 * time.Second,
-		ReadTimeout:     1 * time.Second,
-		WriteTimeout:    1 * time.Second,
-		PoolTimeout:     1 * time.Second,
-		MinIdleConns:    10,
-	}
-
-	DefaultOptions = &goredis.Options{
+	DefaultOptions = &redis.Options{
 		Username: "",
 		Password: "", // no password set
 		DB:       0,  // use default DB
@@ -59,46 +43,33 @@ var (
 )
 
 type Store struct {
-	cli       goredis.UniversalClient
-	pool      *pool.StringsPool
-	connected *atomic.Uint32
-	opts      store.Options
-	watchers  map[string]*watcher
-	mu        sync.RWMutex
-}
-
-func (r *Store) Live() bool {
-	return r.connected.Load() == 1
-}
-
-func (r *Store) Ready() bool {
-	return r.connected.Load() == 1
-}
-
-func (r *Store) Health() bool {
-	return r.connected.Load() == 1
+	opts store.Options
+	cli  *wrappedClient
+	done chan struct{}
+	pool pool.Pool[*strings.Builder]
+}
+
+type wrappedClient struct {
+	*redis.Client
+	*redis.ClusterClient
 }
 
 func (r *Store) Connect(ctx context.Context) error {
-	if r.connected.Load() == 1 {
-		return nil
-	}
-
-	if r.cli == nil {
-		return store.ErrNotConnected
-	}
-
-	if r.opts.LazyConnect {
-		return nil
-	}
-
-	if err := r.cli.Ping(ctx).Err(); err != nil {
-		setSpanError(ctx, err)
-		return err
-	}
-
-	r.connected.Store(1)
-
-	return nil
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Ping(ctx).Err()
+	}
+	err = r.cli.ClusterClient.Ping(ctx).Err()
+	setSpanError(ctx, err)
+	return err
 }
 
 func (r *Store) Init(opts ...store.Option) error {
-	err := r.configure(opts...)
+	for _, o := range opts {
+		o(&r.opts)
+	}
+	err := r.configure()
 	if err != nil {
 		return err
 	}
@@ -106,45 +77,38 @@ func (r *Store) Init(opts ...store.Option) error {
 	return nil
 }
 
-func (r *Store) Client() *goredis.Client {
-	if c, ok := r.cli.(*goredis.Client); ok {
-		return c
+func (r *Store) Client() *redis.Client {
+	if r.cli.Client != nil {
+		return r.cli.Client
 	}
 	return nil
 }
 
-func (r *Store) UniversalClient() goredis.UniversalClient {
-	return r.cli
-}
-
-func (r *Store) ClusterClient() *goredis.ClusterClient {
-	if c, ok := r.cli.(*goredis.ClusterClient); ok {
-		return c
+func (r *Store) ClusterClient() *redis.ClusterClient {
+	if r.cli.ClusterClient != nil {
+		return r.cli.ClusterClient
 	}
 	return nil
 }
 
 func (r *Store) Disconnect(ctx context.Context) error {
-	if r.connected.Load() == 0 {
-		return nil
-	}
-
-	if r.cli != nil {
-		if err := r.cli.Close(); err != nil {
-			return err
-		}
-	}
-
-	r.connected.Store(1)
-
-	return nil
+	var err error
+	select {
+	case <-r.done:
+		return err
+	default:
+		if r.cli.Client != nil {
+			err = r.cli.Client.Close()
+		} else if r.cli.ClusterClient != nil {
+			err = r.cli.ClusterClient.Close()
+		}
+		close(r.done)
+		return err
+	}
 }
 
 func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOption) error {
-	b := r.pool.Get()
-	defer r.pool.Put(b)
-
 	options := store.NewExistsOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "exists")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -157,24 +121,29 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOpti
 		defer cancel()
 	}
 
-	rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
 
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	val, err := r.cli.Exists(ctx, rkey).Result()
+	var err error
+	var val int64
+	if r.cli.Client != nil {
+		val, err = r.cli.Client.Exists(ctx, rkey).Result()
+	} else {
+		val, err = r.cli.ClusterClient.Exists(ctx, rkey).Result()
+	}
 	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
-	if errors.Is(err, goredis.Nil) || (err == nil && val == 0) {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil || (err == nil && val == 0) {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
-	} else {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
+	} else if err != nil {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -182,12 +151,7 @@ func (r *Store) Exists(ctx context.Context, key string, opts ...store.ExistsOpti
 }
 
 func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...store.ReadOption) error {
-	b := r.pool.Get()
-	defer r.pool.Put(b)
-
 	options := store.NewReadOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "read")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -200,23 +164,29 @@ func (r *Store) Read(ctx context.Context, key string, val interface{}, opts ...s
 		defer cancel()
 	}
 
-	rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
 
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	buf, err := r.cli.Get(ctx, rkey).Bytes()
+	var buf []byte
+	var err error
+	if r.cli.Client != nil {
+		buf, err = r.cli.Client.Get(ctx, rkey).Bytes()
+	} else {
+		buf, err = r.cli.ClusterClient.Get(ctx, rkey).Bytes()
+	}
 	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
-	if errors.Is(err, goredis.Nil) || (err == nil && buf == nil) {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil || (err == nil && buf == nil) {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -249,14 +219,10 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
 	}
 
 	var rkeys []string
-	var pools []*strings.Builder
 	if r.opts.Namespace != "" || options.Namespace != "" {
 		rkeys = make([]string, len(keys))
-		pools = make([]*strings.Builder, len(keys))
 		for idx, key := range keys {
-			b := r.pool.Get()
-			pools[idx] = b
-			rkeys[idx] = r.getKey(b, r.opts.Namespace, options.Namespace, key)
+			rkeys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
 		}
 	}
@@ -265,19 +231,24 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
 	var rvals []interface{}
 	var err error
 	if r.opts.Namespace != "" || options.Namespace != "" {
-		rvals, err = r.cli.MGet(ctx, rkeys...).Result()
-		for idx := range pools {
-			r.pool.Put(pools[idx])
+		if r.cli.Client != nil {
+			rvals, err = r.cli.Client.MGet(ctx, rkeys...).Result()
+		} else {
+			rvals, err = r.cli.ClusterClient.MGet(ctx, rkeys...).Result()
 		}
 	} else {
-		rvals, err = r.cli.MGet(ctx, keys...).Result()
+		if r.cli.Client != nil {
+			rvals, err = r.cli.Client.MGet(ctx, keys...).Result()
+		} else {
+			rvals, err = r.cli.ClusterClient.MGet(ctx, keys...).Result()
+		}
 	}
 	setSpanError(ctx, err)
 	te := time.Since(ts)
 	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
 	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
 	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
-	if err == goredis.Nil || (len(rvals) == 0) {
+	if err == redis.Nil || (len(rvals) == 0) {
 		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
@@ -335,8 +306,6 @@ func (r *Store) MRead(ctx context.Context, keys []string, vals interface{}, opts
 func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.DeleteOption) error {
 	options := store.NewDeleteOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "delete")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -350,40 +319,41 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.Delete
 	}
 
 	var rkeys []string
-	var pools []*strings.Builder
 	if r.opts.Namespace != "" || options.Namespace != "" {
 		rkeys = make([]string, len(keys))
-		pools = make([]*strings.Builder, len(keys))
 		for idx, key := range keys {
-			b := r.pool.Get()
-			pools[idx] = b
-			rkeys[idx] = r.getKey(b, r.opts.Namespace, options.Namespace, key)
+			rkeys[idx] = r.getKey(r.opts.Namespace, options.Namespace, key)
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
 	var err error
 	if r.opts.Namespace != "" || options.Namespace != "" {
-		err = r.cli.Del(ctx, rkeys...).Err()
-		for idx := range pools {
-			r.pool.Put(pools[idx])
+		if r.cli.Client != nil {
+			err = r.cli.Client.Del(ctx, rkeys...).Err()
+		} else {
+			err = r.cli.ClusterClient.Del(ctx, rkeys...).Err()
 		}
 	} else {
-		err = r.cli.Del(ctx, keys...).Err()
+		if r.cli.Client != nil {
+			err = r.cli.Client.Del(ctx, keys...).Err()
+		} else {
+			err = r.cli.ClusterClient.Del(ctx, keys...).Err()
+		}
 	}
 	setSpanError(ctx, err)
 	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
-	if err == goredis.Nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -391,12 +361,7 @@ func (r *Store) MDelete(ctx context.Context, keys []string, opts ...store.Delete
 }
 
 func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOption) error {
-	b := r.pool.Get()
-	defer r.pool.Put(b)
-
 	options := store.NewDeleteOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "delete")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -409,22 +374,26 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOpti
 		defer cancel()
 	}
 
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
 	ts := time.Now()
-	err := r.cli.Del(ctx, r.getKey(b, r.opts.Namespace, options.Namespace, key)).Err()
-	te := time.Since(ts)
+	var err error
+	if r.cli.Client != nil {
+		err = r.cli.Client.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	} else {
+		err = r.cli.ClusterClient.Del(ctx, r.getKey(r.opts.Namespace, options.Namespace, key)).Err()
+	}
 	setSpanError(ctx, err)
+	te := time.Since(ts)
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
-	if errors.Is(err, goredis.Nil) {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
@@ -433,8 +402,6 @@ func (r *Store) Delete(ctx context.Context, key string, opts ...store.DeleteOpti
 func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, opts ...store.WriteOption) error {
 	options := store.NewWriteOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "write")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -448,11 +415,9 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
 	}
 
 	kvs := make([]string, 0, len(keys)*2)
-	pools := make([]*strings.Builder, len(keys))
 
 	for idx, key := range keys {
-		b := r.pool.Get()
-		pools[idx] = b
-		kvs = append(kvs, r.getKey(b, r.opts.Namespace, options.Namespace, key))
+		kvs = append(kvs, r.getKey(r.opts.Namespace, options.Namespace, key))
 
 		switch vt := vals[idx].(type) {
 		case string:
@@ -468,9 +433,10 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
 		}
 	}
 
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
+	ts := time.Now()
 
-	pipeliner := func(pipe goredis.Pipeliner) error {
+	pipeliner := func(pipe redis.Pipeliner) error {
 		for idx := 0; idx < len(kvs); idx += 2 {
 			if _, err := pipe.Set(ctx, kvs[idx], kvs[idx+1], options.TTL).Result(); err != nil {
 				setSpanError(ctx, err)
@@ -480,35 +446,37 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
 		return nil
 	}
 
-	ts := time.Now()
-	cmds, err := r.cli.Pipelined(ctx, pipeliner)
-	for idx := range pools {
-		r.pool.Put(pools[idx])
-	}
-	te := time.Since(ts)
-	setSpanError(ctx, err)
-
-	r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
-	if err == goredis.Nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+	var err error
+	var cmds []redis.Cmder
+
+	if r.cli.Client != nil {
+		cmds, err = r.cli.Client.Pipelined(ctx, pipeliner)
+	} else {
+		cmds, err = r.cli.ClusterClient.Pipelined(ctx, pipeliner)
+	}
+	setSpanError(ctx, err)
+	te := time.Since(ts)
+	r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+	r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+	r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
+	if err == redis.Nil {
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 		return store.ErrNotFound
 	} else if err == nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
 	} else if err != nil {
-		r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+		r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 		return err
 	}
 
 	for _, cmd := range cmds {
 		if err = cmd.Err(); err != nil {
-			if err == goredis.Nil {
-				r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+			if err == redis.Nil {
+				r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
 				return store.ErrNotFound
 			}
 			setSpanError(ctx, err)
-			r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+			r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
 			return err
 		}
 	}
@@ -517,12 +485,7 @@ func (r *Store) MWrite(ctx context.Context, keys []string, vals []interface{}, o
 }
 
 func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...store.WriteOption) error {
-	b := r.pool.Get()
-	defer r.pool.Put(b)
-
 	options := store.NewWriteOptions(opts...)
-	labels := make([]string, 0, 6)
-	labels = append(labels, "name", options.Name, "statement", "write")
 
 	timeout := r.opts.Timeout
 	if options.Timeout > 0 {
@@ -535,7 +498,7 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
 		defer cancel()
 	}
 
-	rkey := r.getKey(b, r.opts.Namespace, options.Namespace, key)
+	rkey := r.getKey(r.opts.Namespace, options.Namespace, key)
 
 	var buf []byte
 	switch vt := val.(type) {
@@ -551,22 +514,26 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
        }
    }
-    r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+    r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
    ts := time.Now()
-    err := r.cli.Set(ctx, rkey, buf, options.TTL).Err()
-    te := time.Since(ts)
+    var err error
+    if r.cli.Client != nil {
+        err = r.cli.Client.Set(ctx, rkey, buf, options.TTL).Err()
+    } else {
+        err = r.cli.ClusterClient.Set(ctx, rkey, buf, options.TTL).Err()
+    }
    setSpanError(ctx, err)
+    te := time.Since(ts)
-    r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-    r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-    r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
+    r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+    r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+    r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
-    if errors.Is(err, goredis.Nil) {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+    if err == redis.Nil {
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
        return store.ErrNotFound
    } else if err == nil {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
-    } else {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
+    } else if err != nil {
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
        return err
    }
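Note: the v3 side dispatches through r.cli.Client and r.cli.ClusterClient. The wrappedClient type itself is defined outside the hunks shown here, so this is only a sketch of the shape those call sites imply (field layout assumed, not confirmed by this diff):

    // Sketch: after configure(), exactly one embedded client is non-nil.
    type wrappedClient struct {
        *redis.Client        // single-node mode
        *redis.ClusterClient // cluster mode
    }

With embedded fields, r.cli.Client and r.cli.ClusterClient select the respective client, which is how the Set call above picks its target.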
@@ -574,18 +541,12 @@ func (r *Store) Write(ctx context.Context, key string, val interface{}, opts ...
}

func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, error) {
-    b := r.pool.Get()
-    defer r.pool.Put(b)
    options := store.NewListOptions(opts...)
-    labels := make([]string, 0, 6)
-    labels = append(labels, "name", options.Name, "statement", "list")
    if len(options.Namespace) == 0 {
        options.Namespace = r.opts.Namespace
    }
-    rkey := r.getKey(b, options.Namespace, "", options.Prefix+"*")
+    rkey := r.getKey(options.Namespace, "", options.Prefix+"*")
    if options.Suffix != "" {
        rkey += options.Suffix
    }
@@ -602,13 +563,15 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, e
    }
    // TODO: add support for prefix/suffix/limit
-    r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Inc()
+    r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Inc()
    ts := time.Now()
    var keys []string
    var err error
-    if c, ok := r.cli.(*goredis.ClusterClient); ok {
-        err = c.ForEachMaster(ctx, func(nctx context.Context, cli *goredis.Client) error {
+    if r.cli.Client != nil {
+        keys, err = r.cli.Client.Keys(ctx, rkey).Result()
+    } else {
+        err = r.cli.ClusterClient.ForEachMaster(ctx, func(nctx context.Context, cli *redis.Client) error {
            nkeys, nerr := cli.Keys(nctx, rkey).Result()
            if nerr != nil {
                return nerr
@@ -616,22 +579,19 @@ func (r *Store) List(ctx context.Context, opts ...store.ListOption) ([]string, e
            keys = append(keys, nkeys...)
            return nil
        })
-    } else {
-        keys, err = r.cli.Keys(ctx, rkey).Result()
    }
-    te := time.Since(ts)
    setSpanError(ctx, err)
+    te := time.Since(ts)
-    r.opts.Meter.Counter(semconv.StoreRequestInflight, labels...).Dec()
-    r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, labels...).Update(te.Seconds())
-    r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, labels...).Update(te.Seconds())
+    r.opts.Meter.Counter(semconv.StoreRequestInflight, "name", options.Name).Dec()
+    r.opts.Meter.Summary(semconv.StoreRequestLatencyMicroseconds, "name", options.Name).Update(te.Seconds())
+    r.opts.Meter.Histogram(semconv.StoreRequestDurationSeconds, "name", options.Name).Update(te.Seconds())
-    if err == goredis.Nil {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "miss")...).Inc()
+    if err == redis.Nil {
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "miss").Inc()
        return nil, store.ErrNotFound
    } else if err == nil {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "hit")...).Inc()
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "hit").Inc()
    } else if err != nil {
-        r.opts.Meter.Counter(semconv.StoreRequestTotal, append(labels, "status", "failure")...).Inc()
+        r.opts.Meter.Counter(semconv.StoreRequestTotal, "name", options.Name, "status", "failure").Inc()
        return nil, err
    }
@@ -663,243 +623,90 @@ func (r *Store) String() string {
}

func NewStore(opts ...store.Option) *Store {
-    b := atomic.Uint32{}
-    return &Store{
-        opts:      store.NewOptions(opts...),
-        connected: &b,
-        watchers:  make(map[string]*watcher),
-    }
+    return &Store{done: make(chan struct{}), opts: store.NewOptions(opts...)}
}
-func (r *Store) configure(opts ...store.Option) error {
-    if r.cli != nil && len(opts) == 0 {
+func (r *Store) configure() error {
+    var redisOptions *redis.Options
+    var redisClusterOptions *redis.ClusterOptions
+    var err error
+    nodes := r.opts.Addrs
+    if len(nodes) == 0 {
+        nodes = []string{"redis://127.0.0.1:6379"}
+    }
+    if r.cli != nil && r.opts.Context == nil {
        return nil
    }
-    for _, o := range opts {
-        o(&r.opts)
-    }
-    universalOptions := DefaultUniversalOptions
    if r.opts.Context != nil {
-        if o, ok := r.opts.Context.Value(configKey{}).(*goredis.Options); ok {
-            universalOptions.Addrs = []string{o.Addr}
-            universalOptions.Dialer = o.Dialer
-            universalOptions.OnConnect = o.OnConnect
-            universalOptions.Username = o.Username
-            universalOptions.Password = o.Password
-            universalOptions.MaxRetries = o.MaxRetries
-            universalOptions.MinRetryBackoff = o.MinRetryBackoff
-            universalOptions.MaxRetryBackoff = o.MaxRetryBackoff
-            universalOptions.DialTimeout = o.DialTimeout
-            universalOptions.ReadTimeout = o.ReadTimeout
-            universalOptions.WriteTimeout = o.WriteTimeout
-            universalOptions.ContextTimeoutEnabled = o.ContextTimeoutEnabled
-            universalOptions.PoolFIFO = o.PoolFIFO
-            universalOptions.PoolSize = o.PoolSize
-            universalOptions.PoolTimeout = o.PoolTimeout
-            universalOptions.MinIdleConns = o.MinIdleConns
-            universalOptions.MaxIdleConns = o.MaxIdleConns
-            universalOptions.ConnMaxIdleTime = o.ConnMaxIdleTime
-            universalOptions.ConnMaxLifetime = o.ConnMaxLifetime
-            universalOptions.TLSConfig = o.TLSConfig
+        if c, ok := r.opts.Context.Value(configKey{}).(*redis.Options); ok {
+            redisOptions = c
+            if r.opts.TLSConfig != nil {
+                redisOptions.TLSConfig = r.opts.TLSConfig
+            }
        }
-        if o, ok := r.opts.Context.Value(clusterConfigKey{}).(*goredis.ClusterOptions); ok {
-            universalOptions.Addrs = o.Addrs
-            universalOptions.Dialer = o.Dialer
-            universalOptions.OnConnect = o.OnConnect
-            universalOptions.Username = o.Username
-            universalOptions.Password = o.Password
-            universalOptions.MaxRedirects = o.MaxRedirects
-            universalOptions.ReadOnly = o.ReadOnly
-            universalOptions.RouteByLatency = o.RouteByLatency
-            universalOptions.RouteRandomly = o.RouteRandomly
-            universalOptions.MaxRetries = o.MaxRetries
-            universalOptions.MinRetryBackoff = o.MinRetryBackoff
-            universalOptions.MaxRetryBackoff = o.MaxRetryBackoff
-            universalOptions.DialTimeout = o.DialTimeout
-            universalOptions.ReadTimeout = o.ReadTimeout
-            universalOptions.WriteTimeout = o.WriteTimeout
-            universalOptions.ContextTimeoutEnabled = o.ContextTimeoutEnabled
-            universalOptions.PoolFIFO = o.PoolFIFO
-            universalOptions.PoolSize = o.PoolSize
-            universalOptions.PoolTimeout = o.PoolTimeout
-            universalOptions.MinIdleConns = o.MinIdleConns
-            universalOptions.MaxIdleConns = o.MaxIdleConns
-            universalOptions.ConnMaxIdleTime = o.ConnMaxIdleTime
-            universalOptions.ConnMaxLifetime = o.ConnMaxLifetime
-            universalOptions.TLSConfig = o.TLSConfig
-        }
-        if o, ok := r.opts.Context.Value(failoverConfigKey{}).(*goredis.FailoverOptions); ok {
-            universalOptions.ClientName = o.ClientName
-            universalOptions.MasterName = o.MasterName
-            universalOptions.Addrs = o.SentinelAddrs
-            universalOptions.Dialer = o.Dialer
-            universalOptions.OnConnect = o.OnConnect
-            universalOptions.Username = o.Username
-            universalOptions.Password = o.Password
-            universalOptions.SentinelUsername = o.SentinelUsername
-            universalOptions.SentinelPassword = o.SentinelPassword
-            universalOptions.ReadOnly = o.ReplicaOnly
-            universalOptions.RouteByLatency = o.RouteByLatency
-            universalOptions.RouteRandomly = o.RouteRandomly
-            universalOptions.MaxRetries = o.MaxRetries
-            universalOptions.MinRetryBackoff = o.MinRetryBackoff
-            universalOptions.MaxRetryBackoff = o.MaxRetryBackoff
-            universalOptions.DialTimeout = o.DialTimeout
-            universalOptions.ReadTimeout = o.ReadTimeout
-            universalOptions.WriteTimeout = o.WriteTimeout
-            universalOptions.ContextTimeoutEnabled = o.ContextTimeoutEnabled
-            universalOptions.PoolFIFO = o.PoolFIFO
-            universalOptions.PoolSize = o.PoolSize
-            universalOptions.PoolTimeout = o.PoolTimeout
-            universalOptions.MinIdleConns = o.MinIdleConns
-            universalOptions.MaxIdleConns = o.MaxIdleConns
-            universalOptions.ConnMaxIdleTime = o.ConnMaxIdleTime
-            universalOptions.ConnMaxLifetime = o.ConnMaxLifetime
-            universalOptions.TLSConfig = o.TLSConfig
-        }
-        if o, ok := r.opts.Context.Value(universalConfigKey{}).(*goredis.UniversalOptions); ok {
-            universalOptions = o
+        if c, ok := r.opts.Context.Value(clusterConfigKey{}).(*redis.ClusterOptions); ok {
+            redisClusterOptions = c
+            if r.opts.TLSConfig != nil {
+                redisClusterOptions.TLSConfig = r.opts.TLSConfig
+            }
        }
    }
-    if len(r.opts.Addrs) > 0 {
-        universalOptions.Addrs = r.opts.Addrs
+    if redisOptions != nil && redisClusterOptions != nil {
+        return fmt.Errorf("must specify only one option Config or ClusterConfig")
    }
-    if len(universalOptions.Addrs) == 0 {
-        universalOptions.Addrs = []string{"127.0.0.1:6379"}
+    if redisOptions == nil && redisClusterOptions == nil && r.cli != nil {
+        return nil
    }
-    r.cli = goredis.NewUniversalClient(universalOptions)
-    setTracing(r.cli, r.opts.Tracer)
-    r.cli.AddHook(newEventHook(r))
-    r.pool = pool.NewStringsPool(50)
+    if redisOptions == nil && redisClusterOptions == nil && len(nodes) == 1 {
+        redisOptions, err = redis.ParseURL(nodes[0])
+        if err != nil {
+            redisOptions = DefaultOptions
+            redisOptions.Addr = r.opts.Addrs[0]
+            redisOptions.TLSConfig = r.opts.TLSConfig
+        }
+    } else if redisOptions == nil && redisClusterOptions == nil && len(nodes) > 1 {
+        redisClusterOptions = DefaultClusterOptions
+        redisClusterOptions.Addrs = r.opts.Addrs
+        redisClusterOptions.TLSConfig = r.opts.TLSConfig
+    }
+    if redisOptions != nil {
+        c := redis.NewClient(redisOptions)
+        setTracing(c, r.opts.Tracer)
+        r.cli = &wrappedClient{Client: c}
+    } else if redisClusterOptions != nil {
+        c := redis.NewClusterClient(redisClusterOptions)
+        setTracing(c, r.opts.Tracer)
+        r.cli = &wrappedClient{ClusterClient: c}
+    }
+    r.pool = pool.NewPool(func() *strings.Builder { return &strings.Builder{} })
    r.statsMeter()
    return nil
}
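Note: on the v3 side a single configured node is treated as a URL first, with a fallback to DefaultOptions when parsing fails. A standalone sketch of just the parsing step, using the public go-redis v9 API (the address is a placeholder):

    package main

    import (
        "fmt"

        redis "github.com/redis/go-redis/v9"
    )

    func main() {
        // redis.ParseURL accepts redis:// and rediss:// URLs; on error,
        // configure() above falls back to DefaultOptions plus a raw Addr.
        opts, err := redis.ParseURL("redis://127.0.0.1:6379")
        if err != nil {
            fmt.Println("not a URL, would fall back to defaults:", err)
            return
        }
        fmt.Println("parsed addr:", opts.Addr)
    }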
-func (r *Store) getKey(b *strings.Builder, mainNamespace string, opNamespace string, key string) string {
+func (r *Store) getKey(mainNamespace string, opNamespace string, key string) string {
+    b := r.pool.Get()
+    defer r.pool.Put(b)
+    b.Reset()
    if opNamespace == "" {
        opNamespace = mainNamespace
    }
    if opNamespace != "" {
        b.WriteString(opNamespace)
-        b.WriteString(r.opts.Separator)
+        b.WriteString(DefaultPathSeparator)
    }
    b.WriteString(key)
    return b.String()
}
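Note: a worked example of the key layout getKey produces, assuming the separator (r.opts.Separator on the v4 side, DefaultPathSeparator on the v3 side; both defined outside this diff) resolves to "/". The v4 variant additionally threads the *strings.Builder in as its first argument:

    getKey("app", "", "user:1")    -> "app/user:1"   // falls back to the main namespace
    getKey("app", "orders", "o-1") -> "orders/o-1"   // the op namespace takes precedence
    getKey("", "", "plain")        -> "plain"        // no namespace, no separator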
-func (r *Store) Watch(ctx context.Context, opts ...store.WatchOption) (store.Watcher, error) {
-    id, err := id.New()
-    if err != nil {
-        return nil, err
-    }
-    wo, err := store.NewWatchOptions(opts...)
-    if err != nil {
-        return nil, err
-    }
-    // construct the watcher
-    w := &watcher{
-        exit: make(chan bool),
-        ch:   make(chan store.Event),
-        id:   id,
-        opts: wo,
-    }
-    r.mu.Lock()
-    r.watchers[w.id] = w
-    r.mu.Unlock()
-    return w, nil
-}
-
-func (r *Store) sendEvent(e store.Event) {
-    r.mu.RLock()
-    watchers := make([]*watcher, 0, len(r.watchers))
-    for _, w := range r.watchers {
-        watchers = append(watchers, w)
-    }
-    r.mu.RUnlock()
-    for _, w := range watchers {
-        select {
-        case <-w.exit:
-            r.mu.Lock()
-            delete(r.watchers, w.id)
-            r.mu.Unlock()
-        default:
-            select {
-            case w.ch <- e:
-            case <-time.After(sendEventTime):
-            }
-        }
-    }
-}
-
-type watcher struct {
-    ch   chan store.Event
-    exit chan bool
-    opts store.WatchOptions
-    id   string
-}
-
-func (w *watcher) Next() (store.Event, error) {
-    for {
-        select {
-        case e := <-w.ch:
-            return e, nil
-        case <-w.exit:
-            return nil, store.ErrWatcherStopped
-        }
-    }
-}
-
-func (w *watcher) Stop() {
-    select {
-    case <-w.exit:
-        return
-    default:
-        close(w.exit)
-    }
-}
-
-type event struct {
-    ts  time.Time
-    t   store.EventType
-    err error
-}
-
-func (e *event) Error() error {
-    return e.err
-}
-
-func (e *event) Timestamp() time.Time {
-    return e.ts
-}
-
-func (e *event) Type() store.EventType {
-    return e.t
-}
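Note: the watcher API deleted above exists only on the v4 side; it would be drained roughly as follows. A sketch that assumes it lives in the same package as Store (so no import path has to be guessed), that store.Event exposes the Type/Timestamp accessors implemented by the event type above, and with Init/Connect elided:

    // Sketch: consume events until the watcher is stopped.
    func consumeEvents(ctx context.Context, r *Store) error {
        w, err := r.Watch(ctx)
        if err != nil {
            return err
        }
        defer w.Stop()
        for {
            ev, err := w.Next() // blocks until sendEvent delivers or Stop() runs
            if err != nil {
                return err // store.ErrWatcherStopped once stopped
            }
            log.Printf("store event %v at %v", ev.Type(), ev.Timestamp())
        }
    }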


@@ -4,36 +4,13 @@ import (
"bytes" "bytes"
"context" "context"
"os" "os"
"sync/atomic"
"testing" "testing"
"time" "time"
goredis "github.com/redis/go-redis/v9" "go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v4/store" "go.unistack.org/micro/v3/tracer"
"go.unistack.org/micro/v4/tracer"
) )
-func TestLazyConnect(t *testing.T) {
-    t.Skip("skipping test for manual check")
-    ctx := context.Background()
-    var err error
-    r := NewStore()
-    if err = r.Init(); err != nil {
-        t.Fatal(err)
-    }
-    if err = r.Connect(ctx); err != nil {
-        t.Logf("connect failed %v", err)
-    }
-    for {
-        if err = r.Write(ctx, "mykey", "myval"); err != nil {
-            t.Logf("failed to write %v", err)
-        }
-    }
-}
-
func TestKeepTTL(t *testing.T) {
    ctx := context.Background()
@@ -42,7 +19,7 @@ func TestKeepTTL(t *testing.T) {
    }
    r := NewStore(store.Addrs(os.Getenv("STORE_NODES")))
-    if err := r.Init(store.LazyConnect(true)); err != nil {
+    if err := r.Init(); err != nil {
        t.Fatal(err)
    }
    if err := r.Connect(ctx); err != nil {
@@ -64,7 +41,7 @@ func TestKeepTTL(t *testing.T) {
func Test_rkv_configure(t *testing.T) {
    type fields struct {
        options store.Options
-        Client  goredis.UniversalClient
+        Client  *wrappedClient
    }
    type wantValues struct {
        username string
@@ -121,11 +98,9 @@ func Test_rkv_configure(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
-            b := atomic.Uint32{}
            rc := &Store{
                opts: tt.fields.options,
                cli:  tt.fields.Client,
-                connected: &b,
            }
            err := rc.configure()
            if (err != nil) != tt.wantErr {


@@ -3,8 +3,7 @@ package redis
import (
    "time"

-    goredis "github.com/redis/go-redis/v9"
-    "go.unistack.org/micro/v4/meter"
+    "github.com/redis/go-redis/v9"
)

var (
@@ -14,36 +13,45 @@ var (
PoolConnTotalCurrent = "pool_conn_total_current" PoolConnTotalCurrent = "pool_conn_total_current"
PoolConnIdleCurrent = "pool_conn_idle_current" PoolConnIdleCurrent = "pool_conn_idle_current"
PoolConnStaleTotal = "pool_conn_stale_total" PoolConnStaleTotal = "pool_conn_stale_total"
meterRequestTotal = "request_total"
meterRequestLatencyMicroseconds = "latency_microseconds"
meterRequestDurationSeconds = "request_duration_seconds"
) )
type Statser interface { type Statser interface {
PoolStats() *goredis.PoolStats PoolStats() *redis.PoolStats
} }
func (r *Store) statsMeter() { func (r *Store) statsMeter() {
var st Statser var st Statser
if r.cli != nil { if r.cli.Client != nil {
st = r.cli st = r.cli.Client
} else if r.cli.ClusterClient != nil {
st = r.cli.ClusterClient
} else { } else {
return return
} }
go func() { go func() {
ticker := time.NewTicker(meter.DefaultMeterStatsInterval) ticker := time.NewTicker(DefaultMeterStatsInterval)
defer ticker.Stop() defer ticker.Stop()
for range ticker.C { for {
if st == nil { select {
return case <-ticker.C:
if st == nil {
return
}
stats := st.PoolStats()
r.opts.Meter.Counter(PoolHitsTotal).Set(uint64(stats.Hits))
r.opts.Meter.Counter(PoolMissesTotal).Set(uint64(stats.Misses))
r.opts.Meter.Counter(PoolTimeoutTotal).Set(uint64(stats.Timeouts))
r.opts.Meter.Counter(PoolConnTotalCurrent).Set(uint64(stats.TotalConns))
r.opts.Meter.Counter(PoolConnIdleCurrent).Set(uint64(stats.IdleConns))
r.opts.Meter.Counter(PoolConnStaleTotal).Set(uint64(stats.StaleConns))
} }
stats := st.PoolStats()
r.opts.Meter.Counter(PoolHitsTotal).Set(uint64(stats.Hits))
r.opts.Meter.Counter(PoolMissesTotal).Set(uint64(stats.Misses))
r.opts.Meter.Counter(PoolTimeoutTotal).Set(uint64(stats.Timeouts))
r.opts.Meter.Counter(PoolConnTotalCurrent).Set(uint64(stats.TotalConns))
r.opts.Meter.Counter(PoolConnIdleCurrent).Set(uint64(stats.IdleConns))
r.opts.Meter.Counter(PoolConnStaleTotal).Set(uint64(stats.StaleConns))
} }
}() }()
} }
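Note: PoolStats comes straight from go-redis; the goroutine above only re-exports its fields through the meter. A minimal standalone read of the same numbers (address is a placeholder):

    package main

    import (
        "fmt"

        redis "github.com/redis/go-redis/v9"
    )

    func main() {
        c := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
        defer c.Close()

        st := c.PoolStats() // the same *redis.PoolStats statsMeter consumes
        fmt.Println(st.Hits, st.Misses, st.Timeouts, st.TotalConns, st.IdleConns, st.StaleConns)
    }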


@@ -7,24 +7,24 @@ import (
"strconv" "strconv"
rediscmd "github.com/redis/go-redis/extra/rediscmd/v9" rediscmd "github.com/redis/go-redis/extra/rediscmd/v9"
goredis "github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"go.unistack.org/micro/v4/tracer" "go.unistack.org/micro/v3/tracer"
) )
func setTracing(rdb goredis.UniversalClient, tr tracer.Tracer, opts ...tracer.SpanOption) { func setTracing(rdb redis.UniversalClient, tr tracer.Tracer, opts ...tracer.SpanOption) {
switch rdb := rdb.(type) { switch rdb := rdb.(type) {
case *goredis.Client: case *redis.Client:
opt := rdb.Options() opt := rdb.Options()
connString := formatDBConnString(opt.Network, opt.Addr) connString := formatDBConnString(opt.Network, opt.Addr)
rdb.AddHook(newTracingHook(connString, tr)) rdb.AddHook(newTracingHook(connString, tr))
case *goredis.ClusterClient: case *redis.ClusterClient:
rdb.OnNewNode(func(rdb *goredis.Client) { rdb.OnNewNode(func(rdb *redis.Client) {
opt := rdb.Options() opt := rdb.Options()
connString := formatDBConnString(opt.Network, opt.Addr) connString := formatDBConnString(opt.Network, opt.Addr)
rdb.AddHook(newTracingHook(connString, tr)) rdb.AddHook(newTracingHook(connString, tr))
}) })
case *goredis.Ring: case *redis.Ring:
rdb.OnNewNode(func(rdb *goredis.Client) { rdb.OnNewNode(func(rdb *redis.Client) {
opt := rdb.Options() opt := rdb.Options()
connString := formatDBConnString(opt.Network, opt.Addr) connString := formatDBConnString(opt.Network, opt.Addr)
rdb.AddHook(newTracingHook(connString, tr)) rdb.AddHook(newTracingHook(connString, tr))
@@ -37,7 +37,7 @@ type tracingHook struct {
    opts []tracer.SpanOption
}

-var _ goredis.Hook = (*tracingHook)(nil)
+var _ redis.Hook = (*tracingHook)(nil)

func newTracingHook(connString string, tr tracer.Tracer, opts ...tracer.SpanOption) *tracingHook {
    opts = append(opts, tracer.WithSpanKind(tracer.SpanKindClient))
@@ -51,10 +51,10 @@ func newTracingHook(connString string, tr tracer.Tracer, opts ...tracer.SpanOpti
    }
}

-func (h *tracingHook) DialHook(hook goredis.DialHook) goredis.DialHook {
+func (h *tracingHook) DialHook(hook redis.DialHook) redis.DialHook {
    return func(ctx context.Context, network, addr string) (net.Conn, error) {
        /*
-            _, span := h.tr.Start(ctx, "goredis.dial", h.opts...)
+            _, span := h.tr.Start(ctx, "redis.dial", h.opts...)
            defer span.Finish()
        */
        conn, err := hook(ctx, network, addr)
@@ -64,8 +64,8 @@ func (h *tracingHook) DialHook(hook goredis.DialHook) goredis.DialHook {
    }
}

-func (h *tracingHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook {
-    return func(ctx context.Context, cmd goredis.Cmder) error {
+func (h *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+    return func(ctx context.Context, cmd redis.Cmder) error {
        cmdString := rediscmd.CmdString(cmd)
        var err error
@@ -73,7 +73,7 @@ func (h *tracingHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook
case "cluster slots": case "cluster slots":
break break
default: default:
_, span := h.tr.Start(ctx, "sdk.database", append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...) _, span := h.tr.Start(ctx, "redis.process", append(h.opts, tracer.WithSpanLabels("db.statement", cmdString))...)
defer func() { defer func() {
recordError(span, err) recordError(span, err)
span.Finish() span.Finish()
@@ -86,16 +86,16 @@ func (h *tracingHook) ProcessHook(hook goredis.ProcessHook) goredis.ProcessHook
    }
}

-func (h *tracingHook) ProcessPipelineHook(hook goredis.ProcessPipelineHook) goredis.ProcessPipelineHook {
-    return func(ctx context.Context, cmds []goredis.Cmder) error {
+func (h *tracingHook) ProcessPipelineHook(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook {
+    return func(ctx context.Context, cmds []redis.Cmder) error {
        _, cmdsString := rediscmd.CmdsString(cmds)
        opts := append(h.opts, tracer.WithSpanLabels(
-            "db.database.num_cmd", strconv.Itoa(len(cmds)),
+            "db.redis.num_cmd", strconv.Itoa(len(cmds)),
            "db.statement", cmdsString,
        ))
-        _, span := h.tr.Start(ctx, "sdk.database", opts...)
+        _, span := h.tr.Start(ctx, "redis.process_pipeline", opts...)
        defer span.Finish()

        err := hook(ctx, cmds)
@@ -106,7 +106,7 @@ func (h *tracingHook) ProcessPipelineHook(hook goredis.ProcessPipelineHook) gore
}

func setSpanError(ctx context.Context, err error) {
-    if err == nil || err == goredis.Nil {
+    if err == nil || err == redis.Nil {
        return
    }
    if sp, ok := tracer.SpanFromContext(ctx); ok && sp != nil {
@@ -115,7 +115,7 @@ func setSpanError(ctx context.Context, err error) {
}

func recordError(span tracer.Span, err error) {
-    if err != nil && err != goredis.Nil {
+    if err != nil && err != redis.Nil {
        span.SetStatus(tracer.SpanStatusError, err.Error())
    }
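Note: both configure branches call setTracing automatically, so manual wiring only makes sense inside this package. A sketch under that assumption; any tracer.Tracer implementation can be passed in:

    // Sketch: attach the hooks defined above to a hand-built client.
    func traceClient(tr tracer.Tracer) *redis.Client {
        c := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // placeholder addr
        setTracing(c, tr)                                            // installs tracingHook via AddHook
        return c
    }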
} }