Compare commits

..

79 Commits

Author SHA1 Message Date
7b9a4924ad Merge pull request 'logger: slog logger' (#266) from slog into master
All checks were successful
/ autoupdate (push) Successful in 1m12s
Reviewed-on: #266
2023-10-14 18:05:46 +03:00
40939e56a3 fixup
All checks were successful
lint / lint (pull_request) Successful in 1m28s
pr / test (pull_request) Successful in 1m11s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-14 17:23:01 +03:00
0df97183f8 tracer: update wrapper
Some checks failed
lint / lint (pull_request) Successful in 1m13s
pr / test (pull_request) Failing after 59s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-14 16:58:40 +03:00
08c40ff7d5 update workflow
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 13:58:39 +03:00
afc6f88852 update workflow
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 13:49:04 +03:00
dbcc2bc262 update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 12:07:51 +03:00
ccffa5cd72 Merge pull request 'Bump github.com/google/uuid from v1.3.0 to v1.3.1' (#265) from pkgdash/go_modules/github.com/google/uuid-v1.3.1 into master
Some checks are pending
/ autoupdate (push) Has started running
Reviewed-on: #265
2023-10-03 00:39:35 +03:00
gitea-actions
3612f639d6 2023-10-02 21:38:45 +00:00
32d0b13f91 update workflow
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 00:32:44 +03:00
4ad61003b9 update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 00:32:33 +03:00
7647a0d3b3 update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 00:32:07 +03:00
c16961e852 update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 00:24:17 +03:00
968feb931c update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-03 00:15:55 +03:00
911a863313 fixup deps
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 02:14:34 +03:00
fcaf323b59 fixup deps
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 02:13:17 +03:00
52fecd1772 fixup deps
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 01:19:37 +03:00
d5f35fe88a fixup deps
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 01:16:31 +03:00
1952053c1b fixup deps
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 00:46:29 +03:00
75b45d7313 fixup deps
All checks were successful
/ autoupdate (push) Successful in 1m10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-02 00:35:21 +03:00
pkgdash
eec350c897 2023-10-01 18:38:57 +00:00
pkgdash
360feb0f41 2023-10-01 18:38:48 +00:00
e1975ab66a fixup deps
Some checks failed
/ autoupdate (push) Failing after 1m12s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-01 21:32:00 +03:00
2504345815 fixup deps
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-01 21:28:47 +03:00
9a95fda93f fixup
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-01 16:55:47 +03:00
7ee309a4ae fixup
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-01 16:46:09 +03:00
a47b6b92c4 fixup
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-10-01 16:28:24 +03:00
8d46a6498d update
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 01:32:47 +03:00
65a1612b51 update
Some checks are pending
/ autoupdate (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:55:27 +03:00
26783b341b update
Some checks failed
/ autoupdate (push) Failing after 37s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:53:53 +03:00
ec21d4f307 update
Some checks failed
/ autoupdate (push) Failing after 53s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:51:52 +03:00
a3ca5ebb97 update
Some checks failed
/ autoupdate (push) Failing after 54s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:49:32 +03:00
f67ee26042 update
Some checks failed
/ autoupdate (push) Failing after 38s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:47:29 +03:00
6b7bba709e fixup autoupdate
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-30 00:27:56 +03:00
8b082b90df disable workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-27 14:42:42 +03:00
b499fe99ca rename cli
All checks were successful
/ autoupdate (push) Successful in 40s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-27 14:27:21 +03:00
178e6e956d updates
Some checks failed
/ autoupdate (push) Failing after 38s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-23 23:43:16 +03:00
a52db1969b add autoupdate
Some checks failed
/ autoupdate (push) Failing after 36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-23 13:35:04 +03:00
27bf1c3d84 add autoupdate
All checks were successful
/ autoupdate (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-23 12:54:15 +03:00
58b687f134 add autoupdate
All checks were successful
/ test_schedule (push) Successful in 4s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-23 12:52:02 +03:00
8a354bd468 add autoupdate
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-22 23:31:14 +03:00
f600195ee2 go mod tidy
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
2023-09-18 10:59:46 +03:00
fd471a89e2 deleted unnecessary operations
Some checks failed
pr / test (pull_request) Has been cancelled
lint / lint (pull_request) Has been cancelled
2023-09-18 10:59:21 +03:00
c346ac43dd add google/slog logger
Some checks failed
lint / lint (pull_request) Has been cancelled
pr / test (pull_request) Has been cancelled
2023-09-16 14:20:02 +03:00
666176138b Merge pull request 'util/reflect: add Equal func with ability to skip some fields' (#243) from util-reflect into master
Reviewed-on: #243
2023-09-12 10:30:48 +03:00
884a96ad32 util/reflect: add Equal func with ability to skip some fields
Some checks failed
lint / lint (pull_request) Successful in 1m18s
pr / test (pull_request) Failing after 1m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-12 10:30:19 +03:00
65d715c9da Merge pull request 'tracer: improve' (#242) from tracer into master
Reviewed-on: #242
2023-09-08 14:01:01 +03:00
247c7f6fa2 tracer: improve
Some checks failed
pr / test (pull_request) Failing after 2m43s
lint / lint (pull_request) Failing after 1m28s
2023-09-08 13:58:11 +03:00
f890f1d8b4 tracer: add missing UniqLabels function
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 15:48:27 +03:00
54b13bbded Merge pull request 'tracer: improve tracing wrapper' (#240) from trracer into master
Reviewed-on: #240
2023-09-01 15:25:45 +03:00
d3f9078bec tracer: improve tracing wrapper
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m40s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 15:25:25 +03:00
fcc29f9eac tracer: update wrapper
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-09-01 08:49:45 +03:00
e724425ce2 Merge pull request 'fix some options' (#237) from fixes into master
Reviewed-on: #237
2023-08-15 07:55:44 +03:00
bc55c2fa6f fix some options
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m39s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-08-15 07:55:26 +03:00
fdd1589122 Merge pull request 'move options to dedicated package' (#234) from options_rewrite into master
Reviewed-on: #234
2023-07-29 00:47:28 +03:00
6f6f850af6 move options to dedicated package
Some checks failed
lint / lint (pull_request) Failing after 1m31s
pr / test (pull_request) Failing after 2m37s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-07-29 00:40:58 +03:00
b1dbd99ce2 Merge pull request 'config/default: add micro:generate uuid/id' (#231) from config-default-gen into master
Reviewed-on: #231
2023-07-13 20:11:12 +03:00
6e24807edf config/default: add micro:generate uuid/id
Some checks failed
lint / lint (pull_request) Failing after 1m28s
pr / test (pull_request) Failing after 2m43s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-07-13 20:10:47 +03:00
654d8fa7e4 Merge pull request 'tracer: add Flush method' (#224) from tracerimp into master
Reviewed-on: #224
2023-07-04 00:24:09 +03:00
dd1a9cd25a tracer: add Flush method
Some checks failed
lint / lint (pull_request) Failing after 1m29s
pr / test (pull_request) Failing after 2m37s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-07-04 00:23:50 +03:00
d463eb20cb Merge pull request 'util/time: ParseDuration fix' (#221) from timefix into master
Reviewed-on: #221
2023-05-29 13:59:59 +03:00
8d5e25f8cf util/time: ParseDuration fix
Some checks failed
lint / lint (pull_request) Successful in 1m8s
pr / test (pull_request) Failing after 1m6s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-29 13:59:30 +03:00
27e8043fed Merge pull request 'util/time: fix duration parsing' (#220) from mastertime into master
Reviewed-on: #220
2023-05-27 23:57:27 +03:00
4e86df1721 util/time: fix duration parsing
Some checks failed
pr / test (pull_request) Failing after 1m6s
lint / lint (pull_request) Successful in 1m10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-27 23:56:51 +03:00
03de1ec38f Merge pull request 'cleanup interfaces for v4' (#217) from v4 into master
Reviewed-on: #217
2023-05-09 20:04:40 +03:00
819ad1117a cleanup interfaces for v4
Some checks failed
lint / lint (pull_request) Successful in 1m9s
pr / test (pull_request) Failing after 1m4s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-05-09 20:04:15 +03:00
a2a383606d Merge pull request 'util/test: update test cases code' (#216) from testcase into master
Reviewed-on: #216
2023-04-28 07:10:40 +03:00
55ce58617b util/test: update test cases code
Some checks failed
lint / lint (pull_request) Successful in 51s
pr / test (pull_request) Failing after 52s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-28 07:10:19 +03:00
0e587d923e Merge pull request 'meter: move metrics handling in broker implementations' (#215) from metrics into master
Reviewed-on: #215
2023-04-27 15:32:56 +03:00
fa0248c80c cleanup
All checks were successful
pr / test (pull_request) Successful in 50s
lint / lint (pull_request) Successful in 49s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-27 15:31:59 +03:00
054bd02b59 meter: move metrics handling in broker implementations
All checks were successful
lint / lint (pull_request) Successful in 1m4s
pr / test (pull_request) Successful in 50s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-27 15:30:55 +03:00
0cf246d2d6 Merge pull request 'util/io: add RedirectStderr' (#214) from io-redirect into master
Reviewed-on: #214
2023-04-24 12:59:31 +03:00
af278bd7d3 util/io: add RedirectStderr
All checks were successful
lint / lint (pull_request) Successful in 46s
pr / test (pull_request) Successful in 50s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-24 12:58:05 +03:00
814b90efe5 Merge pull request 'util/test: export GetCases func' (#213) from GetCases into master
Reviewed-on: #213
2023-04-19 01:23:53 +03:00
e403ae3d8e util/test: export GetCases func
All checks were successful
lint
test
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-19 01:23:34 +03:00
c9816a3957 Merge pull request 'util/test: add helper funcs' (#212) from test into master
Reviewed-on: #212
2023-04-19 00:33:28 +03:00
5691238a6a util/test: add helper funcs
All checks were successful
lint
test
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-18 23:47:12 +03:00
963a0fa7b7 Merge pull request 'gofmt -s code' (#209) from gofmt into master
Reviewed-on: #209
2023-04-11 23:34:41 +03:00
485257035c gofmt -s code
Some checks failed
lint
test
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-11 23:32:58 +03:00
ebd8ddf05b move to v4
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2023-04-11 22:21:25 +03:00
229 changed files with 5315 additions and 33302 deletions

.gitea/pkgdashcli.yaml (new file, +3 lines)

@@ -0,0 +1,3 @@
branches:
- master
- v3

View File

@@ -0,0 +1,24 @@
on:
push:
branches:
- 'main'
- 'master'
- 'v3'
schedule:
- cron: '* * * * *'
#- cron: '@hourly'
jobs:
autoupdate:
runs-on: ubuntu-latest
steps:
- name: setup-go
uses: https://gitea.com/actions/setup-go@v3
with:
go-version: 1.21
- name: checkout
uses: https://gitea.com/actions/checkout@v3
- name: get pkgdashcli
run: GOPROXY=direct GONOSUMDB="git.unistack.org/*" GONOPROXY="git.unistack.org/*" GOBIN=/bin go install git.unistack.org/unistack-org/pkgdash/cmd/pkgdashcli@latest
- name: pkgdashcli check
run: /bin/pkgdashcli check

.gitea/workflows/lint.yml (new file, +24 lines)

@@ -0,0 +1,24 @@
name: lint
on:
pull_request:
branches:
- master
- v3
jobs:
lint:
name: lint
runs-on: ubuntu-latest
steps:
- name: setup-go
uses: https://gitea.com/actions/setup-go@v3
with:
go-version: 1.21
- name: checkout
uses: https://gitea.com/actions/checkout@v3
- name: deps
run: go get -v -d ./...
- name: lint
uses: https://github.com/golangci/golangci-lint-action@v3.4.0
continue-on-error: true
with:
version: v1.52

.gitea/workflows/pr.yml (new file, +23 lines)

@@ -0,0 +1,23 @@
name: pr
on:
pull_request:
branches:
- master
- v3
jobs:
test:
name: test
runs-on: ubuntu-latest
steps:
- name: checkout
uses: https://gitea.com/actions/checkout@v3
- name: setup-go
uses: https://gitea.com/actions/setup-go@v3
with:
go-version: 1.21
- name: deps
run: go get -v -t -d ./...
- name: test
env:
INTEGRATION_TESTS: yes
run: go test -mod readonly -v ./...
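
The pr workflow above sets INTEGRATION_TESTS=yes before invoking go test. As a minimal sketch (a hypothetical test file, not part of this repository), a test can gate its heavier cases on that variable so they only run when the workflow sets it:

// integration_example_test.go — hypothetical; gated on the env var set by the pr workflow
package example

import (
	"os"
	"testing"
)

func TestIntegration(t *testing.T) {
	if os.Getenv("INTEGRATION_TESTS") != "yes" {
		t.Skip("set INTEGRATION_TESTS=yes to run integration tests")
	}
	// exercise code paths that need external dependencies here
}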

View File

@@ -1,53 +0,0 @@
name: coverage
on:
push:
branches: [ main, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
pull_request:
branches: [ main, v3, v4 ]
jobs:
build:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: checkout code
uses: actions/checkout@v4
with:
filter: 'blob:none'
- name: setup go
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
- name: test coverage
run: |
go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./...
go tool cover -func coverage.out -o coverage.out
- name: coverage badge
uses: tj-actions/coverage-badge-go@v2
with:
green: 80
filename: coverage.out
- uses: stefanzweifel/git-auto-commit-action@v4
name: autocommit
with:
commit_message: Apply Code Coverage Badge
skip_fetch: false
skip_checkout: false
file_pattern: ./README.md
- name: push
if: steps.auto-commit-action.outputs.changes_detected == 'true'
uses: ad-m/github-push-action@master
with:
github_token: ${{ github.token }}
branch: ${{ github.ref }}

View File

@@ -1,29 +0,0 @@
name: lint
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: checkout code
uses: actions/checkout@v4
with:
filter: 'blob:none'
- name: setup go
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
- name: setup deps
run: go get -v ./...
- name: run lint
uses: golangci/golangci-lint-action@v6
with:
version: 'latest'

View File

@@ -1,94 +0,0 @@
name: sync
on:
schedule:
- cron: '*/5 * * * *'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
sync:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: init
run: |
git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
git config --global user.name "github-actions[bot]"
echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
- name: check master
id: check_master
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync master
if: steps.check_master.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream master
git push upstream master --progress
git push origin master --progress
cd ../
rm -rf repo
- name: check v3
id: check_v3
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v3
if: steps.check_v3.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v3
git push upstream v3 --progress
git push origin v3 --progress
cd ../
rm -rf repo
- name: check v4
id: check_v4
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v4
if: steps.check_v4.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v4
git push upstream v4 --progress
git push origin v4 --progress
cd ../
rm -rf repo

View File

@@ -1,31 +0,0 @@
name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
push:
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: checkout code
uses: actions/checkout@v4
with:
filter: 'blob:none'
- name: setup go
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
- name: setup deps
run: go get -v ./...
- name: run test
env:
INTEGRATION_TESTS: yes
run: go test -mod readonly -v ./...

View File

@@ -1,50 +0,0 @@
name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
push:
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: checkout code
uses: actions/checkout@v4
with:
filter: 'blob:none'
- name: checkout tests
uses: actions/checkout@v4
with:
ref: master
filter: 'blob:none'
repository: unistack-org/micro-tests
path: micro-tests
- name: setup go
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
- name: setup go work
env:
GOWORK: ${{ github.workspace }}/go.work
run: |
go work init
go work use .
go work use micro-tests
- name: setup deps
env:
GOWORK: ${{ github.workspace }}/go.work
run: go get -v ./...
- name: run tests
env:
INTEGRATION_TESTS: yes
GOWORK: ${{ github.workspace }}/go.work
run: |
cd micro-tests
go test -mod readonly -v ./... || true

.gitignore (vendored, 3 changed lines)

@@ -1,8 +1,6 @@
# Develop tools
/.vscode/
/.idea/
.idea
.vscode
# Binaries for programs and plugins
*.exe
@@ -15,7 +13,6 @@
_obj
_test
_build
.DS_Store
# Architecture specific extensions/prefixes
*.[568vq]

View File

@@ -1,5 +1,44 @@
run:
concurrency: 8
timeout: 5m
concurrency: 4
deadline: 5m
issues-exit-code: 1
tests: true
linters-settings:
govet:
check-shadowing: true
enable:
- fieldalignment
linters:
enable:
- govet
- deadcode
- errcheck
- govet
- ineffassign
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
- bodyclose
- gci
- goconst
- gocritic
- gosimple
- gofmt
- gofumpt
- goimports
- revive
- gosec
- makezero
- misspell
- nakedret
- nestif
- nilerr
- noctx
- prealloc
- unconvert
- unparam
disable-all: false
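
One side of this config enables govet's fieldalignment analyzer. A minimal sketch (hypothetical structs, not from the repository) of what that check reports, since ordering fields from largest to smallest reduces padding:

package example

// padded would be flagged by fieldalignment: the bool/int64/bool ordering
// costs 14 padding bytes on 64-bit platforms (24 bytes total).
type padded struct {
	a bool
	b int64
	c bool
}

// packed is the suggested layout with the largest field first
// (16 bytes total, only trailing padding remains).
type packed struct {
	b int64
	a bool
	c bool
}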

View File

@@ -1,9 +1,4 @@
# Micro
![Coverage](https://img.shields.io/badge/Coverage-33.6%25-yellow)
[![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
[![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v4)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)
[![Lint](https://goreportcard.com/badge/go.unistack.org/micro/v4)](https://goreportcard.com/report/go.unistack.org/micro/v4)
# Micro [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview) [![Status](https://github.com/unistack-org/micro/workflows/build/badge.svg?branch=master)](https://github.com/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Amaster+event%3Apush) [![Lint](https://goreportcard.com/badge/go.unistack.org/micro/v4)](https://goreportcard.com/report/go.unistack.org/micro/v4) [![Coverage](https://codecov.io/gh/unistack-org/micro/branch/v4/graph/badge.svg?token=OZPO2LP7VS)](https://codecov.io/gh/unistack-org/micro)
Micro is a standard library for microservices.
@@ -15,20 +10,30 @@ Micro provides the core requirements for distributed systems development including
Micro abstracts away the details of distributed systems. Here are the main features.
- **Authentication** - Auth is built in as a first class citizen. Authentication and authorization enable secure
zero trust networking by providing every service an identity and certificates. This additionally includes rule
based access control.
- **Dynamic Config** - Load and hot reload dynamic config from anywhere. The config interface provides a way to load application
level config from any source such as env vars, cmdline, file, consul, vault... You can merge the sources and even define fallbacks.
level config from any source such as env vars, file, etcd. You can merge the sources and even define fallbacks.
- **Data Storage** - A simple data store interface to read, write and delete records. It includes support for memory, file and
s3. State and persistence becomes a core requirement beyond prototyping and Micro looks to build that into the framework.
CockroachDB by default. State and persistence becomes a core requirement beyond prototyping and Micro looks to build that into the framework.
- **Service Discovery** - Automatic service registration and name resolution. Service discovery is at the core of micro service
development. When service A needs to speak to service B it needs the location of that service.
- **Load Balancing** - Client side load balancing built on service discovery. Once we have the addresses of any number of instances
of a service we now need a way to decide which node to route to. We use random hashed load balancing to provide even distribution
across the services and retry a different node if there's a problem.
- **Message Encoding** - Dynamic message encoding based on content-type. The client and server will use codecs along with content-type
to seamlessly encode and decode Go types for you. Any variety of messages could be encoded and sent from different clients. The client
and server handle this by default.
- **Async Messaging** - Pub/Sub is built in as a first class citizen for asynchronous communication and event driven architectures.
- **Transport** - gRPC or http based request/response with support for bidirectional streaming. We provide an abstraction for synchronous communication. A request made to a service will be automatically resolved, load balanced, dialled and streamed.
- **Async Messaging** - PubSub is built in as a first class citizen for asynchronous communication and event driven architectures.
Event notifications are a core pattern in micro service development.
- **Synchronization** - Distributed systems are often built in an eventually consistent manner. Support for distributed locking and
@@ -37,6 +42,10 @@ leadership are built in as a Sync interface. When using an eventually consistent
- **Pluggable Interfaces** - Micro makes use of Go interfaces for each system abstraction. Because of this these interfaces
are pluggable and allows Micro to be runtime agnostic.
## Getting Started
To be created.
## License
Micro is Apache 2.0 licensed.

SECURITY.md (new file, +15 lines)

@@ -0,0 +1,15 @@
# Security Policy
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 3.7.x | :white_check_mark: |
| < 3.7.0 | :x: |
## Reporting a Vulnerability
If you find any issue, please create a GitHub issue in this repo

View File

@@ -1,13 +1,12 @@
// Package broker is an interface used for asynchronous messaging
package broker
package broker // import "go.unistack.org/micro/v4/broker"
import (
"context"
"errors"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
)
// DefaultBroker default memory broker
@@ -18,12 +17,8 @@ var (
ErrNotConnected = errors.New("broker not connected")
// ErrDisconnected returns when broker disconnected
ErrDisconnected = errors.New("broker disconnected")
// ErrInvalidMessage returns when invalid Message passed
ErrInvalidMessage = errors.New("invalid message")
// ErrInvalidHandler returns when subscriber passed to Subscribe
ErrInvalidHandler = errors.New("invalid handler, only func(Message) error and func([]Message) error supported")
// DefaultGracefulTimeout
DefaultGracefulTimeout = 5 * time.Second
// ErrInvalidMessage returns when message has invalid format
ErrInvalidMessage = errors.New("broker message has invalid format")
)
// Broker is an interface used for asynchronous messaging.
@@ -31,7 +26,7 @@ type Broker interface {
// Name returns broker instance name
Name() string
// Init initializes broker
Init(opts ...Option) error
Init(opts ...options.Option) error
// Options returns broker options
Options() Options
// Address return configured address
@@ -40,44 +35,29 @@ type Broker interface {
Connect(ctx context.Context) error
// Disconnect disconnect from broker
Disconnect(ctx context.Context) error
// NewMessage create new broker message to publish.
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error)
// Publish message to broker topic
Publish(ctx context.Context, topic string, messages ...Message) error
// Publish message, msg can be single broker.Message or []broker.Message
Publish(ctx context.Context, msg interface{}, opts ...options.Option) error
// Subscribe subscribes to topic message via handler
Subscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error)
Subscribe(ctx context.Context, topic string, handler interface{}, opts ...options.Option) (Subscriber, error)
// String type of broker
String() string
// Live returns broker liveness
Live() bool
// Ready returns broker readiness
Ready() bool
// Health returns broker health
Health() bool
}
type (
FuncPublish func(ctx context.Context, topic string, messages ...Message) error
HookPublish func(next FuncPublish) FuncPublish
FuncSubscribe func(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error)
HookSubscribe func(next FuncSubscribe) FuncSubscribe
)
// Message is given to a subscription handler for processing
type Message interface {
// Context for the message.
// Context for the message
Context() context.Context
// Topic returns message destination topic.
// Topic
Topic() string
// Header returns message headers.
// Header returns message headers
Header() metadata.Metadata
// Body returns broker message []byte slice
Body() []byte
// Unmarshal try to decode message body to dst.
// This is helper method that uses codec.Unmarshal.
Unmarshal(dst interface{}, opts ...codec.Option) error
// Ack acknowledge message if supported.
// Body returns broker message may be []byte slice or some go struct
Body() interface{}
// Ack acknowledge message
Ack() error
// Error returns message error (like decoding errors or some other)
// In this case Body contains raw []byte from broker
Error() error
}
// Subscriber is a convenience return type for the Subscribe method
@@ -89,3 +69,9 @@ type Subscriber interface {
// Unsubscribe from topic
Unsubscribe(ctx context.Context) error
}
// MessageHandler func signature for single message processing
type MessageHandler func(Message) error
// MessagesHandler func signature for batch message processing
type MessagesHandler func([]Message) error
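
A minimal usage sketch of the options.Option-based Broker API shown in this diff (hypothetical code, assuming messages are obtained from a concrete broker implementation):

package example

import (
	"context"

	"go.unistack.org/micro/v4/broker"
)

// run is a hypothetical helper showing only the call pattern.
func run(ctx context.Context, b broker.Broker, msgs []broker.Message) error {
	if err := b.Connect(ctx); err != nil {
		return err
	}
	defer b.Disconnect(ctx)

	// wrap the func in broker.MessageHandler so implementations that
	// type-switch on the named handler types recognize it
	sub, err := b.Subscribe(ctx, "test", broker.MessageHandler(func(m broker.Message) error {
		// Body() now returns interface{}; headers are available via m.Header()
		return m.Ack()
	}))
	if err != nil {
		return err
	}
	defer sub.Unsubscribe(ctx)

	// Publish accepts a single Message or a []Message via interface{}
	return b.Publish(ctx, msgs)
}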

View File

@@ -15,15 +15,6 @@ func FromContext(ctx context.Context) (Broker, bool) {
return c, ok
}
// MustContext returns broker from passed context
func MustContext(ctx context.Context) Broker {
b, ok := FromContext(ctx)
if !ok {
panic("missing broker")
}
return b
}
// NewContext saves broker in context
func NewContext(ctx context.Context, s Broker) context.Context {
if ctx == nil {
@@ -31,33 +22,3 @@ func NewContext(ctx context.Context, s Broker) context.Context {
}
return context.WithValue(ctx, brokerKey{}, s)
}
// SetSubscribeOption returns a function to setup a context with given value
func SetSubscribeOption(k, v interface{}) SubscribeOption {
return func(o *SubscribeOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetPublishOption returns a function to setup a context with given value
func SetPublishOption(k, v interface{}) PublishOption {
return func(o *PublishOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
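
With the Set*Option helpers removed here, only FromContext and NewContext remain; a brief hypothetical sketch of those two helpers:

package example

import (
	"context"

	"go.unistack.org/micro/v4/broker"
)

// stash stores a Broker in a context via NewContext and reads it back via FromContext.
func stash(b broker.Broker) (broker.Broker, bool) {
	ctx := broker.NewContext(context.Background(), b)
	return broker.FromContext(ctx)
}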

View File

@@ -37,25 +37,3 @@ func TestNewNilContext(t *testing.T) {
t.Fatal("NewContext not works")
}
}
func TestSetSubscribeOption(t *testing.T) {
type key struct{}
o := SetSubscribeOption(key{}, "test")
opts := &SubscribeOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetSubscribeOption not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")
opts := &Options{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetOption not works")
}
}

broker/memory.go (new file, +334 lines)

@@ -0,0 +1,334 @@
package broker
import (
"context"
"fmt"
"sync"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/semconv"
maddr "go.unistack.org/micro/v4/util/addr"
"go.unistack.org/micro/v4/util/id"
mnet "go.unistack.org/micro/v4/util/net"
"go.unistack.org/micro/v4/util/rand"
)
type MemoryBroker struct {
subscribers map[string][]*memorySubscriber
addr string
opts Options
sync.RWMutex
connected bool
}
func (m *MemoryBroker) Options() Options {
return m.opts
}
func (m *MemoryBroker) Address() string {
return m.addr
}
func (m *MemoryBroker) Connect(ctx context.Context) error {
m.Lock()
defer m.Unlock()
if m.connected {
return nil
}
// use 127.0.0.1 to avoid scan of all network interfaces
addr, err := maddr.Extract("127.0.0.1")
if err != nil {
return err
}
var rng rand.Rand
i := rng.Intn(20000)
// set addr with port
addr = mnet.HostPort(addr, 10000+i)
m.addr = addr
m.connected = true
return nil
}
func (m *MemoryBroker) Disconnect(ctx context.Context) error {
m.Lock()
defer m.Unlock()
select {
case <-ctx.Done():
return ctx.Err()
default:
if m.connected {
m.connected = false
}
}
return nil
}
func (m *MemoryBroker) Init(opts ...options.Option) error {
var err error
for _, o := range opts {
if err = o(&m.opts); err != nil {
return err
}
}
return nil
}
func (m *MemoryBroker) Publish(ctx context.Context, message interface{}, opts ...options.Option) error {
m.RLock()
if !m.connected {
m.RUnlock()
return ErrNotConnected
}
m.RUnlock()
var err error
select {
case <-ctx.Done():
return ctx.Err()
default:
options := NewPublishOptions(opts...)
var msgs []Message
switch v := message.(type) {
case []Message:
msgs = v
case Message:
msgs = append(msgs, v)
default:
return ErrInvalidMessage
}
msgTopicMap := make(map[string][]*memoryMessage)
for _, msg := range msgs {
p := &memoryMessage{opts: options}
p.topic, _ = msg.Header().Get(metadata.HeaderTopic)
if v, ok := msg.Body().(*codec.Frame); ok {
p.body = msg.Body()
} else if len(m.opts.Codecs) == 0 {
p.body = msg.Body()
} else {
cf, ok := m.opts.Codecs[options.ContentType]
if !ok {
return fmt.Errorf("%s: %s", codec.ErrUnknownContentType, options.ContentType)
}
p.body, err = cf.Marshal(v)
if err != nil {
return err
}
}
msgTopicMap[p.topic] = append(msgTopicMap[p.topic], p)
}
eh := m.opts.ErrorHandler
for t, ms := range msgTopicMap {
ts := time.Now()
m.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", t).Add(len(ms))
m.opts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", t).Add(len(ms))
m.RLock()
subs, ok := m.subscribers[t]
m.RUnlock()
if !ok {
m.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", t, "status", "failure").Add(len(ms))
m.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", t).Add(-len(ms))
m.opts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", t).Add(-len(ms))
continue
}
m.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", t, "status", "success").Add(len(ms))
for _, sub := range subs {
if sub.opts.ErrorHandler != nil {
eh = sub.opts.ErrorHandler
}
switch mh := sub.handler.(type) {
case MessagesHandler:
mhs := make([]Message, 0, len(ms))
for _, m := range ms {
mhs = append(mhs, m)
}
if err = mh(mhs); err != nil {
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "failure").Add(len(ms))
if eh != nil {
switch meh := eh.(type) {
case MessagesHandler:
_ = meh(mhs)
case MessageHandler:
for _, me := range mhs {
_ = meh(me)
}
}
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
}
}
case MessageHandler:
for _, p := range ms {
if err = mh(p); err != nil {
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "failure").Inc()
if eh != nil {
switch meh := eh.(type) {
case MessageHandler:
_ = meh(p)
case MessagesHandler:
_ = meh([]Message{p})
}
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
}
} else {
if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Errorf(m.opts.Context, "ack failed: %v", err)
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "failure").Inc()
} else {
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "success").Inc()
}
} else {
m.opts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", t, "status", "success").Inc()
}
}
m.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", t).Add(-1)
m.opts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", t).Add(-1)
}
}
}
te := time.Since(ts)
m.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", t).Update(te.Seconds())
m.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", t).Update(te.Seconds())
m.opts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", t).Update(te.Seconds())
m.opts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", t).Update(te.Seconds())
}
}
return nil
}
func (m *MemoryBroker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...options.Option) (Subscriber, error) {
m.RLock()
if !m.connected {
m.RUnlock()
return nil, ErrNotConnected
}
m.RUnlock()
sid, err := id.New()
if err != nil {
return nil, err
}
options := NewSubscribeOptions(opts...)
sub := &memorySubscriber{
exit: make(chan bool, 1),
id: sid,
topic: topic,
handler: handler,
opts: options,
ctx: ctx,
}
m.Lock()
m.subscribers[topic] = append(m.subscribers[topic], sub)
m.Unlock()
go func() {
<-sub.exit
m.Lock()
newSubscribers := make([]*memorySubscriber, 0, len(m.subscribers)-1)
for _, sb := range m.subscribers[topic] {
if sb.id == sub.id {
continue
}
newSubscribers = append(newSubscribers, sb)
}
m.subscribers[topic] = newSubscribers
m.Unlock()
}()
return sub, nil
}
func (m *MemoryBroker) String() string {
return "memory"
}
func (m *MemoryBroker) Name() string {
return m.opts.Name
}
type memoryMessage struct {
err error
body interface{}
topic string
header metadata.Metadata
opts PublishOptions
ctx context.Context
}
func (m *memoryMessage) Topic() string {
return m.topic
}
func (m *memoryMessage) Header() metadata.Metadata {
return m.header
}
func (m *memoryMessage) Body() interface{} {
return m.body
}
func (m *memoryMessage) Ack() error {
return nil
}
func (m *memoryMessage) Error() error {
return m.err
}
func (m *memoryMessage) Context() context.Context {
return m.ctx
}
type memorySubscriber struct {
ctx context.Context
exit chan bool
handler interface{}
id string
topic string
opts SubscribeOptions
}
func (m *memorySubscriber) Options() SubscribeOptions {
return m.opts
}
func (m *memorySubscriber) Topic() string {
return m.topic
}
func (m *memorySubscriber) Unsubscribe(ctx context.Context) error {
m.exit <- true
return nil
}
// NewBroker return new memory broker
func NewBroker(opts ...options.Option) *MemoryBroker {
return &MemoryBroker{
opts: NewOptions(opts...),
subscribers: make(map[string][]*memorySubscriber),
}
}

View File

@@ -1,338 +0,0 @@
package broker
import (
"context"
"strings"
"sync"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
maddr "go.unistack.org/micro/v4/util/addr"
"go.unistack.org/micro/v4/util/id"
mnet "go.unistack.org/micro/v4/util/net"
"go.unistack.org/micro/v4/util/rand"
)
type Broker struct {
funcPublish broker.FuncPublish
funcSubscribe broker.FuncSubscribe
subscribers map[string][]*Subscriber
addr string
opts broker.Options
sync.RWMutex
connected bool
}
type memoryMessage struct {
c codec.Codec
topic string
ctx context.Context
body []byte
hdr metadata.Metadata
opts broker.PublishOptions
}
func (m *memoryMessage) Ack() error {
return nil
}
func (m *memoryMessage) Body() []byte {
return m.body
}
func (m *memoryMessage) Header() metadata.Metadata {
return m.hdr
}
func (m *memoryMessage) Context() context.Context {
return m.ctx
}
func (m *memoryMessage) Topic() string {
return ""
}
func (m *memoryMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
type Subscriber struct {
ctx context.Context
exit chan bool
handler interface{}
id string
topic string
opts broker.SubscribeOptions
}
func (b *Broker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.RLock()
c, ok := b.opts.Codecs[ct]
b.RUnlock()
if ok {
return c, nil
}
return nil, codec.ErrUnknownContentType
}
func (b *Broker) Options() broker.Options {
return b.opts
}
func (b *Broker) Address() string {
return b.addr
}
func (b *Broker) Connect(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
b.Lock()
defer b.Unlock()
if b.connected {
return nil
}
// use 127.0.0.1 to avoid scan of all network interfaces
addr, err := maddr.Extract("127.0.0.1")
if err != nil {
return err
}
var rng rand.Rand
i := rng.Intn(20000)
// set addr with port
addr = mnet.HostPort(addr, 10000+i)
b.addr = addr
b.connected = true
return nil
}
func (b *Broker) Disconnect(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
b.Lock()
defer b.Unlock()
if !b.connected {
return nil
}
b.connected = false
return nil
}
func (b *Broker) Init(opts ...broker.Option) error {
for _, o := range opts {
o(&b.opts)
}
b.funcPublish = b.fnPublish
b.funcSubscribe = b.fnSubscribe
b.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case broker.HookPublish:
b.funcPublish = h(b.funcPublish)
case broker.HookSubscribe:
b.funcSubscribe = h(b.funcSubscribe)
}
})
return nil
}
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.PublishOption) (broker.Message, error) {
options := broker.NewPublishOptions(opts...)
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
m.body, err = c.Marshal(body)
}
if err != nil {
return nil, err
}
return m, nil
}
func (b *Broker) Publish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.funcPublish(ctx, topic, messages...)
}
func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.publish(ctx, topic, messages...)
}
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
b.RLock()
if !b.connected {
b.RUnlock()
return broker.ErrNotConnected
}
b.RUnlock()
select {
case <-ctx.Done():
return ctx.Err()
default:
}
b.RLock()
subs, ok := b.subscribers[topic]
b.RUnlock()
if !ok {
return nil
}
var err error
for _, sub := range subs {
switch s := sub.handler.(type) {
default:
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", broker.ErrInvalidHandler)
}
case func(broker.Message) error:
for _, message := range messages {
msg, ok := message.(*memoryMessage)
if !ok {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", broker.ErrInvalidMessage)
}
}
msg.topic = topic
if err = s(msg); err == nil && sub.opts.AutoAck {
err = msg.Ack()
}
if err != nil {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", err)
}
}
}
case func([]broker.Message) error:
if err = s(messages); err == nil && sub.opts.AutoAck {
for _, message := range messages {
err = message.Ack()
if err != nil {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", err)
}
}
}
}
}
}
return nil
}
func (b *Broker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return b.funcSubscribe(ctx, topic, handler, opts...)
}
func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
if err := broker.IsValidHandler(handler); err != nil {
return nil, err
}
b.RLock()
if !b.connected {
b.RUnlock()
return nil, broker.ErrNotConnected
}
b.RUnlock()
sid, err := id.New()
if err != nil {
return nil, err
}
options := broker.NewSubscribeOptions(opts...)
sub := &Subscriber{
exit: make(chan bool, 1),
id: sid,
topic: topic,
handler: handler,
opts: options,
ctx: ctx,
}
b.Lock()
b.subscribers[topic] = append(b.subscribers[topic], sub)
b.Unlock()
go func() {
<-sub.exit
b.Lock()
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
for _, sb := range b.subscribers[topic] {
if sb.id == sub.id {
continue
}
newSubscribers = append(newSubscribers, sb)
}
b.subscribers[topic] = newSubscribers
b.Unlock()
}()
return sub, nil
}
func (b *Broker) String() string {
return "memory"
}
func (b *Broker) Name() string {
return b.opts.Name
}
func (b *Broker) Live() bool {
return true
}
func (b *Broker) Ready() bool {
return true
}
func (b *Broker) Health() bool {
return true
}
func (m *Subscriber) Options() broker.SubscribeOptions {
return m.opts
}
func (m *Subscriber) Topic() string {
return m.topic
}
func (m *Subscriber) Unsubscribe(ctx context.Context) error {
m.exit <- true
return nil
}
// NewBroker return new memory broker
func NewBroker(opts ...broker.Option) broker.Broker {
return &Broker{
opts: broker.NewOptions(opts...),
subscribers: make(map[string][]*Subscriber),
}
}

View File

@@ -1,74 +0,0 @@
package broker
import (
"context"
"fmt"
"testing"
"go.uber.org/atomic"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
)
type hldr struct {
c atomic.Int64
}
func (h *hldr) Handler(m broker.Message) error {
h.c.Add(1)
return nil
}
func TestMemoryBroker(t *testing.T) {
b := NewBroker(broker.Codec("application/octet-stream", codec.NewCodec()))
ctx := context.Background()
if err := b.Init(); err != nil {
t.Fatalf("Unexpected init error %v", err)
}
if err := b.Connect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
topic := "test"
count := int64(10)
h := &hldr{}
sub, err := b.Subscribe(ctx, topic, h.Handler)
if err != nil {
t.Fatalf("Unexpected error subscribing %v", err)
}
for i := int64(0); i < count; i++ {
message, err := b.NewMessage(ctx,
metadata.Pairs(
"foo", "bar",
"id", fmt.Sprintf("%d", i),
),
[]byte(`"hello world"`),
broker.PublishContentType("application/octet-stream"),
)
if err != nil {
t.Fatal(err)
}
if err := b.Publish(ctx, topic, message); err != nil {
t.Fatalf("Unexpected error publishing %d err: %v", i, err)
}
}
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatalf("Unexpected error unsubscribing from %s: %v", topic, err)
}
if err := b.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
if h.c.Load() != count {
t.Fatal("invalid messages count received")
}
}

broker/memory_test.go (new file, +107 lines)

@@ -0,0 +1,107 @@
package broker
import (
"context"
"fmt"
"testing"
"go.unistack.org/micro/v4/metadata"
)
func TestMemoryBatchBroker(t *testing.T) {
b := NewBroker()
ctx := context.Background()
if err := b.Connect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
topic := "test"
count := 10
fn := func(evts []Message) error {
var err error
for _, evt := range evts {
if err = evt.Ack(); err != nil {
return err
}
}
return nil
}
sub, err := b.Subscribe(ctx, topic, fn)
if err != nil {
t.Fatalf("Unexpected error subscribing %v", err)
}
msgs := make([]Message, 0, count)
for i := 0; i < count; i++ {
message := &memoryMessage{
header: map[string]string{
metadata.HeaderTopic: topic,
"foo": "bar",
"id": fmt.Sprintf("%d", i),
},
body: []byte(`"hello world"`),
}
msgs = append(msgs, message)
}
if err := b.Publish(ctx, msgs); err != nil {
t.Fatalf("Unexpected error publishing %v", err)
}
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatalf("Unexpected error unsubscribing from %s: %v", topic, err)
}
if err := b.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
}
func TestMemoryBroker(t *testing.T) {
b := NewBroker()
ctx := context.Background()
if err := b.Connect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
topic := "test"
count := 10
fn := func(p Message) error {
return p.Ack()
}
sub, err := b.Subscribe(ctx, topic, fn)
if err != nil {
t.Fatalf("Unexpected error subscribing %v", err)
}
msgs := make([]Message, 0, count)
for i := 0; i < count; i++ {
message := &memoryMessage{
header: map[string]string{
metadata.HeaderTopic: topic,
"foo": "bar",
"id": fmt.Sprintf("%d", i),
},
body: []byte(`"hello world"`),
}
msgs = append(msgs, message)
}
if err := b.Publish(ctx, msgs); err != nil {
t.Fatalf("Unexpected error publishing %v", err)
}
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatalf("Unexpected error unsubscribing from %s: %v", topic, err)
}
if err := b.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
}

View File

@@ -1,176 +0,0 @@
package broker
import (
"context"
"strings"
"sync"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
)
type NoopBroker struct {
funcPublish FuncPublish
funcSubscribe FuncSubscribe
opts Options
sync.RWMutex
}
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.RLock()
c, ok := b.opts.Codecs[ct]
b.RUnlock()
if ok {
return c, nil
}
return nil, codec.ErrUnknownContentType
}
func NewBroker(opts ...Option) *NoopBroker {
b := &NoopBroker{opts: NewOptions(opts...)}
b.funcPublish = b.fnPublish
b.funcSubscribe = b.fnSubscribe
return b
}
func (b *NoopBroker) Health() bool {
return true
}
func (b *NoopBroker) Live() bool {
return true
}
func (b *NoopBroker) Ready() bool {
return true
}
func (b *NoopBroker) Name() string {
return b.opts.Name
}
func (b *NoopBroker) String() string {
return "noop"
}
func (b *NoopBroker) Options() Options {
return b.opts
}
func (b *NoopBroker) Init(opts ...Option) error {
for _, opt := range opts {
opt(&b.opts)
}
b.funcPublish = b.fnPublish
b.funcSubscribe = b.fnSubscribe
b.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case HookPublish:
b.funcPublish = h(b.funcPublish)
case HookSubscribe:
b.funcSubscribe = h(b.funcSubscribe)
}
})
return nil
}
func (b *NoopBroker) Connect(_ context.Context) error {
return nil
}
func (b *NoopBroker) Disconnect(_ context.Context) error {
return nil
}
func (b *NoopBroker) Address() string {
return strings.Join(b.opts.Addrs, ",")
}
type noopMessage struct {
c codec.Codec
ctx context.Context
body []byte
hdr metadata.Metadata
opts PublishOptions
}
func (m *noopMessage) Ack() error {
return nil
}
func (m *noopMessage) Body() []byte {
return m.body
}
func (m *noopMessage) Header() metadata.Metadata {
return m.hdr
}
func (m *noopMessage) Context() context.Context {
return m.ctx
}
func (m *noopMessage) Topic() string {
return ""
}
func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) {
options := NewPublishOptions(opts...)
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
m.body, err = c.Marshal(body)
}
if err != nil {
return nil, err
}
return m, nil
}
func (b *NoopBroker) fnPublish(_ context.Context, _ string, _ ...Message) error {
return nil
}
func (b *NoopBroker) Publish(ctx context.Context, topic string, msg ...Message) error {
return b.funcPublish(ctx, topic, msg...)
}
type NoopSubscriber struct {
ctx context.Context
topic string
handler interface{}
opts SubscribeOptions
}
func (b *NoopBroker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error) {
return &NoopSubscriber{ctx: ctx, topic: topic, opts: NewSubscribeOptions(opts...), handler: handler}, nil
}
func (b *NoopBroker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error) {
return b.funcSubscribe(ctx, topic, handler, opts...)
}
func (s *NoopSubscriber) Options() SubscribeOptions {
return s.opts
}
func (s *NoopSubscriber) Topic() string {
return s.topic
}
func (s *NoopSubscriber) Unsubscribe(_ context.Context) error {
return nil
}

View File

@@ -1,35 +0,0 @@
package broker
import (
"context"
"testing"
)
type testHook struct {
f bool
}
func (t *testHook) Publish1(fn FuncPublish) FuncPublish {
return func(ctx context.Context, topic string, messages ...Message) error {
t.f = true
return fn(ctx, topic, messages...)
}
}
func TestNoopHook(t *testing.T) {
h := &testHook{}
b := NewBroker(Hooks(HookPublish(h.Publish1)))
if err := b.Init(); err != nil {
t.Fatal(err)
}
if err := b.Publish(context.TODO(), "", nil); err != nil {
t.Fatal(err)
}
if !h.f {
t.Fatal("hook not works")
}
}

View File

@@ -7,23 +7,20 @@ import (
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/register"
"go.unistack.org/micro/v4/sync"
"go.unistack.org/micro/v4/tracer"
)
// Options struct
type Options struct {
// Name holds the broker name
Name string
// Tracer used for tracing
Tracer tracer.Tracer
// Register can be used for clustering
Register register.Register
// Codecs holds the codecs for marshal/unmarshal based on content-type
// Codecs holds the codec for marshal/unmarshal
Codecs map[string]codec.Codec
// Logger used for logging
Logger logger.Logger
@@ -31,60 +28,48 @@ type Options struct {
Meter meter.Meter
// Context holds external options
Context context.Context
// Wait waits for a collection of goroutines to finish
Wait *sync.WaitGroup
// TLSConfig holds tls.TLSConfig options
TLSConfig *tls.Config
// Addrs holds the broker address
Addrs []string
// Hooks can be run before broker Publish/BatchPublish and
// Subscribe/BatchSubscribe methods
Hooks options.Hooks
// GracefulTimeout contains time to wait to finish in flight requests
GracefulTimeout time.Duration
// ErrorHandler used when broker have error while processing message
ErrorHandler interface{}
// Name holds the broker name
Name string
// Address holds the broker address
Address []string
}
// NewOptions create new Options
func NewOptions(opts ...Option) Options {
func NewOptions(opts ...options.Option) Options {
options := Options{
Register: register.DefaultRegister,
Logger: logger.DefaultLogger,
Context: context.Background(),
Meter: meter.DefaultMeter,
Codecs: make(map[string]codec.Codec),
Tracer: tracer.DefaultTracer,
GracefulTimeout: DefaultGracefulTimeout,
Register: register.DefaultRegister,
Logger: logger.DefaultLogger,
Context: context.Background(),
Meter: meter.DefaultMeter,
Codecs: make(map[string]codec.Codec),
Tracer: tracer.DefaultTracer,
}
for _, o := range opts {
o(&options)
}
return options
}
// Context sets the context option
func Context(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
}
}
// PublishOptions struct
type PublishOptions struct {
// ContentType for message body
ContentType string
// BodyOnly flag says the message contains raw body bytes and don't need
// codec Marshal method
BodyOnly bool
// Context holds custom options
// Context holds external options
Context context.Context
// BodyOnly flag says the message contains raw body bytes
BodyOnly bool
// Message metadata usually passed as message headers
Metadata metadata.Metadata
// Content-Type of message for marshal
ContentType string
// Topic destination
Topic string
}
// NewPublishOptions creates PublishOptions struct
func NewPublishOptions(opts ...PublishOption) PublishOptions {
func NewPublishOptions(opts ...options.Option) PublishOptions {
options := PublishOptions{
Context: context.Background(),
}
@@ -94,12 +79,21 @@ func NewPublishOptions(opts ...PublishOption) PublishOptions {
return options
}
// PublishTopic pass topic for messages
func PublishTopic(t string) options.Option {
return func(src interface{}) error {
return options.Set(src, t, ".Topic")
}
}
// SubscribeOptions struct
type SubscribeOptions struct {
// Context holds external options
Context context.Context
// Group holds consumer group
Group string
// ErrorHandler used when broker have error while processing message
ErrorHandler interface{}
// QueueGroup holds consumer group
QueueGroup string
// AutoAck flag specifies auto ack of incoming message when no error happens
AutoAck bool
// BodyOnly flag specifies that message contains only body bytes without header
@@ -110,137 +104,16 @@ type SubscribeOptions struct {
BatchWait time.Duration
}
// Option func
type Option func(*Options)
// PublishOption func
type PublishOption func(*PublishOptions)
// PublishContentType sets message content-type that used to Marshal
func PublishContentType(ct string) PublishOption {
return func(o *PublishOptions) {
o.ContentType = ct
// ErrorHandler will catch all broker errors that can't be handled
// in normal way, for example Codec errors
func ErrorHandler(h interface{}) options.Option {
return func(src interface{}) error {
return options.Set(src, h, ".ErrorHandler")
}
}
// PublishBodyOnly publishes only the body of the message
func PublishBodyOnly(b bool) PublishOption {
return func(o *PublishOptions) {
o.BodyOnly = b
}
}
// Addrs sets the host addresses to be used by the broker
func Addrs(addrs ...string) Option {
return func(o *Options) {
o.Addrs = addrs
}
}
// Codec sets the codec used for encoding/decoding messages
func Codec(ct string, c codec.Codec) Option {
return func(o *Options) {
o.Codecs[ct] = c
}
}
// SubscribeGroup sets the name of the queue to share messages on
func SubscribeGroup(name string) SubscribeOption {
return func(o *SubscribeOptions) {
o.Group = name
}
}
// Register sets register option
func Register(r register.Register) Option {
return func(o *Options) {
o.Register = r
}
}
// TLSConfig sets the TLS Config
func TLSConfig(t *tls.Config) Option {
return func(o *Options) {
o.TLSConfig = t
}
}
// Logger sets the logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Tracer to be used for tracing
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// Meter sets the meter
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Name sets the name
func Name(n string) Option {
return func(o *Options) {
o.Name = n
}
}
// Hooks sets hooks that run before actions
func Hooks(h ...options.Hook) Option {
return func(o *Options) {
o.Hooks = append(o.Hooks, h...)
}
}
// SubscribeContext sets the context
func SubscribeContext(ctx context.Context) SubscribeOption {
return func(o *SubscribeOptions) {
o.Context = ctx
}
}
// SubscribeAutoAck controls auto acking of messages
// after they have been handled.
func SubscribeAutoAck(b bool) SubscribeOption {
return func(o *SubscribeOptions) {
o.AutoAck = b
}
}
// SubscribeBodyOnly consumes only body of the message
func SubscribeBodyOnly(b bool) SubscribeOption {
return func(o *SubscribeOptions) {
o.BodyOnly = b
}
}
// SubscribeBatchSize specifies max batch size
func SubscribeBatchSize(n int) SubscribeOption {
return func(o *SubscribeOptions) {
o.BatchSize = n
}
}
// SubscribeBatchWait specifies max batch wait time
func SubscribeBatchWait(td time.Duration) SubscribeOption {
return func(o *SubscribeOptions) {
o.BatchWait = td
}
}
// SubscribeOption func
type SubscribeOption func(*SubscribeOptions)
// NewSubscribeOptions creates new SubscribeOptions
func NewSubscribeOptions(opts ...SubscribeOption) SubscribeOptions {
func NewSubscribeOptions(opts ...options.Option) SubscribeOptions {
options := SubscribeOptions{
AutoAck: true,
Context: context.Background(),
@@ -250,3 +123,39 @@ func NewSubscribeOptions(opts ...SubscribeOption) SubscribeOptions {
}
return options
}
// SubscribeAutoAck controls auto acking of messages
// after they have been handled.
func SubscribeAutoAck(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".AutoAck")
}
}
// BodyOnly transfers only the body without the header
func BodyOnly(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".BodyOnly")
}
}
// SubscribeBatchSize specifies max batch size
func SubscribeBatchSize(n int) options.Option {
return func(src interface{}) error {
return options.Set(src, n, ".BatchSize")
}
}
// SubscribeBatchWait specifies max batch wait time
func SubscribeBatchWait(td time.Duration) options.Option {
return func(src interface{}) error {
return options.Set(src, td, ".BatchWait")
}
}
// SubscribeQueueGroup sets the shared queue name used to distribute messages across subscribers
func SubscribeQueueGroup(n string) options.Option {
return func(src interface{}) error {
return options.Set(src, n, ".QueueGroup")
}
}
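A short usage sketch of the consolidated subscribe options above (NewSubscribeOptions, SubscribeAutoAck, SubscribeQueueGroup, SubscribeBatchSize); the import path is taken from the module used elsewhere in this diff, and field behaviour is assumed from the setters shown here.

```go
package main

import (
	"fmt"

	"go.unistack.org/micro/v4/broker"
)

func main() {
	// AutoAck defaults to true in NewSubscribeOptions; the options below
	// set the queue group and batch size via the generic option funcs.
	sopts := broker.NewSubscribeOptions(
		broker.SubscribeAutoAck(true),
		broker.SubscribeQueueGroup("workers"),
		broker.SubscribeBatchSize(16),
	)
	fmt.Println(sopts.QueueGroup, sopts.AutoAck)
}
```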

View File

@@ -1,14 +1,98 @@
package broker
// IsValidHandler func signature
func IsValidHandler(sub interface{}) error {
switch sub.(type) {
default:
return ErrInvalidHandler
case func(Message) error:
break
case func([]Message) error:
break
import (
"fmt"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
const (
subSig = "func(context.Context, interface{}) error"
batchSubSig = "func([]context.Context, []interface{}) error"
)
// Precompute the reflect type for error. Can't use error directly
// because Typeof takes an empty interface value. This is annoying.
var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
// Is this an exported - upper case - name?
func isExported(name string) bool {
rune, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(rune)
}
// Is this type exported or a builtin?
func isExportedOrBuiltinType(t reflect.Type) bool {
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
// PkgPath will be non-empty even for an exported type,
// so we need to check the type name as well.
return isExported(t.Name()) || t.PkgPath() == ""
}
// ValidateSubscriber func signature
func ValidateSubscriber(sub interface{}) error {
typ := reflect.TypeOf(sub)
var argType reflect.Type
switch typ.Kind() {
case reflect.Func:
name := "Func"
switch typ.NumIn() {
case 1: // func(Message) error
case 2: // func(context.Context, Message) error or func(context.Context, []Message) error
argType = typ.In(2)
// if sub.Options().Batch {
if argType.Kind() != reflect.Slice {
return fmt.Errorf("subscriber %v dont have required signature %s", name, batchSubSig)
}
if strings.Compare(fmt.Sprintf("%v", argType), "[]interface{}") == 0 {
return fmt.Errorf("subscriber %v dont have required signaure %s", name, batchSubSig)
}
// }
default:
return fmt.Errorf("subscriber %v takes wrong number of args: %v required signature %s or %s", name, typ.NumIn(), subSig, batchSubSig)
}
if !isExportedOrBuiltinType(argType) {
return fmt.Errorf("subscriber %v argument type not exported: %v", name, argType)
}
if typ.NumOut() != 1 {
return fmt.Errorf("subscriber %v has wrong number of return values: %v require signature %s or %s",
name, typ.NumOut(), subSig, batchSubSig)
}
if returnType := typ.Out(0); returnType != typeOfError {
return fmt.Errorf("subscriber %v returns %v not error", name, returnType.String())
}
default:
hdlr := reflect.ValueOf(sub)
name := reflect.Indirect(hdlr).Type().Name()
for m := 0; m < typ.NumMethod(); m++ {
method := typ.Method(m)
switch method.Type.NumIn() {
case 3:
argType = method.Type.In(2)
default:
return fmt.Errorf("subscriber %v.%v takes wrong number of args: %v required signature %s or %s",
name, method.Name, method.Type.NumIn(), subSig, batchSubSig)
}
if !isExportedOrBuiltinType(argType) {
return fmt.Errorf("%v argument type not exported: %v", name, argType)
}
if method.Type.NumOut() != 1 {
return fmt.Errorf(
"subscriber %v.%v has wrong number of return values: %v require signature %s or %s",
name, method.Name, method.Type.NumOut(), subSig, batchSubSig)
}
if returnType := method.Type.Out(0); returnType != typeOfError {
return fmt.Errorf("subscriber %v.%v returns %v not error", name, method.Name, returnType.String())
}
}
}
return nil
}
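For context, the core reflection trick used above (comparing a func's single return type against a precomputed error type) can be shown in isolation. This standalone check is a simplified illustration, not the package's full validation logic.

```go
package main

import (
	"context"
	"fmt"
	"reflect"
)

// typeOfError is the reflect.Type of the error interface, built the same way
// as in the validator above.
var typeOfError = reflect.TypeOf((*error)(nil)).Elem()

// returnsOnlyError reports whether h is a func whose single return value is error.
func returnsOnlyError(h interface{}) bool {
	t := reflect.TypeOf(h)
	return t != nil && t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == typeOfError
}

func main() {
	ok := func(ctx context.Context, msgs []interface{}) error { return nil }
	bad := func(ctx context.Context, msg interface{}) string { return "" }
	fmt.Println(returnsOnlyError(ok), returnsOnlyError(bad)) // true false
}
```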

View File

@@ -17,13 +17,13 @@ func BackoffExp(_ context.Context, _ Request, attempts int) (time.Duration, erro
}
// BackoffInterval specifies randomization interval for backoff func
func BackoffInterval(minTime time.Duration, maxTime time.Duration) BackoffFunc {
func BackoffInterval(min time.Duration, max time.Duration) BackoffFunc {
return func(_ context.Context, _ Request, attempts int) (time.Duration, error) {
td := time.Duration(math.Pow(float64(attempts), math.E)) * time.Millisecond * 100
if td < minTime {
return minTime, nil
} else if td > maxTime {
return maxTime, nil
if td < min {
return min, nil
} else if td > max {
return max, nil
}
return td, nil
}
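To make the clamping behaviour above concrete, here is the same formula evaluated standalone: the raw delay grows as attempts^e × 100ms and is then clamped into [min, max].

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// backoff mirrors the BackoffInterval calculation shown above.
func backoff(attempts int, min, max time.Duration) time.Duration {
	td := time.Duration(math.Pow(float64(attempts), math.E)) * time.Millisecond * 100
	if td < min {
		return min
	} else if td > max {
		return max
	}
	return td
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoff(i, 100*time.Millisecond, 300*time.Millisecond))
	}
	// attempts 0 and 1 clamp up to 100ms; attempt 2 and beyond clamp down to 300ms.
}
```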

View File

@@ -34,23 +34,23 @@ func TestBackoffExp(t *testing.T) {
}
func TestBackoffInterval(t *testing.T) {
minTime := 100 * time.Millisecond
maxTime := 300 * time.Millisecond
min := 100 * time.Millisecond
max := 300 * time.Millisecond
r := &testRequest{
service: "test",
method: "test",
}
fn := BackoffInterval(minTime, maxTime)
fn := BackoffInterval(min, max)
for i := 0; i < 5; i++ {
d, err := fn(context.TODO(), r, i)
if err != nil {
t.Fatal(err)
}
if d < minTime || d > maxTime {
t.Fatalf("Expected %v < %v < %v", minTime, d, maxTime)
if d < min || d > max {
t.Fatalf("Expected %v < %v < %v", min, d, max)
}
}
}

View File

@@ -1,12 +1,12 @@
// Package client is an interface for an RPC client
package client
package client // import "go.unistack.org/micro/v4/client"
import (
"context"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
)
var (
@@ -22,6 +22,8 @@ var (
DefaultRetries = 0
// DefaultRequestTimeout is the default request timeout
DefaultRequestTimeout = time.Second * 5
// DefaultDialTimeout the default dial timeout
DefaultDialTimeout = time.Second * 5
// DefaultPoolSize sets the connection pool size
DefaultPoolSize = 100
// DefaultPoolTTL sets the connection pool ttl
@@ -29,24 +31,18 @@ var (
)
// Client is the interface used to make requests to services.
// It supports Request/Response via Transport and Publishing via the Broker.
// It also supports bidirectional streaming of requests.
type Client interface {
Name() string
Init(opts ...Option) error
Init(opts ...options.Option) error
Options() Options
NewRequest(service string, endpoint string, req interface{}, opts ...RequestOption) Request
Call(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error
Stream(ctx context.Context, req Request, opts ...CallOption) (Stream, error)
NewRequest(service string, endpoint string, req interface{}, opts ...options.Option) Request
Call(ctx context.Context, req Request, rsp interface{}, opts ...options.Option) error
Stream(ctx context.Context, req Request, opts ...options.Option) (Stream, error)
String() string
}
type (
FuncCall func(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error
HookCall func(next FuncCall) FuncCall
FuncStream func(ctx context.Context, req Request, opts ...CallOption) (Stream, error)
HookStream func(next FuncStream) FuncStream
)
// Request is the interface for a synchronous request used by Call or Stream
type Request interface {
// The service to call
@@ -63,16 +59,22 @@ type Request interface {
Codec() codec.Codec
// indicates whether the request will be a streaming one rather than unary
Stream() bool
// Header data
// Header() metadata.Metadata
}
// Response is the response received from a service
type Response interface {
// Read the response
Codec() codec.Codec
// The content type
// ContentType() string
// Header data
Header() metadata.Metadata
// Header() metadata.Metadata
// Read the undecoded response
Read() ([]byte, error)
// The unencoded request body
// Body() interface{}
}
// Stream is the interface for a bidirectional synchronous stream
@@ -98,12 +100,3 @@ type Stream interface {
// CloseSend closes the send direction of the stream
CloseSend() error
}
// Option used by the Client
type Option func(*Options)
// CallOption used by Call or Stream
type CallOption func(*CallOptions)
// RequestOption used by NewRequest
type RequestOption func(*RequestOptions)
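A usage sketch of the reworked Client interface, using only identifiers visible in this diff (the noop NewClient plus the consolidated option funcs); treat the exact import paths and runtime behaviour as assumptions drawn from the changes above.

```go
package main

import (
	"context"
	"time"

	"go.unistack.org/micro/v4/client"
)

func main() {
	c := client.NewClient()
	if err := c.Init(); err != nil {
		panic(err)
	}
	req := c.NewRequest("helloworld", "Say.Hello", nil)
	var rsp interface{}
	// Per-call settings now use the shared options.Option type.
	err := c.Call(context.TODO(), req, &rsp,
		client.RequestTimeout(2*time.Second),
		client.Retries(1),
		client.Backoff(client.BackoffInterval(10*time.Millisecond, 100*time.Millisecond)),
	)
	_ = err
}
```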

View File

@@ -2,22 +2,24 @@ package client
import (
"context"
"go.unistack.org/micro/v4/options"
)
type clientCallOptions struct {
Client
opts []CallOption
opts []options.Option
}
func (s *clientCallOptions) Call(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error {
func (s *clientCallOptions) Call(ctx context.Context, req Request, rsp interface{}, opts ...options.Option) error {
return s.Client.Call(ctx, req, rsp, append(s.opts, opts...)...)
}
func (s *clientCallOptions) Stream(ctx context.Context, req Request, opts ...CallOption) (Stream, error) {
func (s *clientCallOptions) Stream(ctx context.Context, req Request, opts ...options.Option) (Stream, error) {
return s.Client.Stream(ctx, req, append(s.opts, opts...)...)
}
// NewClientCallOptions adds CallOption to every call
func NewClientCallOptions(c Client, opts ...CallOption) Client {
func NewClientCallOptions(c Client, opts ...options.Option) Client {
return &clientCallOptions{c, opts}
}

View File

@@ -0,0 +1,28 @@
package client
import (
"context"
"testing"
"time"
"go.unistack.org/micro/v4/options"
)
func TestNewClientCallOptions(t *testing.T) {
var flag bool
w := func(fn CallFunc) CallFunc {
flag = true
return fn
}
c := NewClientCallOptions(NewClient(),
options.Address("127.0.0.1"),
WithCallWrapper(w),
RequestTimeout(1*time.Millisecond),
Retries(0),
Backoff(BackoffInterval(10*time.Millisecond, 100*time.Millisecond)),
)
_ = c.Call(context.TODO(), c.NewRequest("service", "endpoint", nil), nil)
if !flag {
t.Fatalf("NewClientCallOptions not works")
}
}

View File

@@ -15,15 +15,6 @@ func FromContext(ctx context.Context) (Client, bool) {
return c, ok
}
// MustContext gets the client from context
func MustContext(ctx context.Context) Client {
c, ok := FromContext(ctx)
if !ok {
panic("missing client")
}
return c
}
// NewContext puts the client in context
func NewContext(ctx context.Context, c Client) context.Context {
if ctx == nil {
@@ -31,23 +22,3 @@ func NewContext(ctx context.Context, c Client) context.Context {
}
return context.WithValue(ctx, clientKey{}, c)
}
// SetCallOption returns a function to setup a context with given value
func SetCallOption(k, v interface{}) CallOption {
return func(o *CallOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
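The kept NewContext/FromContext helpers can be used like this; a trivial sketch assuming only the signatures shown above.

```go
package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v4/client"
)

func main() {
	// Stash a client in a context and pull it back out further down the call chain.
	ctx := client.NewContext(context.Background(), client.NewClient())
	if c, ok := client.FromContext(ctx); ok {
		fmt.Println(c.String()) // noop
	}
}
```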

View File

@@ -38,25 +38,3 @@ func TestNewNilContext(t *testing.T) {
t.Fatal("NewContext not works")
}
}
func TestSetCallOption(t *testing.T) {
type key struct{}
o := SetCallOption(key{}, "test")
opts := &CallOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetCallOption not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")
opts := &Options{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetOption not works")
}
}

View File

@@ -3,7 +3,6 @@ package client
import (
"context"
"fmt"
"strconv"
"time"
"go.unistack.org/micro/v4/codec"
@@ -12,7 +11,6 @@ import (
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/selector"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
)
// DefaultCodecs will be used to encode/decode data
@@ -21,9 +19,7 @@ var DefaultCodecs = map[string]codec.Codec{
}
type noopClient struct {
funcCall FuncCall
funcStream FuncStream
opts Options
opts Options
}
type noopRequest struct {
@@ -37,13 +33,13 @@ type noopRequest struct {
}
// NewClient returns new noop client
func NewClient(opts ...Option) Client {
n := &noopClient{opts: NewOptions(opts...)}
func NewClient(opts ...options.Option) Client {
nc := &noopClient{opts: NewOptions(opts...)}
// wrap in reverse
n.funcCall = n.fnCall
n.funcStream = n.fnStream
c := Client(nc)
return n
return c
}
func (n *noopClient) Name() string {
@@ -95,13 +91,10 @@ func (n *noopResponse) Read() ([]byte, error) {
return nil, nil
}
type noopStream struct {
err error
ctx context.Context
}
type noopStream struct{}
func (n *noopStream) Context() context.Context {
return n.ctx
return context.Background()
}
func (n *noopStream) Request() Request {
@@ -129,40 +122,31 @@ func (n *noopStream) RecvMsg(interface{}) error {
}
func (n *noopStream) Error() error {
return n.err
return nil
}
func (n *noopStream) Close() error {
if sp, ok := tracer.SpanFromContext(n.ctx); ok && sp != nil {
if n.err != nil {
sp.SetStatus(tracer.SpanStatusError, n.err.Error())
}
sp.Finish()
}
return n.err
return nil
}
func (n *noopStream) CloseSend() error {
return n.err
return nil
}
func (n *noopClient) Init(opts ...Option) error {
func (n *noopClient) newCodec(contentType string) (codec.Codec, error) {
if cf, ok := n.opts.Codecs[contentType]; ok {
return cf, nil
}
if cf, ok := DefaultCodecs[contentType]; ok {
return cf, nil
}
return nil, codec.ErrUnknownContentType
}
func (n *noopClient) Init(opts ...options.Option) error {
for _, o := range opts {
o(&n.opts)
}
n.funcCall = n.fnCall
n.funcStream = n.fnStream
n.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case HookCall:
n.funcCall = h(n.funcCall)
case HookStream:
n.funcStream = h(n.funcStream)
}
})
return nil
}
@@ -174,32 +158,7 @@ func (n *noopClient) String() string {
return "noop"
}
func (n *noopClient) Call(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error {
ts := time.Now()
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", req.Endpoint()).Inc()
var sp tracer.Span
ctx, sp = n.opts.Tracer.Start(ctx, "rpc-client",
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels("endpoint", req.Endpoint()),
)
err := n.funcCall(ctx, req, rsp, opts...)
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", req.Endpoint()).Dec()
te := time.Since(ts)
n.opts.Meter.Summary(semconv.ClientRequestLatencyMicroseconds, "endpoint", req.Endpoint()).Update(te.Seconds())
n.opts.Meter.Histogram(semconv.ClientRequestDurationSeconds, "endpoint", req.Endpoint()).Update(te.Seconds())
if me := errors.FromError(err); me == nil {
sp.Finish()
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", req.Endpoint(), "status", "success", "code", strconv.Itoa(int(200))).Inc()
} else {
sp.SetStatus(tracer.SpanStatusError, err.Error())
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", req.Endpoint(), "status", "failure", "code", strconv.Itoa(int(me.Code))).Inc()
}
return err
}
func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error {
func (n *noopClient) Call(ctx context.Context, req Request, rsp interface{}, opts ...options.Option) error {
// make a copy of call opts
callOpts := n.opts.CallOptions
for _, opt := range opts {
@@ -216,7 +175,7 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := WithRequestTimeout(time.Until(d))
opt := RequestTimeout(time.Until(d))
opt(&callOpts)
}
@@ -228,8 +187,11 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
}
// make copy of call method
hcall := func(ctx context.Context, addr string, req Request, rsp interface{}, opts CallOptions) error {
return nil
hcall := n.call
// wrap the call in reverse
for i := len(callOpts.CallWrappers); i > 0; i-- {
hcall = callOpts.CallWrappers[i-1](hcall)
}
// use the router passed as a call option, or fallback to the rpc clients router
@@ -254,7 +216,7 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", "%s", err)
return errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
@@ -268,7 +230,7 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
// TODO apply any filtering here
routes, err = n.opts.Lookup(ctx, req, callOpts)
if err != nil {
return errors.InternalServerError("go.micro.client", "%s", err)
return errors.InternalServerError("go.micro.client", err.Error())
}
// balance the list of nodes
@@ -298,6 +260,9 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
ch := make(chan error, callOpts.Retries)
var gerr error
ts := time.Now()
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", endpoint).Inc()
for i := 0; i <= callOpts.Retries; i++ {
go func() {
ch <- call(i)
@@ -325,39 +290,28 @@ func (n *noopClient) fnCall(ctx context.Context, req Request, rsp interface{}, o
}
}
if gerr != nil {
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", endpoint, "status", "failure").Inc()
} else {
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", endpoint, "status", "success").Inc()
}
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", endpoint).Dec()
te := time.Since(ts)
n.opts.Meter.Summary(semconv.ClientRequestLatencyMicroseconds, "endpoint", endpoint).Update(te.Seconds())
n.opts.Meter.Histogram(semconv.ClientRequestDurationSeconds, "endpoint", endpoint).Update(te.Seconds())
return gerr
}
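The call path above follows a backoff-then-retry loop, collecting attempt results on a buffered channel while updating the request metrics. A stripped-down standalone version of that control flow is sketched below, with the selector, router, and meter omitted; it is an illustration of the loop, not the client's exact code.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// callWithRetries runs call up to retries+1 times, sleeping for backoff(i)
// before each attempt and returning the first nil error or the last failure.
func callWithRetries(ctx context.Context, retries int, backoff func(int) time.Duration, call func(int) error) error {
	ch := make(chan error, retries+1)
	var gerr error
	for i := 0; i <= retries; i++ {
		if t := backoff(i); t > 0 {
			time.Sleep(t)
		}
		go func(attempt int) { ch <- call(attempt) }(i)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case err := <-ch:
			if err == nil {
				return nil
			}
			gerr = err
		}
	}
	return gerr
}

func main() {
	err := callWithRetries(context.TODO(), 2,
		func(i int) time.Duration { return time.Duration(i) * 10 * time.Millisecond },
		func(i int) error {
			if i < 2 {
				return fmt.Errorf("attempt %d failed", i)
			}
			return nil
		})
	fmt.Println(err) // <nil>
}
```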
func (n *noopClient) NewRequest(service, endpoint string, _ interface{}, _ ...RequestOption) Request {
func (n *noopClient) call(ctx context.Context, addr string, req Request, rsp interface{}, opts CallOptions) error {
return nil
}
func (n *noopClient) NewRequest(service, endpoint string, req interface{}, opts ...options.Option) Request {
return &noopRequest{service: service, endpoint: endpoint}
}
func (n *noopClient) Stream(ctx context.Context, req Request, opts ...CallOption) (Stream, error) {
ts := time.Now()
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", req.Endpoint()).Inc()
var sp tracer.Span
ctx, sp = n.opts.Tracer.Start(ctx, "rpc-client",
tracer.WithSpanKind(tracer.SpanKindClient),
tracer.WithSpanLabels("endpoint", req.Endpoint()),
)
stream, err := n.funcStream(ctx, req, opts...)
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", req.Endpoint()).Dec()
te := time.Since(ts)
n.opts.Meter.Summary(semconv.ClientRequestLatencyMicroseconds, "endpoint", req.Endpoint()).Update(te.Seconds())
n.opts.Meter.Histogram(semconv.ClientRequestDurationSeconds, "endpoint", req.Endpoint()).Update(te.Seconds())
if me := errors.FromError(err); me == nil {
sp.Finish()
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", req.Endpoint(), "status", "success", "code", strconv.Itoa(int(200))).Inc()
} else {
sp.SetStatus(tracer.SpanStatusError, err.Error())
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", req.Endpoint(), "status", "failure", "code", strconv.Itoa(int(me.Code))).Inc()
}
return stream, err
}
func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOption) (Stream, error) {
func (n *noopClient) Stream(ctx context.Context, req Request, opts ...options.Option) (Stream, error) {
var err error
// make a copy of call opts
@@ -376,7 +330,7 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
o := WithStreamTimeout(time.Until(d))
o := StreamTimeout(time.Until(d))
o(&callOpts)
}
@@ -417,7 +371,7 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
// call backoff first. Someone may want an initial start delay
t, cerr := callOpts.Backoff(ctx, req, i)
if cerr != nil {
return nil, errors.InternalServerError("go.micro.client", "%s", cerr)
return nil, errors.InternalServerError("go.micro.client", cerr.Error())
}
// only sleep if greater than 0
@@ -431,7 +385,7 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
// TODO apply any filtering here
routes, err = n.opts.Lookup(ctx, req, callOpts)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", "%s", err)
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
// balance the list of nodes
@@ -443,7 +397,15 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
node := next()
// ts := time.Now()
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", endpoint).Inc()
stream, cerr := n.stream(ctx, node, req, callOpts)
if cerr != nil {
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", endpoint, "status", "failure").Inc()
} else {
n.opts.Meter.Counter(semconv.ClientRequestTotal, "endpoint", endpoint, "status", "success").Inc()
}
// record the result of the call to inform future routing decisions
if verr := n.opts.Selector.Record(node, cerr); verr != nil {
@@ -497,6 +459,6 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
return nil, grr
}
func (n *noopClient) stream(ctx context.Context, _ string, _ Request, _ CallOptions) (Stream, error) {
return &noopStream{ctx: ctx}, nil
func (n *noopClient) stream(ctx context.Context, addr string, req Request, opts CallOptions) (*noopStream, error) {
return &noopStream{}, nil
}

View File

@@ -6,13 +6,11 @@ import (
"net"
"time"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/register"
"go.unistack.org/micro/v4/router"
"go.unistack.org/micro/v4/selector"
"go.unistack.org/micro/v4/selector/random"
@@ -21,56 +19,44 @@ import (
// Options holds client options
type Options struct {
// Codecs map
Codecs map[string]codec.Codec
// Proxy is used for proxy requests
Proxy string
// ContentType is used to select codec
ContentType string
// Name is the client name
Name string
// Selector used to select needed address
Selector selector.Selector
// Logger used to log messages
Logger logger.Logger
// Tracer used for tracing
Tracer tracer.Tracer
// Broker used to publish messages
Broker broker.Broker
// Meter used for metrics
Meter meter.Meter
// Context is used for external options
Context context.Context
// Router used to get route
Router router.Router
// TLSConfig specifies tls.Config for secure connection
TLSConfig *tls.Config
// Codecs map
Codecs map[string]codec.Codec
// Lookup func used to get destination addr
Lookup LookupFunc
// ContextDialer used to connect
ContextDialer func(context.Context, string) (net.Conn, error)
// Wrappers contains wrappers
Wrappers []Wrapper
// Hooks can be run before broker Publish/BatchPublish and
// Subscribe/BatchSubscribe methods
Hooks options.Hooks
// Proxy is used for proxy requests
Proxy string
// ContentType is used to select codec
ContentType string
// Name is the client name
Name string
// CallOptions contains default CallOptions
CallOptions CallOptions
// PoolSize connection pool size
PoolSize int
// PoolTTL connection pool ttl
PoolTTL time.Duration
// ContextDialer used to connect
ContextDialer func(context.Context, string) (net.Conn, error)
// Hooks may contain Client func wrappers
Hooks options.Hooks
}
// NewCallOptions creates new call options struct
func NewCallOptions(opts ...CallOption) CallOptions {
func NewCallOptions(opts ...options.Option) CallOptions {
options := CallOptions{}
for _, o := range opts {
o(&options)
@@ -80,16 +66,6 @@ func NewCallOptions(opts ...CallOption) CallOptions {
// CallOptions holds client call options
type CallOptions struct {
// RequestMetadata holds additional metadata for call
RequestMetadata metadata.Metadata
// Network name
Network string
// Content-Type
ContentType string
// AuthToken string
AuthToken string
// Selector selects addr
Selector selector.Selector
// Context used for deadline
@@ -97,48 +73,46 @@ type CallOptions struct {
// Router used for route
Router router.Router
// Retry func used for retries
// ResponseMetadata holds additional metadata from call
ResponseMetadata *metadata.Metadata
Retry RetryFunc
// Backoff func used for backoff when retry
Backoff BackoffFunc
// ContextDialer used to connect
ContextDialer func(context.Context, string) (net.Conn, error)
// Network name
Network string
// Content-Type
ContentType string
// AuthToken string
AuthToken string
// Address specifies static addr list
Address []string
// SelectOptions selector options
SelectOptions []selector.SelectOption
// CallWrappers call wrappers
CallWrappers []CallWrapper
// StreamTimeout stream timeout
StreamTimeout time.Duration
// RequestTimeout request timeout
RequestTimeout time.Duration
// RequestMetadata holds additional metadata for call
RequestMetadata metadata.Metadata
// ResponseMetadata holds additional metadata from call
ResponseMetadata *metadata.Metadata
// DialTimeout dial timeout
DialTimeout time.Duration
// Retries specifies retries num
Retries int
// ContextDialer used to connect
ContextDialer func(context.Context, string) (net.Conn, error)
}
// ContextDialer passes the ContextDialer to the client
func ContextDialer(fn func(context.Context, string) (net.Conn, error)) Option {
return func(o *Options) {
o.ContextDialer = fn
}
}
// Context pass context to client
func Context(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
func ContextDialer(fn func(context.Context, string) (net.Conn, error)) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".ContextDialer")
}
}
// NewRequestOptions creates new RequestOptions struct
func NewRequestOptions(opts ...RequestOption) RequestOptions {
func NewRequestOptions(opts ...options.Option) RequestOptions {
options := RequestOptions{}
for _, o := range opts {
o(&options)
@@ -157,24 +131,24 @@ type RequestOptions struct {
}
// NewOptions creates new options struct
func NewOptions(opts ...Option) Options {
func NewOptions(opts ...options.Option) Options {
options := Options{
Context: context.Background(),
ContentType: DefaultContentType,
Codecs: DefaultCodecs,
Codecs: make(map[string]codec.Codec),
CallOptions: CallOptions{
Context: context.Background(),
Backoff: DefaultBackoff,
Retry: DefaultRetry,
Retries: DefaultRetries,
RequestTimeout: DefaultRequestTimeout,
DialTimeout: DefaultDialTimeout,
},
Lookup: LookupRoute,
PoolSize: DefaultPoolSize,
PoolTTL: DefaultPoolTTL,
Selector: random.NewSelector(),
Logger: logger.DefaultLogger,
Broker: broker.DefaultBroker,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
Router: router.DefaultRouter,
@@ -187,284 +161,131 @@ func NewOptions(opts ...Option) Options {
return options
}
// Broker to be used for pub/sub
func Broker(b broker.Broker) Option {
return func(o *Options) {
o.Broker = b
}
}
// Tracer to be used for tracing
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// Logger to be used for log messages
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Meter to be used for metrics
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Codec to be used to encode/decode requests for a given content type
func Codec(contentType string, c codec.Codec) Option {
return func(o *Options) {
o.Codecs[contentType] = c
}
}
// ContentType used by default if not specified
func ContentType(ct string) Option {
return func(o *Options) {
o.ContentType = ct
}
}
// Proxy sets the proxy address
func Proxy(addr string) Option {
return func(o *Options) {
o.Proxy = addr
func Proxy(addr string) options.Option {
return func(src interface{}) error {
return options.Set(src, addr, ".Proxy")
}
}
// PoolSize sets the connection pool size
func PoolSize(d int) Option {
return func(o *Options) {
o.PoolSize = d
func PoolSize(d int) options.Option {
return func(src interface{}) error {
return options.Set(src, d, ".PoolSize")
}
}
// PoolTTL sets the connection pool ttl
func PoolTTL(d time.Duration) Option {
return func(o *Options) {
o.PoolTTL = d
}
}
// Register sets the router's register
func Register(r register.Register) Option {
return func(o *Options) {
if o.Router != nil {
_ = o.Router.Init(router.Register(r))
}
}
}
// Router is used to lookup routes for a service
func Router(r router.Router) Option {
return func(o *Options) {
o.Router = r
func PoolTTL(td time.Duration) options.Option {
return func(src interface{}) error {
return options.Set(src, td, ".PoolTTL")
}
}
// Selector is used to select a route
func Selector(s selector.Selector) Option {
return func(o *Options) {
o.Selector = s
func Selector(s selector.Selector) options.Option {
return func(src interface{}) error {
return options.Set(src, s, ".Selector")
}
}
// Backoff is used to set the backoff function used when retrying Calls
func Backoff(fn BackoffFunc) Option {
return func(o *Options) {
o.CallOptions.Backoff = fn
}
}
// Name sets the client name
func Name(n string) Option {
return func(o *Options) {
o.Name = n
func Backoff(fn BackoffFunc) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".Backoff")
}
}
// Lookup sets the lookup function to use for resolving service names
func Lookup(l LookupFunc) Option {
return func(o *Options) {
o.Lookup = l
func Lookup(fn LookupFunc) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".Lookup")
}
}
// TLSConfig specifies a *tls.Config
func TLSConfig(t *tls.Config) Option {
return func(o *Options) {
// set the internal tls
o.TLSConfig = t
// WithCallWrapper adds a call wrapper that runs around each call.
func WithCallWrapper(fn CallWrapper) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".CallWrappers")
}
}
// Retries sets the retry count when making the request.
func Retries(i int) Option {
return func(o *Options) {
o.CallOptions.Retries = i
func Retries(n int) options.Option {
return func(src interface{}) error {
return options.Set(src, n, ".Retries")
}
}
// Retry sets the retry function to be used when re-trying.
func Retry(fn RetryFunc) Option {
return func(o *Options) {
o.CallOptions.Retry = fn
func Retry(fn RetryFunc) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".Retry")
}
}
// RequestTimeout is the request timeout.
func RequestTimeout(d time.Duration) Option {
return func(o *Options) {
o.CallOptions.RequestTimeout = d
func RequestTimeout(td time.Duration) options.Option {
return func(src interface{}) error {
return options.Set(src, td, ".RequestTimeout")
}
}
// StreamTimeout sets the stream timeout
func StreamTimeout(d time.Duration) Option {
return func(o *Options) {
o.CallOptions.StreamTimeout = d
func StreamTimeout(td time.Duration) options.Option {
return func(src interface{}) error {
return options.Set(src, td, ".StreamTimeout")
}
}
// DialTimeout sets the dial timeout
func DialTimeout(d time.Duration) Option {
return func(o *Options) {
o.CallOptions.DialTimeout = d
}
}
// WithContextDialer passes the ContextDialer to the client call
func WithContextDialer(fn func(context.Context, string) (net.Conn, error)) CallOption {
return func(o *CallOptions) {
o.ContextDialer = fn
}
}
// WithContentType specifies call content type
func WithContentType(ct string) CallOption {
return func(o *CallOptions) {
o.ContentType = ct
}
}
// WithAddress sets the remote addresses to use rather than using service discovery
func WithAddress(a ...string) CallOption {
return func(o *CallOptions) {
o.Address = a
}
}
// WithBackoff is a CallOption which overrides that which
// set in Options.CallOptions
func WithBackoff(fn BackoffFunc) CallOption {
return func(o *CallOptions) {
o.Backoff = fn
}
}
// WithRetry is a CallOption which overrides that which
// set in Options.CallOptions
func WithRetry(fn RetryFunc) CallOption {
return func(o *CallOptions) {
o.Retry = fn
}
}
// WithRetries is a CallOption which overrides that which
// set in Options.CallOptions
func WithRetries(i int) CallOption {
return func(o *CallOptions) {
o.Retries = i
func DialTimeout(td time.Duration) options.Option {
return func(src interface{}) error {
return options.Set(src, td, ".DialTimeout")
}
}
// WithResponseMetadata is a CallOption which adds metadata.Metadata to Options.CallOptions
func WithResponseMetadata(md *metadata.Metadata) CallOption {
return func(o *CallOptions) {
o.ResponseMetadata = md
func ResponseMetadata(md *metadata.Metadata) options.Option {
return func(src interface{}) error {
return options.Set(src, md, ".ResponseMetadata")
}
}
// WithRequestMetadata is a CallOption which adds metadata.Metadata to Options.CallOptions
func WithRequestMetadata(md metadata.Metadata) CallOption {
return func(o *CallOptions) {
o.RequestMetadata = md
func RequestMetadata(md metadata.Metadata) options.Option {
return func(src interface{}) error {
return options.Set(src, metadata.Copy(md), ".RequestMetadata")
}
}
// WithRequestTimeout is a CallOption which overrides that which
// set in Options.CallOptions
func WithRequestTimeout(d time.Duration) CallOption {
return func(o *CallOptions) {
o.RequestTimeout = d
}
}
// WithStreamTimeout sets the stream timeout
func WithStreamTimeout(d time.Duration) CallOption {
return func(o *CallOptions) {
o.StreamTimeout = d
}
}
// WithDialTimeout is a CallOption which overrides that which
// set in Options.CallOptions
func WithDialTimeout(d time.Duration) CallOption {
return func(o *CallOptions) {
o.DialTimeout = d
}
}
// WithAuthToken is a CallOption which overrides the
// AuthToken is a CallOption which overrides the
// authorization header with the services own auth token
func WithAuthToken(t string) CallOption {
return func(o *CallOptions) {
o.AuthToken = t
func AuthToken(t string) options.Option {
return func(src interface{}) error {
return options.Set(src, t, ".AuthToken")
}
}
// WithRouter sets the router to use for this call
func WithRouter(r router.Router) CallOption {
return func(o *CallOptions) {
o.Router = r
}
}
// WithSelector sets the selector to use for this call
func WithSelector(s selector.Selector) CallOption {
return func(o *CallOptions) {
o.Selector = s
// Network is a CallOption which sets the network attribute
func Network(n string) options.Option {
return func(src interface{}) error {
return options.Set(src, n, ".Network")
}
}
/*
// WithSelectOptions sets the options to pass to the selector for this call
func WithSelectOptions(sops ...selector.SelectOption) CallOption {
func WithSelectOptions(sops ...selector.SelectOption) options.Option {
return func(o *CallOptions) {
o.SelectOptions = sops
}
}
*/
// StreamingRequest specifies that request is streaming
func StreamingRequest(b bool) RequestOption {
return func(o *RequestOptions) {
o.Stream = b
}
}
// RequestContentType specifies request content type
func RequestContentType(ct string) RequestOption {
return func(o *RequestOptions) {
o.ContentType = ct
}
}
// Hooks sets hooks that run before actions
func Hooks(h ...options.Hook) Option {
return func(o *Options) {
o.Hooks = append(o.Hooks, h...)
func StreamingRequest(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Stream")
}
}
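A small sketch of building CallOptions with the consolidated option funcs defined in this file (NewCallOptions, RequestTimeout, Network, AuthToken); the names come from the diff, so treat their exact semantics as assumptions.

```go
package main

import (
	"fmt"
	"time"

	"go.unistack.org/micro/v4/client"
)

func main() {
	copts := client.NewCallOptions(
		client.RequestTimeout(5*time.Second),
		client.Network("local"),
		client.AuthToken("secret"),
	)
	fmt.Println(copts.RequestTimeout, copts.Network)
}
```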

View File

@@ -1,47 +0,0 @@
package cluster
import (
"context"
"go.unistack.org/micro/v4/metadata"
)
// Message sent to member in cluster
type Message interface {
// Header returns message headers
Header() metadata.Metadata
// Body returns broker message may be []byte slice or some go struct or interface
Body() interface{}
}
type Node interface {
// Name returns node name
Name() string
// Address returns node address
Address() string
// Metadata returns node metadata
Metadata() metadata.Metadata
}
// Cluster interface used for cluster communication across nodes
type Cluster interface {
// Join is used to join existing members and perform state sync
Join(ctx context.Context, addr ...string) error
// Leave broadcasts a leave message and stops listeners
Leave(ctx context.Context) error
// Ping is used to probe live status of the node
Ping(ctx context.Context, node Node, payload []byte) error
// Members returns the cluster members
Members() ([]Node, error)
// Broadcast sends a message to all members in the cluster; if filter is not nil, nodes may be filtered
// by key/value pairs
Broadcast(ctx context.Context, msg Message, filter ...string) error
// Unicast sends a message to a single member in the cluster
Unicast(ctx context.Context, node Node, msg Message) error
// Live returns cluster liveness
Live() bool
// Ready returns cluster readiness
Ready() bool
// Health returns cluster health
Health() bool
}
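For illustration of what the now-removed Node contract required, a minimal type that would have satisfied it; this is not part of the change, just a sketch of the interface shape above.

```go
package cluster

import "go.unistack.org/micro/v4/metadata"

// staticNode is a hypothetical fixed-address node implementation.
type staticNode struct {
	name string
	addr string
	md   metadata.Metadata
}

func (n *staticNode) Name() string                { return n.name }
func (n *staticNode) Address() string             { return n.addr }
func (n *staticNode) Metadata() metadata.Metadata { return n.md }
```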

View File

@@ -1,10 +1,19 @@
// Package codec is an interface for encoding messages
package codec
package codec // import "go.unistack.org/micro/v4/codec"
import (
"errors"
"io"
"gopkg.in/yaml.v3"
"go.unistack.org/micro/v4/metadata"
)
// Message types
const (
Error MessageType = iota
Request
Response
Event
)
var (
@@ -15,23 +24,65 @@ var (
)
var (
// DefaultMaxMsgSize specifies how much data codec can handle
DefaultMaxMsgSize = 1024 * 1024 * 4 // 4Mb
// DefaultCodec is the global default codec
DefaultCodec = NewCodec()
// DefaultTagName specifies struct tag name to control codec Marshal/Unmarshal
DefaultTagName = "codec"
)
// Codec encodes/decodes various types of messages.
// MessageType specifies message type for codec
type MessageType int
// Codec encodes/decodes various types of messages used within micro.
// ReadHeader and ReadBody are called in pairs to read requests/responses
// from the connection. Close is called when finished with the
// connection. ReadBody may be called with a nil argument to force the
// body to be read and discarded.
type Codec interface {
ReadHeader(r io.Reader, m *Message, mt MessageType) error
ReadBody(r io.Reader, v interface{}) error
Write(w io.Writer, m *Message, v interface{}) error
Marshal(v interface{}, opts ...Option) ([]byte, error)
Unmarshal(b []byte, v interface{}, opts ...Option) error
String() string
}
type CodecV2 interface {
Marshal(buf []byte, v interface{}, opts ...Option) ([]byte, error)
Unmarshal(buf []byte, v interface{}, opts ...Option) error
String() string
// Message represents detailed information about
// the communication, likely followed by the body.
// In the case of an error, body may be nil.
type Message struct {
Header metadata.Metadata
Target string
Method string
Endpoint string
Error string
ID string
Body []byte
Type MessageType
}
// NewMessage creates new codec message
func NewMessage(t MessageType) *Message {
return &Message{Type: t, Header: metadata.New(0)}
}
// MarshalAppend calls codec.Marshal(v) and returns the data appended to buf.
// If codec implements MarshalAppend, that is called instead.
func MarshalAppend(buf []byte, c Codec, v interface{}, opts ...Option) ([]byte, error) {
if nc, ok := c.(interface {
MarshalAppend([]byte, interface{}, ...Option) ([]byte, error)
}); ok {
return nc.MarshalAppend(buf, v, opts...)
}
mbuf, err := c.Marshal(v, opts...)
if err != nil {
return nil, err
}
return append(buf, mbuf...), nil
}
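Example use of the MarshalAppend helper above, appending several encoded values into one reused buffer; it assumes NewCodec returns the noop codec referenced by DefaultCodec, which falls back to encoding/json for plain values as shown later in this diff.

```go
package main

import (
	"fmt"

	"go.unistack.org/micro/v4/codec"
)

func main() {
	c := codec.NewCodec()
	buf := make([]byte, 0, 64)
	for _, v := range []interface{}{map[string]string{"a": "1"}, map[string]string{"b": "2"}} {
		var err error
		// MarshalAppend reuses buf, growing it with each encoded value.
		if buf, err = codec.MarshalAppend(buf, c, v); err != nil {
			panic(err)
		}
	}
	fmt.Println(string(buf)) // {"a":"1"}{"b":"2"}
}
```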
// RawMessage is a raw encoded JSON value.
@@ -42,8 +93,6 @@ type RawMessage []byte
func (m *RawMessage) MarshalJSON() ([]byte, error) {
if m == nil {
return []byte("null"), nil
} else if len(*m) == 0 {
return []byte("null"), nil
}
return *m, nil
}
@@ -56,22 +105,3 @@ func (m *RawMessage) UnmarshalJSON(data []byte) error {
*m = append((*m)[0:0], data...)
return nil
}
// MarshalYAML returns m as the YAML encoding of m.
func (m *RawMessage) MarshalYAML() ([]byte, error) {
if m == nil {
return []byte("null"), nil
} else if len(*m) == 0 {
return []byte("null"), nil
}
return *m, nil
}
// UnmarshalYAML sets *m to a copy of data.
func (m *RawMessage) UnmarshalYAML(n *yaml.Node) error {
if m == nil {
return errors.New("RawMessage UnmarshalYAML on nil pointer")
}
*m = append((*m)[0:0], []byte(n.Value)...)
return nil
}

View File

@@ -15,15 +15,6 @@ func FromContext(ctx context.Context) (Codec, bool) {
return c, ok
}
// MustContext returns codec from context
func MustContext(ctx context.Context) Codec {
c, ok := FromContext(ctx)
if !ok {
panic("missing codec")
}
return c
}
// NewContext put codec in context
func NewContext(ctx context.Context, c Codec) context.Context {
if ctx == nil {

View File

@@ -1,7 +1,5 @@
package codec
import "gopkg.in/yaml.v3"
// Frame gives us the ability to define raw data to send over the pipes
type Frame struct {
Data []byte
@@ -22,17 +20,6 @@ func (m *Frame) UnmarshalJSON(data []byte) error {
return m.Unmarshal(data)
}
// MarshalYAML returns frame data
func (m *Frame) MarshalYAML() ([]byte, error) {
return m.Marshal()
}
// UnmarshalYAML sets frame data
func (m *Frame) UnmarshalYAML(n *yaml.Node) error {
m.Data = []byte(n.Value)
return nil
}
// ProtoMessage noop func
func (m *Frame) ProtoMessage() {}

View File

@@ -1,4 +1,4 @@
// Copyright 2021 Unistack LLC
// Copyright 2021-2023 Unistack LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@@ -2,14 +2,70 @@ package codec
import (
"encoding/json"
codecpb "go.unistack.org/micro-proto/v4/codec"
"io"
)
type noopCodec struct {
opts Options
}
func (c *noopCodec) ReadHeader(conn io.Reader, m *Message, t MessageType) error {
return nil
}
func (c *noopCodec) ReadBody(conn io.Reader, b interface{}) error {
// read bytes
buf, err := io.ReadAll(conn)
if err != nil {
return err
}
if b == nil {
return nil
}
switch v := b.(type) {
case *string:
*v = string(buf)
case *[]byte:
*v = buf
case *Frame:
v.Data = buf
default:
return json.Unmarshal(buf, v)
}
return nil
}
func (c *noopCodec) Write(conn io.Writer, m *Message, b interface{}) error {
if b == nil {
return nil
}
var v []byte
switch vb := b.(type) {
case *Frame:
v = vb.Data
case string:
v = []byte(vb)
case *string:
v = []byte(*vb)
case *[]byte:
v = *vb
case []byte:
v = vb
default:
var err error
v, err = json.Marshal(vb)
if err != nil {
return err
}
}
_, err := conn.Write(v)
return err
}
func (c *noopCodec) String() string {
return "noop"
}
@@ -35,8 +91,8 @@ func (c *noopCodec) Marshal(v interface{}, opts ...Option) ([]byte, error) {
return ve, nil
case *Frame:
return ve.Data, nil
case *codecpb.Frame:
return ve.Data, nil
case *Message:
return ve.Body, nil
}
return json.Marshal(v)
@@ -59,8 +115,8 @@ func (c *noopCodec) Unmarshal(d []byte, v interface{}, opts ...Option) error {
case *Frame:
ve.Data = d
return nil
case *codecpb.Frame:
ve.Data = d
case *Message:
ve.Body = d
return nil
}
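The Marshal/Unmarshal special-casing above passes strings, byte slices, Frames, and Messages through untouched and falls back to encoding/json otherwise. A standalone re-implementation of that dispatch for clarity, not the package's exact code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Frame struct{ Data []byte }

// marshal mirrors the noop codec's type switch: pass-through for raw types,
// JSON for everything else.
func marshal(v interface{}) ([]byte, error) {
	switch ve := v.(type) {
	case string:
		return []byte(ve), nil
	case []byte:
		return ve, nil
	case *Frame:
		return ve.Data, nil
	}
	return json.Marshal(v)
}

func main() {
	b1, _ := marshal("raw")
	b2, _ := marshal(&Frame{Data: []byte{0x01}})
	b3, _ := marshal(map[string]int{"n": 1})
	fmt.Println(string(b1), b2, string(b3)) // raw [1] {"n":1}
}
```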

View File

@@ -23,8 +23,15 @@ type Options struct {
Context context.Context
// TagName specifies tag name in struct to control codec
TagName string
// Flatten specifies that struct must be analyzed for flatten tag
Flatten bool
// MaxMsgSize specifies the max message size that can be read by the codec
MaxMsgSize int
}
// MaxMsgSize sets the max message size
func MaxMsgSize(n int) Option {
return func(o *Options) {
o.MaxMsgSize = n
}
}
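A brief sketch of configuring the codec options added here (MaxMsgSize together with the existing TagName), using only identifiers from this diff:

```go
package main

import "go.unistack.org/micro/v4/codec"

func main() {
	// 8 MiB limit and the default "codec" struct tag.
	opts := codec.NewOptions(codec.MaxMsgSize(8*1024*1024), codec.TagName("codec"))
	_ = opts
}
```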
// TagName sets the codec tag name in struct
@@ -34,13 +41,6 @@ func TagName(n string) Option {
}
}
// Flatten enables checking for flatten tag name
func Flatten(b bool) Option {
return func(o *Options) {
o.Flatten = b
}
}
// Logger sets the logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
@@ -65,12 +65,12 @@ func Meter(m meter.Meter) Option {
// NewOptions returns new options
func NewOptions(opts ...Option) Options {
options := Options{
Context: context.Background(),
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
TagName: DefaultTagName,
Flatten: false,
Context: context.Background(),
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
MaxMsgSize: DefaultMaxMsgSize,
TagName: DefaultTagName,
}
for _, o := range opts {

View File

@@ -1,11 +1,13 @@
// Package config is an interface for dynamic configuration.
package config
package config // import "go.unistack.org/micro/v4/config"
import (
"context"
"errors"
"reflect"
"time"
"go.unistack.org/micro/v4/options"
)
type Validator interface {
@@ -37,26 +39,19 @@ type Config interface {
// Name returns name of config
Name() string
// Init the config
Init(opts ...Option) error
Init(opts ...options.Option) error
// Options in the config
Options() Options
// Load config from sources
Load(context.Context, ...LoadOption) error
Load(context.Context, ...options.Option) error
// Save config to sources
Save(context.Context, ...SaveOption) error
Save(context.Context, ...options.Option) error
// Watch a config for changes
Watch(context.Context, ...WatchOption) (Watcher, error)
Watch(context.Context, ...options.Option) (Watcher, error)
// String returns config type name
String() string
}
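A usage sketch of the Config contract above with the default source, based only on identifiers that appear in this diff (NewConfig, Struct, AllowFail, LoadOverride); import paths and default semantics are assumptions.

```go
package main

import (
	"context"

	"go.unistack.org/micro/v4/config"
)

type appCfg struct {
	Name string `default:"app"`
}

func main() {
	cfg := &appCfg{}
	c := config.NewConfig(config.Struct(cfg), config.AllowFail(false))
	if err := c.Init(); err != nil {
		panic(err)
	}
	// Load fills fields from their struct tags in the default source,
	// overriding any values already set.
	if err := c.Load(context.TODO(), config.LoadOverride(true)); err != nil {
		panic(err)
	}
}
```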
type (
FuncLoad func(ctx context.Context, opts ...LoadOption) error
HookLoad func(next FuncLoad) FuncLoad
FuncSave func(ctx context.Context, opts ...SaveOption) error
HookSave func(next FuncSave) FuncSave
)
// Watcher is the config watcher
type Watcher interface {
// Next blocks until update happens or error returned
@@ -66,7 +61,7 @@ type Watcher interface {
}
// Load loads config from config sources
func Load(ctx context.Context, cs []Config, opts ...LoadOption) error {
func Load(ctx context.Context, cs []Config, opts ...options.Option) error {
var err error
for _, c := range cs {
if err = c.Init(); err != nil {
@@ -138,7 +133,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" BeforeLoad error", err)
c.Options().Logger.Errorf(ctx, "%s BeforeLoad err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}
@@ -153,7 +148,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" AfterLoad error", err)
c.Options().Logger.Errorf(ctx, "%s AfterLoad err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}
@@ -168,7 +163,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" BeforeSave error", err)
c.Options().Logger.Errorf(ctx, "%s BeforeSave err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}
@@ -183,7 +178,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" AfterSave error", err)
c.Options().Logger.Errorf(ctx, "%s AfterSave err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}
@@ -198,7 +193,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" BeforeInit error", err)
c.Options().Logger.Errorf(ctx, "%s BeforeInit err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}
@@ -213,7 +208,7 @@ var (
return nil
}
if err := fn(ctx, c); err != nil {
c.Options().Logger.Error(ctx, c.String()+" AfterInit error", err)
c.Options().Logger.Errorf(ctx, "%s AfterInit err: %v", c.String(), err)
if !c.Options().AllowFail {
return err
}

View File

@@ -15,15 +15,6 @@ func FromContext(ctx context.Context) (Config, bool) {
return c, ok
}
// MustContext returns config from context
func MustContext(ctx context.Context) Config {
c, ok := FromContext(ctx)
if !ok {
panic("missing config")
}
return c
}
// NewContext puts config in context
func NewContext(ctx context.Context, c Config) context.Context {
if ctx == nil {
@@ -31,43 +22,3 @@ func NewContext(ctx context.Context, c Config) context.Context {
}
return context.WithValue(ctx, configKey{}, c)
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetSaveOption returns a function to setup a context with given value
func SetSaveOption(k, v interface{}) SaveOption {
return func(o *SaveOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetLoadOption returns a function to setup a context with given value
func SetLoadOption(k, v interface{}) LoadOption {
return func(o *LoadOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetWatchOption returns a function to setup a context with given value
func SetWatchOption(k, v interface{}) WatchOption {
return func(o *WatchOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}

View File

@@ -40,47 +40,3 @@ func TestNewContext(t *testing.T) {
t.Fatal("NewContext not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")
opts := &Options{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetOption not works")
}
}
func TestSetSaveOption(t *testing.T) {
type key struct{}
o := SetSaveOption(key{}, "test")
opts := &SaveOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetSaveOption not works")
}
}
func TestSetLoadOption(t *testing.T) {
type key struct{}
o := SetLoadOption(key{}, "test")
opts := &LoadOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetLoadOption not works")
}
}
func TestSetWatchOption(t *testing.T) {
type key struct{}
o := SetWatchOption(key{}, "test")
opts := &WatchOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetWatchOption not works")
}
}

View File

@@ -7,8 +7,8 @@ import (
"strings"
"time"
"dario.cat/mergo"
"github.com/google/uuid"
"dario.cat/mergo"
"go.unistack.org/micro/v4/options"
mid "go.unistack.org/micro/v4/util/id"
rutil "go.unistack.org/micro/v4/util/reflect"
@@ -16,16 +16,14 @@ import (
)
type defaultConfig struct {
funcLoad FuncLoad
funcSave FuncSave
opts Options
opts Options
}
func (c *defaultConfig) Options() Options {
return c.opts
}
func (c *defaultConfig) Init(opts ...Option) error {
func (c *defaultConfig) Init(opts ...options.Option) error {
for _, o := range opts {
o(&c.opts)
}
@@ -34,18 +32,6 @@ func (c *defaultConfig) Init(opts ...Option) error {
return err
}
c.funcLoad = c.fnLoad
c.funcSave = c.fnSave
c.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case HookLoad:
c.funcLoad = h(c.funcLoad)
case HookSave:
c.funcSave = h(c.funcSave)
}
})
if err := DefaultAfterInit(c.opts.Context, c); err != nil && !c.opts.AllowFail {
return err
}
@@ -53,18 +39,8 @@ func (c *defaultConfig) Init(opts ...Option) error {
return nil
}
func (c *defaultConfig) Load(ctx context.Context, opts ...LoadOption) error {
return c.funcLoad(ctx, opts...)
}
func (c *defaultConfig) fnLoad(ctx context.Context, opts ...LoadOption) error {
var err error
if c.opts.SkipLoad != nil && c.opts.SkipLoad(ctx, c) {
return nil
}
if err = DefaultBeforeLoad(ctx, c); err != nil && !c.opts.AllowFail {
func (c *defaultConfig) Load(ctx context.Context, opts ...options.Option) error {
if err := DefaultBeforeLoad(ctx, c); err != nil && !c.opts.AllowFail {
return err
}
@@ -254,7 +230,6 @@ func fillValue(value reflect.Value, val string) error {
}
value.Set(reflect.ValueOf(v))
}
return nil
}
@@ -317,15 +292,7 @@ func fillValues(valueOf reflect.Value, tname string) error {
return nil
}
func (c *defaultConfig) Save(ctx context.Context, opts ...SaveOption) error {
return c.funcSave(ctx, opts...)
}
func (c *defaultConfig) fnSave(ctx context.Context, opts ...SaveOption) error {
if c.opts.SkipSave != nil && c.opts.SkipSave(ctx, c) {
return nil
}
func (c *defaultConfig) Save(ctx context.Context, opts ...options.Option) error {
if err := DefaultBeforeSave(ctx, c); err != nil {
return err
}
@@ -345,19 +312,15 @@ func (c *defaultConfig) Name() string {
return c.opts.Name
}
func (c *defaultConfig) Watch(_ context.Context, _ ...WatchOption) (Watcher, error) {
func (c *defaultConfig) Watch(ctx context.Context, opts ...options.Option) (Watcher, error) {
return nil, ErrWatcherNotImplemented
}
// NewConfig returns new default config source
func NewConfig(opts ...Option) Config {
func NewConfig(opts ...options.Option) Config {
options := NewOptions(opts...)
if len(options.StructTag) == 0 {
options.StructTag = "default"
}
c := &defaultConfig{opts: options}
c.funcLoad = c.fnLoad
c.funcSave = c.fnSave
return c
return &defaultConfig{opts: options}
}

View File

@@ -3,26 +3,24 @@ package config_test
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"go.unistack.org/micro/v4/config"
mid "go.unistack.org/micro/v4/util/id"
mtime "go.unistack.org/micro/v4/util/time"
)
type cfg struct {
MapValue map[string]bool `default:"key1=true,key2=false"`
StructValue *cfgStructValue
StringValue string `default:"string_value"`
IgnoreValue string `json:"-"`
UUIDValue string `default:"micro:generate uuid"`
IDValue string `default:"micro:generate id"`
DurationValue time.Duration `default:"10s"`
MDurationValue mtime.Duration `default:"10s"`
IntValue int `default:"99"`
StringValue string `default:"string_value"`
IgnoreValue string `json:"-"`
StructValue *cfgStructValue
IntValue int `default:"99"`
DurationValue time.Duration `default:"10s"`
MDurationValue mtime.Duration `default:"10s"`
MapValue map[string]bool `default:"key1=true,key2=false"`
UUIDValue string `default:"micro:generate uuid"`
IDValue string `default:"micro:generate id"`
}
type cfgStructValue struct {
@@ -43,35 +41,6 @@ func (c *cfgStructValue) Validate() error {
return nil
}
type testHook struct {
f bool
}
func (t *testHook) Load(fn config.FuncLoad) config.FuncLoad {
return func(ctx context.Context, opts ...config.LoadOption) error {
t.f = true
return fn(ctx, opts...)
}
}
func TestHook(t *testing.T) {
h := &testHook{}
c := config.NewConfig(config.Struct(h), config.Hooks(config.HookLoad(h.Load)))
if err := c.Init(); err != nil {
t.Fatal(err)
}
if err := c.Load(context.TODO()); err != nil {
t.Fatal(err)
}
if !h.f {
t.Fatal("hook not works")
}
}
func TestDefault(t *testing.T) {
ctx := context.Background()
conf := &cfg{IntValue: 10}
@@ -114,6 +83,8 @@ func TestDefault(t *testing.T) {
if conf.IDValue == "" {
t.Fatalf("id value empty")
} else if len(conf.IDValue) != mid.DefaultSize {
t.Fatalf("id value invalid: %s", conf.IDValue)
}
_ = conf
// t.Logf("%#+v\n", conf)
@@ -134,13 +105,3 @@ func TestValidate(t *testing.T) {
t.Fatal(err)
}
}
func Test_SizeOf(t *testing.T) {
st := cfg{}
tVal := reflect.TypeOf(st)
for i := 0; i < tVal.NumField(); i++ {
field := tVal.Field(i)
fmt.Printf("Field: %s, Offset: %d, Size: %d\n", field.Name, field.Offset, field.Type.Size())
}
}
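
The Test_SizeOf helper above only prints field offsets for the reordered cfg struct; the reordering itself does not change behavior, only the struct layout.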


@@ -41,23 +41,12 @@ type Options struct {
BeforeInit []func(context.Context, Config) error
// AfterInit contains slice of funcs that runs after Init
AfterInit []func(context.Context, Config) error
// SkipLoad runs only if condition returns true
SkipLoad func(context.Context, Config) bool
// SkipSave runs only if condition returns true
SkipSave func(context.Context, Config) bool
// Hooks can be run before/after config Save/Load
Hooks options.Hooks
// AllowFail flag to allow fail in config source
AllowFail bool
}
// Option function signature
type Option func(o *Options)
// NewOptions creates new options struct with filled values
func NewOptions(opts ...Option) Options {
func NewOptions(opts ...options.Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
@@ -71,9 +60,6 @@ func NewOptions(opts ...Option) Options {
return options
}
// LoadOption function signature
type LoadOption func(o *LoadOptions)
// LoadOptions struct
type LoadOptions struct {
Struct interface{}
@@ -83,7 +69,7 @@ type LoadOptions struct {
}
// NewLoadOptions create LoadOptions struct with provided opts
func NewLoadOptions(opts ...LoadOption) LoadOptions {
func NewLoadOptions(opts ...options.Option) LoadOptions {
options := LoadOptions{}
for _, o := range opts {
o(&options)
@@ -92,44 +78,27 @@ func NewLoadOptions(opts ...LoadOption) LoadOptions {
}
// LoadOverride override values when load
func LoadOverride(b bool) LoadOption {
return func(o *LoadOptions) {
o.Override = b
func LoadOverride(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Override")
}
}
// LoadAppend append values when load
func LoadAppend(b bool) LoadOption {
return func(o *LoadOptions) {
o.Append = b
func LoadAppend(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Append")
}
}
// LoadStruct override struct for loading
func LoadStruct(src interface{}) LoadOption {
return func(o *LoadOptions) {
o.Struct = src
}
}
// SaveOption function signature
type SaveOption func(o *SaveOptions)
// SaveOptions struct
type SaveOptions struct {
Struct interface{}
Context context.Context
}
// SaveStruct override struct for save to config
func SaveStruct(src interface{}) SaveOption {
return func(o *SaveOptions) {
o.Struct = src
}
}
// NewSaveOptions fill SaveOptions struct
func NewSaveOptions(opts ...SaveOption) SaveOptions {
func NewSaveOptions(opts ...options.Option) SaveOptions {
options := SaveOptions{}
for _, o := range opts {
o(&options)
@@ -138,100 +107,65 @@ func NewSaveOptions(opts ...SaveOption) SaveOptions {
}
// AllowFail allows config source to fail
func AllowFail(b bool) Option {
return func(o *Options) {
o.AllowFail = b
func AllowFail(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".AllowFail")
}
}
// BeforeInit run funcs before config Init
func BeforeInit(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.BeforeInit = fn
func BeforeInit(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".BeforeInit")
}
}
// AfterInit run funcs after config Init
func AfterInit(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.AfterInit = fn
func AfterInit(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".AfterInit")
}
}
// BeforeLoad run funcs before config load
func BeforeLoad(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.BeforeLoad = fn
func BeforeLoad(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".BeforeLoad")
}
}
// AfterLoad run funcs after config load
func AfterLoad(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.AfterLoad = fn
func AfterLoad(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".AfterLoad")
}
}
// BeforeSave run funcs before save
func BeforeSave(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.BeforeSave = fn
func BeforeSave(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".BeforeSave")
}
}
// AfterSave run funcs after save
func AfterSave(fn ...func(context.Context, Config) error) Option {
return func(o *Options) {
o.AfterSave = fn
}
}
// Context pass context
func Context(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
}
}
// Codec sets the source codec
func Codec(c codec.Codec) Option {
return func(o *Options) {
o.Codec = c
}
}
// Logger sets the logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Tracer to be used for tracing
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
func AfterSave(fn ...func(context.Context, Config) error) options.Option {
return func(src interface{}) error {
return options.Set(src, fn, ".AfterSave")
}
}
// Struct used as config
func Struct(v interface{}) Option {
return func(o *Options) {
o.Struct = v
func Struct(v interface{}) options.Option {
return func(src interface{}) error {
return options.Set(src, v, ".Struct")
}
}
// StructTag sets the struct tag that is used for filling
func StructTag(name string) Option {
return func(o *Options) {
o.StructTag = name
}
}
// Name sets the name
func Name(n string) Option {
return func(o *Options) {
o.Name = n
func StructTag(name string) options.Option {
return func(src interface{}) error {
return options.Set(src, name, ".StructTag")
}
}
@@ -249,11 +183,8 @@ type WatchOptions struct {
Coalesce bool
}
// WatchOption func signature
type WatchOption func(*WatchOptions)
// NewWatchOptions create WatchOptions struct with provided opts
func NewWatchOptions(opts ...WatchOption) WatchOptions {
func NewWatchOptions(opts ...options.Option) WatchOptions {
options := WatchOptions{
Context: context.Background(),
MinInterval: DefaultWatcherMinInterval,
@@ -265,38 +196,20 @@ func NewWatchOptions(opts ...WatchOption) WatchOptions {
return options
}
// WatchContext pass context
func WatchContext(ctx context.Context) WatchOption {
return func(o *WatchOptions) {
o.Context = ctx
}
}
// WatchCoalesce controls watch event combining
func WatchCoalesce(b bool) WatchOption {
return func(o *WatchOptions) {
o.Coalesce = b
// Coalesce controls watch event combining
func WatchCoalesce(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Coalesce")
}
}
// WatchInterval specifies min and max time.Duration for pulling changes
func WatchInterval(minTime, maxTime time.Duration) WatchOption {
return func(o *WatchOptions) {
o.MinInterval = minTime
o.MaxInterval = maxTime
}
}
// WatchStruct overrides struct for fill
func WatchStruct(src interface{}) WatchOption {
return func(o *WatchOptions) {
o.Struct = src
}
}
// Hooks sets hook runs before action
func Hooks(h ...options.Hook) Option {
return func(o *Options) {
o.Hooks = append(o.Hooks, h...)
func WatchInterval(min, max time.Duration) options.Option {
return func(src interface{}) error {
var err error
if err = options.Set(src, min, ".MinInterval"); err == nil {
err = options.Set(src, max, ".MaxInterval")
}
return err
}
}
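
Every constructor in this file now shares the same generic shape: it returns an options.Option closure that writes a single field of the target struct through options.Set and its dotted path. Consuming the watch options then looks roughly like the sketch below (how NewWatchOptions surfaces a Set error is not shown in this hunk):

wopts := config.NewWatchOptions(
	config.WatchCoalesce(true),                          // options.Set(&wopts, true, ".Coalesce")
	config.WatchInterval(5*time.Second, 30*time.Second), // ".MinInterval" / ".MaxInterval"
)
_ = wopts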


@@ -1,157 +0,0 @@
package database
import (
"crypto/tls"
"errors"
"fmt"
"net/url"
"strings"
)
var (
ErrInvalidDSNAddr = errors.New("invalid dsn addr")
ErrInvalidDSNUnescaped = errors.New("dsn must be escaped")
ErrInvalidDSNNoSlash = errors.New("dsn must contains slash")
)
type Config struct {
TLSConfig *tls.Config
Username string
Password string
Scheme string
Host string
Port string
Database string
Params []string
}
func (cfg *Config) FormatDSN() string {
var s strings.Builder
if len(cfg.Scheme) > 0 {
s.WriteString(cfg.Scheme + "://")
}
// [username[:password]@]
if len(cfg.Username) > 0 {
s.WriteString(cfg.Username)
if len(cfg.Password) > 0 {
s.WriteByte(':')
s.WriteString(url.PathEscape(cfg.Password))
}
s.WriteByte('@')
}
// [host:port]
if len(cfg.Host) > 0 {
s.WriteString(cfg.Host)
if len(cfg.Port) > 0 {
s.WriteByte(':')
s.WriteString(cfg.Port)
}
}
// /dbname
s.WriteByte('/')
s.WriteString(url.PathEscape(cfg.Database))
for i := 0; i < len(cfg.Params); i += 2 {
if i == 0 {
s.WriteString("?")
} else {
s.WriteString("&")
}
s.WriteString(cfg.Params[i])
s.WriteString("=")
s.WriteString(cfg.Params[i+1])
}
return s.String()
}
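
FormatDSN treats Params as flattened key/value pairs and path-escapes only the password and the database name; for illustration (not part of the diff):

cfg := &Config{
	Scheme:   "postgres",
	Username: "user",
	Password: "s3cret",
	Host:     "localhost",
	Port:     "5432",
	Database: "orders",
	Params:   []string{"sslmode", "disable"}, // flattened key/value pairs
}
// cfg.FormatDSN() == "postgres://user:s3cret@localhost:5432/orders?sslmode=disable"
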
func ParseDSN(dsn string) (*Config, error) {
cfg := &Config{}
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find last '/' that goes before dbname
foundSlash := false
for i := len(dsn) - 1; i >= 0; i-- {
if dsn[i] == '/' {
foundSlash = true
var j, k int
// left part is empty if i <= 0
if i > 0 {
// Find the first ':' in dsn
for j = i; j >= 0; j-- {
if dsn[j] == ':' {
cfg.Scheme = dsn[0:j]
}
}
// [username[:password]@][host]
// Find the last '@' in dsn[:i]
for j = i; j >= 0; j-- {
if dsn[j] == '@' {
// username[:password]
// Find the second ':' in dsn[:j]
for k = 0; k < j; k++ {
if dsn[k] == ':' {
if cfg.Scheme == dsn[:k] {
continue
}
var err error
cfg.Password, err = url.PathUnescape(dsn[k+1 : j])
if err != nil {
return nil, err
}
break
}
}
cfg.Username = dsn[len(cfg.Scheme)+3 : k]
break
}
}
for k = j + 1; k < i; k++ {
if dsn[k] == ':' {
cfg.Host = dsn[j+1 : k]
cfg.Port = dsn[k+1 : i]
break
}
}
}
// dbname[?param1=value1&...&paramN=valueN]
// Find the first '?' in dsn[i+1:]
for j = i + 1; j < len(dsn); j++ {
if dsn[j] == '?' {
parts := strings.Split(dsn[j+1:], "&")
cfg.Params = make([]string, 0, len(parts)*2)
for _, p := range parts {
k, v, found := strings.Cut(p, "=")
if !found {
continue
}
cfg.Params = append(cfg.Params, k, v)
}
break
}
}
var err error
dbname := dsn[i+1 : j]
if cfg.Database, err = url.PathUnescape(dbname); err != nil {
return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
}
break
}
}
if !foundSlash && len(dsn) > 0 {
return nil, ErrInvalidDSNNoSlash
}
return cfg, nil
}


@@ -1,31 +0,0 @@
package database
import (
"net/url"
"testing"
)
func TestParseDSN(t *testing.T) {
cfg, err := ParseDSN("postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2")
if err != nil {
t.Fatal(err)
}
if cfg.Password != "p@ssword#" {
t.Fatalf("parsing error")
}
}
func TestFormatDSN(t *testing.T) {
src := "postgres://username:p@ssword#@host:12345/dbname?key1=val2&key2=val2"
cfg, err := ParseDSN(src)
if err != nil {
t.Fatal(err)
}
dst, err := url.PathUnescape(cfg.FormatDSN())
if err != nil {
t.Fatal(err)
}
if src != dst {
t.Fatalf("\n%s\n%s", src, dst)
}
}


@@ -1,20 +1,14 @@
// Package errors provides a way to return detailed information
// for an RPC request error. The error is normally JSON encoded.
package errors
package errors // import "go.unistack.org/micro/v4/errors"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var (
@@ -44,20 +38,6 @@ var (
ErrGatewayTimeout = &Error{Code: 504}
)
const ProblemContentType = "application/problem+json"
type Problem struct {
Type string `json:"type,omitempty"`
Title string `json:"title,omitempty"`
Detail string `json:"detail,omitempty"`
Instance string `json:"instance,omitempty"`
Errors []struct {
Title string `json:"title,omitempty"`
Detail string `json:"detail,omitempty"`
} `json:"errors,omitempty"`
Status int `json:"status,omitempty"`
}
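
The Problem type removed here maps onto an RFC 7807 "application/problem+json" payload; for example (a sketch, not part of the diff):

b, _ := json.Marshal(&Problem{
	Type:   "https://example.com/probs/out-of-credit",
	Title:  "You do not have enough credit.",
	Status: 403,
})
// string(b) == `{"type":"https://example.com/probs/out-of-credit","title":"You do not have enough credit.","status":403}`
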
// Error type
type Error struct {
// ID holds error id or service, usually something like my_service or id
@@ -276,10 +256,6 @@ func CodeIn(err interface{}, codes ...int32) bool {
// FromError try to convert go error to *Error
func FromError(err error) *Error {
if err == nil {
return nil
}
if verr, ok := err.(*Error); ok && verr != nil {
return verr
}
@@ -364,135 +340,3 @@ func addslashes(str string) string {
}
return buf.String()
}
type retryableError struct {
err error
}
// Retryable returns error that can be retried later
func Retryable(err error) error {
return &retryableError{err: err}
}
type IsRetryableFunc func(error) bool
var (
RetrayableOracleErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `ORA-`):
return true
case strings.Contains(errmsg, `can not assign`):
return true
case strings.Contains(errmsg, `can't assign`):
return true
}
return false
},
}
RetrayablePostgresErrors = []IsRetryableFunc{
func(err error) bool {
errmsg := err.Error()
switch {
case strings.Contains(errmsg, `number of field descriptions must equal number of`):
return true
case strings.Contains(errmsg, `not a pointer`):
return true
case strings.Contains(errmsg, `values, but dst struct has only`):
return true
case strings.Contains(errmsg, `struct doesn't have corresponding row field`):
return true
case strings.Contains(errmsg, `cannot find field`):
return true
case strings.Contains(errmsg, `cannot scan`) || strings.Contains(errmsg, `cannot convert`):
return true
case strings.Contains(errmsg, `failed to connect to`):
return true
}
return false
},
}
RetryableMicroErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case *Error:
switch verr.Code {
case 401, 403, 408, 500, 501, 502, 503, 504:
return true
default:
return false
}
case *retryableError:
return true
}
return false
},
}
RetryableGoErrors = []IsRetryableFunc{
func(err error) bool {
switch verr := err.(type) {
case interface{ SafeToRetry() bool }:
return verr.SafeToRetry()
case interface{ Timeout() bool }:
return verr.Timeout()
}
switch {
case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF):
return true
case errors.Is(err, context.DeadlineExceeded):
return true
case errors.Is(err, io.ErrClosedPipe), errors.Is(err, io.ErrShortBuffer), errors.Is(err, io.ErrShortWrite):
return true
}
return false
},
}
RetryableGrpcErrors = []IsRetryableFunc{
func(err error) bool {
st, ok := status.FromError(err)
if !ok {
return false
}
switch st.Code() {
case codes.Unavailable, codes.ResourceExhausted:
return true
case codes.DeadlineExceeded:
return true
case codes.Internal:
switch {
case strings.Contains(st.Message(), `transport: received the unexpected content-type "text/html; charset=UTF-8"`):
return true
case strings.Contains(st.Message(), io.ErrUnexpectedEOF.Error()):
return true
case strings.Contains(st.Message(), `stream terminated by RST_STREAM with error code: INTERNAL_ERROR`):
return true
}
}
return false
},
}
)
// Unwrap provides error wrapping
func (e *retryableError) Unwrap() error {
return e.err
}
// Error returns the error string
func (e *retryableError) Error() string {
if e.err == nil {
return ""
}
return e.err.Error()
}
// IsRetryable checks error for ability to retry later
func IsRetryable(err error, fns ...IsRetryableFunc) bool {
for _, fn := range fns {
if ok := fn(err); ok {
return true
}
}
return false
}
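
The retryable helpers removed above were driven roughly as follows (a sketch against the pre-change API):

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/errors"
)

func main() {
	// Wrapping an error in Retryable makes the micro predicate report it as retryable.
	tmp := errors.Retryable(fmt.Errorf("temporary outage"))
	fmt.Println(errors.IsRetryable(tmp, errors.RetryableMicroErrors...)) // true

	// A 400 BadRequest is not in the retryable status-code list.
	fmt.Println(errors.IsRetryable(errors.BadRequest("id", "bad input"), errors.RetryableMicroErrors...)) // false
}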


@@ -1,4 +1,4 @@
// Copyright 2021 Unistack LLC
// Copyright 2021-2023 Unistack LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.


@@ -2,19 +2,12 @@ package errors
import (
"encoding/json"
"errors"
er "errors"
"fmt"
"net/http"
"testing"
)
func TestIsRetrayable(t *testing.T) {
err := fmt.Errorf("ORA-")
if !IsRetryable(err, RetrayableOracleErrors...) {
t.Fatalf("IsRetrayable not works")
}
}
func TestMarshalJSON(t *testing.T) {
e := InternalServerError("id", "err: %v", fmt.Errorf("err: %v", `xxx: "UNIX_TIMESTAMP": invalid identifier`))
_, err := json.Marshal(e)
@@ -26,7 +19,7 @@ func TestMarshalJSON(t *testing.T) {
func TestEmpty(t *testing.T) {
msg := "test"
var err *Error
err = FromError(errors.New(msg))
err = FromError(fmt.Errorf(msg))
if err.Detail != msg {
t.Fatalf("invalid error %v", err)
}
@@ -42,7 +35,7 @@ func TestFromError(t *testing.T) {
if merr.ID != "go.micro.test" || merr.Code != 404 {
t.Fatalf("invalid conversation %v != %v", err, merr)
}
err = errors.New(err.Error())
err = er.New(err.Error())
merr = FromError(err)
if merr.ID != "go.micro.test" || merr.Code != 404 {
t.Fatalf("invalid conversation %v != %v", err, merr)
@@ -57,7 +50,7 @@ func TestEqual(t *testing.T) {
t.Fatal("errors must be equal")
}
err3 := errors.New("my test err")
err3 := er.New("my test err")
if Equal(err1, err3) {
t.Fatal("errors must be not equal")
}


@@ -22,13 +22,3 @@ func NewContext(ctx context.Context, f Flow) context.Context {
}
return context.WithValue(ctx, flowKey{}, f)
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}


@@ -1,5 +1,3 @@
//go:build ignore
package flow
import (
@@ -42,14 +40,3 @@ func TestNewContext(t *testing.T) {
t.Fatal("NewContext not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")
opts := &Options{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetOption not works")
}
}


@@ -1,17 +1,17 @@
//go:build ignore
package flow
import (
"context"
"fmt"
"path/filepath"
"sync"
"github.com/heimdalr/dag"
"github.com/silas/dag"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
moptions "go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v4/util/id"
)
@@ -22,7 +22,7 @@ type microFlow struct {
type microWorkflow struct {
opts Options
g *dag.DAG
g *dag.AcyclicGraph
steps map[string]Step
id string
status Status
@@ -34,20 +34,20 @@ func (w *microWorkflow) ID() string {
return w.id
}
func (w *microWorkflow) Steps() ([][]Step, error) {
return w.getSteps("", false)
}
func (w *microWorkflow) Status() Status {
return w.status
}
func (w *microWorkflow) AppendSteps(steps ...Step) error {
var err error
w.Lock()
defer w.Unlock()
for _, s := range steps {
w.steps[s.String()] = s
if _, err = w.g.AddVertex(s); err != nil {
return err
}
w.g.Add(s)
}
for _, dst := range steps {
@@ -56,13 +56,18 @@ func (w *microWorkflow) AppendSteps(steps ...Step) error {
if !ok {
return ErrStepNotExists
}
if err = w.g.AddEdge(src.String(), dst.String()); err != nil {
return err
}
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return err
}
w.g.TransitiveReduction()
w.Unlock()
return nil
}
@@ -71,11 +76,10 @@ func (w *microWorkflow) RemoveSteps(steps ...Step) error {
// TODO: handle case when some step requires or required by removed step
w.Lock()
defer w.Unlock()
for _, s := range steps {
delete(w.steps, s.String())
w.g.DeleteVertex(s.String())
w.g.Remove(s)
}
for _, dst := range steps {
@@ -84,34 +88,91 @@ func (w *microWorkflow) RemoveSteps(steps ...Step) error {
if !ok {
return ErrStepNotExists
}
w.g.AddEdge(src.String(), dst.String())
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return err
}
w.g.TransitiveReduction()
w.Unlock()
return nil
}
func (w *microWorkflow) getSteps(start string, reverse bool) ([][]Step, error) {
var steps [][]Step
var root dag.Vertex
var err error
fn := func(n dag.Vertex, idx int) error {
if idx == 0 {
steps = make([][]Step, 1)
steps[0] = make([]Step, 0, 1)
} else if idx >= len(steps) {
tsteps := make([][]Step, idx+1)
copy(tsteps, steps)
steps = tsteps
steps[idx] = make([]Step, 0, 1)
}
steps[idx] = append(steps[idx], n.(Step))
return nil
}
if start != "" {
var ok bool
w.RLock()
root, ok = w.steps[start]
w.RUnlock()
if !ok {
return nil, ErrStepNotExists
}
} else {
root, err = w.g.Root()
if err != nil {
return nil, err
}
}
if reverse {
err = w.g.SortedReverseDepthFirstWalk([]dag.Vertex{root}, fn)
} else {
err = w.g.SortedDepthFirstWalk([]dag.Vertex{root}, fn)
}
if err != nil {
return nil, err
}
return steps, nil
}
func (w *microWorkflow) Abort(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusAborted.String())})
}
func (w *microWorkflow) Suspend(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusSuspend.String())})
}
func (w *microWorkflow) Resume(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusRunning.String())})
}
func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (string, error) {
func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...options.Option) (string, error) {
w.Lock()
if !w.init {
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return "", err
}
w.g.TransitiveReduction()
w.init = true
}
w.Unlock()
@@ -121,292 +182,176 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return "", err
}
// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))
stepStore := store.NewNamespaceStore(w.opts.Store, "steps"+w.opts.Store.Options().Separator+eid)
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+eid)
options := NewExecuteOptions(opts...)
nopts := make([]ExecuteOption, 0, len(opts)+5)
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
return "", err
}
var wg sync.WaitGroup
cherr := make(chan error, 1)
chstatus := make(chan Status, 1)
nctx, cancel := context.WithCancel(ctx)
defer cancel()
nopts := make([]moptions.Option, 0, len(opts)+5)
nopts = append(nopts,
ExecuteClient(w.opts.Client),
ExecuteTracer(w.opts.Tracer),
ExecuteLogger(w.opts.Logger),
ExecuteMeter(w.opts.Meter),
moptions.Client(w.opts.Client),
moptions.Tracer(w.opts.Tracer),
moptions.Logger(w.opts.Logger),
moptions.Meter(w.opts.Meter),
)
nopts = append(nopts, opts...)
done := make(chan struct{})
if werr := workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Error(ctx, "store error: %v", werr)
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
return eid, werr
}
var startID string
if options.Start == "" {
mp := w.g.GetRoots()
if len(mp) != 1 {
return eid, ErrStepNotExists
}
for k := range mp {
startID = k
}
} else {
for k, v := range w.g.GetVertices() {
if v == options.Start {
startID = k
for idx := range steps {
for nidx := range steps[idx] {
cstep := steps[idx][nidx]
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
return eid, werr
}
}
}
if startID == "" {
return eid, ErrStepNotExists
}
if options.Async {
go w.handleWorkflow(startID, nopts...)
return eid, nil
}
return eid, w.handleWorkflow(startID, nopts...)
}
func (w *microWorkflow) handleWorkflow(startID string, opts ...ExecuteOption) error {
w.RLock()
defer w.RUnlock()
// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
// workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))
// Get IDs of all descendant vertices.
flowIDs, errDes := w.g.GetDescendants(startID)
if errDes != nil {
return errDes
}
// inputChannels provides for input channels for each of the descendant vertices (+ the start-vertex).
inputChannels := make(map[string]chan FlowResult, len(flowIDs)+1)
// Iterate vertex IDs and create an input channel for each of them and a single
// output channel for leaves. Note, this "pre-flight" is needed to ensure we
// really have an input channel regardless of how we traverse the tree and spawn
// workers.
leafCount := 0
for id := range flowIDs {
// Get all parents of this vertex.
parents, errPar := w.g.GetParents(id)
if errPar != nil {
return errPar
}
// Create a buffered input channel that has capacity for all parent results.
inputChannels[id] = make(chan FlowResult, len(parents))
if ok, err := w.g.IsLeaf(id); ok && err == nil {
leafCount += 1
}
}
// outputChannel carries the results of leaf vertices.
outputChannel := make(chan FlowResult, leafCount)
// To also process the start vertex and to have its results being passed to its
// children, add it to the vertex IDs. Also add an input channel for the start
// vertex and feed the inputs to this channel.
flowIDs[startID] = struct{}{}
inputChannels[startID] = make(chan FlowResult, len(inputs))
for _, i := range inputs {
inputChannels[startID] <- i
}
wg := sync.WaitGroup{}
// Iterate all vertex IDs (now incl. start vertex) and handle each worker (incl.
// inputs and outputs) in a separate goroutine.
for id := range flowIDs {
// Get all children of this vertex that later need to be notified. Note, we
// collect all children before the goroutine to be able to release the read
// lock as early as possible.
children, errChildren := w.g.GetChildren(id)
if errChildren != nil {
return errChildren
}
// Remember to wait for this goroutine.
wg.Add(1)
go func(id string) {
// Get this vertex's input channel.
// Note, only concurrent read here, which is fine.
c := inputChannels[id]
// Await all parent inputs and stuff them into a slice.
parentCount := cap(c)
parentResults := make([]FlowResult, parentCount)
for i := 0; i < parentCount; i++ {
parentResults[i] = <-c
}
// Execute the worker.
errWorker := callback(w.g, id, parentResults)
if errWorker != nil {
return errWorker
}
// Send this worker's FlowResult onto all children's input channels or, if it is
// a leaf (i.e. no children), send the result onto the output channel.
if len(children) > 0 {
for child := range children {
inputChannels[child] <- flowResult
go func() {
for idx := range steps {
for nidx := range steps[idx] {
wStatus := &codec.Frame{}
if werr := workflowStore.Read(w.opts.Context, "status", wStatus); werr != nil {
cherr <- werr
return
}
} else {
outputChannel <- flowResult
}
// "Sign off".
wg.Done()
}(id)
}
// Wait for all go routines to finish.
wg.Wait()
// Await all leaf vertex results and stuff them into a slice.
resultCount := cap(outputChannel)
results := make([]FlowResult, resultCount)
for i := 0; i < resultCount; i++ {
results[i] = <-outputChannel
}
/*
go func() {
for idx := range steps {
for nidx := range steps[idx] {
wStatus := &codec.Frame{}
if werr := workflowStore.Read(w.opts.Context, "status", wStatus); werr != nil {
cherr <- werr
return
}
if status := StringStatus[string(wStatus.Data)]; status != StatusRunning {
chstatus <- status
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
}
cstep := steps[idx][nidx]
// nolint: nestif
if len(cstep.Requires()) == 0 {
wg.Add(1)
go func(step Step) {
defer wg.Done()
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "req"), req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := step.Execute(nctx, req, nopts...)
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
}(cstep)
wg.Wait()
} else {
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "req"), req); werr != nil {
if status := StringStatus[string(wStatus.Data)]; status != StatusRunning {
chstatus <- status
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
}
cstep := steps[idx][nidx]
// nolint: nestif
if len(cstep.Requires()) == 0 {
wg.Add(1)
go func(step Step) {
defer wg.Done()
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"req", req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := cstep.Execute(nctx, req, nopts...)
rsp, serr := step.Execute(nctx, req, nopts...)
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), rsp); werr != nil {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
}(cstep)
wg.Wait()
} else {
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"req", req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := cstep.Execute(nctx, req, nopts...)
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
cherr <- werr
return
}
}
}
close(done)
}()
if options.Async {
return eid, nil
}
close(done)
}()
logger.Tracef(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
case cerr := <-cherr:
err = cerr
case <-done:
close(cherr)
case <-chstatus:
close(chstatus)
return eid, nil
}
if options.Async {
return eid, nil
}
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
logger.Tracef(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
case cerr := <-cherr:
err = cerr
case <-done:
close(cherr)
case <-chstatus:
close(chstatus)
return eid, nil
}
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
*/
return err
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
}
return eid, err
}
// NewFlow create new flow
func NewFlow(opts ...Option) Flow {
func NewFlow(opts ...options.Option) Flow {
options := NewOptions(opts...)
return &microFlow{opts: options}
}
@@ -415,7 +360,7 @@ func (f *microFlow) Options() Options {
return f.opts
}
func (f *microFlow) Init(opts ...Option) error {
func (f *microFlow) Init(opts ...options.Option) error {
for _, o := range opts {
o(&f.opts)
}
@@ -442,11 +387,11 @@ func (f *microFlow) WorkflowList(ctx context.Context) ([]Workflow, error) {
}
func (f *microFlow) WorkflowCreate(ctx context.Context, id string, steps ...Step) (Workflow, error) {
w := &microWorkflow{opts: f.opts, id: id, g: &dag.DAG{}, steps: make(map[string]Step, len(steps))}
w := &microWorkflow{opts: f.opts, id: id, g: &dag.AcyclicGraph{}, steps: make(map[string]Step, len(steps))}
for _, s := range steps {
w.steps[s.String()] = s
w.g.AddVertex(s)
w.g.Add(s)
}
for _, dst := range steps {
@@ -455,11 +400,14 @@ func (f *microFlow) WorkflowCreate(ctx context.Context, id string, steps ...Step
if !ok {
return nil, ErrStepNotExists
}
w.g.AddEdge(src.String(), dst.String())
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
return nil, err
}
w.g.TransitiveReduction()
w.init = true
@@ -541,17 +489,17 @@ func (s *microCallStep) SetStatus(status Status) {
s.status = status
}
func (s *microCallStep) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (*Message, error) {
func (s *microCallStep) Execute(ctx context.Context, req *Message, opts ...options.Option) (*Message, error) {
options := NewExecuteOptions(opts...)
if options.Client == nil {
return nil, ErrMissingClient
}
rsp := &codec.Frame{}
copts := []client.CallOption{client.WithRetries(0)}
copts := []moptions.Option{client.Retries(0)}
if options.Timeout > 0 {
copts = append(copts,
client.WithRequestTimeout(options.Timeout),
client.WithDialTimeout(options.Timeout))
client.RequestTimeout(options.Timeout),
client.DialTimeout(options.Timeout))
}
nctx := metadata.NewOutgoingContext(ctx, req.Header)
err := options.Client.Call(nctx, options.Client.NewRequest(s.service, s.method, &codec.Frame{Data: req.Body}), rsp, copts...)
@@ -624,18 +572,18 @@ func (s *microPublishStep) SetStatus(status Status) {
s.status = status
}
func (s *microPublishStep) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (*Message, error) {
func (s *microPublishStep) Execute(ctx context.Context, req *Message, opts ...options.Option) (*Message, error) {
return nil, nil
}
// NewCallStep create new step with client.Call
func NewCallStep(service string, name string, method string, opts ...StepOption) Step {
func NewCallStep(service string, name string, method string, opts ...options.Option) Step {
options := NewStepOptions(opts...)
return &microCallStep{service: service, method: name + "." + method, opts: options}
}
// NewPublishStep create new step with client.Publish
func NewPublishStep(topic string, opts ...StepOption) Step {
func NewPublishStep(topic string, opts ...options.Option) Step {
options := NewStepOptions(opts...)
return &microPublishStep{topic: topic, opts: options}
}
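
Wiring the reworked flow API together might look like the sketch below; the workflow, service and endpoint names are illustrative, and options.Client is the shared-options constructor the code above already relies on:

package example

import (
	"context"

	"go.unistack.org/micro/v4/client"
	"go.unistack.org/micro/v4/flow"
	"go.unistack.org/micro/v4/options"
)

func runOrders(ctx context.Context, cli client.Client) error {
	f := flow.NewFlow(options.Client(cli))

	w, err := f.WorkflowCreate(ctx, "orders",
		flow.NewCallStep("orders", "OrderService", "Create"),
	)
	if err != nil {
		return err
	}

	// Execute walks the DAG and blocks unless Async(true) is passed;
	// the returned execution id feeds Suspend/Resume/Abort.
	eid, err := w.Execute(ctx, &flow.Message{Body: []byte(`{"order":"1"}`)})
	if err != nil {
		return err
	}
	_ = eid
	return nil
}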


@@ -8,6 +8,7 @@ import (
"sync/atomic"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
)
var (
@@ -51,7 +52,7 @@ type Step interface {
// Endpoint returns rpc endpoint service_name.service_method or broker topic
Endpoint() string
// Execute step run
Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (*Message, error)
Execute(ctx context.Context, req *Message, opts ...options.Option) (*Message, error)
// Requires returns dependent steps
Requires() []string
// Options returns step options
@@ -118,13 +119,15 @@ type Workflow interface {
// ID returns id of the workflow
ID() string
// Execute workflow with args, return execution id and error
Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (string, error)
Execute(ctx context.Context, req *Message, opts ...options.Option) (string, error)
// RemoveSteps remove steps from workflow
RemoveSteps(steps ...Step) error
// AppendSteps append steps to workflow
AppendSteps(steps ...Step) error
// Status returns workflow status
Status() Status
// Steps returns steps slice where parallel steps returned on the same level
Steps() ([][]Step, error)
// Suspend suspends execution
Suspend(ctx context.Context, id string) error
// Resume resumes execution
@@ -138,7 +141,7 @@ type Flow interface {
// Options returns options
Options() Options
// Init initialize
Init(...Option) error
Init(...options.Option) error
// WorkflowCreate creates new workflow with specific id and steps
WorkflowCreate(ctx context.Context, id string, steps ...Step) (Workflow, error)
// WorkflowSave saves workflow


@@ -7,13 +7,11 @@ import (
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v4/tracer"
)
// Option func
type Option func(*Options)
// Options server struct
type Options struct {
// Context holds the external options and can be used for flow shutdown
@@ -31,7 +29,7 @@ type Options struct {
}
// NewOptions returns new options struct with default or passed values
func NewOptions(opts ...Option) Options {
func NewOptions(opts ...options.Option) Options {
options := Options{
Context: context.Background(),
Logger: logger.DefaultLogger,
@@ -47,66 +45,12 @@ func NewOptions(opts ...Option) Options {
return options
}
// Logger sets the logger option
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Meter sets the meter option
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Client to use for sync/async communication
func Client(c client.Client) Option {
return func(o *Options) {
o.Client = c
}
}
// Context specifies a context for the service.
// Can be used to signal shutdown of the flow
// or can be used for extra option values.
func Context(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
}
}
// Tracer mechanism for distributed tracking
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// Store used for intermediate results
func Store(s store.Store) Option {
return func(o *Options) {
o.Store = s
}
}
// WorkflowOption func signature
type WorkflowOption func(*WorkflowOptions)
// WorkflowOptions holds workflow options
type WorkflowOptions struct {
Context context.Context
ID string
}
// WorkflowID set workflow id
func WorkflowID(id string) WorkflowOption {
return func(o *WorkflowOptions) {
o.ID = id
}
}
// ExecuteOptions holds execute options
type ExecuteOptions struct {
// Client holds the client.Client
@@ -123,64 +67,28 @@ type ExecuteOptions struct {
Start string
// Timeout for execution
Timeout time.Duration
// Reverse execution
Reverse bool
// Async enables async execution
Async bool
}
// ExecuteOption func signature
type ExecuteOption func(*ExecuteOptions)
// ExecuteClient pass client.Client to ExecuteOption
func ExecuteClient(c client.Client) ExecuteOption {
return func(o *ExecuteOptions) {
o.Client = c
// Reverse says that dag must be run in reverse order
func Reverse(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Reverse")
}
}
// ExecuteTracer pass tracer.Tracer to ExecuteOption
func ExecuteTracer(t tracer.Tracer) ExecuteOption {
return func(o *ExecuteOptions) {
o.Tracer = t
}
}
// ExecuteLogger pass logger.Logger to ExecuteOption
func ExecuteLogger(l logger.Logger) ExecuteOption {
return func(o *ExecuteOptions) {
o.Logger = l
}
}
// ExecuteMeter pass meter.Meter to ExecuteOption
func ExecuteMeter(m meter.Meter) ExecuteOption {
return func(o *ExecuteOptions) {
o.Meter = m
}
}
// ExecuteContext pass context.Context to ExecuteOption
func ExecuteContext(ctx context.Context) ExecuteOption {
return func(o *ExecuteOptions) {
o.Context = ctx
}
}
// ExecuteTimeout pass timeout time.Duration for execution
func ExecuteTimeout(td time.Duration) ExecuteOption {
return func(o *ExecuteOptions) {
o.Timeout = td
}
}
// ExecuteAsync says that caller does not wait for execution complete
func ExecuteAsync(b bool) ExecuteOption {
return func(o *ExecuteOptions) {
o.Async = b
// Async says that caller does not wait for execution complete
func Async(b bool) options.Option {
return func(src interface{}) error {
return options.Set(src, b, ".Async")
}
}
// NewExecuteOptions create new ExecuteOptions struct
func NewExecuteOptions(opts ...ExecuteOption) ExecuteOptions {
func NewExecuteOptions(opts ...options.Option) ExecuteOptions {
options := ExecuteOptions{
Client: client.DefaultClient,
Logger: logger.DefaultLogger,
@@ -202,11 +110,8 @@ type StepOptions struct {
Requires []string
}
// StepOption func signature
type StepOption func(*StepOptions)
// NewStepOptions create new StepOptions struct
func NewStepOptions(opts ...StepOption) StepOptions {
func NewStepOptions(opts ...options.Option) StepOptions {
options := StepOptions{
Context: context.Background(),
}
@@ -216,23 +121,23 @@ func NewStepOptions(opts ...StepOption) StepOptions {
return options
}
// StepID sets the step id for dag
func StepID(id string) StepOption {
return func(o *StepOptions) {
o.ID = id
// Requires specifies required steps
func Requires(steps ...string) options.Option {
return func(src interface{}) error {
return options.Set(src, steps, ".Requires")
}
}
// StepRequires specifies required steps
func StepRequires(steps ...string) StepOption {
return func(o *StepOptions) {
o.Requires = steps
// Fallback set the step to run on error
func Fallback(step string) options.Option {
return func(src interface{}) error {
return options.Set(src, step, ".Fallback")
}
}
// StepFallback set the step to run on error
func StepFallback(step string) StepOption {
return func(o *StepOptions) {
o.Fallback = step
// StepID sets the step ID
func StepID(id string) options.Option {
return func(src interface{}) error {
return options.Set(src, id, ".ID")
}
}
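
StepRequires and StepFallback collapse into Requires and Fallback, while StepID keeps its name and switches to the generic form. Declaring a dependent step then reads roughly like this (step names are illustrative, and Requires is assumed to reference another step by the name it is registered under):

step := flow.NewCallStep("billing", "BillingService", "Charge",
	flow.StepID("charge"),
	flow.Requires("reserve"), // assumed: names another step in the same workflow
	flow.Fallback("refund"),  // step to run if this one fails
)
_ = step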


@@ -32,7 +32,7 @@ type fsm struct {
// NewFSM creates a new finite state machine having the specified initial state
// with specified options
func NewFSM(opts ...Option) FSM {
func NewFSM(opts ...Option) *fsm {
return &fsm{
statesMap: map[string]StateFunc{},
opts: NewOptions(opts...),


@@ -1,4 +1,4 @@
package fsm
package fsm // import "go.unistack.org/micro/v4/fsm"
import (
"context"


@@ -17,7 +17,7 @@ func TestFSMStart(t *testing.T) {
wrapper := func(next StateFunc) StateFunc {
return func(sctx context.Context, s State, opts ...StateOption) (State, error) {
sctx = logger.NewContext(sctx, logger.DefaultLogger.Fields("state", s.Name()))
sctx = logger.NewContext(sctx, logger.Fields("state", s.Name()))
return next(sctx, s, opts...)
}
}

go.mod

@@ -1,33 +1,22 @@
module go.unistack.org/micro/v4
go 1.22.0
go 1.20
require (
dario.cat/mergo v1.0.1
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/KimMachineGun/automemlimit v0.7.0
github.com/google/uuid v1.6.0
github.com/matoous/go-nanoid v1.5.1
dario.cat/mergo v1.0.0
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/google/uuid v1.3.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
github.com/spf13/cast v1.7.1
github.com/stretchr/testify v1.10.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v4 v4.1.0
golang.org/x/sync v0.10.0
google.golang.org/grpc v1.69.4
google.golang.org/protobuf v1.36.3
gopkg.in/yaml.v3 v3.0.1
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/sync v0.3.0
golang.org/x/sys v0.12.0
google.golang.org/grpc v1.58.2
google.golang.org/protobuf v1.31.0
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sys v0.29.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/net v0.15.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
)

go.sum

@@ -1,67 +1,36 @@
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4=
github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk=
go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -1,117 +0,0 @@
package metadata
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
)
type wrapper struct {
keys []string
client.Client
}
func NewClientWrapper(keys ...string) client.Wrapper {
return func(c client.Client) client.Client {
handler := &wrapper{
Client: c,
keys: keys,
}
return handler
}
}
func NewClientCallWrapper(keys ...string) client.CallWrapper {
return func(fn client.CallFunc) client.CallFunc {
return func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
if keys == nil {
return fn(ctx, addr, req, rsp, opts)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, addr, req, rsp, opts)
}
}
}
func (w *wrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if w.keys == nil {
return w.Client.Call(ctx, req, rsp, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Call(ctx, req, rsp, opts...)
}
func (w *wrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if w.keys == nil {
return w.Client.Stream(ctx, req, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Stream(ctx, req, opts...)
}
func NewServerHandlerWrapper(keys ...string) server.HandlerWrapper {
return func(fn server.HandlerFunc) server.HandlerFunc {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if keys == nil {
return fn(ctx, req, rsp)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, req, rsp)
}
}
}
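A minimal sketch of how the call wrapper is applied; the key names and the terminal CallFunc are illustrative, and the client option used to register the wrapper is not part of this diff.
// In-package sketch (illustrative keys, no real transport).
func exampleClientCallWrapper() {
	cw := NewClientCallWrapper("x-request-id", "x-tenant-id")

	// Terminal CallFunc; a real client would perform the RPC here.
	var last client.CallFunc = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
		return nil
	}
	call := cw(last)

	// Incoming metadata for the listed keys is copied to the outgoing metadata before last runs.
	imd := metadata.New(1)
	imd.Set("x-request-id", "123")
	ctx := metadata.NewIncomingContext(context.Background(), imd)
	_ = call(ctx, "127.0.0.1:8080", nil, nil, client.CallOptions{})
}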

View File

@@ -1,63 +0,0 @@
package recovery
import (
"context"
"fmt"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
func NewOptions(opts ...Option) Options {
options := Options{
ServerHandlerFn: DefaultServerHandlerFn,
}
for _, o := range opts {
o(&options)
}
return options
}
type Options struct {
ServerHandlerFn func(context.Context, server.Request, interface{}, error) error
}
type Option func(*Options)
func ServerHandlerFunc(fn func(context.Context, server.Request, interface{}, error) error) Option {
return func(o *Options) {
o.ServerHandlerFn = fn
}
}
var DefaultServerHandlerFn = func(ctx context.Context, req server.Request, rsp interface{}, err error) error {
return errors.BadRequest("", "%v", err)
}
var Hook = NewHook()
type hook struct {
opts Options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) (err error) {
defer func() {
r := recover()
switch verr := r.(type) {
case nil:
return
case error:
err = w.opts.ServerHandlerFn(ctx, req, rsp, verr)
default:
err = w.opts.ServerHandlerFn(ctx, req, rsp, fmt.Errorf("%v", r))
}
}()
err = next(ctx, req, rsp)
return err
}
}
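A brief sketch of the hook in isolation: a panicking handler is converted into an error by ServerHandlerFn. The handler body and the error mapping are illustrative, and attaching the hook to a server is outside this diff.
// In-package sketch (illustrative handler and error mapping).
func exampleRecoveryHook() {
	h := NewHook(ServerHandlerFunc(func(ctx context.Context, req server.Request, rsp interface{}, err error) error {
		return errors.BadRequest("example", "recovered: %v", err)
	}))

	var handler server.FuncHandler = func(ctx context.Context, req server.Request, rsp interface{}) error {
		panic("boom")
	}

	// The wrapped handler returns the mapped error instead of crashing the process.
	_ = h.ServerHandler(handler)(context.Background(), nil, nil)
}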

View File

@@ -1,103 +0,0 @@
package requestid
import (
"context"
"net/textproto"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
"go.unistack.org/micro/v4/util/id"
)
type XRequestIDKey struct{}
// DefaultMetadataKey contains metadata key

var DefaultMetadataKey = textproto.CanonicalMIMEHeaderKey("x-request-id")
// DefaultMetadataFunc will be used if the user does not provide their own func to fill metadata
var DefaultMetadataFunc = func(ctx context.Context) (context.Context, error) {
var xid string
cid, cok := ctx.Value(XRequestIDKey{}).(string)
if cok && cid != "" {
xid = cid
}
imd, iok := metadata.FromIncomingContext(ctx)
if !iok || imd == nil {
imd = metadata.New(1)
ctx = metadata.NewIncomingContext(ctx, imd)
}
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(1)
ctx = metadata.NewOutgoingContext(ctx, omd)
}
if xid == "" {
xid = imd.GetJoined(DefaultMetadataKey)
if xid == "" {
xid = omd.GetJoined(DefaultMetadataKey)
}
}
if xid == "" {
var err error
xid, err = id.New()
if err != nil {
return ctx, err
}
}
if !cok {
ctx = context.WithValue(ctx, XRequestIDKey{}, xid)
}
if !iok {
imd.Set(DefaultMetadataKey, xid)
}
if !ook {
omd.Set(DefaultMetadataKey, xid)
}
return ctx, nil
}
type hook struct{}
func NewHook() *hook {
return &hook{}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp)
}
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp, opts...)
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return nil, err
}
return next(ctx, req, opts...)
}
}
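For the client side in isolation, wrapping a FuncCall guarantees the outgoing metadata carries DefaultMetadataKey, generating a new id when neither the context value nor the metadata already has one. The terminal call below is illustrative.
// In-package sketch (no real transport).
func exampleRequestIDClientCall() {
	h := NewHook()
	call := h.ClientCall(func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
		omd, _ := metadata.FromOutgoingContext(ctx)
		_ = omd.GetJoined(DefaultMetadataKey) // the propagated (or newly generated) request id
		return nil
	})
	_ = call(context.Background(), nil, nil)
}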

View File

@@ -1,34 +0,0 @@
package requestid
import (
"context"
"slices"
"testing"
"go.unistack.org/micro/v4/metadata"
)
func TestDefaultMetadataFunc(t *testing.T) {
ctx := context.TODO()
nctx, err := DefaultMetadataFunc(ctx)
if err != nil {
t.Fatalf("%v", err)
}
imd, ok := metadata.FromIncomingContext(nctx)
if !ok {
t.Fatalf("md missing in incoming context")
}
omd, ok := metadata.FromOutgoingContext(nctx)
if !ok {
t.Fatalf("md missing in outgoing context")
}
iv := imd.Get(DefaultMetadataKey)
ov := omd.Get(DefaultMetadataKey)
if !slices.Equal(iv, ov) {
t.Fatalf("missing metadata key value %v != %v", iv, ov)
}
}

View File

@@ -1,51 +0,0 @@
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"runtime"
)
//go:generate sh -c "go run gen.go > wrap_gen.go"
// namedValueToValue converts driver arguments of NamedValue format to Value format. Implemented in the same way as in
// database/sql ctxutil.go.
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
return nil, errors.New("sql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
// namedValueToLabels converts driver arguments to an interface{} slice of name=value labels
func namedValueToLabels(named []driver.NamedValue) []interface{} {
largs := make([]interface{}, 0, len(named)*2)
var name string
for _, param := range named {
if param.Name != "" {
name = param.Name
} else {
name = fmt.Sprintf("$%d", param.Ordinal)
}
largs = append(largs, fmt.Sprintf("%s=%v", name, param.Value))
}
return largs
}
// getCallerName gets the name of the calling function A in a chain A() -> B() -> getCallerName()
func getCallerName() string {
pc, _, _, ok := runtime.Caller(3)
details := runtime.FuncForPC(pc)
var callerName string
if ok && details != nil {
callerName = details.Name()
} else {
callerName = labelUnknown
}
return callerName
}
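A small illustration of the label formatting (values are arbitrary): named parameters keep their name, positional ones fall back to their ordinal.
// In-package sketch of namedValueToLabels output.
func exampleNamedValueToLabels() {
	args := []driver.NamedValue{
		{Name: "id", Ordinal: 1, Value: 42},
		{Ordinal: 2, Value: "alice"},
	}
	fmt.Println(namedValueToLabels(args)) // [id=42 $2=alice]
}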

View File

@@ -1,467 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
"go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Conn = (*wrapperConn)(nil)
_ driver.ConnBeginTx = (*wrapperConn)(nil)
_ driver.ConnPrepareContext = (*wrapperConn)(nil)
_ driver.Pinger = (*wrapperConn)(nil)
_ driver.Validator = (*wrapperConn)(nil)
_ driver.Queryer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.QueryerContext = (*wrapperConn)(nil)
_ driver.Execer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.ExecerContext = (*wrapperConn)(nil)
// _ driver.Connector
// _ driver.Driver
// _ driver.DriverContext
)
// wrapperConn defines a wrapper for driver.Conn
type wrapperConn struct {
d *wrapperDriver
dname string
conn driver.Conn
opts Options
ctx context.Context
//span tracer.Span
}
// Close implements driver.Conn Close
func (w *wrapperConn) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.conn.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// Begin implements driver.Conn Begin
func (w *wrapperConn) Begin() (driver.Tx, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
labels := []string{labelMethod, "Begin"}
ts := time.Now()
tx, err := w.conn.Begin() // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx}, nil
}
// BeginTx implements driver.ConnBeginTx BeginTx
func (w *wrapperConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
name := getQueryName(ctx)
nctx, span := w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
span.AddLabels("db.method", "BeginTx")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "BeginTx", labelQuery, name}
connBeginTx, ok := w.conn.(driver.ConnBeginTx)
if !ok {
return w.Begin()
}
ts := time.Now()
tx, err := connBeginTx.BeginTx(nctx, opts)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx, span: span}, nil
}
// Prepare implements driver.Conn Prepare
func (w *wrapperConn) Prepare(query string) (driver.Stmt, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Prepare", labelQuery, getCallerName()}
ts := time.Now()
stmt, err := w.conn.Prepare(query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// PrepareContext implements driver.ConnPrepareContext PrepareContext
func (w *wrapperConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "PrepareContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "PrepareContext", labelQuery, name}
conn, ok := w.conn.(driver.ConnPrepareContext)
if !ok {
return w.Prepare(query)
}
ts := time.Now()
stmt, err := conn.PrepareContext(nctx, query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// Exec implements driver.Execer Exec
func (w *wrapperConn) Exec(query string, args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec", labelQuery, getCallerName()}
// nolint:staticcheck
conn, ok := w.conn.(driver.Execer)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.Exec(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// ExecContext implements driver.ExecerContext ExecContext
func (w *wrapperConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
conn, ok := w.conn.(driver.ExecerContext)
if !ok {
// nolint:staticcheck
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.ExecContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", getCallerName(), td, err)...)
}
*/
return res, err
}
// Ping implements driver.Pinger Ping
func (w *wrapperConn) Ping(ctx context.Context) error {
conn, ok := w.conn.(driver.Pinger)
if !ok {
// fallback path to check db alive
pc, err := w.d.Open(w.dname)
if err != nil {
return err
}
return pc.Close()
}
var nctx context.Context //nolint:gosimple
nctx = ctx
/*
var span tracer.Span
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "Ping")
defer span.Finish()
*/
labels := []string{labelMethod, "Ping"}
ts := time.Now()
err := conn.Ping(nctx)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
// span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Ping", getCallerName(), td, err)...)
}
*/
return err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
return nil
}
// Query implements driver.Queryer Query
func (w *wrapperConn) Query(query string, args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
// nolint:staticcheck
conn, ok := w.conn.(driver.Queryer)
if !ok {
return nil, driver.ErrSkip
}
labels := []string{labelMethod, "Query", labelQuery, getCallerName()}
ts := time.Now()
rows, err := conn.Query(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// QueryContext implements driver.QueryerContext QueryContext
func (w *wrapperConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
conn, ok := w.conn.(driver.QueryerContext)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
rows, err := conn.QueryContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", getCallerName(), td, err)...)
}
*/
return rows, err
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperConn) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.conn.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// IsValid implements driver.Validator
func (w *wrapperConn) IsValid() bool {
v, ok := w.conn.(driver.Validator)
if !ok {
return w.conn != nil
}
return v.IsValid()
}
func (w *wrapperConn) ResetSession(ctx context.Context) error {
s, ok := w.conn.(driver.SessionResetter)
if !ok {
return driver.ErrSkip
}
return s.ResetSession(ctx)
}

View File

@@ -1,94 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"time"
)
var (
// _ driver.DriverContext = (*wrapperDriver)(nil)
// _ driver.Connector = (*wrapperDriver)(nil)
)
/*
type conn interface {
driver.Pinger
driver.Execer
driver.ExecerContext
driver.Queryer
driver.QueryerContext
driver.Conn
driver.ConnPrepareContext
driver.ConnBeginTx
}
*/
// wrapperDriver defines a wrapper for driver.Driver
type wrapperDriver struct {
driver driver.Driver
opts Options
ctx context.Context
}
// NewWrapper creates and returns a new SQL driver with passed capabilities
func NewWrapper(d driver.Driver, opts ...Option) driver.Driver {
return &wrapperDriver{driver: d, opts: NewOptions(opts...), ctx: context.Background()}
}
type wrappedConnector struct {
connector driver.Connector
// name string
opts Options
ctx context.Context
}
func NewWrapperConnector(c driver.Connector, opts ...Option) driver.Connector {
return &wrappedConnector{connector: c, opts: NewOptions(opts...), ctx: context.Background()}
}
// Connect implements driver.Connector Connect
func (w *wrappedConnector) Connect(ctx context.Context) (driver.Conn, error) {
return w.connector.Connect(ctx)
}
// Driver implements driver.Connector Driver
func (w *wrappedConnector) Driver() driver.Driver {
return w.connector.Driver()
}
/*
// Connect implements driver.Driver OpenConnector
func (w *wrapperDriver) OpenConnector(name string) (driver.Conn, error) {
return &wrapperConnector{driver: w.driver, name: name, opts: w.opts}, nil
}
*/
// Open implements driver.Driver Open
func (w *wrapperDriver) Open(name string) (driver.Conn, error) {
// ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // Ensure eventual timeout
// defer cancel()
/*
connector, err := w.OpenConnector(name)
if err != nil {
return nil, err
}
return connector.Connect(ctx)
*/
ts := time.Now()
c, err := w.driver.Open(name)
td := time.Since(ts)
/*
if w.opts.LoggerEnabled {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Open", getCallerName(), td, err)...)
}
*/
_ = td
if err != nil {
return nil, err
}
return wrapConn(c, w.opts), nil
}
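Wiring the wrapper into database/sql is not shown in this change; one plausible sketch, assuming base is whatever driver.Driver your database driver exposes (the driver name, DSN and labels are illustrative, and "database/sql" is imported as stats.go in this package already does):
// In-package sketch of registering the wrapped driver.
func wireWrappedDriver(base driver.Driver) (*sql.DB, error) {
	sql.Register("wrapped-driver", NewWrapper(base,
		DatabaseHost("db.local"),
		DatabaseName("orders"),
	))
	return sql.Open("wrapped-driver", "dsn-goes-here")
}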

View File

@@ -1,167 +0,0 @@
//go:build ignore
package main
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"sort"
"strings"
)
var connIfaces = []string{
"driver.ConnBeginTx",
"driver.ConnPrepareContext",
"driver.Execer",
"driver.ExecerContext",
"driver.NamedValueChecker",
"driver.Pinger",
"driver.Queryer",
"driver.QueryerContext",
"driver.SessionResetter",
"driver.Validator",
}
var stmtIfaces = []string{
"driver.StmtExecContext",
"driver.StmtQueryContext",
"driver.ColumnConverter",
"driver.NamedValueChecker",
}
func getHash(s []string) string {
h := md5.New()
io.WriteString(h, strings.Join(s, "|"))
return fmt.Sprintf("%x", h.Sum(nil))
}
func main() {
comboConn := all(connIfaces)
sort.Slice(comboConn, func(i, j int) bool {
return len(comboConn[i]) < len(comboConn[j])
})
comboStmt := all(stmtIfaces)
sort.Slice(comboStmt, func(i, j int) bool {
return len(comboStmt[i]) < len(comboStmt[j])
})
b := bytes.NewBuffer(nil)
b.WriteString("// Code generated. DO NOT EDIT.\n\n")
b.WriteString("package sql\n\n")
b.WriteString(`import "database/sql/driver"`)
b.WriteString("\n\n")
b.WriteString("func wrapConn(dc driver.Conn, opts Options) driver.Conn {\n")
b.WriteString("\tc := &wrapperConn{conn: dc, opts: opts}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := dc.(wrapConn%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Conn\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
}
b.WriteString("c")
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapConn%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
b.WriteString("func wrapStmt(stmt driver.Stmt, query string, opts Options) driver.Stmt {\n")
b.WriteString("\tc := &wrapperStmt{stmt: stmt, query: query, opts: opts}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := stmt.(wrapStmt%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Stmt\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
}
b.WriteString("c")
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapStmt%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
fmt.Printf("%s\n", b.String())
}
// all returns every non-empty subset of the given set.
func all[T any](set []T) (subsets [][]T) {
length := uint(len(set))
for subsetBits := 1; subsetBits < (1 << length); subsetBits++ {
var subset []T
for object := uint(0); object < length; object++ {
if (subsetBits>>object)&1 == 1 {
subset = append(subset, set[object])
}
}
subsets = append(subsets, subset)
}
return subsets
}
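The generator enumerates every non-empty subset of the optional interfaces and, for each, emits a named interface plus a branch that re-wraps the connection in an anonymous struct exposing exactly those interfaces. One entry of the generated wrap_gen.go looks roughly like the following; the hash suffix is the md5 of the interface list, and the identifier below is made up rather than copied from the generated file.
// driver.ConnBeginTx|driver.Pinger (illustrative hash)
type wrapConn0002_0123456789abcdef0123456789abcdef interface {
	driver.ConnBeginTx
	driver.Pinger
}

// ...and the matching branch inside the generated wrapConn, where c is the
// *wrapperConn built from dc and opts:
//
//	if _, ok := dc.(wrapConn0002_0123456789abcdef0123456789abcdef); ok {
//		return struct {
//			driver.Conn
//			driver.ConnBeginTx
//			driver.Pinger
//		}{c, c, c}
//	}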

View File

@@ -1,172 +0,0 @@
package sql
import (
"context"
"fmt"
"time"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/tracer"
)
var (
// DefaultMeterStatsInterval holds default stats interval
DefaultMeterStatsInterval = 5 * time.Second
// DefaultLoggerObserver is used to prepare labels for the logger
DefaultLoggerObserver = func(ctx context.Context, method string, query string, td time.Duration, err error) []interface{} {
labels := []interface{}{"db.method", method, "took", fmt.Sprintf("%v", td)}
if err != nil {
labels = append(labels, "error", err.Error())
}
if query != labelUnknown {
labels = append(labels, "query", query)
}
return labels
}
)
var (
MaxOpenConnections = "micro_sql_max_open_conn"
OpenConnections = "micro_sql_open_conn"
InuseConnections = "micro_sql_inuse_conn"
IdleConnections = "micro_sql_idle_conn"
WaitConnections = "micro_sql_waited_conn"
BlockedSeconds = "micro_sql_blocked_seconds"
MaxIdleClosed = "micro_sql_max_idle_closed"
MaxIdletimeClosed = "micro_sql_closed_max_idle"
MaxLifetimeClosed = "micro_sql_closed_max_lifetime"
meterRequestTotal = "micro_sql_request_total"
meterRequestLatencyMicroseconds = "micro_sql_latency_microseconds"
meterRequestDurationSeconds = "micro_sql_request_duration_seconds"
labelUnknown = "unknown"
labelQuery = "db_statement"
labelMethod = "db_method"
labelStatus = "status"
labelSuccess = "success"
labelFailure = "failure"
labelHost = "db_host"
labelDatabase = "db_name"
)
// Options struct holds wrapper options
type Options struct {
Logger logger.Logger
Meter meter.Meter
Tracer tracer.Tracer
DatabaseHost string
DatabaseName string
MeterStatsInterval time.Duration
LoggerLevel logger.Level
LoggerEnabled bool
LoggerObserver func(ctx context.Context, method string, name string, td time.Duration, err error) []interface{}
}
// Option func signature
type Option func(*Options)
// NewOptions create new Options struct from provided option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
MeterStatsInterval: DefaultMeterStatsInterval,
LoggerLevel: logger.ErrorLevel,
LoggerObserver: DefaultLoggerObserver,
}
for _, o := range opts {
o(&options)
}
options.Meter = options.Meter.Clone(
meter.Labels(
labelHost, options.DatabaseHost,
labelDatabase, options.DatabaseName,
),
)
options.Logger = options.Logger.Clone(logger.WithAddCallerSkipCount(1))
return options
}
// MetricInterval specifies stats interval for *sql.DB
func MetricInterval(td time.Duration) Option {
return func(o *Options) {
o.MeterStatsInterval = td
}
}
func DatabaseHost(host string) Option {
return func(o *Options) {
o.DatabaseHost = host
}
}
func DatabaseName(name string) Option {
return func(o *Options) {
o.DatabaseName = name
}
}
// Meter passes meter.Meter to wrapper
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Logger passes logger.Logger to wrapper
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// LoggerEnabled enable sql logging
func LoggerEnabled(b bool) Option {
return func(o *Options) {
o.LoggerEnabled = b
}
}
// LoggerLevel passes logger.Level option
func LoggerLevel(lvl logger.Level) Option {
return func(o *Options) {
o.LoggerLevel = lvl
}
}
// LoggerObserver passes observer to fill logger fields
func LoggerObserver(obs func(context.Context, string, string, time.Duration, error) []interface{}) Option {
return func(o *Options) {
o.LoggerObserver = obs
}
}
// Tracer passes tracer.Tracer to wrapper
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
type queryNameKey struct{}
// QueryName passes query name to wrapper func
func QueryName(ctx context.Context, name string) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, queryNameKey{}, name)
}
func getQueryName(ctx context.Context) string {
if v, ok := ctx.Value(queryNameKey{}).(string); ok && v != labelUnknown {
return v
}
return getCallerName()
}
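QueryName is how callers give a statement a stable, human-readable name for spans, metrics and (when enabled) logs; without it the wrapper falls back to the caller's function name. A brief sketch, assuming db is a *sql.DB opened over the wrapped driver and that "database/sql" is imported:
// Sketch: label the statement instead of relying on the caller name.
func selectOrderName(ctx context.Context, db *sql.DB, id int64) (string, error) {
	ctx = QueryName(ctx, "orders.select_by_id")
	var name string
	err := db.QueryRowContext(ctx, "SELECT name FROM orders WHERE id = $1", id).Scan(&name)
	return name, err
}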

View File

@@ -1,41 +0,0 @@
package sql
import (
"context"
"database/sql"
"time"
)
type Statser interface {
Stats() sql.DBStats
}
func NewStatsMeter(ctx context.Context, db Statser, opts ...Option) {
options := NewOptions(opts...)
go func() {
ticker := time.NewTicker(options.MeterStatsInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if db == nil {
return
}
stats := db.Stats()
options.Meter.Counter(MaxOpenConnections).Set(uint64(stats.MaxOpenConnections))
options.Meter.Counter(OpenConnections).Set(uint64(stats.OpenConnections))
options.Meter.Counter(InuseConnections).Set(uint64(stats.InUse))
options.Meter.Counter(IdleConnections).Set(uint64(stats.Idle))
options.Meter.Counter(WaitConnections).Set(uint64(stats.WaitCount))
options.Meter.FloatCounter(BlockedSeconds).Set(stats.WaitDuration.Seconds())
options.Meter.Counter(MaxIdleClosed).Set(uint64(stats.MaxIdleClosed))
options.Meter.Counter(MaxIdletimeClosed).Set(uint64(stats.MaxIdleTimeClosed))
options.Meter.Counter(MaxLifetimeClosed).Set(uint64(stats.MaxLifetimeClosed))
}
}
}()
}
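Because *sql.DB already satisfies Statser, exporting pool metrics is a single call; the interval and labels below are illustrative.
// Sketch: export pool statistics until ctx is cancelled.
func exportPoolStats(ctx context.Context, db *sql.DB) {
	NewStatsMeter(ctx, db,
		MetricInterval(10*time.Second),
		DatabaseHost("db.local"),
		DatabaseName("orders"),
	)
}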

View File

@@ -1,287 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
requestid "go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Stmt = (*wrapperStmt)(nil)
_ driver.StmtQueryContext = (*wrapperStmt)(nil)
_ driver.StmtExecContext = (*wrapperStmt)(nil)
_ driver.NamedValueChecker = (*wrapperStmt)(nil)
)
// wrapperStmt defines a wrapper for driver.Stmt
type wrapperStmt struct {
stmt driver.Stmt
opts Options
query string
ctx context.Context
}
// Close implements driver.Stmt Close
func (w *wrapperStmt) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.stmt.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// NumInput implements driver.Stmt NumInput
func (w *wrapperStmt) NumInput() int {
return w.stmt.NumInput()
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperStmt) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.stmt.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// Exec implements driver.Stmt Exec
func (w *wrapperStmt) Exec(args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec"}
ts := time.Now()
res, err := w.stmt.Exec(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// Query implements driver.Stmt Query
func (w *wrapperStmt) Query(args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Query"}
ts := time.Now()
rows, err := w.stmt.Query(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// ColumnConverter implements driver.ColumnConverter
func (w *wrapperStmt) ColumnConverter(idx int) driver.ValueConverter {
s, ok := w.stmt.(driver.ColumnConverter) // nolint:staticcheck
if !ok {
return nil
}
return s.ColumnConverter(idx)
}
// ExecContext implements driver.StmtExecContext ExecContext
func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtExecContext); ok {
ts := time.Now()
res, err := conn.ExecContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
res, err := w.Exec(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
// QueryContext implements driver.StmtQueryContext QueryContext
func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtQueryContext); ok {
ts := time.Now()
rows, err := conn.QueryContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
rows, err := w.Query(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}

View File

@@ -1,63 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"time"
"go.unistack.org/micro/v4/tracer"
)
var _ driver.Tx = (*wrapperTx)(nil)
// wrapperTx defines a wrapper for driver.Tx
type wrapperTx struct {
tx driver.Tx
span tracer.Span
opts Options
ctx context.Context
}
// Commit implements driver.Tx Commit
func (w *wrapperTx) Commit() error {
ts := time.Now()
err := w.tx.Commit()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Commit", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}
// Rollback implements driver.Tx Rollback
func (w *wrapperTx) Rollback() error {
ts := time.Now()
err := w.tx.Rollback()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Rollback", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}

View File

@@ -1,19 +0,0 @@
package sql
import (
"database/sql/driver"
)
/*
func wrapDriver(d driver.Driver, opts Options) driver.Driver {
if _, ok := d.(driver.DriverContext); ok {
return &wrapperDriver{driver: d, opts: opts}
}
return struct{ driver.Driver }{&wrapperDriver{driver: d, opts: opts}}
}
*/
// WrapConn allows an existing driver.Conn to be wrapped.
func WrapConn(c driver.Conn, opts ...Option) driver.Conn {
return wrapConn(c, NewOptions(opts...))
}

File diff suppressed because it is too large

View File

@@ -1,133 +0,0 @@
package validator
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
var (
DefaultClientErrorFunc = func(req client.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
DefaultServerErrorFunc = func(req server.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
)
type (
ClientErrorFunc func(client.Request, interface{}, error) error
ServerErrorFunc func(server.Request, interface{}, error) error
)
// Options struct holds wrapper options
type Options struct {
ClientErrorFn ClientErrorFunc
ServerErrorFn ServerErrorFunc
ClientValidateResponse bool
ServerValidateResponse bool
}
// Option func signature
type Option func(*Options)
func ClientValidateResponse(b bool) Option {
return func(o *Options) {
o.ClientValidateResponse = b
}
}
func ServerValidateResponse(b bool) Option {
return func(o *Options) {
o.ServerValidateResponse = b
}
}
func ClientReqErrorFn(fn ClientErrorFunc) Option {
return func(o *Options) {
o.ClientErrorFn = fn
}
}
func ServerErrorFn(fn ServerErrorFunc) Option {
return func(o *Options) {
o.ServerErrorFn = fn
}
}
func NewOptions(opts ...Option) Options {
options := Options{
ClientErrorFn: DefaultClientErrorFunc,
ServerErrorFn: DefaultServerErrorFunc,
}
for _, o := range opts {
o(&options)
}
return options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
type validator interface {
Validate() error
}
type hook struct {
opts Options
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ClientErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp, opts...)
if v, ok := rsp.(validator); ok && w.opts.ClientValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ClientErrorFn(req, rsp, verr)
}
}
return err
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return nil, w.opts.ClientErrorFn(req, nil, err)
}
}
return next(ctx, req, opts...)
}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ServerErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp)
if v, ok := rsp.(validator); ok && w.opts.ServerValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ServerErrorFn(req, rsp, verr)
}
}
return err
}
}
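A compact sketch of the client side: the hook validates the request body before the call and, with ClientValidateResponse set, the response afterwards. The terminal call is illustrative, and registering the hook on a concrete client is not part of this diff.
// In-package sketch (no real transport).
func exampleValidatorClientCall() {
	h := NewHook(ClientValidateResponse(true))
	call := h.ClientCall(func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
		return nil // the real RPC would happen here
	})
	_ = call // pass to the client through its hook mechanism (not shown here)
}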

View File

@@ -13,15 +13,6 @@ func FromContext(ctx context.Context) (Logger, bool) {
return l, ok
}
// MustContext returns logger from passed context or panics if the logger is not set
func MustContext(ctx context.Context) Logger {
l, ok := FromContext(ctx)
if !ok {
panic("missing logger")
}
return l
}
// NewContext stores logger into passed context
func NewContext(ctx context.Context, l Logger) context.Context {
if ctx == nil {
@@ -29,13 +20,3 @@ func NewContext(ctx context.Context, l Logger) context.Context {
}
return context.WithValue(ctx, loggerKey{}, l)
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}

View File

@@ -40,14 +40,3 @@ func TestNewContext(t *testing.T) {
t.Fatal("NewContext not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")
opts := &Options{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetOption not works")
}
}

View File

@@ -4,20 +4,18 @@ package logger
type Level int8
const (
// TraceLevel usually used to find bugs, very verbose
// TraceLevel level usually used to find bugs, very verbose
TraceLevel Level = iota - 2
// DebugLevel used only when enabled debugging
// DebugLevel level used only when enabled debugging
DebugLevel
// InfoLevel used for general info about what's going on inside the application
// InfoLevel level used for general info about what's going on inside the application
InfoLevel
// WarnLevel used for non-critical entries
// WarnLevel level used for non-critical entries
WarnLevel
// ErrorLevel used for errors that should definitely be noted
// ErrorLevel level used for errors that should definitely be noted
ErrorLevel
// FatalLevel used for critical errors and then calls `os.Exit(1)`
// FatalLevel level used for critical errors and then calls `os.Exit(1)`
FatalLevel
// NoneLevel used to disable logging
NoneLevel
)
// String returns logger level string representation
@@ -35,8 +33,6 @@ func (l Level) String() string {
return "error"
case FatalLevel:
return "fatal"
case NoneLevel:
return "none"
}
return "info"
}
@@ -62,8 +58,6 @@ func ParseLevel(lvl string) Level {
return ErrorLevel
case FatalLevel.String():
return FatalLevel
case NoneLevel.String():
return NoneLevel
}
return InfoLevel
}

View File

@@ -1,27 +1,28 @@
// Package logger provides a log interface
package logger
package logger // import "go.unistack.org/micro/v4/logger"
import (
"context"
"os"
"go.unistack.org/micro/v4/options"
)
type ContextAttrFunc func(ctx context.Context) []interface{}
var DefaultContextAttrFuncs []ContextAttrFunc
var (
// DefaultLogger variable
DefaultLogger = NewLogger()
DefaultLogger = NewLogger(WithLevel(ParseLevel(os.Getenv("MICRO_LOG_LEVEL"))))
// DefaultLevel used by logger
DefaultLevel = InfoLevel
// DefaultCallerSkipCount used by logger
DefaultCallerSkipCount = 2
)
// Logger is a generic logging interface
type Logger interface {
// Init initialises options
Init(opts ...Option) error
Init(opts ...options.Option) error
// Clone create logger copy with new options
Clone(opts ...Option) Logger
Clone(opts ...options.Option) Logger
// V compare provided verbosity level with current log level
V(level Level) bool
// Level sets the log level for logger
@@ -31,24 +32,111 @@ type Logger interface {
// Fields set fields to always be logged with keyval pairs
Fields(fields ...interface{}) Logger
// Info level message
Info(ctx context.Context, msg string, args ...interface{})
Info(ctx context.Context, args ...interface{})
// Trace level message
Trace(ctx context.Context, msg string, args ...interface{})
Trace(ctx context.Context, args ...interface{})
// Debug level message
Debug(ctx context.Context, msg string, args ...interface{})
Debug(ctx context.Context, args ...interface{})
// Warn level message
Warn(ctx context.Context, msg string, args ...interface{})
Warn(ctx context.Context, args ...interface{})
// Error level message
Error(ctx context.Context, msg string, args ...interface{})
Error(ctx context.Context, args ...interface{})
// Fatal level message
Fatal(ctx context.Context, msg string, args ...interface{})
Fatal(ctx context.Context, args ...interface{})
// Infof level message
Infof(ctx context.Context, msg string, args ...interface{})
// Tracef level message
Tracef(ctx context.Context, msg string, args ...interface{})
// Debugf level message
Debugf(ctx context.Context, msg string, args ...interface{})
// Warnf level message
Warnf(ctx context.Context, msg string, args ...interface{})
// Errorf level message
Errorf(ctx context.Context, msg string, args ...interface{})
// Fatalf level message
Fatalf(ctx context.Context, msg string, args ...interface{})
// Log logs message with needed level
Log(ctx context.Context, level Level, msg string, args ...interface{})
// Name returns logger instance name
Name() string
// String returns the type of logger
Log(ctx context.Context, level Level, args ...interface{})
// Logf logs message with needed level
Logf(ctx context.Context, level Level, msg string, args ...interface{})
// String returns the name of logger
String() string
}
// Field contains keyval pair
type Field interface{}
// Info writes msg to default logger on info level
func Info(ctx context.Context, args ...interface{}) {
DefaultLogger.Info(ctx, args...)
}
// Error writes msg to default logger on error level
func Error(ctx context.Context, args ...interface{}) {
DefaultLogger.Error(ctx, args...)
}
// Debug writes msg to default logger on debug level
func Debug(ctx context.Context, args ...interface{}) {
DefaultLogger.Debug(ctx, args...)
}
// Warn writes msg to default logger on warn level
func Warn(ctx context.Context, args ...interface{}) {
DefaultLogger.Warn(ctx, args...)
}
// Trace writes msg to default logger on trace level
func Trace(ctx context.Context, args ...interface{}) {
DefaultLogger.Trace(ctx, args...)
}
// Fatal writes msg to default logger on fatal level
func Fatal(ctx context.Context, args ...interface{}) {
DefaultLogger.Fatal(ctx, args...)
}
// Infof writes formatted msg to default logger on info level
func Infof(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Infof(ctx, msg, args...)
}
// Errorf writes formatted msg to default logger on error level
func Errorf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Errorf(ctx, msg, args...)
}
// Debugf writes formatted msg to default logger on debug level
func Debugf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Debugf(ctx, msg, args...)
}
// Warnf writes formatted msg to default logger on warn level
func Warnf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Warnf(ctx, msg, args...)
}
// Tracef writes formatted msg to default logger on trace level
func Tracef(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Tracef(ctx, msg, args...)
}
// Fatalf writes formatted msg to default logger on fatal level
func Fatalf(ctx context.Context, msg string, args ...interface{}) {
DefaultLogger.Fatalf(ctx, msg, args...)
}
// V returns true if passed level enabled in default logger
func V(level Level) bool {
return DefaultLogger.V(level)
}
// Init initialize logger
func Init(opts ...options.Option) error {
return DefaultLogger.Init(opts...)
}
// Fields create logger with specific fields
func Fields(fields ...interface{}) Logger {
return DefaultLogger.Fields(fields...)
}
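With DefaultLogger now initialised from MICRO_LOG_LEVEL and fronted by package-level helpers, callers can log without threading a Logger value through their code. A short, self-contained sketch (field names and messages are illustrative):
package main

import (
	"context"

	"go.unistack.org/micro/v4/logger"
)

func main() {
	ctx := context.Background()
	logger.Infof(ctx, "user %s logged in", "alice")                       // formatted helper
	logger.Fields("request_id", "abc123").Error(ctx, "processing failed") // structured fields
	if logger.V(logger.DebugLevel) {
		logger.Debug(ctx, "verbose diagnostics enabled")
	}
}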

logger/logger_test.go (new file, 140 lines)
View File

@@ -0,0 +1,140 @@
package logger
import (
"bytes"
"context"
"log"
"testing"
)
func TestContext(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl, ok := FromContext(NewContext(ctx, l.Fields("key", "val")))
if !ok {
t.Fatal("context without logger")
}
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFromContextWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
var ok bool
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
ctx = NewContext(ctx, nl)
l, ok = FromContext(ctx)
if !ok {
t.Fatalf("context does not have logger")
}
l.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Clone(WithLevel(ErrorLevel))
if err := nl.Init(); err != nil {
t.Fatal(err)
}
nl.Info(ctx, "info message")
if len(buf.Bytes()) != 0 {
t.Fatal("message must not be logged")
}
l.Info(ctx, "info message")
if len(buf.Bytes()) == 0 {
t.Fatal("message must be logged")
}
}
func TestRedirectStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(ErrorLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
fn := RedirectStdLogger(l, ErrorLevel)
defer fn()
log.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
lg := NewStdLogger(l, ErrorLevel)
lg.Print("test")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"error","msg":"test"`)) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestLogger(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(WithLevel(TraceLevel), WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
// l.Warn(ctx, "warn_msg1")
// l.Fields("error", "test").Info(ctx, "error message")
// l.Warn(ctx, "first second")
if !bytes.Contains(buf.Bytes(), []byte(`"level":"trace","msg":"trace_msg1"`)) {
t.Fatalf("logger tracer, buf %s", buf.Bytes())
}
/*
if !bytes.Contains(buf.Bytes(), []byte(`"warn","msg":"warn_msg1"`)) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"test","level":"info","msg":"error message"`)) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"level":"warn","msg":"first second"`)) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
*/
}


@@ -1,78 +0,0 @@
package logger
import (
"context"
)
const (
defaultCallerSkipCount = 2
)
type noopLogger struct {
opts Options
}
func NewLogger(opts ...Option) Logger {
options := NewOptions(opts...)
options.CallerSkipCount = defaultCallerSkipCount
return &noopLogger{opts: options}
}
func (l *noopLogger) V(_ Level) bool {
return false
}
func (l *noopLogger) Level(_ Level) {
}
func (l *noopLogger) Name() string {
return l.opts.Name
}
func (l *noopLogger) Init(opts ...Option) error {
for _, o := range opts {
o(&l.opts)
}
return nil
}
func (l *noopLogger) Clone(opts ...Option) Logger {
nl := &noopLogger{opts: l.opts}
for _, o := range opts {
o(&nl.opts)
}
return nl
}
func (l *noopLogger) Fields(_ ...interface{}) Logger {
return l
}
func (l *noopLogger) Options() Options {
return l.opts
}
func (l *noopLogger) String() string {
return "noop"
}
func (l *noopLogger) Log(ctx context.Context, lvl Level, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
}
func (l *noopLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
}


@@ -3,228 +3,66 @@ package logger
import (
"context"
"io"
"log/slog"
"os"
"slices"
"time"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
)
// Option func signature
type Option func(*Options)
// Options holds logger options
type Options struct {
// TimeKey is the key used for the time of the log call
TimeKey string
// LevelKey is the key used for the level of the log call
LevelKey string
// ErrorKey is the key used for the error of the log call
ErrorKey string
// MessageKey is the key used for the message of the log call
MessageKey string
// SourceKey is the key used for the source file and line of the log call
SourceKey string
// StacktraceKey is the key used for the stacktrace
StacktraceKey string
// Name holds the logger name
Name string
// Out holds the output writer
Out io.Writer
// Context holds external options
Context context.Context
// Meter used to count logs for specific level
Meter meter.Meter
// TimeFunc used to obtain current time
TimeFunc func() time.Time
// Fields holds additional metadata
Fields []interface{}
// ContextAttrFuncs contains funcs that executed before log func on context
ContextAttrFuncs []ContextAttrFunc
// CallerSkipCount is the number of frames to skip
CallerSkipCount int
// Name holds the logger name
Name string
// The logging level the logger should log
Level Level
// AddSource enables writing the source file and position in the log
AddSource bool
// AddStacktrace controls writing of stacktraces on error
AddStacktrace bool
// DedupKeys deduplicates keys in log output
DedupKeys bool
// CallerSkipCount is the number of frames to skip
CallerSkipCount int
}
// NewOptions creates new options struct
func NewOptions(opts ...Option) Options {
func NewOptions(opts ...options.Option) Options {
options := Options{
Level: DefaultLevel,
Fields: make([]interface{}, 0, 6),
Out: os.Stderr,
Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
AddSource: true,
TimeFunc: time.Now,
Meter: meter.DefaultMeter,
Level: DefaultLevel,
Fields: make([]interface{}, 0, 6),
Out: os.Stderr,
CallerSkipCount: DefaultCallerSkipCount,
Context: context.Background(),
}
WithMicroKeys()(&options)
for _, o := range opts {
o(&options)
}
return options
}
// WithContextAttrFuncs appends default funcs for the context attrs filler
func WithContextAttrFuncs(fncs ...ContextAttrFunc) Option {
return func(o *Options) {
o.ContextAttrFuncs = append(o.ContextAttrFuncs, fncs...)
}
}
// WithDedupKeys controls whether duplicate keys are deduplicated in log output
func WithDedupKeys(b bool) Option {
return func(o *Options) {
o.DedupKeys = b
}
}
// WithAddFields adds fields to the logger
func WithAddFields(fields ...interface{}) Option {
return func(o *Options) {
if o.DedupKeys {
for i := 0; i < len(o.Fields); i += 2 {
for j := 0; j < len(fields); j += 2 {
iv, iok := o.Fields[i].(string)
jv, jok := fields[j].(string)
if iok && jok && iv == jv {
o.Fields[i+1] = fields[j+1]
fields = slices.Delete(fields, j, j+2)
}
}
}
if len(fields) > 0 {
o.Fields = append(o.Fields, fields...)
}
} else {
o.Fields = append(o.Fields, fields...)
}
}
}
// WithFields sets the default fields for the logger
func WithFields(fields ...interface{}) Option {
return func(o *Options) {
o.Fields = fields
func WithFields(fields ...interface{}) options.Option {
return func(src interface{}) error {
return options.Set(src, fields, ".Fields")
}
}
// WithLevel sets the default level for the logger
func WithLevel(level Level) Option {
return func(o *Options) {
o.Level = level
func WithLevel(lvl Level) options.Option {
return func(src interface{}) error {
return options.Set(src, lvl, ".Level")
}
}
// WithOutput sets the default output writer for the logger
func WithOutput(out io.Writer) Option {
return func(o *Options) {
o.Out = out
func WithOutput(out io.Writer) options.Option {
return func(src interface{}) error {
return options.Set(src, out, ".Out")
}
}
// WithAddStacktrace controls writing stacktrace on error
func WithAddStacktrace(v bool) Option {
return func(o *Options) {
o.AddStacktrace = v
}
}
// WithAddSource controls writing source file and pos in log
func WithAddSource(v bool) Option {
return func(o *Options) {
o.AddSource = v
}
}
// WithContext set context
func WithContext(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
}
}
// WithName sets the name
func WithName(n string) Option {
return func(o *Options) {
o.Name = n
}
}
// WithMeter sets the meter
func WithMeter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// WithTimeFunc sets the func to obtain current time
func WithTimeFunc(fn func() time.Time) Option {
return func(o *Options) {
o.TimeFunc = fn
}
}
func WithZapKeys() Option {
return func(o *Options) {
o.TimeKey = "@timestamp"
o.LevelKey = slog.LevelKey
o.MessageKey = slog.MessageKey
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithZerologKeys() Option {
return func(o *Options) {
o.TimeKey = slog.TimeKey
o.LevelKey = slog.LevelKey
o.MessageKey = "message"
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithSlogKeys() Option {
return func(o *Options) {
o.TimeKey = slog.TimeKey
o.LevelKey = slog.LevelKey
o.MessageKey = slog.MessageKey
o.SourceKey = slog.SourceKey
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
func WithMicroKeys() Option {
return func(o *Options) {
o.TimeKey = "timestamp"
o.LevelKey = slog.LevelKey
o.MessageKey = slog.MessageKey
o.SourceKey = "caller"
o.StacktraceKey = "stacktrace"
o.ErrorKey = "error"
}
}
// WithAddCallerSkipCount adds the given value to the caller skip count of the logger
func WithAddCallerSkipCount(n int) Option {
return func(o *Options) {
if n > 0 {
o.CallerSkipCount += n
}
// WithCallerSkipCount sets the number of stack frames to skip
func WithCallerSkipCount(c int) options.Option {
return func(src interface{}) error {
return options.Set(src, c, ".CallerSkipCount")
}
}
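A short illustrative sketch (not part of the diff) of how the reworked options.Option setters above compose; it assumes options.Set resolves the named struct field by reflection, as the setters rely on.
package logger

import "os"

// optionsSketch shows the new setters composing through NewOptions.
func optionsSketch() Options {
	return NewOptions(
		WithLevel(InfoLevel),
		WithOutput(os.Stdout),
		WithCallerSkipCount(3),
		WithFields("service", "example"),
	)
}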

245
logger/slog.go Normal file

@@ -0,0 +1,245 @@
package logger
import (
"context"
"fmt"
"os"
"go.unistack.org/micro/v4/options"
"golang.org/x/exp/slog"
)
var (
traceValue = slog.StringValue("trace")
debugValue = slog.StringValue("debug")
infoValue = slog.StringValue("info")
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
)
var renameAttr = func(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.TimeKey:
a.Key = "timestamp"
case slog.LevelKey:
level := a.Value.Any().(slog.Level)
lvl := slogToLoggerLevel(level)
switch {
case lvl < DebugLevel:
a.Value = traceValue
case lvl < InfoLevel:
a.Value = debugValue
case lvl < WarnLevel:
a.Value = infoValue
case lvl < ErrorLevel:
a.Value = warnValue
case lvl < FatalLevel:
a.Value = errorValue
case lvl >= FatalLevel:
a.Value = fatalValue
default:
a.Value = infoValue
}
}
return a
}
type slogLogger struct {
slog *slog.Logger
leveler *slog.LevelVar
opts Options
}
func (s *slogLogger) Clone(opts ...options.Option) Logger {
options := s.opts
for _, o := range opts {
o(&options)
}
l := &slogLogger{
opts: options,
}
if slog, ok := s.opts.Context.Value(loggerKey{}).(*slog.Logger); ok {
l.slog = slog
return l
}
l.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: l.leveler,
}
l.leveler.Set(loggerToSlogLevel(l.opts.Level))
handler := slog.NewJSONHandler(options.Out, handleOpt)
l.slog = slog.New(handler).With(options.Fields...)
return l
}
func (s *slogLogger) V(level Level) bool {
return s.opts.Level.Enabled(level)
}
func (s *slogLogger) Level(level Level) {
s.leveler.Set(loggerToSlogLevel(level))
}
func (s *slogLogger) Options() Options {
return s.opts
}
func (s *slogLogger) Fields(fields ...interface{}) Logger {
nl := &slogLogger{opts: s.opts}
nl.leveler = new(slog.LevelVar)
nl.leveler.Set(s.leveler.Level())
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: s.leveler,
}
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
nl.slog = slog.New(handler).With(fields...)
return nl
}
func (s *slogLogger) Init(opts ...options.Option) error {
for _, o := range opts {
if err := o(&s.opts); err != nil {
return err
}
}
if slog, ok := s.opts.Context.Value(loggerKey{}).(*slog.Logger); ok {
s.slog = slog
return nil
}
s.leveler = new(slog.LevelVar)
handleOpt := &slog.HandlerOptions{
ReplaceAttr: renameAttr,
Level: s.leveler,
}
s.leveler.Set(loggerToSlogLevel(s.opts.Level))
handler := slog.NewJSONHandler(s.opts.Out, handleOpt)
s.slog = slog.New(handler).With(s.opts.Fields...)
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl Level, args ...any) {
if !s.V(lvl) {
return
}
slvl := loggerToSlogLevel(lvl)
msg := fmt.Sprint(args...)
s.slog.Log(ctx, slvl, msg)
}
func (s *slogLogger) Logf(ctx context.Context, lvl Level, format string, args ...any) {
if !s.V(lvl) {
return
}
slvl := loggerToSlogLevel(lvl)
s.slog.Log(ctx, slvl, fmt.Sprintf(format, args...))
}
func (s *slogLogger) Info(ctx context.Context, args ...any) {
s.Log(ctx, InfoLevel, args...)
}
func (s *slogLogger) Infof(ctx context.Context, format string, args ...interface{}) {
s.Logf(ctx, InfoLevel, format, args...)
}
func (s *slogLogger) Debug(ctx context.Context, args ...any) {
s.Log(ctx, DebugLevel, args...)
}
func (s *slogLogger) Debugf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, DebugLevel, format, args...)
}
func (s *slogLogger) Error(ctx context.Context, args ...any) {
s.Log(ctx, ErrorLevel, args...)
}
func (s *slogLogger) Trace(ctx context.Context, args ...interface{}) {
s.Log(ctx, TraceLevel, args...)
}
func (s *slogLogger) Tracef(ctx context.Context, msg string, args ...interface{}) {
s.Logf(ctx, TraceLevel, msg, args...)
}
func (s *slogLogger) Errorf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, ErrorLevel, format, args...)
}
func (s *slogLogger) Fatal(ctx context.Context, args ...any) {
s.Log(ctx, FatalLevel, args...)
}
func (s *slogLogger) Fatalf(ctx context.Context, msg string, args ...interface{}) {
s.Logf(ctx, FatalLevel, msg, args...)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, args ...any) {
s.Log(ctx, WarnLevel, args...)
}
func (s *slogLogger) Warnf(ctx context.Context, format string, args ...any) {
s.Logf(ctx, WarnLevel, format, args...)
}
func (s *slogLogger) String() string {
return "slog"
}
func NewLogger(opts ...options.Option) Logger {
l := &slogLogger{
opts: NewOptions(opts...),
}
return l
}
func loggerToSlogLevel(level Level) slog.Level {
switch level {
case DebugLevel:
return slog.LevelDebug
case WarnLevel:
return slog.LevelWarn
case ErrorLevel:
return slog.LevelError
case TraceLevel:
return slog.LevelDebug - 1
case FatalLevel:
return slog.LevelError + 1
default:
return slog.LevelInfo
}
}
func slogToLoggerLevel(level slog.Level) Level {
switch level {
case slog.LevelDebug:
return DebugLevel
case slog.LevelWarn:
return WarnLevel
case slog.LevelError:
return ErrorLevel
case slog.LevelDebug - 1:
return TraceLevel
case slog.LevelError + 1:
return FatalLevel
default:
return InfoLevel
}
}
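An illustrative construction sketch for the slog-backed logger defined above (not part of the diff); it leans on the mapping above where TraceLevel becomes slog.LevelDebug-1 and FatalLevel becomes slog.LevelError+1.
package logger

import (
	"context"
	"os"
)

// slogUsageSketch builds the slog-backed logger from this file and emits one entry.
func slogUsageSketch() {
	l := NewLogger(WithLevel(TraceLevel), WithOutput(os.Stdout))
	if err := l.Init(); err != nil {
		panic(err)
	}
	// Fields derives a logger with extra attributes before logging.
	l.Fields("key", "val").Info(context.Background(), "ready")
}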


@@ -1,382 +0,0 @@
package slog
import (
"context"
"io"
"log/slog"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
)
const (
badKey = "!BADKEY"
// defaultCallerSkipCount used by logger
defaultCallerSkipCount = 3
timeFormat = "2006-01-02T15:04:05.000000000Z07:00"
)
var reTrace = regexp.MustCompile(`.*/slog/logger\.go.*\n`)
var (
traceValue = slog.StringValue("trace")
debugValue = slog.StringValue("debug")
infoValue = slog.StringValue("info")
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
noneValue = slog.StringValue("none")
)
type wrapper struct {
h slog.Handler
level atomic.Int64
}
func (h *wrapper) Enabled(ctx context.Context, level slog.Level) bool {
return level >= slog.Level(int(h.level.Load()))
}
func (h *wrapper) Handle(ctx context.Context, rec slog.Record) error {
return h.h.Handle(ctx, rec)
}
func (h *wrapper) WithAttrs(attrs []slog.Attr) slog.Handler {
return h.h.WithAttrs(attrs)
}
func (h *wrapper) WithGroup(name string) slog.Handler {
return h.h.WithGroup(name)
}
func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.SourceKey:
source := a.Value.Any().(*slog.Source)
a.Value = slog.StringValue(source.File + ":" + strconv.Itoa(source.Line))
a.Key = s.opts.SourceKey
case slog.TimeKey:
a.Key = s.opts.TimeKey
a.Value = slog.StringValue(a.Value.Time().Format(timeFormat))
case slog.MessageKey:
a.Key = s.opts.MessageKey
case slog.LevelKey:
level := a.Value.Any().(slog.Level)
lvl := slogToLoggerLevel(level)
a.Key = s.opts.LevelKey
switch {
case lvl < logger.DebugLevel:
a.Value = traceValue
case lvl < logger.InfoLevel:
a.Value = debugValue
case lvl < logger.WarnLevel:
a.Value = infoValue
case lvl < logger.ErrorLevel:
a.Value = warnValue
case lvl < logger.FatalLevel:
a.Value = errorValue
case lvl >= logger.FatalLevel:
a.Value = fatalValue
case lvl >= logger.NoneLevel:
a.Value = noneValue
default:
a.Value = infoValue
}
}
return a
}
type slogLogger struct {
handler *wrapper
opts logger.Options
mu sync.RWMutex
}
func (s *slogLogger) Clone(opts ...logger.Option) logger.Logger {
s.mu.RLock()
options := s.opts
s.mu.RUnlock()
for _, o := range opts {
o(&options)
}
if len(options.ContextAttrFuncs) == 0 {
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
attrs, _ := s.argsAttrs(options.Fields)
l := &slogLogger{
handler: &wrapper{h: s.handler.h.WithAttrs(attrs)},
opts: options,
}
l.handler.level.Store(int64(loggerToSlogLevel(options.Level)))
return l
}
func (s *slogLogger) V(level logger.Level) bool {
s.mu.Lock()
v := s.opts.Level.Enabled(level)
s.mu.Unlock()
return v
}
func (s *slogLogger) Level(level logger.Level) {
s.mu.Lock()
s.opts.Level = level
s.handler.level.Store(int64(loggerToSlogLevel(level)))
s.mu.Unlock()
}
func (s *slogLogger) Options() logger.Options {
return s.opts
}
func (s *slogLogger) Fields(fields ...interface{}) logger.Logger {
s.mu.RLock()
options := s.opts
s.mu.RUnlock()
l := &slogLogger{opts: options}
logger.WithAddFields(fields...)(&l.opts)
if len(options.ContextAttrFuncs) == 0 {
options.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
attrs, _ := s.argsAttrs(fields)
l.handler = &wrapper{h: s.handler.h.WithAttrs(attrs)}
l.handler.level.Store(int64(loggerToSlogLevel(l.opts.Level)))
return l
}
func (s *slogLogger) Init(opts ...logger.Option) error {
s.mu.Lock()
for _, o := range opts {
o(&s.opts)
}
if len(s.opts.ContextAttrFuncs) == 0 {
s.opts.ContextAttrFuncs = logger.DefaultContextAttrFuncs
}
handleOpt := &slog.HandlerOptions{
ReplaceAttr: s.renameAttr,
Level: loggerToSlogLevel(logger.TraceLevel),
AddSource: s.opts.AddSource,
}
attrs, _ := s.argsAttrs(s.opts.Fields)
var h slog.Handler
if s.opts.Context != nil {
if v, ok := s.opts.Context.Value(handlerKey{}).(slog.Handler); ok && v != nil {
h = v
}
if fn := s.opts.Context.Value(handlerFnKey{}); fn != nil {
if rfn := reflect.ValueOf(fn); rfn.Kind() == reflect.Func {
if ret := rfn.Call([]reflect.Value{reflect.ValueOf(s.opts.Out), reflect.ValueOf(handleOpt)}); len(ret) == 1 {
if iface, ok := ret[0].Interface().(slog.Handler); ok && iface != nil {
h = iface
}
}
}
}
}
if h == nil {
h = slog.NewJSONHandler(s.opts.Out, handleOpt)
}
s.handler = &wrapper{h: h.WithAttrs(attrs)}
s.handler.level.Store(int64(loggerToSlogLevel(s.opts.Level)))
s.mu.Unlock()
return nil
}
func (s *slogLogger) Log(ctx context.Context, lvl logger.Level, msg string, attrs ...interface{}) {
s.printLog(ctx, lvl, msg, attrs...)
}
func (s *slogLogger) Info(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.InfoLevel, msg, attrs...)
}
func (s *slogLogger) Debug(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.DebugLevel, msg, attrs...)
}
func (s *slogLogger) Trace(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.TraceLevel, msg, attrs...)
}
func (s *slogLogger) Error(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.ErrorLevel, msg, attrs...)
}
func (s *slogLogger) Fatal(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.FatalLevel, msg, attrs...)
if closer, ok := s.opts.Out.(io.Closer); ok {
closer.Close()
}
time.Sleep(1 * time.Second)
os.Exit(1)
}
func (s *slogLogger) Warn(ctx context.Context, msg string, attrs ...interface{}) {
s.printLog(ctx, logger.WarnLevel, msg, attrs...)
}
func (s *slogLogger) Name() string {
return s.opts.Name
}
func (s *slogLogger) String() string {
return "slog"
}
func (s *slogLogger) printLog(ctx context.Context, lvl logger.Level, msg string, args ...interface{}) {
if !s.V(lvl) {
return
}
var argError error
s.opts.Meter.Counter(semconv.LoggerMessageTotal, "level", lvl.String()).Inc()
attrs, err := s.argsAttrs(args)
if err != nil {
argError = err
}
if argError != nil {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, argError.Error())
}
}
for _, fn := range s.opts.ContextAttrFuncs {
ctxAttrs, err := s.argsAttrs(fn(ctx))
if err != nil {
argError = err
}
attrs = append(attrs, ctxAttrs...)
}
if argError != nil {
if span, ok := tracer.SpanFromContext(ctx); ok {
span.SetStatus(tracer.SpanStatusError, argError.Error())
}
}
if s.opts.AddStacktrace && (lvl == logger.FatalLevel || lvl == logger.ErrorLevel) {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
if len(traceLines) != 0 {
attrs = append(attrs, slog.String(s.opts.StacktraceKey, traceLines[len(traceLines)-1]))
}
}
}
var pcs [1]uintptr
runtime.Callers(s.opts.CallerSkipCount, pcs[:]) // skip [Callers, printLog, LogLvlMethod]
r := slog.NewRecord(s.opts.TimeFunc(), loggerToSlogLevel(lvl), msg, pcs[0])
r.AddAttrs(attrs...)
_ = s.handler.Handle(ctx, r)
}
func NewLogger(opts ...logger.Option) logger.Logger {
s := &slogLogger{
opts: logger.NewOptions(opts...),
}
s.opts.CallerSkipCount = defaultCallerSkipCount
return s
}
func loggerToSlogLevel(level logger.Level) slog.Level {
switch level {
case logger.DebugLevel:
return slog.LevelDebug
case logger.WarnLevel:
return slog.LevelWarn
case logger.ErrorLevel:
return slog.LevelError
case logger.TraceLevel:
return slog.LevelDebug - 1
case logger.FatalLevel:
return slog.LevelError + 1
case logger.NoneLevel:
return slog.LevelError + 2
default:
return slog.LevelInfo
}
}
func slogToLoggerLevel(level slog.Level) logger.Level {
switch level {
case slog.LevelDebug:
return logger.DebugLevel
case slog.LevelWarn:
return logger.WarnLevel
case slog.LevelError:
return logger.ErrorLevel
case slog.LevelDebug - 1:
return logger.TraceLevel
case slog.LevelError + 1:
return logger.FatalLevel
case slog.LevelError + 2:
return logger.NoneLevel
default:
return logger.InfoLevel
}
}
func (s *slogLogger) argsAttrs(args []interface{}) ([]slog.Attr, error) {
attrs := make([]slog.Attr, 0, len(args))
var err error
for idx := 0; idx < len(args); idx++ {
switch arg := args[idx].(type) {
case slog.Attr:
attrs = append(attrs, arg)
case string:
if idx+1 < len(args) {
attrs = append(attrs, slog.Any(arg, args[idx+1]))
idx++
} else {
attrs = append(attrs, slog.String(badKey, arg))
}
case error:
attrs = append(attrs, slog.String(s.opts.ErrorKey, arg.Error()))
err = arg
}
}
return attrs, err
}
type handlerKey struct{}
func WithHandler(h slog.Handler) logger.Option {
return logger.SetOption(handlerKey{}, h)
}
type handlerFnKey struct{}
func WithHandlerFunc(fn any) logger.Option {
return logger.SetOption(handlerFnKey{}, fn)
}


@@ -1,472 +0,0 @@
package slog
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"log/slog"
"strings"
"testing"
"time"
"github.com/google/uuid"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/util/buffer"
)
// keep this test first: the assertion below checks a hard-coded slog_test.go line number
func TestStacktrace(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.DebugLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
if !bytes.Contains(buf.Bytes(), []byte(`slog_test.go:32`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestNoneLevel(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.NoneLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
if buf.Len() != 0 {
t.Fatalf("logger none level not works, buf contains: %s", buf.Bytes())
}
}
func TestDelayedBuffer(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
dbuf := buffer.NewDelayedBuffer(100, 100*time.Millisecond, buf)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(dbuf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
time.Sleep(120 * time.Millisecond)
if !bytes.Contains(buf.Bytes(), []byte(`key1=val1`)) {
t.Fatalf("logger delayed buffer not works, buf contains: %s", buf.Bytes())
}
}
func TestTime(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
logger.WithTimeFunc(func() time.Time {
return time.Unix(0, 0)
}),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T03:00:00.000000000+03:00`)) &&
!bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithDedupKeys(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg1")
l = l.Fields("key1", "val2")
l.Info(ctx, "msg2")
if !bytes.Contains(buf.Bytes(), []byte(`msg=msg2 key1=val1`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestWithDedupKeysWithAddFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithDedupKeys(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg1")
if err := l.Init(logger.WithAddFields("key2", "val2")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg2")
if err := l.Init(logger.WithAddFields("key2", "val3", "key1", "val4")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg3")
if !bytes.Contains(buf.Bytes(), []byte(`msg=msg3 key1=val4 key2=val3`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestWithHandlerFunc(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
)
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg1")
if !bytes.Contains(buf.Bytes(), []byte(`msg=msg1`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestWithAddFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg1")
if err := l.Init(logger.WithAddFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg2")
if err := l.Init(logger.WithAddFields("key2", "val2")); err != nil {
t.Fatal(err)
}
l.Info(ctx, "msg3")
if !bytes.Contains(buf.Bytes(), []byte(`"key1"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"key2"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestMultipleFieldsWithLevel(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l = l.Fields("key", "val")
l.Info(ctx, "msg1")
nl := l.Clone(logger.WithLevel(logger.DebugLevel))
nl.Debug(ctx, "msg2")
l.Debug(ctx, "msg3")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"msg1"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"msg2"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if bytes.Contains(buf.Bytes(), []byte(`"msg3"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestMultipleFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.InfoLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l = l.Fields("key", "val")
l = l.Fields("key1", "val1")
l.Info(ctx, "msg")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"key1":"val1"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestError(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithAddStacktrace(true))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Error(ctx, "message", fmt.Errorf("error message"))
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestErrorf(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf), logger.WithAddStacktrace(true))
if err := l.Init(logger.WithContextAttrFuncs(func(_ context.Context) []interface{} {
return nil
})); err != nil {
t.Fatal(err)
}
l.Log(ctx, logger.ErrorLevel, "message", errors.New("error msg"))
l.Log(ctx, logger.ErrorLevel, "", errors.New("error msg"))
if !bytes.Contains(buf.Bytes(), []byte(`"error":"error msg"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"stacktrace":"`)) {
t.Fatalf("logger stacktrace not works, buf contains: %s", buf.Bytes())
}
if !bytes.Contains(buf.Bytes(), []byte(`"error":"`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
func TestContext(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl, ok := logger.FromContext(logger.NewContext(ctx, l.Fields("key", "val")))
if !ok {
t.Fatal("context without logger")
}
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
nl.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestFromContextWithFields(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
var ok bool
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Fields("key", "val")
ctx = logger.NewContext(ctx, nl)
l, ok = logger.FromContext(ctx)
if !ok {
t.Fatalf("context does not have logger")
}
l.Info(ctx, "message")
if !bytes.Contains(buf.Bytes(), []byte(`"key":"val"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
l.Info(ctx, "test", "uncorrected number attributes")
if !bytes.Contains(buf.Bytes(), []byte(`"!BADKEY":"uncorrected number attributes"`)) {
t.Fatalf("logger fields not works, buf contains: %s", buf.Bytes())
}
}
func TestClone(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
nl := l.Clone(logger.WithLevel(logger.ErrorLevel))
if err := nl.Init(); err != nil {
t.Fatal(err)
}
nl.Info(ctx, "info message")
if len(buf.Bytes()) != 0 {
t.Fatal("message must not be logged")
}
l.Info(ctx, "info message")
if len(buf.Bytes()) == 0 {
t.Fatal("message must be logged")
}
}
func TestRedirectStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
fn := logger.RedirectStdLogger(l, logger.ErrorLevel)
defer fn()
log.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestStdLogger(t *testing.T) {
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
lg := logger.NewStdLogger(l, logger.ErrorLevel)
lg.Print("test")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"error"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test"`))) {
t.Fatalf("logger error, buf %s", buf.Bytes())
}
}
func TestLogger(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Trace(ctx, "trace_msg1")
l.Warn(ctx, "warn_msg1")
l.Fields("error", "test").Info(ctx, "error message")
l.Warn(ctx, "first second")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"trace"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"trace_msg1"`))) {
t.Fatalf("logger tracer, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"warn_msg1"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"error message","error":"test"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"warn"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"first second"`))) {
t.Fatalf("logger warn, buf %s", buf.Bytes())
}
}
func Test_WithContextAttrFunc(t *testing.T) {
loggerContextAttrFuncs := []logger.ContextAttrFunc{
func(ctx context.Context) []interface{} {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
return nil
}
attrs := make([]interface{}, 0, 10)
for k, v := range md {
key := strings.ToLower(k)
switch key {
case "x-request-id", "phone", "external-Id", "source-service", "x-app-install-id", "client-id", "client-ip":
attrs = append(attrs, key, v[0])
}
}
return attrs
},
}
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
ctx := context.TODO()
ctx = metadata.AppendOutgoingContext(ctx, "X-Request-Id", uuid.New().String(),
"Source-Service", "Test-System")
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.TraceLevel), logger.WithOutput(buf))
if err := l.Init(); err != nil {
t.Fatal(err)
}
l.Info(ctx, "test message")
if !(bytes.Contains(buf.Bytes(), []byte(`"level":"info"`)) && bytes.Contains(buf.Bytes(), []byte(`"msg":"test message"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"x-request-id":"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
if !(bytes.Contains(buf.Bytes(), []byte(`"source-service":"Test-System"`))) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
buf.Reset()
omd, _ := metadata.FromOutgoingContext(ctx)
l.Info(ctx, "test message1")
omd.Set("Source-Service", "Test-System2")
l.Info(ctx, "test message2")
// t.Logf("xxx %s", buf.Bytes())
}


@@ -36,14 +36,14 @@ var (
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
filteredBytes = []byte("<filtered>")
// openBracketBytes = []byte("[")
// closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("{")
closeMapBytes = []byte("}")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("{")
closeMapBytes = []byte("}")
)
type protoMessage interface {
@@ -52,15 +52,13 @@ type protoMessage interface {
}
type Wrapper struct {
pointers map[uintptr]int
takeMap map[int]bool
val interface{}
s fmt.State
opts *Options
val interface{}
s fmt.State
pointers map[uintptr]int
opts *Options
depth int
ignoreNextType bool
takeMap map[int]bool
protoWrapperType bool
sqlWrapperType bool
}


@@ -82,12 +82,12 @@ func TestTagged(t *testing.T) {
func TestTaggedNested(t *testing.T) {
type val struct {
key string `logger:"take"`
// val string `logger:"omit"`
val string `logger:"omit"`
unk string
}
type str struct {
// key string `logger:"omit"`
val *val `logger:"take"`
key string `logger:"omit"`
val *val `logger:"take"`
}
var iface interface{}

298
logger/wrapper/wrapper.go Normal file

@@ -0,0 +1,298 @@
// Package wrapper provides wrapper for Logger
package wrapper
import (
"context"
"fmt"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/server"
)
var (
// DefaultClientCallObserver called by wrapper in client Call
DefaultClientCallObserver = func(ctx context.Context, req client.Request, rsp interface{}, opts []options.Option, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientStreamObserver called by wrapper in client Stream
DefaultClientStreamObserver = func(ctx context.Context, req client.Request, opts []options.Option, stream client.Stream, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultServerHandlerObserver called by wrapper in server Handler
DefaultServerHandlerObserver = func(ctx context.Context, req server.Request, rsp interface{}, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultClientCallFuncObserver called by wrapper in client CallFunc
DefaultClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
labels := []string{"service", req.Service(), "endpoint", req.Endpoint()}
if err != nil {
labels = append(labels, "error", err.Error())
}
return labels
}
// DefaultSkipEndpoints contains endpoints for which the wrapper is not invoked
DefaultSkipEndpoints = []string{"Meter.Metrics", "Health.Live", "Health.Ready", "Health.Version"}
)
type lWrapper struct {
client.Client
serverHandler server.HandlerFunc
clientCallFunc client.CallFunc
opts Options
}
type (
// ClientCallObserver func signature
ClientCallObserver func(context.Context, client.Request, interface{}, []options.Option, error) []string
// ClientStreamObserver func signature
ClientStreamObserver func(context.Context, client.Request, []options.Option, client.Stream, error) []string
// ClientCallFuncObserver func signature
ClientCallFuncObserver func(context.Context, string, client.Request, interface{}, client.CallOptions, error) []string
// ServerHandlerObserver func signature
ServerHandlerObserver func(context.Context, server.Request, interface{}, error) []string
)
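// Illustrative only (not part of this change): a custom ClientCallFuncObserver
// recording the remote address; it can be installed via WithClientCallFuncObservers below.
var debugClientCallFuncObserver ClientCallFuncObserver = func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions, err error) []string {
	labels := []string{"addr", addr, "service", req.Service(), "endpoint", req.Endpoint()}
	if err != nil {
		labels = append(labels, "error", err.Error())
	}
	return labels
}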
// Options struct for wrapper
type Options struct {
// Logger used for logging
Logger logger.Logger
// ServerHandlerObservers funcs
ServerHandlerObservers []ServerHandlerObserver
// ClientCallObservers funcs
ClientCallObservers []ClientCallObserver
// ClientStreamObservers funcs
ClientStreamObservers []ClientStreamObserver
// ClientCallFuncObservers funcs
ClientCallFuncObservers []ClientCallFuncObserver
// SkipEndpoints contains endpoints that are skipped by the wrapper
SkipEndpoints []string
// Level for logger
Level logger.Level
// Enabled flag
Enabled bool
}
// Option func signature
type Option func(*Options)
// NewOptions creates Options from Option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Level: logger.TraceLevel,
ClientCallObservers: []ClientCallObserver{DefaultClientCallObserver},
ClientStreamObservers: []ClientStreamObserver{DefaultClientStreamObserver},
ClientCallFuncObservers: []ClientCallFuncObserver{DefaultClientCallFuncObserver},
ServerHandlerObservers: []ServerHandlerObserver{DefaultServerHandlerObserver},
SkipEndpoints: DefaultSkipEndpoints,
}
for _, o := range opts {
o(&options)
}
return options
}
// WithEnabled sets the enable/disable flag
func WithEnabled(b bool) Option {
return func(o *Options) {
o.Enabled = b
}
}
// WithLevel sets the log level
func WithLevel(l logger.Level) Option {
return func(o *Options) {
o.Level = l
}
}
// WithLogger sets the logger
func WithLogger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// WithClientCallObservers funcs
func WithClientCallObservers(ob ...ClientCallObserver) Option {
return func(o *Options) {
o.ClientCallObservers = ob
}
}
// WithClientStreamObservers funcs
func WithClientStreamObservers(ob ...ClientStreamObserver) Option {
return func(o *Options) {
o.ClientStreamObservers = ob
}
}
// WithClientCallFuncObservers funcs
func WithClientCallFuncObservers(ob ...ClientCallFuncObserver) Option {
return func(o *Options) {
o.ClientCallFuncObservers = ob
}
}
// WithServerHandlerObservers funcs
func WithServerHandlerObservers(ob ...ServerHandlerObserver) Option {
return func(o *Options) {
o.ServerHandlerObservers = ob
}
}
// SkipEndpoints appends endpoints to skip
func SkipEndpoints(eps ...string) Option {
return func(o *Options) {
o.SkipEndpoints = append(o.SkipEndpoints, eps...)
}
}
func (l *lWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...options.Option) error {
err := l.Client.Call(ctx, req, rsp, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallObservers {
labels = append(labels, o(ctx, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
func (l *lWrapper) Stream(ctx context.Context, req client.Request, opts ...options.Option) (client.Stream, error) {
stream, err := l.Client.Stream(ctx, req, opts...)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return stream, err
}
}
if !l.opts.Enabled {
return stream, err
}
var labels []string
for _, o := range l.opts.ClientStreamObservers {
labels = append(labels, o(ctx, req, opts, stream, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return stream, err
}
func (l *lWrapper) ServerHandler(ctx context.Context, req server.Request, rsp interface{}) error {
err := l.serverHandler(ctx, req, rsp)
endpoint := req.Endpoint()
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ServerHandlerObservers {
labels = append(labels, o(ctx, req, rsp, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewClientWrapper accepts options and returns a Client Wrapper
func NewClientWrapper(opts ...Option) client.Wrapper {
return func(c client.Client) client.Client {
options := NewOptions()
for _, o := range opts {
o(&options)
}
return &lWrapper{opts: options, Client: c}
}
}
// NewClientCallWrapper accepts options and returns a Call Wrapper
func NewClientCallWrapper(opts ...Option) client.CallWrapper {
return func(h client.CallFunc) client.CallFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, clientCallFunc: h}
return l.ClientCallFunc
}
}
func (l *lWrapper) ClientCallFunc(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
err := l.clientCallFunc(ctx, addr, req, rsp, opts)
endpoint := fmt.Sprintf("%s.%s", req.Service(), req.Endpoint())
for _, ep := range l.opts.SkipEndpoints {
if ep == endpoint {
return err
}
}
if !l.opts.Enabled {
return err
}
var labels []string
for _, o := range l.opts.ClientCallFuncObservers {
labels = append(labels, o(ctx, addr, req, rsp, opts, err)...)
}
l.opts.Logger.Fields(labels).Log(ctx, l.opts.Level)
return err
}
// NewServerHandlerWrapper accepts options and returns a Handler Wrapper
func NewServerHandlerWrapper(opts ...Option) server.HandlerWrapper {
return func(h server.HandlerFunc) server.HandlerFunc {
options := NewOptions()
for _, o := range opts {
o(&options)
}
l := &lWrapper{opts: options, serverHandler: h}
return l.ServerHandler
}
}
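A hedged wiring sketch for the wrappers above (illustrative only); how the returned client.Wrapper and server.HandlerWrapper get attached to a service depends on micro wiring that is not part of this diff.
package wrapper

import "go.unistack.org/micro/v4/logger"

// wrapperSketch builds the client and server handler wrappers defined above.
func wrapperSketch() {
	cw := NewClientWrapper(
		WithLogger(logger.DefaultLogger),
		WithLevel(logger.DebugLevel),
		WithEnabled(true),
		SkipEndpoints("Health.Live"),
	)
	hw := NewServerHandlerWrapper(WithEnabled(true))
	_, _ = cw, hw
}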


@@ -1,294 +1,142 @@
// Package metadata is a way of defining message headers
package metadata
import (
"context"
"fmt"
"strings"
)
// In the metadata package, context and metadata are treated as immutable.
// Deep copies of metadata are made to keep things safe and correct.
// If a user takes a map and changes it across threads, it's their responsibility.
//
// 1. Incoming Context
//
// This context is provided by an external system and populated by the server or broker of the micro framework.
// It should not be modified. The idea is to extract all necessary data from it,
// validate the data, and transfer it into the current context.
// After that, only the current context should be used throughout the code.
//
// 2. Current Context
//
// This is the context used during the execution flow.
// You can add any needed metadata to it and pass it through your code.
//
// 3. Outgoing Context
//
// This context is for sending data to external systems.
// You can add what you need before sending it out.
// But it's usually better to build and prepare this context right before making the external call,
// instead of changing it in many places.
//
// Execution Flow:
//
// [External System]
// ↓
// [Incoming Context]
// ↓
// [Extract & Validate Metadata from Incoming Context]
// ↓
// [Prepare Current Context]
// ↓
// [Enrich Current Context]
// ↓
// [Business Logic]
// ↓
// [Prepare Outgoing Context]
// ↓
// [External System Call]
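//
// Illustrative sketch of the flow above (not part of this change; the handler
// signature is hypothetical, only the metadata helpers in this package are real):
//
//	func handle(incoming context.Context) {
//		// 1. Extract and validate metadata from the incoming context.
//		md, _ := FromIncomingContext(incoming)
//		// 2. Prepare and enrich the current context.
//		ctx := NewContext(context.Background(), md)
//		ctx = AppendContext(ctx, "x-request-id", "123")
//		// 3. Build the outgoing context right before the external call.
//		ctx = AppendOutgoingContext(ctx, "source-service", "example")
//		_ = ctx
//	}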
type (
metadataCurrentKey struct{}
metadataIncomingKey struct{}
metadataOutgoingKey struct{}
rawMetadata struct {
md Metadata
added [][]string
}
mdIncomingKey struct{}
mdOutgoingKey struct{}
mdKey struct{}
)
// NewContext creates a new context with the provided Metadata attached.
// The Metadata must not be modified after calling this function.
func NewContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
}
// NewIncomingContext creates a new context with the provided incoming Metadata attached.
// The Metadata must not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
}
// NewOutgoingContext creates a new context with the provided outgoing Metadata attached.
// The Metadata must not be modified after calling this function.
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
}
// AppendContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
}
// AppendOutgoingContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
}
// FromContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
func FromContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustContext(ctx context.Context) Metadata {
md, ok := FromContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromIncomingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
// FromIncomingContext returns metadata from incoming ctx
// returned metadata should not be modified, otherwise a race condition can occur
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
if ctx == nil {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
md, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
return md.md, ok
}
// MustIncomingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustIncomingContext(ctx context.Context) Metadata {
md, ok := FromIncomingContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromOutgoingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
// FromOutgoingContext returns metadata from outgoing ctx
// returned metadata should not be modified, otherwise a race condition can occur
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
if ctx == nil {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
md, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, ok
return md.md, ok
}
// MustOutgoingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustOutgoingContext(ctx context.Context) Metadata {
md, ok := FromOutgoingContext(ctx)
// FromContext returns metadata from the given context
// returned metadata should not be modified, otherwise a race condition can occur
func FromContext(ctx context.Context) (Metadata, bool) {
if ctx == nil {
return nil, false
}
md, ok := ctx.Value(mdKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
return md.md, ok
}
// NewContext creates a new context with the given metadata
func NewContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
ctx = context.WithValue(ctx, mdKey{}, &rawMetadata{md})
ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{})
ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{})
return ctx
}
// SetOutgoingContext modify outgoing context with given metadata
func SetOutgoingContext(ctx context.Context, md Metadata) bool {
if ctx == nil {
return false
}
if omd, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata); ok {
omd.md = md
return true
}
return false
}
// SetIncomingContext modify incoming context with given metadata
func SetIncomingContext(ctx context.Context, md Metadata) bool {
if ctx == nil {
return false
}
if omd, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata); ok {
omd.md = md
return true
}
return false
}
// NewIncomingContext creates a new context with incoming metadata attached
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{md})
if v, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata); !ok || v == nil {
ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{})
}
return ctx
}
// NewOutgoingContext creates a new context with outgoing metadata attached
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
ctx = context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{md})
if v, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata); !ok || v == nil {
ctx = context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{})
}
return ctx
}
// AppendOutgoingContext appends new md to context
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
md, ok := Pairs(kv...)
if !ok {
panic("missing metadata")
return ctx
}
return md
}
// ValueFromCurrentContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromCurrentContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
omd, ok := FromOutgoingContext(ctx)
if !ok {
return nil
return NewOutgoingContext(ctx, md)
}
if v, ok := md.md[key]; ok {
return copyOf(v)
for k, v := range md {
omd.Set(k, v)
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
return NewOutgoingContext(ctx, omd)
}
// ValueFromIncomingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
// AppendIncomingContext appends new md to context
func AppendIncomingContext(ctx context.Context, kv ...string) context.Context {
md, ok := Pairs(kv...)
if !ok {
return nil
return ctx
}
if v, ok := raw.md[key]; ok {
return copyOf(v)
}
for k, v := range raw.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
// ValueFromOutgoingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
omd, ok := FromIncomingContext(ctx)
if !ok {
return nil
return NewIncomingContext(ctx, md)
}
if v, ok := md.md[key]; ok {
return copyOf(v)
for k, v := range md {
omd.Set(k, v)
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
return NewIncomingContext(ctx, omd)
}
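The helpers above form the new surface for carrying metadata through a context: New*Context attaches it, Append*Context adds pairs, and From*Context reads it back. A minimal usage sketch against the map[string]string Metadata variant added in this diff; the package main scaffolding and the request-id value are illustrative, not part of the change:

package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v4/metadata"
)

func main() {
	// Attach empty outgoing metadata, then append a request id.
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(1))
	// An odd number of key/value arguments would leave ctx unchanged.
	ctx = metadata.AppendOutgoingContext(ctx, metadata.HeaderXRequestID, "abc-123")

	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		if v, ok := md.Get(metadata.HeaderXRequestID); ok {
			fmt.Println(v) // abc-123
		}
	}
}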

metadata/context_test.go Normal file

@@ -0,0 +1,140 @@
package metadata
import (
"context"
"testing"
)
func TestFromNilContext(t *testing.T) {
// nolint: staticcheck
c, ok := FromContext(nil)
if ok || c != nil {
t.Fatal("FromContext not works")
}
}
func TestNewNilContext(t *testing.T) {
// nolint: staticcheck
ctx := NewContext(nil, New(0))
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("NewContext not works")
}
}
func TestFromContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdKey{}, &rawMetadata{New(0)})
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("FromContext not works")
}
}
func TestNewContext(t *testing.T) {
ctx := NewContext(context.TODO(), New(0))
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("NewContext not works")
}
}
func TestFromIncomingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdIncomingKey{}, &rawMetadata{New(0)})
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("FromIncomingContext not works")
}
}
func TestFromOutgoingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdOutgoingKey{}, &rawMetadata{New(0)})
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("FromOutgoingContext not works")
}
}
func TestSetIncomingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := context.WithValue(context.TODO(), mdIncomingKey{}, &rawMetadata{})
if !SetIncomingContext(ctx, md) {
t.Fatal("SetIncomingContext not works")
}
md, ok := FromIncomingContext(ctx)
if md == nil || !ok {
t.Fatal("SetIncomingContext not works")
} else if v, ok := md.Get("key"); !ok || v != "val" {
t.Fatal("SetIncomingContext not works")
}
}
func TestSetOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := context.WithValue(context.TODO(), mdOutgoingKey{}, &rawMetadata{})
if !SetOutgoingContext(ctx, md) {
t.Fatal("SetOutgoingContext not works")
}
md, ok := FromOutgoingContext(ctx)
if md == nil || !ok {
t.Fatal("SetOutgoingContext not works")
} else if v, ok := md.Get("key"); !ok || v != "val" {
t.Fatal("SetOutgoingContext not works")
}
}
func TestNewIncomingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewIncomingContext(context.TODO(), md)
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("NewIncomingContext not works")
}
}
func TestNewOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewOutgoingContext(context.TODO(), md)
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("NewOutgoingContext not works")
}
}
func TestAppendIncomingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendIncomingContext(context.TODO(), "key2", "val2")
nmd, ok := FromIncomingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendIncomingContext not works")
}
if v, ok := nmd.Get("key2"); !ok || v != "val2" {
t.Fatal("AppendIncomingContext not works")
}
}
func TestAppendOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendOutgoingContext(context.TODO(), "key2", "val2")
nmd, ok := FromOutgoingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendOutgoingContext not works")
}
if v, ok := nmd.Get("key2"); !ok || v != "val2" {
t.Fatal("AppendOutgoingContext not works")
}
}


@@ -1,19 +0,0 @@
// Package metadata is a way of defining message headers
package metadata
var (
// HeaderTopic is the header name that contains topic name.
HeaderTopic = "Micro-Topic"
// HeaderContentType specifies content type of message.
HeaderContentType = "Content-Type"
// HeaderEndpoint specifies endpoint in service.
HeaderEndpoint = "Micro-Endpoint"
// HeaderService specifies service.
HeaderService = "Micro-Service"
// HeaderTimeout specifies timeout of operation.
HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header.
HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id.
HeaderXRequestID = "X-Request-Id"
)


@@ -1,7 +0,0 @@
package metadata
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}


@@ -1,37 +0,0 @@
package metadata
import "sort"
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// Next advances the iterator to the next element.
func (iter *Iterator) Next(k *string, v *[]string) bool {
if iter.cur+1 > iter.cnt {
return false
}
if k != nil && v != nil {
*k = iter.keys[iter.cur]
vv := iter.md[*k]
*v = make([]string, len(vv))
copy(*v, vv)
iter.cur++
}
return true
}
// Iterator returns an iterator for iterating over metadata in sorted order.
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
sort.Strings(iter.keys)
return iter
}


@@ -1,160 +1,143 @@
package metadata
import (
"fmt"
"net/textproto"
"strings"
)
// Package metadata is a way of defining message headers
package metadata // import "go.unistack.org/micro/v4/metadata"
import (
"net/textproto"
"sort"
)
// defaultMetadataSize is used when initializing new Metadata.
var (
// HeaderTopic is the header name that contains topic name
HeaderTopic = "Micro-Topic"
// HeaderContentType specifies content type of message
HeaderContentType = "Content-Type"
// HeaderEndpoint specifies endpoint in service
HeaderEndpoint = "Micro-Endpoint"
// HeaderService specifies service
HeaderService = "Micro-Service"
// HeaderTimeout specifies timeout of operation
HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header
HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id
HeaderXRequestID = "X-Request-Id"
)
// Metadata is our way of representing request headers internally.
// They're used at the RPC level and translate back and forth
// from Transport headers.
type Metadata map[string]string
type rawMetadata struct {
md Metadata
}
// defaultMetadataSize used when need to init new Metadata
var defaultMetadataSize = 2
// Metadata maps keys to values. Use the New, NewWithMetadata and Pairs functions to create it.
type Metadata map[string][]string
// New creates a zero-value Metadata with the specified size.
func New(l int) Metadata {
if l == 0 {
l = defaultMetadataSize
}
md := make(Metadata, l)
return md
}
// NewWithMetadata creates a Metadata from the provided key-value map.
func NewWithMetadata(m map[string]string) Metadata {
md := make(Metadata, len(m))
for key, val := range m {
md[key] = append(md[key], val)
}
return md
}
// Iterator used to iterate over metadata with order
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// Next advances the iterator to the next element
func (iter *Iterator) Next(k, v *string) bool {
if iter.cur+1 > iter.cnt {
return false
}
*k = iter.keys[iter.cur]
*v = iter.md[*k]
iter.cur++
return true
}
// Pairs returns a Metadata formed from the key-value mapping. It panics if the length of kv is odd.
func Pairs(kv ...string) Metadata {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := make(Metadata, len(kv)/2)
for i := 0; i < len(kv); i += 2 {
md[kv[i]] = append(md[kv[i]], kv[i+1])
}
return md
}
// Iterator returns the iterator for metadata in sorted order
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
sort.Strings(iter.keys)
return iter
}
// Get returns value from metadata by key
func (md Metadata) Get(key string) (string, bool) {
// fast path
val, ok := md[key]
if !ok {
// slow path
val, ok = md[textproto.CanonicalMIMEHeaderKey(key)]
}
return val, ok
}
// Set is used to store value in metadata
func (md Metadata) Set(kv ...string) {
if len(kv)%2 == 1 {
kv = kv[:len(kv)-1]
}
for idx := 0; idx < len(kv); idx += 2 {
md[textproto.CanonicalMIMEHeaderKey(kv[idx])] = kv[idx+1]
}
}
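Set and Get above cooperate through textproto canonical keys: Set stores the value under the canonical MIME header form, and Get retries with that form when the exact key misses. A small sketch, assuming the map[string]string variant added here; the key and value are illustrative:

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/metadata"
)

func main() {
	md := metadata.New(1)
	// Set stores the value under the canonical key "Content-Type".
	md.Set("content-type", "application/json")
	// The exact key misses, so Get falls back to the canonical MIME header form.
	if v, ok := md.Get("content-type"); ok {
		fmt.Println(v) // application/json
	}
}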
// Join combines multiple Metadatas into a single Metadata.
// The order of values for each key is determined by the order in which the Metadatas are provided to Join.
func Join(mds ...Metadata) Metadata {
out := Metadata{}
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return out
}
// Del is used to remove value from metadata
func (md Metadata) Del(keys ...string) {
for _, key := range keys {
// fast path
delete(md, key)
// slow path
delete(md, textproto.CanonicalMIMEHeaderKey(key))
}
}
// Copy makes a copy of the metadata
func Copy(md Metadata) Metadata {
nmd := New(len(md))
for key, val := range md {
nmd.Set(key, val)
}
return nmd
}
// New return new sized metadata
func New(size int) Metadata {
if size == 0 {
size = defaultMetadataSize
}
return make(Metadata, size)
}
// Merge merges metadata to existing metadata, overwriting if specified
func Merge(omd Metadata, mmd Metadata, overwrite bool) Metadata {
var ok bool
nmd := Copy(omd)
for key, val := range mmd {
_, ok = nmd[key]
switch {
case ok && !overwrite:
continue
case val != "":
nmd.Set(key, val)
case ok && val == "":
nmd.Del(key)
}
}
return nmd
}
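Merge copies the base metadata and then applies the patch: keys already present are kept when overwrite is false, non-empty values replace on overwrite, and an empty value deletes the key. A sketch of that behavior under the same assumptions as above; the service and endpoint values are made up for illustration:

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/metadata"
)

func main() {
	base := metadata.New(2)
	base.Set(metadata.HeaderService, "greeter", metadata.HeaderEndpoint, "Say.Hello")
	patch := metadata.New(2)
	patch.Set(metadata.HeaderService, "helloworld", metadata.HeaderEndpoint, "")

	// overwrite=false: keys already present in base keep their values.
	kept := metadata.Merge(base, patch, false)
	v, ok := kept.Get(metadata.HeaderService)
	fmt.Println(v, ok) // greeter true

	// overwrite=true: non-empty values replace, empty values delete the key.
	repl := metadata.Merge(base, patch, true)
	v, ok = repl.Get(metadata.HeaderService)
	fmt.Println(v, ok) // helloworld true
	v, ok = repl.Get(metadata.HeaderEndpoint)
	fmt.Println(v, ok) // (empty) false
}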
// Copy returns a deep copy of Metadata.
func Copy(src Metadata) Metadata {
out := make(Metadata, len(src))
for k, v := range src {
out[k] = copyOf(v)
}
return out
}
// Copy returns a deep copy of Metadata.
func (md Metadata) Copy() Metadata {
out := make(Metadata, len(md))
for k, v := range md {
out[k] = copyOf(v)
}
return out
}
// CopyTo performs a deep copy of Metadata to the out.
func (md Metadata) CopyTo(out Metadata) {
for k, v := range md {
out[k] = copyOf(v)
}
}
// Len returns the number of items in Metadata.
func (md Metadata) Len() int {
return len(md)
}
// AsMap returns a deep copy of Metadata as a map[string]string
func (md Metadata) AsMap() map[string]string {
out := make(map[string]string, len(md))
for k, v := range md {
out[k] = strings.Join(v, ",")
}
return out
}
// AsHTTP1 returns a deep copy of Metadata with keys converted to canonical MIME header key format.
func (md Metadata) AsHTTP1() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
out[textproto.CanonicalMIMEHeaderKey(k)] = copyOf(v)
}
return out
}
// AsHTTP2 returns a deep copy of Metadata with keys converted to lowercase.
func (md Metadata) AsHTTP2() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
out[strings.ToLower(k)] = copyOf(v)
}
return out
}
// Get retrieves the values for a given key, checking the key in three formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Get(k string) []string {
v, ok := md[k]
if !ok {
v, ok = md[strings.ToLower(k)]
}
if !ok {
v = md[textproto.CanonicalMIMEHeaderKey(k)]
}
return v
}
// GetJoined retrieves the values for a given key and joins them into a single string, separated by commas.
func (md Metadata) GetJoined(k string) string {
return strings.Join(md.Get(k), ",")
}
// Set assigns the values to the given key.
func (md Metadata) Set(key string, vals ...string) {
if len(vals) == 0 {
return
}
md[key] = vals
}
// Append adds values to the existing values for the given key.
func (md Metadata) Append(key string, vals ...string) {
if len(vals) == 0 {
return
}
md[key] = append(md[key], vals...)
}
// Del removes the values for the given keys k. It checks and removes the keys in the following formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Del(k ...string) {
for i := range k {
delete(md, k[i])
delete(md, strings.ToLower(k[i]))
delete(md, textproto.CanonicalMIMEHeaderKey(k[i]))
}
}
// Pairs from which metadata created
func Pairs(kv ...string) (Metadata, bool) {
if len(kv)%2 == 1 {
return nil, false
}
md := New(len(kv) / 2)
md.Set(kv...)
return md, true
}
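Unlike the removed variant, this Pairs reports failure with a bool instead of panicking on an odd argument count, and Iterator visits keys in sorted order. An illustrative sketch under the same assumptions as the earlier examples; keys and values are invented:

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/metadata"
)

func main() {
	// Odd argument count: no panic, just ok == false.
	if _, ok := metadata.Pairs("key1", "val1", "dangling"); !ok {
		fmt.Println("odd number of key/value arguments")
	}

	md, _ := metadata.Pairs("B-Key", "2", "A-Key", "1")
	// Iterator walks keys in sorted order: A-Key first, then B-Key.
	var k, v string
	for iter := md.Iterator(); iter.Next(&k, &v); {
		fmt.Println(k, v)
	}
}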

Some files were not shown because too many files have changed in this diff.