Compare commits

..

98 Commits

Author SHA1 Message Date
2bac878845 broker: fix message options
Some checks failed
coverage / build (push) Failing after 1m58s
test / test (push) Has started running
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-06-09 17:23:30 +03:00
9ee31fb5a6 fixup compile
Some checks failed
coverage / build (push) Has been cancelled
test / test (push) Has been cancelled
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-29 12:46:23 +03:00
ed5d30a58e store/noop: fixup Exists
Some checks failed
coverage / build (push) Has been cancelled
test / test (push) Has been cancelled
sync / sync (push) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-29 12:43:39 +03:00
vtolstov
b4b67a8b41 Apply Code Coverage Badge 2025-05-25 02:41:23 +00:00
13f90ff716 changed embedded mutex to private field (#217)
Some checks failed
sync / sync (push) Failing after 16m12s
test / test (push) Failing after 17m28s
coverage / build (push) Failing after 17m40s
2025-05-25 01:15:03 +03:00
0f8f12aee0 add tracer enabled status
Some checks failed
coverage / build (push) Successful in 2m52s
test / test (push) Failing after 18m53s
sync / sync (push) Successful in 26s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-19 09:33:01 +03:00
8b406cf963 util/buffer: add Reset() method
Some checks failed
coverage / build (push) Failing after 1m36s
test / test (push) Successful in 3m35s
sync / sync (push) Successful in 7s
closes #402

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-12 19:18:45 +03:00
029a434a2b broker: pass broker content type if message options not pass it
All checks were successful
coverage / build (push) Successful in 1m44s
test / test (push) Successful in 3m5s
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-09 13:51:35 +03:00
vtolstov
847259bc39 Apply Code Coverage Badge 2025-05-09 09:36:02 +00:00
a1ee8728ad broker: add Content-Type and DefaultContentType
All checks were successful
coverage / build (push) Successful in 1m58s
sync / sync (push) Successful in 1m37s
test / test (push) Successful in 3m47s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-09 12:34:46 +03:00
88a5875cfb switch yaml package to maintained one
All checks were successful
coverage / build (push) Successful in 2m16s
test / test (push) Successful in 4m37s
sync / sync (push) Successful in 8s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-09 12:18:49 +03:00
03ee33040c util/id: switch to default uuid package
All checks were successful
coverage / build (push) Successful in 2m21s
test / test (push) Successful in 4m50s
sync / sync (push) Successful in 6s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-08 19:07:00 +03:00
0144f175f0 add comment
All checks were successful
coverage / build (push) Successful in 1m33s
test / test (push) Successful in 3m41s
sync / sync (push) Successful in 8s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-06 23:00:15 +03:00
b3539a32ab logger: add none level
closes #399

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-06 23:00:15 +03:00
vtolstov
6a7223ea4a Apply Code Coverage Badge 2025-05-06 08:09:12 +00:00
1a1b67866a [v4] improve metadata documentation (#216)
All checks were successful
coverage / build (push) Successful in 1m47s
test / test (push) Successful in 2m50s
* add usage docs for context types and metadata, improve comments

* changes after review
2025-05-06 11:02:27 +03:00
b7c98da6d1 added commit hash check to avoid unnecessary repository cloning (#215)
All checks were successful
sync / sync (push) Successful in 16s
2025-05-05 14:53:28 +03:00
2c21cce076 tracer/noop: disable allocation for trace and span id
All checks were successful
coverage / build (push) Successful in 2m22s
test / test (push) Successful in 4m22s
sync / sync (push) Successful in 26s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-05 09:48:22 +03:00
c8946dcdc8 fix sync
All checks were successful
sync / sync (push) Successful in 24s
2025-05-04 15:03:22 +03:00
vtolstov
d342ff2626 Apply Code Coverage Badge 2025-05-02 06:23:44 +00:00
f2d0d67d4c hooks/requestid: fixup panic
All checks were successful
coverage / build (push) Successful in 2m0s
test / test (push) Successful in 2m33s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-02 09:22:19 +03:00
677dc30af0 [v4] update ci (#213)
All checks were successful
sync / sync (push) Successful in 41s
* update ci

* cleanup
2025-05-01 19:15:06 +03:00
vtolstov
7122cc873c Apply Code Coverage Badge 2025-05-01 15:58:29 +00:00
77e370ffdc move wrapper.sql to hook/sql (#401)
All checks were successful
coverage / build (push) Successful in 2m3s
test / test (push) Successful in 2m42s
move micro-wrapper-sql to core repo

Co-authored-by: Vasiliy Tolstov <v.tolstov@unistack.org>
Reviewed-on: #401
Co-authored-by: Evstigneev Denis <danteevstigneev@yandex.ru>
Co-committed-by: Evstigneev Denis <danteevstigneev@yandex.ru>
2025-05-01 18:56:34 +03:00
vtolstov
7b1c42e50b Apply Code Coverage Badge 2025-04-29 20:18:49 +00:00
f3b9493ac3 hooks/metadata: fix
All checks were successful
coverage / build (push) Successful in 1m20s
test / test (push) Successful in 1m57s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 23:18:10 +03:00
e4ee705eb2 metadata: sync with grpc
Some checks failed
coverage / build (push) Failing after 41s
test / test (push) Successful in 2m31s
sync / sync (push) Successful in 46s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 23:13:57 +03:00
7ff7a3dbe0 update all
All checks were successful
sync / sync (push) Successful in 41s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 18:29:26 +03:00
7af5147f4b update all
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 18:28:33 +03:00
394fd16243 update all
All checks were successful
sync / sync (push) Successful in 36s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 18:16:16 +03:00
2b08c8f682 fixup coverage job
All checks were successful
sync / sync (push) Successful in 43s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 14:13:56 +03:00
f9a7f62d02 fixup workflow
Some checks failed
sync / sync (push) Failing after 1m9s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 14:06:43 +03:00
vtolstov
f5aedf5951 Apply Code Coverage Badge 2025-04-29 10:54:56 +00:00
a5ef231171 cleanup metadata
All checks were successful
sync / sync (push) Successful in 1m20s
coverage / build (push) Successful in 1m23s
test / test (push) Successful in 2m42s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 13:54:16 +03:00
23f2ee9bb7 fixup hooks
All checks were successful
coverage / build (push) Successful in 3m12s
test / test (push) Successful in 5m4s
sync / sync (push) Successful in 31s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 13:09:56 +03:00
88606e89ca fixup metadata
Some checks are pending
coverage / build (push) Waiting to run
test / test (push) Waiting to run
sync / sync (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-29 13:01:37 +03:00
vtolstov
24efbb68bf Apply Code Coverage Badge
All checks were successful
coverage / build (push) Successful in 2m4s
test / test (push) Successful in 3m28s
sync / sync (push) Successful in 25s
2025-04-27 19:16:46 +00:00
vtolstov
cecdaa0fed Apply Code Coverage Badge 2025-04-27 19:13:06 +00:00
vtolstov
9627995cee Apply Code Coverage Badge
All checks were successful
sync / sync (push) Successful in 1m54s
coverage / build (push) Successful in 1m57s
test / test (push) Successful in 2m27s
2025-04-27 19:07:59 +00:00
vtolstov
0f3539dc7b Apply Code Coverage Badge 2025-04-27 19:06:03 +00:00
ff414eff2e [v4] fix flatten map util function (#211)
All checks were successful
sync / sync (push) Successful in 1m51s
coverage / build (push) Successful in 2m17s
test / test (push) Successful in 4m29s
* add the fixed version of FlattenMap() and corresponding tests
* replaced the old FlattenMap() implementation with a new one
2025-04-27 21:44:24 +03:00
vtolstov
fbf6832738 Apply Code Coverage Badge
Some checks are pending
coverage / build (push) Successful in 6m34s
test / test (push) Successful in 8m23s
sync / sync (push) Has started running
2025-04-27 18:21:57 +00:00
vtolstov
59ff1f931b Apply Code Coverage Badge 2025-04-27 18:19:31 +00:00
2030bd2803 attempt to fix coverage/lint/test job (#210) 2025-04-27 21:12:16 +03:00
bb87a87ae5 improve sync
All checks were successful
sync / sync (push) Successful in 13s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 21:02:19 +03:00
0bd5aed7cc rename workflow
All checks were successful
sync / sync (push) Successful in 10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 16:08:57 +03:00
434798a574 check actions env
All checks were successful
sync / sync (push) Successful in 9s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 16:00:00 +03:00
459a951115 check actions env
All checks were successful
syncpull / pull (push) Successful in 9s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 15:49:50 +03:00
770c2715d4 check actions env
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 15:48:57 +03:00
c93286afd5 [v4] rename .gitea to .github
All checks were successful
syncpull / pull (push) Successful in 10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 14:10:57 +03:00
vtolstov
6bf118d978 Apply Code Coverage Badge 2025-04-27 11:05:07 +00:00
7493de1168 move hooks (#398)
All checks were successful
coverage / build (push) Successful in 1m18s
test / test (push) Successful in 2m5s

Reviewed-on: #398
Co-authored-by: Evstigneev Denis <danteevstigneev@yandex.ru>
Co-committed-by: Evstigneev Denis <danteevstigneev@yandex.ru>
2025-04-27 14:04:33 +03:00
vtolstov
212a685b50 Apply Code Coverage Badge 2025-04-27 10:58:10 +00:00
3f21bafc2f fixup lint
All checks were successful
coverage / build (push) Successful in 1m17s
test / test (push) Successful in 2m51s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 13:57:42 +03:00
a9ed8b16c1 skip on needed changes
All checks were successful
syncpull / pull (push) Successful in 10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 13:40:33 +03:00
vtolstov
740cd5931d Apply Code Coverage Badge 2025-04-27 10:39:03 +00:00
85a78063d0 fix panic on shutdown caused by double channel close (#209)
All checks were successful
coverage / build (push) Successful in 2m35s
test / test (push) Successful in 3m43s
2025-04-27 13:37:18 +03:00
604ad9cd9d check sync action
All checks were successful
syncpull / pull (push) Successful in 18s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 13:36:11 +03:00
91137537a2 check sync action
All checks were successful
syncpull / pull (push) Successful in 11s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 13:23:33 +03:00
950e2352fd check sync action
Some checks failed
syncpull / pull (push) Failing after 10s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 13:17:27 +03:00
0bb29b29cf check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:38:02 +03:00
17bcd0b0ab check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:37:17 +03:00
20f9f4da3b check sync action
Some checks failed
syncpull / pull (push) Failing after 8s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:34:28 +03:00
66fa04b8dc check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:33:09 +03:00
1ef3ad6531 check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:32:22 +03:00
c95a91349d check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:30:52 +03:00
fdcf8e6ca4 check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 12:25:27 +03:00
8cb2d9db4a check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:28:50 +03:00
04da4388ac check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:27:26 +03:00
79fb23e644 check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:24:40 +03:00
f8fe923ab1 check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:21:25 +03:00
105f56dbfe check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:20:21 +03:00
9fed5a368b check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:19:58 +03:00
7374d41cf8 check sync action
Some checks failed
syncpull / pull (push) Failing after 6s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:15:32 +03:00
a4a8935c1f check sync action
Some checks failed
syncpull / pull (push) Failing after 6s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:11:39 +03:00
5f498c8232 check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:10:06 +03:00
a00fdf679b check sync action
Some checks failed
syncpull / pull (push) Failing after 6s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 11:05:22 +03:00
dc9ebe4155 check sync action
Some checks failed
syncpull / pull (push) Failing after 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:57:51 +03:00
87ced484b7 check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:56:34 +03:00
af99b11a59 check sync action
Some checks failed
syncpull / pull (push) Failing after 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:52:20 +03:00
2724b51f7c check sync action
Some checks failed
syncpull / pull (push) Failing after 4s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:41:22 +03:00
5b5d0e02b9 check sync action
Some checks failed
syncpull / pull (push) Failing after 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:38:36 +03:00
afc2de6819 check sync action
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:37:01 +03:00
32a8ab9c05 check sync action
Some checks failed
syncpull / pull (push) Failing after 5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 10:34:25 +03:00
vtolstov
7e5401bded Apply Code Coverage Badge 2025-04-27 06:30:32 +00:00
64b91cea06 check sync action
All checks were successful
coverage / build (push) Successful in 1m18s
test / test (push) Successful in 2m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 09:29:33 +03:00
vtolstov
0f59fdcbde Apply Code Coverage Badge 2025-04-27 06:29:19 +00:00
50979e6708 check sync action
Some checks failed
coverage / build (push) Has started running
test / test (push) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 09:28:17 +03:00
46f3108870 check sync action
Some checks failed
coverage / build (push) Has been cancelled
test / test (push) Has started running
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 09:27:35 +03:00
vtolstov
5fed91a65f Apply Code Coverage Badge 2025-04-27 06:25:16 +00:00
1c5bba908d check sync action
All checks were successful
coverage / build (push) Successful in 1m14s
test / test (push) Successful in 2m3s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-27 09:24:41 +03:00
vtolstov
bc8ebdcad5 Apply Code Coverage Badge 2025-04-24 11:55:11 +00:00
fc24f3af92 metadata: add AsMap func
All checks were successful
coverage / build (push) Successful in 1m18s
test / test (push) Successful in 2m3s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-24 14:54:37 +03:00
1058177d1c Delete SECURITY.md 2025-04-22 15:54:19 +03:00
vtolstov
fa53fac085 Apply Code Coverage Badge 2025-04-13 21:03:53 +00:00
8c060df5e3 tracer: add IsRecording to span interface
All checks were successful
coverage / build (push) Successful in 2m0s
test / test (push) Successful in 4m29s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-04-14 00:02:45 +03:00
e1f8c62685 broker: add SetPublishOption
All checks were successful
coverage / build (push) Successful in 1m33s
test / test (push) Successful in 2m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-03-07 18:21:57 +03:00
562b1ab9b7 broker: simplify handler check
All checks were successful
coverage / build (push) Successful in 1m19s
test / test (push) Successful in 2m5s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-03-07 15:26:20 +03:00
72 changed files with 23641 additions and 825 deletions

View File

@@ -3,14 +3,16 @@ name: coverage
on:
push:
branches: [ main, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
pull_request:
branches: [ main, v3, v4 ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: checkout code
@@ -22,7 +24,7 @@ jobs:
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
go-version: 'stable'
- name: test coverage
run: |
@@ -39,8 +41,8 @@ jobs:
name: autocommit
with:
commit_message: Apply Code Coverage Badge
skip_fetch: true
skip_checkout: true
skip_fetch: false
skip_checkout: false
file_pattern: ./README.md
- name: push
@@ -48,4 +50,4 @@ jobs:
uses: ad-m/github-push-action@master
with:
github_token: ${{ github.token }}
branch: ${{ github.ref }}
branch: ${{ github.ref }}

View File

@@ -3,10 +3,10 @@ name: lint
on:
pull_request:
types: [opened, reopened, synchronize]
branches:
- master
- v3
- v4
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
lint:
@@ -20,10 +20,10 @@ jobs:
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
go-version: 'stable'
- name: setup deps
run: go get -v ./...
- name: run lint
uses: https://github.com/golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v6
with:
version: 'latest'

94
.github/workflows/job_sync.yml vendored Normal file
View File

@@ -0,0 +1,94 @@
name: sync
on:
schedule:
- cron: '*/5 * * * *'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
sync:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: init
run: |
git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
git config --global user.name "github-actions[bot]"
echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
- name: check master
id: check_master
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync master
if: steps.check_master.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream master
git push upstream master --progress
git push origin master --progress
cd ../
rm -rf repo
- name: check v3
id: check_v3
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v3
if: steps.check_v3.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v3
git push upstream v3 --progress
git push origin v3 --progress
cd ../
rm -rf repo
- name: check v4
id: check_v4
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v4
if: steps.check_v4.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v4
git push upstream v4 --progress
git push origin v4 --progress
cd ../
rm -rf repo

View File

@@ -3,15 +3,12 @@ name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches:
- master
- v3
- v4
branches: [ master, v3, v4 ]
push:
branches:
- master
- v3
- v4
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
test:

View File

@@ -3,15 +3,12 @@ name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches:
- master
- v3
- v4
branches: [ master, v3, v4 ]
push:
branches:
- master
- v3
- v4
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
jobs:
test:
@@ -35,19 +32,19 @@ jobs:
go-version: 'stable'
- name: setup go work
env:
GOWORK: /workspace/${{ github.repository_owner }}/go.work
GOWORK: ${{ github.workspace }}/go.work
run: |
go work init
go work use .
go work use micro-tests
- name: setup deps
env:
GOWORK: /workspace/${{ github.repository_owner }}/go.work
GOWORK: ${{ github.workspace }}/go.work
run: go get -v ./...
- name: run tests
env:
INTEGRATION_TESTS: yes
GOWORK: /workspace/${{ github.repository_owner }}/go.work
GOWORK: ${{ github.workspace }}/go.work
run: |
cd micro-tests
go test -mod readonly -v ./... || true

View File

@@ -1,5 +1,5 @@
run:
concurrency: 8
deadline: 5m
timeout: 5m
issues-exit-code: 1
tests: true

View File

@@ -1,5 +1,5 @@
# Micro
![Coverage](https://img.shields.io/badge/Coverage-44.2%25-yellow)
![Coverage](https://img.shields.io/badge/Coverage-33.6%25-yellow)
[![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
[![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v4)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)

View File

@@ -1,15 +0,0 @@
# Security Policy
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 3.7.x | :white_check_mark: |
| < 3.7.0 | :x: |
## Reporting a Vulnerability
If you find any issue, please create github issue in this repo

View File

@@ -21,7 +21,7 @@ var (
// ErrInvalidMessage is returned when an invalid Message is passed
ErrInvalidMessage = errors.New("invalid message")
// ErrInvalidHandler is returned when an invalid subscriber is passed to Subscribe
ErrInvalidHandler = errors.New("invalid handler")
ErrInvalidHandler = errors.New("invalid handler, ony func(Message) error and func([]Message) error supported")
// DefaultGracefulTimeout
DefaultGracefulTimeout = 5 * time.Second
)
@@ -41,7 +41,7 @@ type Broker interface {
// Disconnect disconnect from broker
Disconnect(ctx context.Context) error
// NewMessage create new broker message to publish.
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error)
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error)
// Publish message to broker topic
Publish(ctx context.Context, topic string, messages ...Message) error
// Subscribe subscribes to topic message via handler
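For orientation, here is a hedged usage sketch of the new MessageOption-based NewMessage signature. It assumes the packages live at go.unistack.org/micro/v4/broker and go.unistack.org/micro/v4/metadata, that b is an already connected Broker with a codec registered for the chosen content type, and publishGreeting is an illustrative helper name, not part of the package:

import (
	"context"

	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v4/metadata"
)

// publishGreeting builds a message with an explicit content type and publishes it.
func publishGreeting(ctx context.Context, b broker.Broker, topic string) error {
	hdr := metadata.New(1)
	hdr.Set("id", "1")
	msg, err := b.NewMessage(ctx, hdr, map[string]string{"greeting": "hello"},
		broker.MessageContentType("application/json"))
	if err != nil {
		return err
	}
	return b.Publish(ctx, topic, msg)
}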

View File

@@ -42,6 +42,16 @@ func SetSubscribeOption(k, v interface{}) SubscribeOption {
}
}
// SetMessageOption returns a function to setup a context with given value
func SetMessageOption(k, v interface{}) MessageOption {
return func(o *MessageOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
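A short sketch of how SetMessageOption flows through MessageOptions: the value lands in MessageOptions.Context, where a broker implementation can read it back. pluginKey is an illustrative key type and the go.unistack.org/micro/v4/broker import path is assumed:

import "go.unistack.org/micro/v4/broker"

// pluginKey is a hypothetical context key used only for this illustration.
type pluginKey struct{}

func messageOptionsWithPluginValue() broker.MessageOptions {
	opts := broker.NewMessageOptions(
		broker.SetMessageOption(pluginKey{}, "custom-value"),
	)
	// an implementation-specific broker can later recover the value:
	if v, ok := opts.Context.Value(pluginKey{}).(string); ok {
		_ = v // "custom-value"
	}
	return opts
}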

View File

@@ -22,8 +22,8 @@ type Broker struct {
subscribers map[string][]*Subscriber
addr string
opts broker.Options
sync.RWMutex
connected bool
mu sync.RWMutex
connected bool
}
type memoryMessage struct {
@@ -32,7 +32,7 @@ type memoryMessage struct {
ctx context.Context
body []byte
hdr metadata.Metadata
opts broker.PublishOptions
opts broker.MessageOptions
}
func (m *memoryMessage) Ack() error {
@@ -72,9 +72,9 @@ func (b *Broker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.RLock()
b.mu.RLock()
c, ok := b.opts.Codecs[ct]
b.RUnlock()
b.mu.RUnlock()
if ok {
return c, nil
}
@@ -96,8 +96,8 @@ func (b *Broker) Connect(ctx context.Context) error {
default:
}
b.Lock()
defer b.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
if b.connected {
return nil
@@ -126,8 +126,8 @@ func (b *Broker) Disconnect(ctx context.Context) error {
default:
}
b.Lock()
defer b.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
if !b.connected {
return nil
@@ -157,8 +157,11 @@ func (b *Broker) Init(opts ...broker.Option) error {
return nil
}
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.PublishOption) (broker.Message, error) {
options := broker.NewPublishOptions(opts...)
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
options := broker.NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
@@ -180,12 +183,12 @@ func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker
}
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
b.RLock()
b.mu.RLock()
if !b.connected {
b.RUnlock()
b.mu.RUnlock()
return broker.ErrNotConnected
}
b.RUnlock()
b.mu.RUnlock()
select {
case <-ctx.Done():
@@ -193,9 +196,9 @@ func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.M
default:
}
b.RLock()
b.mu.RLock()
subs, ok := b.subscribers[topic]
b.RUnlock()
b.mu.RUnlock()
if !ok {
return nil
}
@@ -252,12 +255,12 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
return nil, err
}
b.RLock()
b.mu.RLock()
if !b.connected {
b.RUnlock()
b.mu.RUnlock()
return nil, broker.ErrNotConnected
}
b.RUnlock()
b.mu.RUnlock()
sid, err := id.New()
if err != nil {
@@ -275,13 +278,13 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
ctx: ctx,
}
b.Lock()
b.mu.Lock()
b.subscribers[topic] = append(b.subscribers[topic], sub)
b.Unlock()
b.mu.Unlock()
go func() {
<-sub.exit
b.Lock()
b.mu.Lock()
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
for _, sb := range b.subscribers[topic] {
if sb.id == sub.id {
@@ -290,7 +293,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
newSubscribers = append(newSubscribers, sb)
}
b.subscribers[topic] = newSubscribers
b.Unlock()
b.mu.Unlock()
}()
return sub, nil
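The mutex change here (and in the noop broker below) replaces an embedded sync.RWMutex with a private mu field, so the lock methods no longer leak into the broker's exported method set. A small, package-agnostic illustration of the difference; Embedded and Named are illustrative types only:

import "sync"

type Embedded struct {
	sync.RWMutex // Lock, Unlock, RLock, RUnlock become part of the public API
}

type Named struct {
	mu    sync.RWMutex // only this package can take the lock
	state int
}

func (n *Named) snapshot() int {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.state
}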

View File

@@ -49,7 +49,7 @@ func TestMemoryBroker(t *testing.T) {
"id", fmt.Sprintf("%d", i),
),
[]byte(`"hello world"`),
broker.PublishContentType("application/octet-stream"),
broker.MessageContentType("application/octet-stream"),
)
if err != nil {
t.Fatal(err)

View File

@@ -14,16 +14,16 @@ type NoopBroker struct {
funcPublish FuncPublish
funcSubscribe FuncSubscribe
opts Options
sync.RWMutex
mu sync.RWMutex
}
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.RLock()
b.mu.RLock()
c, ok := b.opts.Codecs[ct]
b.RUnlock()
b.mu.RUnlock()
if ok {
return c, nil
}
@@ -99,7 +99,7 @@ type noopMessage struct {
ctx context.Context
body []byte
hdr metadata.Metadata
opts PublishOptions
opts MessageOptions
}
func (m *noopMessage) Ack() error {
@@ -126,8 +126,11 @@ func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) {
options := NewPublishOptions(opts...)
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) {
options := NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {

View File

@@ -45,6 +45,9 @@ type Options struct {
// GracefulTimeout contains time to wait to finish in flight requests
GracefulTimeout time.Duration
// ContentType will be used if no content-type set when creating message
ContentType string
}
// NewOptions create new Options
@@ -57,14 +60,19 @@ func NewOptions(opts ...Option) Options {
Codecs: make(map[string]codec.Codec),
Tracer: tracer.DefaultTracer,
GracefulTimeout: DefaultGracefulTimeout,
ContentType: DefaultContentType,
}
for _, o := range opts {
o(&options)
}
return options
}
// DefaultContentType is the default content-type if not specified
var DefaultContentType = ""
// Context sets the context option
func Context(ctx context.Context) Option {
return func(o *Options) {
@@ -72,18 +80,29 @@ func Context(ctx context.Context) Option {
}
}
// PublishOptions struct
type PublishOptions struct {
// ContentType used by default if not specified
func ContentType(ct string) Option {
return func(o *Options) {
o.ContentType = ct
}
}
// MessageOptions struct
type MessageOptions struct {
// ContentType for message body
ContentType string
// BodyOnly flag says the message contains raw body bytes and doesn't need
// the codec Marshal method
BodyOnly bool
// Context holds custom options
Context context.Context
}
// NewPublishOptions creates PublishOptions struct
func NewPublishOptions(opts ...PublishOption) PublishOptions {
options := PublishOptions{}
// NewMessageOptions creates MessageOptions struct
func NewMessageOptions(opts ...MessageOption) MessageOptions {
options := MessageOptions{
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
@@ -109,19 +128,19 @@ type SubscribeOptions struct {
// Option func
type Option func(*Options)
// PublishOption func
type PublishOption func(*PublishOptions)
// MessageOption func
type MessageOption func(*MessageOptions)
// PublishContentType sets message content-type that used to Marshal
func PublishContentType(ct string) PublishOption {
return func(o *PublishOptions) {
// MessageContentType sets message content-type that used to Marshal
func MessageContentType(ct string) MessageOption {
return func(o *MessageOptions) {
o.ContentType = ct
}
}
// PublishBodyOnly publish only body of the message
func PublishBodyOnly(b bool) PublishOption {
return func(o *PublishOptions) {
// MessageBodyOnly publish only body of the message
func MessageBodyOnly(b bool) MessageOption {
return func(o *MessageOptions) {
o.BodyOnly = b
}
}
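Putting the new options together, a sketch of the default/override behaviour (assuming the go.unistack.org/micro/v4/broker import path): the broker-level ContentType is what NewMessage falls back to whenever no per-message MessageContentType is given, as the memory and noop implementations above show.

import "go.unistack.org/micro/v4/broker"

func exampleContentTypeDefaults() {
	// broker-wide default, replacing DefaultContentType ("")
	bopts := broker.NewOptions(broker.ContentType("application/json"))
	_ = bopts.ContentType // "application/json"

	// a per-message option wins over the broker default for that message only
	mopts := broker.NewMessageOptions(broker.MessageContentType("application/octet-stream"))
	_ = mopts.ContentType // "application/octet-stream"
}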

View File

@@ -1,87 +1,14 @@
package broker
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
)
const (
messageSig = "func(broker.Message) error"
messagesSig = "func([]broker.Message) error"
)
// Precompute the reflect type for error. Can't use error directly
// because Typeof takes an empty interface value. This is annoying.
var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
// Is this an exported - upper case - name?
func isExported(name string) bool {
r, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(r)
}
// Is this type exported or a builtin?
func isExportedOrBuiltinType(t reflect.Type) bool {
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
// PkgPath will be non-empty even for an exported type,
// so we need to check the type name as well.
return isExported(t.Name()) || t.PkgPath() == ""
}
// IsValidHandler func signature
func IsValidHandler(sub interface{}) error {
typ := reflect.TypeOf(sub)
var argType reflect.Type
switch typ.Kind() {
case reflect.Func:
name := "Func"
switch typ.NumIn() {
case 1:
argType = typ.In(0)
default:
return fmt.Errorf("subscriber %v takes wrong number of args: %v required signature %s", name, typ.NumIn(), messageSig)
}
if !isExportedOrBuiltinType(argType) {
return fmt.Errorf("subscriber %v argument type not exported: %v", name, argType)
}
if typ.NumOut() != 1 {
return fmt.Errorf("subscriber %v has wrong number of return values: %v require signature %s",
name, typ.NumOut(), messageSig)
}
if returnType := typ.Out(0); returnType != typeOfError {
return fmt.Errorf("subscriber %v returns %v not error", name, returnType.String())
}
switch sub.(type) {
default:
hdlr := reflect.ValueOf(sub)
name := reflect.Indirect(hdlr).Type().Name()
for m := 0; m < typ.NumMethod(); m++ {
method := typ.Method(m)
switch method.Type.NumIn() {
case 3:
argType = method.Type.In(2)
default:
return fmt.Errorf("subscriber %v.%v takes wrong number of args: %v required signature %s",
name, method.Name, method.Type.NumIn(), messageSig)
}
if !isExportedOrBuiltinType(argType) {
return fmt.Errorf("%v argument type not exported: %v", name, argType)
}
if method.Type.NumOut() != 1 {
return fmt.Errorf(
"subscriber %v.%v has wrong number of return values: %v require signature %s",
name, method.Name, method.Type.NumOut(), messageSig)
}
if returnType := method.Type.Out(0); returnType != typeOfError {
return fmt.Errorf("subscriber %v.%v returns %v not error", name, method.Name, returnType.String())
}
}
return ErrInvalidHandler
case func(Message) error:
break
case func([]Message) error:
break
}
return nil
}
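With validation reduced to the two supported signatures, subscriber handlers now look like the following sketch (assuming the go.unistack.org/micro/v4/broker import path); anything else is rejected with ErrInvalidHandler:

import "go.unistack.org/micro/v4/broker"

// single-message handler: func(broker.Message) error
func handleOne(msg broker.Message) error {
	var payload map[string]string
	if err := msg.Unmarshal(&payload); err != nil {
		return err
	}
	return msg.Ack()
}

// batch handler: func([]broker.Message) error
func handleBatch(msgs []broker.Message) error {
	for _, msg := range msgs {
		if err := msg.Ack(); err != nil {
			return err
		}
	}
	return nil
}

var (
	_ = broker.IsValidHandler(handleOne)   // nil
	_ = broker.IsValidHandler(handleBatch) // nil
)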

View File

@@ -3,8 +3,6 @@ package codec
import (
"errors"
"gopkg.in/yaml.v3"
)
var (
@@ -68,10 +66,10 @@ func (m *RawMessage) MarshalYAML() ([]byte, error) {
}
// UnmarshalYAML sets *m to a copy of data.
func (m *RawMessage) UnmarshalYAML(n *yaml.Node) error {
func (m *RawMessage) UnmarshalYAML(data []byte) error {
if m == nil {
return errors.New("RawMessage UnmarshalYAML on nil pointer")
}
*m = append((*m)[0:0], []byte(n.Value)...)
*m = append((*m)[0:0], data...)
return nil
}
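The signature moves from the yaml.v3 node-based form to the byte-based form used by github.com/goccy/go-yaml (added to go.mod below). A hedged usage sketch, assuming go.unistack.org/micro/v4/codec as the import path and that goccy/go-yaml dispatches to UnmarshalYAML([]byte) error implementations:

import (
	yaml "github.com/goccy/go-yaml"

	"go.unistack.org/micro/v4/codec"
)

func decodeRaw(src []byte) (codec.RawMessage, error) {
	var raw codec.RawMessage
	// the decoder is expected to hand the raw bytes of the YAML value to the
	// UnmarshalYAML([]byte) error method shown above
	if err := yaml.Unmarshal(src, &raw); err != nil {
		return nil, err
	}
	return raw, nil
}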

View File

@@ -1,7 +1,5 @@
package codec
import "gopkg.in/yaml.v3"
// Frame gives us the ability to define raw data to send over the pipes
type Frame struct {
Data []byte
@@ -28,8 +26,8 @@ func (m *Frame) MarshalYAML() ([]byte, error) {
}
// UnmarshalYAML set frame data
func (m *Frame) UnmarshalYAML(n *yaml.Node) error {
m.Data = []byte(n.Value)
func (m *Frame) UnmarshalYAML(data []byte) error {
m.Data = append((m.Data)[0:0], data...)
return nil
}

6
go.mod
View File

@@ -6,19 +6,19 @@ require (
dario.cat/mergo v1.0.1
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/KimMachineGun/automemlimit v0.7.0
github.com/ash3in/uuidv8 v1.2.0
github.com/goccy/go-yaml v1.17.1
github.com/google/uuid v1.6.0
github.com/matoous/go-nanoid v1.5.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
github.com/spf13/cast v1.7.1
github.com/stretchr/testify v1.10.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v4 v4.1.0
golang.org/x/sync v0.10.0
google.golang.org/grpc v1.69.4
google.golang.org/protobuf v1.36.3
gopkg.in/yaml.v3 v3.0.1
)
require (
@@ -26,9 +26,9 @@ require (
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/stretchr/testify v1.10.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sys v0.29.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

4
go.sum
View File

@@ -4,12 +4,12 @@ github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7Oputl
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
github.com/ash3in/uuidv8 v1.2.0 h1:2oogGdtCPwaVtyvPPGin4TfZLtOGE5F+W++E880G6SI=
github.com/ash3in/uuidv8 v1.2.0/go.mod h1:BnU0wJBxnzdEKmVg4xckBkD+VZuecTFTUP3M0dWgyY4=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=

117
hooks/metadata/metadata.go Normal file
View File

@@ -0,0 +1,117 @@
package metadata
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
)
type wrapper struct {
keys []string
client.Client
}
func NewClientWrapper(keys ...string) client.Wrapper {
return func(c client.Client) client.Client {
handler := &wrapper{
Client: c,
keys: keys,
}
return handler
}
}
func NewClientCallWrapper(keys ...string) client.CallWrapper {
return func(fn client.CallFunc) client.CallFunc {
return func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
if keys == nil {
return fn(ctx, addr, req, rsp, opts)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, addr, req, rsp, opts)
}
}
}
func (w *wrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if w.keys == nil {
return w.Client.Call(ctx, req, rsp, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Call(ctx, req, rsp, opts...)
}
func (w *wrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if w.keys == nil {
return w.Client.Stream(ctx, req, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Stream(ctx, req, opts...)
}
func NewServerHandlerWrapper(keys ...string) server.HandlerWrapper {
return func(fn server.HandlerFunc) server.HandlerFunc {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if keys == nil {
return fn(ctx, req, rsp)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, req, rsp)
}
}
}
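The client wrapper, call wrapper, and server handler wrapper all apply the same copy step; distilled into one standalone function below, using only the metadata calls that already appear above:

import (
	"context"

	"go.unistack.org/micro/v4/metadata"
)

// propagate copies the listed keys from incoming to outgoing metadata,
// exactly as the wrappers do before delegating to the wrapped call.
func propagate(ctx context.Context, keys ...string) context.Context {
	imd, iok := metadata.FromIncomingContext(ctx)
	if !iok || imd == nil {
		return ctx
	}
	omd, ook := metadata.FromOutgoingContext(ctx)
	if !ook || omd == nil {
		omd = metadata.New(len(imd))
	}
	for _, k := range keys {
		if v := imd.Get(k); v != nil {
			omd.Set(k, v...)
		}
	}
	if !ook {
		ctx = metadata.NewOutgoingContext(ctx, omd)
	}
	return ctx
}

NewClientCallWrapper("X-Request-Id"), for example, runs this step for that single key before invoking the wrapped CallFunc (the key name here is illustrative).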

View File

@@ -0,0 +1,63 @@
package recovery
import (
"context"
"fmt"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
func NewOptions(opts ...Option) Options {
options := Options{
ServerHandlerFn: DefaultServerHandlerFn,
}
for _, o := range opts {
o(&options)
}
return options
}
type Options struct {
ServerHandlerFn func(context.Context, server.Request, interface{}, error) error
}
type Option func(*Options)
func ServerHandlerFunc(fn func(context.Context, server.Request, interface{}, error) error) Option {
return func(o *Options) {
o.ServerHandlerFn = fn
}
}
var DefaultServerHandlerFn = func(ctx context.Context, req server.Request, rsp interface{}, err error) error {
return errors.BadRequest("", "%v", err)
}
var Hook = NewHook()
type hook struct {
opts Options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) (err error) {
defer func() {
r := recover()
switch verr := r.(type) {
case nil:
return
case error:
err = w.opts.ServerHandlerFn(ctx, req, rsp, verr)
default:
err = w.opts.ServerHandlerFn(ctx, req, rsp, fmt.Errorf("%v", r))
}
}()
err = next(ctx, req, rsp)
return err
}
}
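A usage sketch: wrapping a handler with the exported Hook so a panic becomes the error produced by ServerHandlerFn. The import path go.unistack.org/micro/v4/hooks/recovery and the exact server.FuncHandler signature are inferred from the surrounding code, so treat both as assumptions:

import (
	"context"

	"go.unistack.org/micro/v4/hooks/recovery"
	"go.unistack.org/micro/v4/server"
)

func example() error {
	var h server.FuncHandler = func(ctx context.Context, req server.Request, rsp interface{}) error {
		panic("boom") // recovered by the hook
	}
	wrapped := recovery.Hook.ServerHandler(h)
	// returns DefaultServerHandlerFn's result, i.e. errors.BadRequest("", "%v", err)
	return wrapped(context.Background(), nil, nil)
}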

View File

@@ -0,0 +1,103 @@
package requestid
import (
"context"
"net/textproto"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
"go.unistack.org/micro/v4/util/id"
)
type XRequestIDKey struct{}
// DefaultMetadataKey contains metadata key
var DefaultMetadataKey = textproto.CanonicalMIMEHeaderKey("x-request-id")
// DefaultMetadataFunc will be used if the user does not provide their own func to fill metadata
var DefaultMetadataFunc = func(ctx context.Context) (context.Context, error) {
var xid string
cid, cok := ctx.Value(XRequestIDKey{}).(string)
if cok && cid != "" {
xid = cid
}
imd, iok := metadata.FromIncomingContext(ctx)
if !iok || imd == nil {
imd = metadata.New(1)
ctx = metadata.NewIncomingContext(ctx, imd)
}
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(1)
ctx = metadata.NewOutgoingContext(ctx, omd)
}
if xid == "" {
xid = imd.GetJoined(DefaultMetadataKey)
if xid == "" {
xid = omd.GetJoined(DefaultMetadataKey)
}
}
if xid == "" {
var err error
xid, err = id.New()
if err != nil {
return ctx, err
}
}
if !cok {
ctx = context.WithValue(ctx, XRequestIDKey{}, xid)
}
if !iok {
imd.Set(DefaultMetadataKey, xid)
}
if !ook {
omd.Set(DefaultMetadataKey, xid)
}
return ctx, nil
}
type hook struct{}
func NewHook() *hook {
return &hook{}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp)
}
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp, opts...)
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return nil, err
}
return next(ctx, req, opts...)
}
}

View File

@@ -0,0 +1,34 @@
package requestid
import (
"context"
"slices"
"testing"
"go.unistack.org/micro/v4/metadata"
)
func TestDefaultMetadataFunc(t *testing.T) {
ctx := context.TODO()
nctx, err := DefaultMetadataFunc(ctx)
if err != nil {
t.Fatalf("%v", err)
}
imd, ok := metadata.FromIncomingContext(nctx)
if !ok {
t.Fatalf("md missing in incoming context")
}
omd, ok := metadata.FromOutgoingContext(nctx)
if !ok {
t.Fatalf("md missing in outgoing context")
}
iv := imd.Get(DefaultMetadataKey)
ov := omd.Get(DefaultMetadataKey)
if !slices.Equal(iv, ov) {
t.Fatalf("missing metadata key value %v != %v", iv, ov)
}
}
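Besides the metadata entries the test checks, the generated id is also stored in the context under XRequestIDKey, which is how the sql hooks below attach it to spans. A short sketch; the package is imported as go.unistack.org/micro/v4/hooks/requestid in hooks/sql/conn.go below:

import (
	"context"

	"go.unistack.org/micro/v4/hooks/requestid"
)

// requestIDFromContext ensures a request id exists and returns it.
func requestIDFromContext(ctx context.Context) (string, error) {
	nctx, err := requestid.DefaultMetadataFunc(ctx)
	if err != nil {
		return "", err
	}
	xid, _ := nctx.Value(requestid.XRequestIDKey{}).(string)
	return xid, nil
}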

51
hooks/sql/common.go Normal file
View File

@@ -0,0 +1,51 @@
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"runtime"
)
//go:generate sh -c "go run gen.go > wrap_gen.go"
// namedValueToValue converts driver arguments of NamedValue format to Value format. Implemented in the same way as in
// database/sql ctxutil.go.
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
return nil, errors.New("sql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
// namedValueToLabels convert driver arguments to interface{} slice
func namedValueToLabels(named []driver.NamedValue) []interface{} {
largs := make([]interface{}, 0, len(named)*2)
var name string
for _, param := range named {
if param.Name != "" {
name = param.Name
} else {
name = fmt.Sprintf("$%d", param.Ordinal)
}
largs = append(largs, fmt.Sprintf("%s=%v", name, param.Value))
}
return largs
}
// getCallerName gets the name of the function A where A() -> B() -> getCallerName()
func getCallerName() string {
pc, _, _, ok := runtime.Caller(3)
details := runtime.FuncForPC(pc)
var callerName string
if ok && details != nil {
callerName = details.Name()
} else {
callerName = labelUnknown
}
return callerName
}
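For reference, namedValueToLabels renders each driver argument as "name=value", falling back to "$ordinal" when the parameter is positional; a small in-package sketch of what it yields:

import "database/sql/driver"

func exampleLabels() []interface{} {
	args := []driver.NamedValue{
		{Ordinal: 1, Value: 42},
		{Name: "user", Value: "bob"},
	}
	// yields []interface{}{"$1=42", "user=bob"}
	return namedValueToLabels(args)
}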

467
hooks/sql/conn.go Normal file
View File

@@ -0,0 +1,467 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
"go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Conn = (*wrapperConn)(nil)
_ driver.ConnBeginTx = (*wrapperConn)(nil)
_ driver.ConnPrepareContext = (*wrapperConn)(nil)
_ driver.Pinger = (*wrapperConn)(nil)
_ driver.Validator = (*wrapperConn)(nil)
_ driver.Queryer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.QueryerContext = (*wrapperConn)(nil)
_ driver.Execer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.ExecerContext = (*wrapperConn)(nil)
// _ driver.Connector
// _ driver.Driver
// _ driver.DriverContext
)
// wrapperConn defines a wrapper for driver.Conn
type wrapperConn struct {
d *wrapperDriver
dname string
conn driver.Conn
opts Options
ctx context.Context
//span tracer.Span
}
// Close implements driver.Conn Close
func (w *wrapperConn) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.conn.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// Begin implements driver.Conn Begin
func (w *wrapperConn) Begin() (driver.Tx, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
labels := []string{labelMethod, "Begin"}
ts := time.Now()
tx, err := w.conn.Begin() // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx}, nil
}
// BeginTx implements driver.ConnBeginTx BeginTx
func (w *wrapperConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
name := getQueryName(ctx)
nctx, span := w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
span.AddLabels("db.method", "BeginTx")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "BeginTx", labelQuery, name}
connBeginTx, ok := w.conn.(driver.ConnBeginTx)
if !ok {
return w.Begin()
}
ts := time.Now()
tx, err := connBeginTx.BeginTx(nctx, opts)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx, span: span}, nil
}
// Prepare implements driver.Conn Prepare
func (w *wrapperConn) Prepare(query string) (driver.Stmt, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Prepare", labelQuery, getCallerName()}
ts := time.Now()
stmt, err := w.conn.Prepare(query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// PrepareContext implements driver.ConnPrepareContext PrepareContext
func (w *wrapperConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "PrepareContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "PrepareContext", labelQuery, name}
conn, ok := w.conn.(driver.ConnPrepareContext)
if !ok {
return w.Prepare(query)
}
ts := time.Now()
stmt, err := conn.PrepareContext(nctx, query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// Exec implements driver.Execer Exec
func (w *wrapperConn) Exec(query string, args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec", labelQuery, getCallerName()}
// nolint:staticcheck
conn, ok := w.conn.(driver.Execer)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.Exec(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// Exec implements driver.StmtExecContext ExecContext
func (w *wrapperConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
conn, ok := w.conn.(driver.ExecerContext)
if !ok {
// nolint:staticcheck
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.ExecContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", getCallerName(), td, err)...)
}
*/
return res, err
}
// Ping implements driver.Pinger Ping
func (w *wrapperConn) Ping(ctx context.Context) error {
conn, ok := w.conn.(driver.Pinger)
if !ok {
// fallback path to check db alive
pc, err := w.d.Open(w.dname)
if err != nil {
return err
}
return pc.Close()
}
var nctx context.Context //nolint:gosimple
nctx = ctx
/*
var span tracer.Span
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "Ping")
defer span.Finish()
*/
labels := []string{labelMethod, "Ping"}
ts := time.Now()
err := conn.Ping(nctx)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
// span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Ping", getCallerName(), td, err)...)
}
*/
return err
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
return nil
}
// Query implements driver.Queryer Query
func (w *wrapperConn) Query(query string, args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
// nolint:staticcheck
conn, ok := w.conn.(driver.Queryer)
if !ok {
return nil, driver.ErrSkip
}
labels := []string{labelMethod, "Query", labelQuery, getCallerName()}
ts := time.Now()
rows, err := conn.Query(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// QueryContext implements driver.QueryerContext QueryContext
func (w *wrapperConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
conn, ok := w.conn.(driver.QueryerContext)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
rows, err := conn.QueryContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", getCallerName(), td, err)...)
}
*/
return rows, err
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperConn) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.conn.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// IsValid implements driver.Validator
func (w *wrapperConn) IsValid() bool {
v, ok := w.conn.(driver.Validator)
if !ok {
return w.conn != nil
}
return v.IsValid()
}
func (w *wrapperConn) ResetSession(ctx context.Context) error {
s, ok := w.conn.(driver.SessionResetter)
if !ok {
return driver.ErrSkip
}
return s.ResetSession(ctx)
}

94
hooks/sql/driver.go Normal file

@@ -0,0 +1,94 @@
package sql
import (
"context"
"database/sql/driver"
"time"
)
var (
// _ driver.DriverContext = (*wrapperDriver)(nil)
// _ driver.Connector = (*wrapperDriver)(nil)
)
/*
type conn interface {
driver.Pinger
driver.Execer
driver.ExecerContext
driver.Queryer
driver.QueryerContext
driver.Conn
driver.ConnPrepareContext
driver.ConnBeginTx
}
*/
// wrapperDriver defines a wrapper for driver.Driver
type wrapperDriver struct {
driver driver.Driver
opts Options
ctx context.Context
}
// NewWrapper creates and returns a new SQL driver with passed capabilities
func NewWrapper(d driver.Driver, opts ...Option) driver.Driver {
return &wrapperDriver{driver: d, opts: NewOptions(opts...), ctx: context.Background()}
}
type wrappedConnector struct {
connector driver.Connector
// name string
opts Options
ctx context.Context
}
// NewWrapperConnector creates and returns a new SQL connector with passed capabilities
func NewWrapperConnector(c driver.Connector, opts ...Option) driver.Connector {
return &wrappedConnector{connector: c, opts: NewOptions(opts...), ctx: context.Background()}
}
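// Illustrative usage sketch (an assumption, not part of this change): the wrapper is
// typically handed to database/sql, either by registering a wrapped driver or by
// opening a wrapped connector directly:
//
//    sql.Register("postgres-hooked", NewWrapper(&pq.Driver{}, DatabaseName("orders")))
//    db, err := sql.Open("postgres-hooked", dsn)
//
// or, when a driver.Connector is available:
//
//    db := sql.OpenDB(NewWrapperConnector(connector, DatabaseHost("db1.local")))
//
// The driver (pq), DSN and label values above are placeholders.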
// Connect implements driver.Connector Connect
func (w *wrappedConnector) Connect(ctx context.Context) (driver.Conn, error) {
return w.connector.Connect(ctx)
}
// Driver implements driver.Connector Driver
func (w *wrappedConnector) Driver() driver.Driver {
return w.connector.Driver()
}
/*
// Connect implements driver.Driver OpenConnector
func (w *wrapperDriver) OpenConnector(name string) (driver.Conn, error) {
return &wrapperConnector{driver: w.driver, name: name, opts: w.opts}, nil
}
*/
// Open implements driver.Driver Open
func (w *wrapperDriver) Open(name string) (driver.Conn, error) {
// ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // Ensure eventual timeout
// defer cancel()
/*
connector, err := w.OpenConnector(name)
if err != nil {
return nil, err
}
return connector.Connect(ctx)
*/
ts := time.Now()
c, err := w.driver.Open(name)
td := time.Since(ts)
/*
if w.opts.LoggerEnabled {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Open", getCallerName(), td, err)...)
}
*/
_ = td
if err != nil {
return nil, err
}
return wrapConn(c, w.opts), nil
}

167
hooks/sql/gen.go Normal file

@@ -0,0 +1,167 @@
//go:build ignore
package main
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"sort"
"strings"
)
var connIfaces = []string{
"driver.ConnBeginTx",
"driver.ConnPrepareContext",
"driver.Execer",
"driver.ExecerContext",
"driver.NamedValueChecker",
"driver.Pinger",
"driver.Queryer",
"driver.QueryerContext",
"driver.SessionResetter",
"driver.Validator",
}
var stmtIfaces = []string{
"driver.StmtExecContext",
"driver.StmtQueryContext",
"driver.ColumnConverter",
"driver.NamedValueChecker",
}
func getHash(s []string) string {
h := md5.New()
io.WriteString(h, strings.Join(s, "|"))
return fmt.Sprintf("%x", h.Sum(nil))
}
func main() {
comboConn := all(connIfaces)
sort.Slice(comboConn, func(i, j int) bool {
return len(comboConn[i]) < len(comboConn[j])
})
comboStmt := all(stmtIfaces)
sort.Slice(comboStmt, func(i, j int) bool {
return len(comboStmt[i]) < len(comboStmt[j])
})
b := bytes.NewBuffer(nil)
b.WriteString("// Code generated. DO NOT EDIT.\n\n")
b.WriteString("package sql\n\n")
b.WriteString(`import "database/sql/driver"`)
b.WriteString("\n\n")
b.WriteString("func wrapConn(dc driver.Conn, opts Options) driver.Conn {\n")
b.WriteString("\tc := &wrapperConn{conn: dc, opts: opts}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := dc.(wrapConn%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Conn\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
}
b.WriteString("c")
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapConn%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
b.WriteString("func wrapStmt(stmt driver.Stmt, query string, opts Options) driver.Stmt {\n")
b.WriteString("\tc := &wrapperStmt{stmt: stmt, query: query, opts: opts}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := stmt.(wrapStmt%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Stmt\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
}
b.WriteString("c")
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapStmt%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
fmt.Printf("%s\n", b.String())
}
// all returns all combinations for a given string array.
func all[T any](set []T) (subsets [][]T) {
length := uint(len(set))
for subsetBits := 1; subsetBits < (1 << length); subsetBits++ {
var subset []T
for object := uint(0); object < length; object++ {
if (subsetBits>>object)&1 == 1 {
subset = append(subset, set[object])
}
}
subsets = append(subsets, subset)
}
return subsets
}
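// Illustrative example (an assumption, not part of this change): for a two-element input
// the combinations, in bitmask order, are
//
//    all([]string{"A", "B"}) // -> ["A"], ["B"], ["A", "B"]
//
// The empty subset is skipped because subsetBits starts at 1.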

172
hooks/sql/options.go Normal file

@@ -0,0 +1,172 @@
package sql
import (
"context"
"fmt"
"time"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/tracer"
)
var (
// DefaultMeterStatsInterval holds default stats interval
DefaultMeterStatsInterval = 5 * time.Second
// DefaultLoggerObserver used to prepare labels for logger
DefaultLoggerObserver = func(ctx context.Context, method string, query string, td time.Duration, err error) []interface{} {
labels := []interface{}{"db.method", method, "took", fmt.Sprintf("%v", td)}
if err != nil {
labels = append(labels, "error", err.Error())
}
if query != labelUnknown {
labels = append(labels, "query", query)
}
return labels
}
)
// Metric and label names used by the sql wrapper.
var (
MaxOpenConnections = "micro_sql_max_open_conn"
OpenConnections = "micro_sql_open_conn"
InuseConnections = "micro_sql_inuse_conn"
IdleConnections = "micro_sql_idle_conn"
WaitConnections = "micro_sql_waited_conn"
BlockedSeconds = "micro_sql_blocked_seconds"
MaxIdleClosed = "micro_sql_max_idle_closed"
MaxIdletimeClosed = "micro_sql_closed_max_idle"
MaxLifetimeClosed = "micro_sql_closed_max_lifetime"
meterRequestTotal = "micro_sql_request_total"
meterRequestLatencyMicroseconds = "micro_sql_latency_microseconds"
meterRequestDurationSeconds = "micro_sql_request_duration_seconds"
labelUnknown = "unknown"
labelQuery = "db_statement"
labelMethod = "db_method"
labelStatus = "status"
labelSuccess = "success"
labelFailure = "failure"
labelHost = "db_host"
labelDatabase = "db_name"
)
// Options struct holds wrapper options
type Options struct {
Logger logger.Logger
Meter meter.Meter
Tracer tracer.Tracer
DatabaseHost string
DatabaseName string
MeterStatsInterval time.Duration
LoggerLevel logger.Level
LoggerEnabled bool
LoggerObserver func(ctx context.Context, method string, name string, td time.Duration, err error) []interface{}
}
// Option func signature
type Option func(*Options)
// NewOptions create new Options struct from provided option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
MeterStatsInterval: DefaultMeterStatsInterval,
LoggerLevel: logger.ErrorLevel,
LoggerObserver: DefaultLoggerObserver,
}
for _, o := range opts {
o(&options)
}
options.Meter = options.Meter.Clone(
meter.Labels(
labelHost, options.DatabaseHost,
labelDatabase, options.DatabaseName,
),
)
options.Logger = options.Logger.Clone(logger.WithAddCallerSkipCount(1))
return options
}
// MetricInterval specifies stats interval for *sql.DB
func MetricInterval(td time.Duration) Option {
return func(o *Options) {
o.MeterStatsInterval = td
}
}
// DatabaseHost sets the database host label attached to metrics
func DatabaseHost(host string) Option {
return func(o *Options) {
o.DatabaseHost = host
}
}
// DatabaseName sets the database name label attached to metrics
func DatabaseName(name string) Option {
return func(o *Options) {
o.DatabaseName = name
}
}
// Meter passes meter.Meter to wrapper
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Logger passes logger.Logger to wrapper
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// LoggerEnabled enable sql logging
func LoggerEnabled(b bool) Option {
return func(o *Options) {
o.LoggerEnabled = b
}
}
// LoggerLevel passes logger.Level option
func LoggerLevel(lvl logger.Level) Option {
return func(o *Options) {
o.LoggerLevel = lvl
}
}
// LoggerObserver passes observer to fill logger fields
func LoggerObserver(obs func(context.Context, string, string, time.Duration, error) []interface{}) Option {
return func(o *Options) {
o.LoggerObserver = obs
}
}
// Tracer passes tracer.Tracer to wrapper
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
type queryNameKey struct{}
// QueryName passes query name to wrapper func
func QueryName(ctx context.Context, name string) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, queryNameKey{}, name)
}
func getQueryName(ctx context.Context) string {
if v, ok := ctx.Value(queryNameKey{}).(string); ok && v != labelUnknown {
return v
}
return getCallerName()
}
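// Illustrative usage sketch (an assumption, not part of this change): QueryName tags a
// statement so traces and metrics carry a stable name instead of the caller name:
//
//    ctx = QueryName(ctx, "orders.select_by_id")
//    rows, err := db.QueryContext(ctx, "SELECT id, status FROM orders WHERE id = $1", id)
//
// The statement name and query above are placeholders.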

41
hooks/sql/stats.go Normal file

@@ -0,0 +1,41 @@
package sql
import (
"context"
"database/sql"
"time"
)
// Statser is the minimal interface, satisfied by *sql.DB, used to read connection pool statistics.
type Statser interface {
Stats() sql.DBStats
}
// NewStatsMeter periodically exports sql.DBStats values as meter metrics until ctx is done.
func NewStatsMeter(ctx context.Context, db Statser, opts ...Option) {
options := NewOptions(opts...)
go func() {
ticker := time.NewTicker(options.MeterStatsInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if db == nil {
return
}
stats := db.Stats()
options.Meter.Counter(MaxOpenConnections).Set(uint64(stats.MaxOpenConnections))
options.Meter.Counter(OpenConnections).Set(uint64(stats.OpenConnections))
options.Meter.Counter(InuseConnections).Set(uint64(stats.InUse))
options.Meter.Counter(IdleConnections).Set(uint64(stats.Idle))
options.Meter.Counter(WaitConnections).Set(uint64(stats.WaitCount))
options.Meter.FloatCounter(BlockedSeconds).Set(stats.WaitDuration.Seconds())
options.Meter.Counter(MaxIdleClosed).Set(uint64(stats.MaxIdleClosed))
options.Meter.Counter(MaxIdletimeClosed).Set(uint64(stats.MaxIdleTimeClosed))
options.Meter.Counter(MaxLifetimeClosed).Set(uint64(stats.MaxLifetimeClosed))
}
}
}()
}
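// Illustrative usage sketch (an assumption, not part of this change): *sql.DB satisfies
// Statser, so pool statistics can be exported for an open database:
//
//    NewStatsMeter(ctx, db, MetricInterval(10*time.Second), DatabaseName("orders"))
//
// The interval and label values are placeholders.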

287
hooks/sql/stmt.go Normal file

@@ -0,0 +1,287 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
requestid "go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Stmt = (*wrapperStmt)(nil)
_ driver.StmtQueryContext = (*wrapperStmt)(nil)
_ driver.StmtExecContext = (*wrapperStmt)(nil)
_ driver.NamedValueChecker = (*wrapperStmt)(nil)
)
// wrapperStmt defines a wrapper for driver.Stmt
type wrapperStmt struct {
stmt driver.Stmt
opts Options
query string
ctx context.Context
}
// Close implements driver.Stmt Close
func (w *wrapperStmt) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.stmt.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// NumInput implements driver.Stmt NumInput
func (w *wrapperStmt) NumInput() int {
return w.stmt.NumInput()
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperStmt) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.stmt.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// Exec implements driver.Stmt Exec
func (w *wrapperStmt) Exec(args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec"}
ts := time.Now()
res, err := w.stmt.Exec(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// Query implements driver.Stmt Query
func (w *wrapperStmt) Query(args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Query"}
ts := time.Now()
rows, err := w.stmt.Query(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// ColumnConverter implements driver.ColumnConverter
func (w *wrapperStmt) ColumnConverter(idx int) driver.ValueConverter {
s, ok := w.stmt.(driver.ColumnConverter) // nolint:staticcheck
if !ok {
return nil
}
return s.ColumnConverter(idx)
}
// ExecContext implements driver.StmtExecContext ExecContext
func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtExecContext); ok {
ts := time.Now()
res, err := conn.ExecContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
res, err := w.Exec(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
// QueryContext implements driver.StmtQueryContext QueryContext
func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtQueryContext); ok {
ts := time.Now()
rows, err := conn.QueryContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
rows, err := w.Query(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}

63
hooks/sql/tx.go Normal file

@@ -0,0 +1,63 @@
package sql
import (
"context"
"database/sql/driver"
"time"
"go.unistack.org/micro/v4/tracer"
)
var _ driver.Tx = (*wrapperTx)(nil)
// wrapperTx defines a wrapper for driver.Tx
type wrapperTx struct {
tx driver.Tx
span tracer.Span
opts Options
ctx context.Context
}
// Commit implements driver.Tx Commit
func (w *wrapperTx) Commit() error {
ts := time.Now()
err := w.tx.Commit()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Commit", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}
// Rollback implements driver.Tx Rollback
func (w *wrapperTx) Rollback() error {
ts := time.Now()
err := w.tx.Rollback()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Rollback", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}

19
hooks/sql/wrap.go Normal file

@@ -0,0 +1,19 @@
package sql
import (
"database/sql/driver"
)
/*
func wrapDriver(d driver.Driver, opts Options) driver.Driver {
if _, ok := d.(driver.DriverContext); ok {
return &wrapperDriver{driver: d, opts: opts}
}
return struct{ driver.Driver }{&wrapperDriver{driver: d, opts: opts}}
}
*/
// WrapConn allows an existing driver.Conn to be wrapped.
func WrapConn(c driver.Conn, opts ...Option) driver.Conn {
return wrapConn(c, NewOptions(opts...))
}

20699
hooks/sql/wrap_gen.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,133 @@
package validator
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
var (
DefaultClientErrorFunc = func(req client.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
DefaultServerErrorFunc = func(req server.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
)
type (
ClientErrorFunc func(client.Request, interface{}, error) error
ServerErrorFunc func(server.Request, interface{}, error) error
)
// Options struct holds wrapper options
type Options struct {
ClientErrorFn ClientErrorFunc
ServerErrorFn ServerErrorFunc
ClientValidateResponse bool
ServerValidateResponse bool
}
// Option func signature
type Option func(*Options)
// ClientValidateResponse enables validation of client call responses
func ClientValidateResponse(b bool) Option {
return func(o *Options) {
o.ClientValidateResponse = b
}
}
// ServerValidateResponse enables validation of server handler responses
func ServerValidateResponse(b bool) Option {
return func(o *Options) {
o.ServerValidateResponse = b
}
}
// ClientReqErrorFn sets the error wrapping func for client requests
func ClientReqErrorFn(fn ClientErrorFunc) Option {
return func(o *Options) {
o.ClientErrorFn = fn
}
}
// ServerErrorFn sets the error wrapping func for server requests
func ServerErrorFn(fn ServerErrorFunc) Option {
return func(o *Options) {
o.ServerErrorFn = fn
}
}
func NewOptions(opts ...Option) Options {
options := Options{
ClientErrorFn: DefaultClientErrorFunc,
ServerErrorFn: DefaultServerErrorFunc,
}
for _, o := range opts {
o(&options)
}
return options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
type validator interface {
Validate() error
}
type hook struct {
opts Options
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ClientErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp, opts...)
if v, ok := rsp.(validator); ok && w.opts.ClientValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ClientErrorFn(req, rsp, verr)
}
}
return err
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return nil, w.opts.ClientErrorFn(req, nil, err)
}
}
return next(ctx, req, opts...)
}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ServerErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp)
if v, ok := rsp.(validator); ok && w.opts.ServerValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ServerErrorFn(req, rsp, verr)
}
}
return err
}
}
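// Illustrative usage sketch (an assumption, not part of this change): the hook is built
// once and its ClientCall, ClientStream and ServerHandler funcs are attached through the
// framework's client and server hook options; the exact wiring depends on the caller.
//
//    h := NewHook(ClientValidateResponse(true), ServerValidateResponse(true))
//    _ = h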


@@ -4,18 +4,20 @@ package logger
type Level int8
const (
// TraceLevel level usually used to find bugs, very verbose
// TraceLevel usually used to find bugs, very verbose
TraceLevel Level = iota - 2
// DebugLevel level used only when enabled debugging
// DebugLevel used only when enabled debugging
DebugLevel
// InfoLevel level used for general info about what's going on inside the application
// InfoLevel used for general info about what's going on inside the application
InfoLevel
// WarnLevel level used for non-critical entries
// WarnLevel used for non-critical entries
WarnLevel
// ErrorLevel level used for errors that should definitely be noted
// ErrorLevel used for errors that should definitely be noted
ErrorLevel
// FatalLevel level used for critical errors and then calls `os.Exit(1)`
// FatalLevel used for critical errors and then calls `os.Exit(1)`
FatalLevel
// NoneLevel used to disable logging
NoneLevel
)
// String returns logger level string representation
@@ -33,6 +35,8 @@ func (l Level) String() string {
return "error"
case FatalLevel:
return "fatal"
case NoneLevel:
return "none"
}
return "info"
}
@@ -58,6 +62,8 @@ func ParseLevel(lvl string) Level {
return ErrorLevel
case FatalLevel.String():
return FatalLevel
case NoneLevel.String():
return NoneLevel
}
return InfoLevel
}


@@ -34,6 +34,7 @@ var (
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
noneValue = slog.StringValue("none")
)
type wrapper struct {
@@ -85,6 +86,8 @@ func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
a.Value = errorValue
case lvl >= logger.FatalLevel:
a.Value = fatalValue
case lvl >= logger.NoneLevel:
a.Value = noneValue
default:
a.Value = infoValue
}
@@ -316,6 +319,8 @@ func loggerToSlogLevel(level logger.Level) slog.Level {
return slog.LevelDebug - 1
case logger.FatalLevel:
return slog.LevelError + 1
case logger.NoneLevel:
return slog.LevelError + 2
default:
return slog.LevelInfo
}
@@ -333,6 +338,8 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
return logger.TraceLevel
case slog.LevelError + 1:
return logger.FatalLevel
case slog.LevelError + 2:
return logger.NoneLevel
default:
return logger.InfoLevel
}


@@ -36,6 +36,24 @@ func TestStacktrace(t *testing.T) {
}
}
func TestNoneLevel(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.NoneLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
if buf.Len() != 0 {
t.Fatalf("logger none level not works, buf contains: %s", buf.Bytes())
}
}
func TestDelayedBuffer(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
@@ -406,7 +424,7 @@ func TestLogger(t *testing.T) {
func Test_WithContextAttrFunc(t *testing.T) {
loggerContextAttrFuncs := []logger.ContextAttrFunc{
func(ctx context.Context) []interface{} {
md, ok := metadata.FromIncomingContext(ctx)
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
return nil
}
@@ -425,7 +443,7 @@ func Test_WithContextAttrFunc(t *testing.T) {
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
ctx := context.TODO()
ctx = metadata.AppendIncomingContext(ctx, "X-Request-Id", uuid.New().String(),
ctx = metadata.AppendOutgoingContext(ctx, "X-Request-Id", uuid.New().String(),
"Source-Service", "Test-System")
buf := bytes.NewBuffer(nil)
@@ -445,9 +463,9 @@ func Test_WithContextAttrFunc(t *testing.T) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
buf.Reset()
imd, _ := metadata.FromIncomingContext(ctx)
omd, _ := metadata.FromOutgoingContext(ctx)
l.Info(ctx, "test message1")
imd.Set("Source-Service", "Test-System2")
omd.Set("Source-Service", "Test-System2")
l.Info(ctx, "test message2")
// t.Logf("xxx %s", buf.Bytes())

294
metadata/context.go Normal file

@@ -0,0 +1,294 @@
package metadata
import (
"context"
"fmt"
"strings"
)
// In the metadata package, context and metadata are treated as immutable.
// Deep copies of metadata are made to keep things safe and correct.
// If a user takes a map and changes it across threads, it's their responsibility.
//
// 1. Incoming Context
//
// This context is provided by an external system and populated by the server or broker of the micro framework.
// It should not be modified. The idea is to extract all necessary data from it,
// validate the data, and transfer it into the current context.
// After that, only the current context should be used throughout the code.
//
// 2. Current Context
//
// This is the context used during the execution flow.
// You can add any needed metadata to it and pass it through your code.
//
// 3. Outgoing Context
//
// This context is for sending data to external systems.
// You can add what you need before sending it out.
// But it's usually better to build and prepare this context right before making the external call,
// instead of changing it in many places.
//
// Execution Flow:
//
// [External System]
// ↓
// [Incoming Context]
// ↓
// [Extract & Validate Metadata from Incoming Context]
// ↓
// [Prepare Current Context]
// ↓
// [Enrich Current Context]
// ↓
// [Business Logic]
// ↓
// [Prepare Outgoing Context]
// ↓
// [External System Call]
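//
// Illustrative usage sketch (an assumption, not part of this change) of the flow above:
//
//    imd, _ := FromIncomingContext(ctx)
//    ctx = AppendContext(ctx, "x-request-id", imd.GetJoined("x-request-id"))
//    octx := NewOutgoingContext(ctx, Pairs("source-service", "billing"))
//    // octx is then used for the external system call
//
// The header names and values above are placeholders.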
type (
metadataCurrentKey struct{}
metadataIncomingKey struct{}
metadataOutgoingKey struct{}
rawMetadata struct {
md Metadata
added [][]string
}
)
// NewContext creates a new context with the provided Metadata attached.
// The Metadata must not be modified after calling this function.
func NewContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
}
// NewIncomingContext creates a new context with the provided incoming Metadata attached.
// The Metadata must not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
}
// NewOutgoingContext creates a new context with the provided outgoing Metadata attached.
// The Metadata must not be modified after calling this function.
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
}
// AppendContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
}
// AppendOutgoingContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
}
// FromContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
func FromContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustContext(ctx context.Context) Metadata {
md, ok := FromContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromIncomingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustIncomingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustIncomingContext(ctx context.Context) Metadata {
md, ok := FromIncomingContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromOutgoingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, ok
}
// MustOutgoingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustOutgoingContext(ctx context.Context) Metadata {
md, ok := FromOutgoingContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// ValueFromCurrentContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromCurrentContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := md.md[key]; ok {
return copyOf(v)
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
// ValueFromIncomingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := raw.md[key]; ok {
return copyOf(v)
}
for k, v := range raw.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
// ValueFromOutgoingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := md.md[key]; ok {
return copyOf(v)
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}


@@ -2,18 +2,18 @@
package metadata
var (
// HeaderTopic is the header name that contains topic name
// HeaderTopic is the header name that contains topic name.
HeaderTopic = "Micro-Topic"
// HeaderContentType specifies content type of message
// HeaderContentType specifies content type of message.
HeaderContentType = "Content-Type"
// HeaderEndpoint specifies endpoint in service
// HeaderEndpoint specifies endpoint in service.
HeaderEndpoint = "Micro-Endpoint"
// HeaderService specifies service
// HeaderService specifies service.
HeaderService = "Micro-Service"
// HeaderTimeout specifies timeout of operation
// HeaderTimeout specifies timeout of operation.
HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header
// HeaderAuthorization specifies Authorization header.
HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id
// HeaderXRequestID specifies request id.
HeaderXRequestID = "X-Request-Id"
)

7
metadata/helpers.go Normal file

@@ -0,0 +1,7 @@
package metadata
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}

37
metadata/iterator.go Normal file

@@ -0,0 +1,37 @@
package metadata
import "sort"
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// Next advances the iterator to the next element.
func (iter *Iterator) Next(k *string, v *[]string) bool {
if iter.cur+1 > iter.cnt {
return false
}
if k != nil && v != nil {
*k = iter.keys[iter.cur]
vv := iter.md[*k]
*v = make([]string, len(vv))
copy(*v, vv)
iter.cur++
}
return true
}
// Iterator returns an iterator for iterating over metadata in sorted order.
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
sort.Strings(iter.keys)
return iter
}
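// Illustrative usage sketch (an assumption, not part of this change):
//
//    var k string
//    var v []string
//    for iter := md.Iterator(); iter.Next(&k, &v); {
//        // keys are visited in sorted order; v is a copy of md[k]
//    }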


@@ -1,21 +1,18 @@
package metadata
import (
"context"
"fmt"
"net/textproto"
"sort"
"strings"
)
// defaultMetadataSize used when need to init new Metadata
// defaultMetadataSize is used when initializing new Metadata.
var defaultMetadataSize = 2
// Metadata is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate Metadata.
// Metadata maps keys to values. Use the New, NewWithMetadata and Pairs functions to create it.
type Metadata map[string][]string
// New creates an zero Metadata.
// New creates an empty Metadata with the given initial capacity.
func New(l int) Metadata {
if l == 0 {
l = defaultMetadataSize
@@ -24,7 +21,7 @@ func New(l int) Metadata {
return md
}
// NewWithMetadata creates an Metadata from a given key-value map.
// NewWithMetadata creates a Metadata from the provided key-value map.
func NewWithMetadata(m map[string]string) Metadata {
md := make(Metadata, len(m))
for key, val := range m {
@@ -33,8 +30,7 @@ func NewWithMetadata(m map[string]string) Metadata {
return md
}
// Pairs returns an Metadata formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd.
// Pairs returns a Metadata formed from the key-value mapping. It panics if the length of kv is odd.
func Pairs(kv ...string) Metadata {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
@@ -46,12 +42,19 @@ func Pairs(kv ...string) Metadata {
return md
}
// Len returns the number of items in Metadata.
func (md Metadata) Len() int {
return len(md)
// Join combines multiple Metadatas into a single Metadata.
// The order of values for each key is determined by the order in which the Metadatas are provided to Join.
func Join(mds ...Metadata) Metadata {
out := Metadata{}
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return out
}
// Copy returns a copy of Metadata.
// Copy returns a deep copy of Metadata.
func Copy(src Metadata) Metadata {
out := make(Metadata, len(src))
for k, v := range src {
@@ -60,7 +63,7 @@ func Copy(src Metadata) Metadata {
return out
}
// Copy returns a copy of Metadata.
// Copy returns a deep copy of Metadata.
func (md Metadata) Copy() Metadata {
out := make(Metadata, len(md))
for k, v := range md {
@@ -69,8 +72,28 @@ func (md Metadata) Copy() Metadata {
return out
}
// AsHTTP1 returns a copy of Metadata
// with CanonicalMIMEHeaderKey.
// CopyTo performs a deep copy of Metadata to the out.
func (md Metadata) CopyTo(out Metadata) {
for k, v := range md {
out[k] = copyOf(v)
}
}
// Len returns the number of items in Metadata.
func (md Metadata) Len() int {
return len(md)
}
// AsMap returns a deep copy of Metadata as a map[string]string
func (md Metadata) AsMap() map[string]string {
out := make(map[string]string, len(md))
for k, v := range md {
out[k] = strings.Join(v, ",")
}
return out
}
// AsHTTP1 returns a deep copy of Metadata with keys converted to canonical MIME header key format.
func (md Metadata) AsHTTP1() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
@@ -79,8 +102,7 @@ func (md Metadata) AsHTTP1() map[string][]string {
return out
}
// AsHTTP1 returns a copy of Metadata
// with strings.ToLower.
// AsHTTP2 returns a deep copy of Metadata with keys converted to lowercase.
func (md Metadata) AsHTTP2() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
@@ -89,75 +111,35 @@ func (md Metadata) AsHTTP2() map[string][]string {
return out
}
// CopyTo copies Metadata to out.
func (md Metadata) CopyTo(out Metadata) {
for k, v := range md {
out[k] = copyOf(v)
}
}
// Get obtains the values for a given key.
func (md Metadata) MustGet(k string) []string {
v, ok := md.Get(k)
if !ok {
panic("missing metadata key")
}
return v
}
// Get obtains the values for a given key.
func (md Metadata) Get(k string) ([]string, bool) {
// Get retrieves the values for a given key, checking the key in three formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Get(k string) []string {
v, ok := md[k]
if !ok {
v, ok = md[strings.ToLower(k)]
}
if !ok {
v, ok = md[textproto.CanonicalMIMEHeaderKey(k)]
}
return v, ok
}
// MustGetJoined obtains the values for a given key
// with joined values with "," symbol
func (md Metadata) MustGetJoined(k string) string {
v, ok := md.GetJoined(k)
if !ok {
panic("missing metadata key")
v = md[textproto.CanonicalMIMEHeaderKey(k)]
}
return v
}
// GetJoined obtains the values for a given key
// with joined values with "," symbol
func (md Metadata) GetJoined(k string) (string, bool) {
v, ok := md.Get(k)
if !ok {
return "", ok
}
return strings.Join(v, ","), true
// GetJoined retrieves the values for a given key and joins them into a single string, separated by commas.
func (md Metadata) GetJoined(k string) string {
return strings.Join(md.Get(k), ",")
}
// Set sets the value of a given key with a slice of values.
func (md Metadata) Add(key string, vals ...string) {
// Set assigns the values to the given key.
func (md Metadata) Set(key string, vals ...string) {
if len(vals) == 0 {
return
}
md[key] = vals
}
// Set sets the value of a given key with a slice of values.
func (md Metadata) Set(kvs ...string) {
if len(kvs)%2 == 1 {
panic(fmt.Sprintf("metadata: Set got an odd number of input pairs for metadata: %d", len(kvs)))
}
for i := 0; i < len(kvs); i += 2 {
md[kvs[i]] = append(md[kvs[i]], kvs[i+1])
}
}
// Append adds the values to key k, not overwriting what was already stored at
// that key.
// Append adds values to the existing values for the given key.
func (md Metadata) Append(key string, vals ...string) {
if len(vals) == 0 {
return
@@ -165,7 +147,10 @@ func (md Metadata) Append(key string, vals ...string) {
md[key] = append(md[key], vals...)
}
// Del removes the values for a given keys k.
// Del removes the values for the given keys k. It checks and removes the keys in the following formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Del(k ...string) {
for i := range k {
delete(md, k[i])
@@ -173,321 +158,3 @@ func (md Metadata) Del(k ...string) {
delete(md, textproto.CanonicalMIMEHeaderKey(k[i]))
}
}
// Join joins any number of Metadatas into a single Metadata.
//
// The order of values for each key is determined by the order in which the Metadatas
// containing those values are presented to Join.
func Join(mds ...Metadata) Metadata {
out := Metadata{}
for _, Metadata := range mds {
for k, v := range Metadata {
out[k] = append(out[k], v...)
}
}
return out
}
type (
metadataIncomingKey struct{}
metadataOutgoingKey struct{}
metadataCurrentKey struct{}
)
// NewContext creates a new context with Metadata attached. Metadata must
// not be modified after calling this function.
func NewContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
}
// NewIncomingContext creates a new context with incoming Metadata attached. Metadata must
// not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
}
// NewOutgoingContext creates a new context with outgoing Metadata attached. If used
// in conjunction with AppendOutgoingContext, NewOutgoingContext will
// overwrite any previously-appended metadata. Metadata must not be modified after
// calling this function.
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
}
// AppendContext returns a new context with the provided kv merged
// with any existing metadata in the context. Please refer to the documentation
// of Pairs for a description of kv.
func AppendContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
}
// AppendIncomingContext returns a new context with the provided kv merged
// with any existing metadata in the context. Please refer to the documentation
// of Pairs for a description of kv.
func AppendIncomingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendIncomingContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataIncomingKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md.md, added: added})
}
// AppendOutgoingContext returns a new context with the provided kv merged
// with any existing metadata in the context. Please refer to the documentation
// of Pairs for a description of kv.
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
}
// FromContext returns the metadata in ctx if it exists.
func FromContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustContext returns the metadata in ctx.
func MustContext(ctx context.Context) Metadata {
md, ok := FromContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromIncomingContext returns the incoming metadata in ctx if it exists.
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustIncomingContext returns the incoming metadata in ctx.
func MustIncomingContext(ctx context.Context) Metadata {
md, ok := FromIncomingContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := raw.md[key]; ok {
return copyOf(v)
}
for k, v := range raw.md {
// Case insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
// ValueFromCurrentContext returns the metadata value corresponding to the metadata
// key from the metadata in ctx if it exists. Keys are matched in a case insensitive
// manner.
func ValueFromCurrentContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := md.md[key]; ok {
return copyOf(v)
}
for k, v := range md.md {
// Case insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
// MustOutgoingContext returns the outgoing metadata in ctx.
func MustOutgoingContext(ctx context.Context) Metadata {
md, ok := FromOutgoingContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// ValueFromOutgoingContext returns the metadata value corresponding to the metadata
// key from the outgoing metadata if it exists. Keys are matched in a case insensitive
// manner.
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
return nil
}
if v, ok := md.md[key]; ok {
return copyOf(v)
}
for k, v := range md.md {
// Case insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}
// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
type rawMetadata struct {
md Metadata
added [][]string
}
// Iterator is used to iterate over metadata in sorted key order
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// Next advances the iterator to the next element. When k and v are non-nil,
// the current key and a copy of its values are written into them.
func (iter *Iterator) Next(k *string, v *[]string) bool {
if iter.cur+1 > iter.cnt {
return false
}
if k != nil && v != nil {
*k = iter.keys[iter.cur]
vv := iter.md[*k]
*v = make([]string, len(vv))
copy(*v, vv)
}
iter.cur++
return true
}
// Iterator returns an iterator over the metadata in sorted key order
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
sort.Strings(iter.keys)
return iter
}
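For reference, a minimal usage sketch of the context helpers and iterator above (import path taken from the module used elsewhere in this changeset, go.unistack.org/micro/v4):

package main

import (
	"context"
	"fmt"

	"go.unistack.org/micro/v4/metadata"
)

func main() {
	// the Append* helpers lowercase keys before storing them in the context
	ctx := metadata.AppendIncomingContext(context.Background(), "X-Request-Id", "12345")

	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		panic("missing metadata")
	}
	fmt.Println(md.GetJoined("x-request-id")) // 12345

	// iterate over the metadata in sorted key order
	var k string
	var v []string
	iter := md.Iterator()
	for iter.Next(&k, &v) {
		fmt.Println(k, v)
	}
}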

View File

@@ -5,6 +5,15 @@ import (
"testing"
)
func TestSet(t *testing.T) {
md := Pairs("key1", "val1", "key2", "val2")
md.Set("key1", "val2", "val3")
v := md.GetJoined("key1")
if v != "val2, val3" {
t.Fatal("Set does not work")
}
}
/*
func TestAppendOutgoingContextModify(t *testing.T) {
md := Pairs("key1", "val1")
@@ -19,8 +28,8 @@ func TestAppendOutgoingContextModify(t *testing.T) {
func TestLowercase(t *testing.T) {
md := New(1)
md["x-request-id"] = []string{"12345"}
v, ok := md.GetJoined("X-Request-Id")
if !ok || v == "" {
v := md.GetJoined("X-Request-Id")
if v == "" {
t.Fatalf("metadata invalid %#+v", md)
}
}
@@ -47,33 +56,9 @@ func TestMultipleUsage(t *testing.T) {
_ = omd
}
func TestMetadataSetMultiple(t *testing.T) {
md := New(4)
md.Set("key1", "val1", "key2", "val2")
if v, ok := md.GetJoined("key1"); !ok || v != "val1" {
t.Fatalf("invalid kv %#+v", md)
}
if v, ok := md.GetJoined("key2"); !ok || v != "val2" {
t.Fatalf("invalid kv %#+v", md)
}
}
func TestAppend(t *testing.T) {
ctx := context.Background()
ctx = AppendIncomingContext(ctx, "key1", "val1", "key2", "val2")
md, ok := FromIncomingContext(ctx)
if !ok {
t.Fatal("metadata empty")
}
if _, ok := md.Get("key1"); !ok {
t.Fatal("key1 not found")
}
}
func TestPairs(t *testing.T) {
md := Pairs("key1", "val1", "key2", "val2")
if _, ok := md.Get("key1"); !ok {
if v := md.Get("key1"); v == nil {
t.Fatal("key1 not found")
}
}
@@ -97,7 +82,7 @@ func TestPassing(t *testing.T) {
if !ok {
t.Fatalf("missing metadata from outgoing context")
}
if v, ok := md.Get("Key1"); !ok || v[0] != "Val1" {
if v := md.Get("Key1"); v == nil || v[0] != "Val1" {
t.Fatalf("invalid metadata value %#+v", md)
}
}
@@ -127,21 +112,21 @@ func TestIterator(t *testing.T) {
func TestMedataCanonicalKey(t *testing.T) {
md := New(1)
md.Set("x-request-id", "12345")
v, ok := md.GetJoined("x-request-id")
if !ok {
v := md.GetJoined("x-request-id")
if v == "" {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
}
v, ok = md.GetJoined("X-Request-Id")
if !ok {
v = md.GetJoined("X-Request-Id")
if v == "" {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
}
v, ok = md.GetJoined("X-Request-ID")
if !ok {
v = md.GetJoined("X-Request-ID")
if v == "" {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
@@ -153,8 +138,8 @@ func TestMetadataSet(t *testing.T) {
md.Set("Key", "val")
val, ok := md.GetJoined("Key")
if !ok {
val := md.GetJoined("Key")
if val == "" {
t.Fatal("key Key not found")
}
if val != "val" {
@@ -169,8 +154,8 @@ func TestMetadataDelete(t *testing.T) {
}
md.Del("Baz")
_, ok := md.Get("Baz")
if ok {
v := md.Get("Baz")
if v != nil {
t.Fatal("key Baz not deleted")
}
}
@@ -269,20 +254,6 @@ func TestNewOutgoingContext(t *testing.T) {
}
}
func TestAppendIncomingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendIncomingContext(context.TODO(), "key2", "val2")
nmd, ok := FromIncomingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendIncomingContext not works")
}
if v, ok := nmd.GetJoined("key2"); !ok || v != "val2" {
t.Fatal("AppendIncomingContext not works")
}
}
func TestAppendOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
@@ -292,7 +263,7 @@ func TestAppendOutgoingContext(t *testing.T) {
if nmd == nil || !ok {
t.Fatal("AppendOutgoingContext not works")
}
if v, ok := nmd.GetJoined("key2"); !ok || v != "val2" {
if v := nmd.GetJoined("key2"); v != "val2" {
t.Fatal("AppendOutgoingContext not works")
}
}

View File

@@ -11,8 +11,8 @@ import (
)
type httpProfile struct {
server *http.Server
sync.Mutex
server *http.Server
mu sync.Mutex
running bool
}
@@ -21,8 +21,8 @@ var DefaultAddress = ":6060"
// Start the profiler
func (h *httpProfile) Start() error {
h.Lock()
defer h.Unlock()
h.mu.Lock()
defer h.mu.Unlock()
if h.running {
return nil
@@ -30,9 +30,9 @@ func (h *httpProfile) Start() error {
go func() {
if err := h.server.ListenAndServe(); err != nil {
h.Lock()
h.mu.Lock()
h.running = false
h.Unlock()
h.mu.Unlock()
}
}()
@@ -43,8 +43,8 @@ func (h *httpProfile) Start() error {
// Stop the profiler
func (h *httpProfile) Stop() error {
h.Lock()
defer h.Unlock()
h.mu.Lock()
defer h.mu.Unlock()
if !h.running {
return nil
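
This hunk, like several below, replaces an embedded mutex with an unexported field. A minimal standalone sketch of the difference, not tied to this package:

package main

import "sync"

// embedded promotes Lock and Unlock into the exported method set,
// so any caller can lock the value directly.
type embedded struct {
	sync.Mutex
	n int
}

// counter keeps the mutex as an implementation detail.
type counter struct {
	mu sync.Mutex
	n  int
}

func (c *counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	var e embedded
	e.Lock() // promoted method, part of the public API surface
	e.n++
	e.Unlock()

	var c counter
	c.Inc() // the mutex itself is not reachable through the exported API
}

With a named field the lock can only be taken by the package's own methods, which keeps the locking discipline internal.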

View File

@@ -17,7 +17,7 @@ type profiler struct {
cpuFile *os.File
memFile *os.File
opts profile.Options
sync.Mutex
mu sync.Mutex
running bool
}
@@ -39,8 +39,8 @@ func (p *profiler) writeHeap(f *os.File) {
}
func (p *profiler) Start() error {
p.Lock()
defer p.Unlock()
p.mu.Lock()
defer p.mu.Unlock()
if p.running {
return nil
@@ -86,8 +86,8 @@ func (p *profiler) Start() error {
}
func (p *profiler) Stop() error {
p.Lock()
defer p.Unlock()
p.mu.Lock()
defer p.mu.Unlock()
select {
case <-p.exit:

View File

@@ -33,7 +33,7 @@ type memory struct {
records map[string]services
watchers map[string]*watcher
opts register.Options
sync.RWMutex
mu sync.RWMutex
}
// services is a KV map with service name as the key and a map of records as the value
@@ -57,7 +57,7 @@ func (m *memory) ttlPrune() {
defer prune.Stop()
for range prune.C {
m.Lock()
m.mu.Lock()
for namespace, services := range m.records {
for service, versions := range services {
for version, record := range versions {
@@ -72,24 +72,24 @@ func (m *memory) ttlPrune() {
}
}
}
m.Unlock()
m.mu.Unlock()
}
}
func (m *memory) sendEvent(r *register.Result) {
m.RLock()
m.mu.RLock()
watchers := make([]*watcher, 0, len(m.watchers))
for _, w := range m.watchers {
watchers = append(watchers, w)
}
m.RUnlock()
m.mu.RUnlock()
for _, w := range watchers {
select {
case <-w.exit:
m.Lock()
m.mu.Lock()
delete(m.watchers, w.id)
m.Unlock()
m.mu.Unlock()
default:
select {
case w.res <- r:
@@ -113,8 +113,8 @@ func (m *memory) Init(opts ...register.Option) error {
}
// add services
m.Lock()
defer m.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
return nil
}
@@ -124,8 +124,8 @@ func (m *memory) Options() register.Options {
}
func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error {
m.Lock()
defer m.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
options := register.NewRegisterOptions(opts...)
@@ -197,8 +197,8 @@ func (m *memory) Register(_ context.Context, s *register.Service, opts ...regist
}
func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error {
m.Lock()
defer m.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
options := register.NewDeregisterOptions(opts...)
@@ -264,9 +264,9 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
// if it's a wildcard domain, return from all domains
if options.Namespace == register.WildcardNamespace {
m.RLock()
m.mu.RLock()
recs := m.records
m.RUnlock()
m.mu.RUnlock()
var services []*register.Service
@@ -286,8 +286,8 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
return services, nil
}
m.RLock()
defer m.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
// check the domain exists
services, ok := m.records[options.Namespace]
@@ -319,9 +319,9 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
// if it's a wildcard domain, list from all domains
if options.Namespace == register.WildcardNamespace {
m.RLock()
m.mu.RLock()
recs := m.records
m.RUnlock()
m.mu.RUnlock()
var services []*register.Service
@@ -336,8 +336,8 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
return services, nil
}
m.RLock()
defer m.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
// ensure the domain exists
services, ok := m.records[options.Namespace]
@@ -371,9 +371,9 @@ func (m *memory) Watch(ctx context.Context, opts ...register.WatchOption) (regis
wo: wo,
}
m.Lock()
m.mu.Lock()
m.watchers[w.id] = w
m.Unlock()
m.mu.Unlock()
return w, nil
}

View File

@@ -69,7 +69,8 @@ type Service struct {
type Node struct {
Metadata metadata.Metadata `json:"metadata,omitempty"`
ID string `json:"id,omitempty"`
Address string `json:"address,omitempty"`
// Address may also be prefixed with a scheme, e.g. grpc://xx.xx.xx.xx:1234
Address string `json:"address,omitempty"`
}
// Option func signature
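
A hypothetical illustration of the documented Address field (the register import path is assumed from the module layout):

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/register"
)

func main() {
	node := &register.Node{
		ID:      "service-1-0",
		Address: "grpc://10.1.2.3:9090", // scheme-prefixed address, as documented above
	}
	fmt.Println(node.Address)
}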

View File

@@ -51,13 +51,13 @@ func (r *rpcHandler) Options() HandlerOptions {
}
type noopServer struct {
h Handler
wg *sync.WaitGroup
rsvc *register.Service
handlers map[string]Handler
exit chan chan error
opts Options
sync.RWMutex
h Handler
wg *sync.WaitGroup
rsvc *register.Service
handlers map[string]Handler
exit chan chan error
opts Options
mu sync.RWMutex
registered bool
started bool
}
@@ -125,10 +125,10 @@ func (n *noopServer) String() string {
//nolint:gocyclo
func (n *noopServer) Register() error {
n.RLock()
n.mu.RLock()
rsvc := n.rsvc
config := n.opts
n.RUnlock()
n.mu.RUnlock()
// if service already filled, reuse it and return early
if rsvc != nil {
@@ -144,9 +144,9 @@ func (n *noopServer) Register() error {
return err
}
n.RLock()
n.mu.RLock()
registered := n.registered
n.RUnlock()
n.mu.RUnlock()
if !registered {
if config.Logger.V(logger.InfoLevel) {
@@ -164,8 +164,8 @@ func (n *noopServer) Register() error {
return nil
}
n.Lock()
defer n.Unlock()
n.mu.Lock()
defer n.mu.Unlock()
n.registered = true
if cacheService {
@@ -178,9 +178,9 @@ func (n *noopServer) Register() error {
func (n *noopServer) Deregister() error {
var err error
n.RLock()
n.mu.RLock()
config := n.opts
n.RUnlock()
n.mu.RUnlock()
service, err := NewRegisterService(n)
if err != nil {
@@ -195,29 +195,29 @@ func (n *noopServer) Deregister() error {
return err
}
n.Lock()
n.mu.Lock()
n.rsvc = nil
if !n.registered {
n.Unlock()
n.mu.Unlock()
return nil
}
n.registered = false
n.Unlock()
n.mu.Unlock()
return nil
}
//nolint:gocyclo
func (n *noopServer) Start() error {
n.RLock()
n.mu.RLock()
if n.started {
n.RUnlock()
n.mu.RUnlock()
return nil
}
config := n.Options()
n.RUnlock()
n.mu.RUnlock()
// use 127.0.0.1 to avoid scan of all network interfaces
addr, err := maddr.Extract("127.0.0.1")
@@ -235,11 +235,11 @@ func (n *noopServer) Start() error {
config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address)
}
n.Lock()
n.mu.Lock()
if len(config.Advertise) == 0 {
config.Advertise = config.Address
}
n.Unlock()
n.mu.Unlock()
// use RegisterCheck func before register
// nolint: nestif
@@ -273,9 +273,9 @@ func (n *noopServer) Start() error {
select {
// register self on interval
case <-t.C:
n.RLock()
n.mu.RLock()
registered := n.registered
n.RUnlock()
n.mu.RUnlock()
rerr := config.RegisterCheck(config.Context)
// nolint: nestif
if rerr != nil && registered {
@@ -332,29 +332,29 @@ func (n *noopServer) Start() error {
}()
// mark the server as started
n.Lock()
n.mu.Lock()
n.started = true
n.Unlock()
n.mu.Unlock()
return nil
}
func (n *noopServer) Stop() error {
n.RLock()
n.mu.RLock()
if !n.started {
n.RUnlock()
n.mu.RUnlock()
return nil
}
n.RUnlock()
n.mu.RUnlock()
ch := make(chan error)
n.exit <- ch
err := <-ch
n.Lock()
n.mu.Lock()
n.rsvc = nil
n.started = false
n.Unlock()
n.mu.Unlock()
return err
}

View File

@@ -96,9 +96,10 @@ func RegisterHandler(s server.Server, h interface{}, opts ...server.HandlerOptio
}
type service struct {
done chan struct{}
opts Options
sync.RWMutex
done chan struct{}
opts Options
mu sync.RWMutex
stopped bool
}
// NewService creates and returns a new Service based on the packages within.
@@ -320,9 +321,9 @@ func (s *service) Health() bool {
func (s *service) Start() error {
var err error
s.RLock()
s.mu.RLock()
config := s.opts
s.RUnlock()
s.mu.RUnlock()
for _, cfg := range s.opts.Configs {
if cfg.Options().Struct == nil {
@@ -379,9 +380,9 @@ func (s *service) Start() error {
}
func (s *service) Stop() error {
s.RLock()
s.mu.RLock()
config := s.opts
s.RUnlock()
s.mu.RUnlock()
if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stopping [service] %s", s.Name()))
@@ -424,7 +425,7 @@ func (s *service) Stop() error {
}
}
close(s.done)
s.notifyShutdown()
return nil
}
@@ -448,10 +449,23 @@ func (s *service) Run() error {
return err
}
// wait on context cancel
<-s.done
return s.Stop()
return nil
}
// notifyShutdown marks the service as stopped and closes the done channel.
// It ensures the channel is closed only once, preventing multiple closures.
func (s *service) notifyShutdown() {
s.mu.Lock()
if s.stopped {
s.mu.Unlock()
return
}
s.stopped = true
s.mu.Unlock()
close(s.done)
}
type Namer interface {
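
notifyShutdown above guards the close with a mutex and a stopped flag. As a design note only, the same once-only close could be expressed with sync.Once; a minimal sketch:

package main

import "sync"

type svc struct {
	done     chan struct{}
	stopOnce sync.Once
}

// notifyShutdown closes done exactly once, even if Stop is called
// repeatedly or from multiple goroutines.
func (s *svc) notifyShutdown() {
	s.stopOnce.Do(func() { close(s.done) })
}

func main() {
	s := &svc{done: make(chan struct{})}
	s.notifyShutdown()
	s.notifyShutdown() // no-op: the channel is already closed
	<-s.done
}

The repository keeps the explicit stopped flag instead, which also lets the tests below assert on the shutdown state.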

View File

@@ -3,7 +3,9 @@ package micro
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/config"
@@ -737,3 +739,41 @@ func Test_getNameIndex(t *testing.T) {
}
}
*/
func TestServiceShutdown(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatalf("service shutdown failed: %v", r)
}
}()
s, ok := NewService().(*service)
require.NotNil(t, s)
require.True(t, ok)
require.NoError(t, s.Start())
require.False(t, s.stopped)
require.NoError(t, s.Stop())
require.True(t, s.stopped)
}
func TestServiceMultipleShutdowns(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatalf("service shutdown failed: %v", r)
}
}()
s := NewService()
go func() {
time.Sleep(10 * time.Millisecond)
// first call
require.NoError(t, s.Stop())
// duplicate call
require.NoError(t, s.Stop())
}()
require.NoError(t, s.Run())
}

View File

@@ -139,7 +139,7 @@ func (n *noopStore) fnExists(ctx context.Context, _ string, _ ...ExistsOption) e
return ctx.Err()
default:
}
return nil
return ErrNotFound
}
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {
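
With Exists on the noop store now returning ErrNotFound, callers can branch on the sentinel error. A minimal sketch against the store interface; the Exists signature is inferred from the hunk above and the helper name is hypothetical:

package main

import (
	"context"
	"errors"

	"go.unistack.org/micro/v4/store"
)

// keyExists is a hypothetical helper that works with any store.Store,
// including the patched noop implementation.
func keyExists(ctx context.Context, s store.Store, key string) (bool, error) {
	err := s.Exists(ctx, key)
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, store.ErrNotFound):
		return false, nil
	default:
		return false, err
	}
}

func main() {}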

View File

@@ -9,7 +9,7 @@ type memorySync struct {
locks map[string]*memoryLock
options Options
mtx gosync.RWMutex
mu gosync.RWMutex
}
type memoryLock struct {
@@ -74,7 +74,7 @@ func (m *memorySync) Options() Options {
func (m *memorySync) Lock(id string, opts ...LockOption) error {
// lock our access
m.mtx.Lock()
m.mu.Lock()
var options LockOptions
for _, o := range opts {
@@ -90,11 +90,11 @@ func (m *memorySync) Lock(id string, opts ...LockOption) error {
release: make(chan bool),
}
// unlock
m.mtx.Unlock()
m.mu.Unlock()
return nil
}
m.mtx.Unlock()
m.mu.Unlock()
// set wait time
var wait <-chan time.Time
@@ -124,12 +124,12 @@ lockLoop:
// wait for the lock to be released
select {
case <-lk.release:
m.mtx.Lock()
m.mu.Lock()
// someone locked before us
lk, ok = m.locks[id]
if ok {
m.mtx.Unlock()
m.mu.Unlock()
continue
}
@@ -141,7 +141,7 @@ lockLoop:
release: make(chan bool),
}
m.mtx.Unlock()
m.mu.Unlock()
break lockLoop
case <-ttl:
@@ -160,8 +160,8 @@ lockLoop:
}
func (m *memorySync) Unlock(id string) error {
m.mtx.Lock()
defer m.mtx.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
lk, ok := m.locks[id]
// no lock exists

View File

@@ -46,6 +46,10 @@ func (s memoryStringer) String() string {
return s.s
}
func (t *Tracer) Enabled() bool {
return t.opts.Enabled
}
func (t *Tracer) Flush(_ context.Context) error {
return nil
}
@@ -89,6 +93,10 @@ func (s *Span) Tracer() tracer.Tracer {
return s.tracer
}
func (s *Span) IsRecording() bool {
return true
}
type Event struct {
name string
labels []interface{}

View File

@@ -4,7 +4,7 @@ import (
"context"
"time"
"go.unistack.org/micro/v4/util/id"
"github.com/google/uuid"
)
var _ Tracer = (*noopTracer)(nil)
@@ -18,6 +18,12 @@ func (t *noopTracer) Spans() []Span {
return t.spans
}
var uuidNil = uuid.Nil.String()
func (t *noopTracer) Enabled() bool {
return t.opts.Enabled
}
func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
options := NewSpanOptions(opts...)
span := &noopSpan{
@@ -28,8 +34,8 @@ func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption)
labels: options.Labels,
kind: options.Kind,
}
span.spanID.s, _ = id.New()
span.traceID.s, _ = id.New()
span.spanID.s = uuidNil
span.traceID.s = uuidNil
if span.ctx == nil {
span.ctx = context.Background()
}
@@ -120,6 +126,10 @@ func (s *noopSpan) SetStatus(st SpanStatus, msg string) {
s.statusMsg = msg
}
func (s *noopSpan) IsRecording() bool {
return false
}
// NewTracer returns a new noop tracer
func NewTracer(opts ...Option) Tracer {
return &noopTracer{

View File

@@ -142,6 +142,8 @@ type Options struct {
Name string
// ContextAttrFuncs contains funcs that provides tracing
ContextAttrFuncs []ContextAttrFunc
// Enabled specifies whether tracing is enabled
Enabled bool
}
// Option func signature
@@ -181,6 +183,7 @@ func NewOptions(opts ...Option) Options {
Logger: logger.DefaultLogger,
Context: context.Background(),
ContextAttrFuncs: DefaultContextAttrFuncs,
Enabled: true,
}
for _, o := range opts {
o(&options)
@@ -194,3 +197,10 @@ func Name(n string) Option {
o.Name = n
}
}
// Disabled disables the tracer when b is true
func Disabled(b bool) Option {
return func(o *Options) {
o.Enabled = !b
}
}
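
A minimal sketch of the new Enabled status and Disabled option, using the NewTracer constructor shown above (tracer import path assumed from the module layout):

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/tracer"
)

func main() {
	// Enabled defaults to true in NewOptions
	tr := tracer.NewTracer()
	fmt.Println(tr.Enabled()) // true

	// Disabled(true) flips Options.Enabled to false
	tr = tracer.NewTracer(tracer.Disabled(true))
	fmt.Println(tr.Enabled()) // false
}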

View File

@@ -51,6 +51,8 @@ type Tracer interface {
// Extract(ctx context.Context)
// Flush flushes spans
Flush(ctx context.Context) error
// Enabled returns the tracer status
Enabled() bool
}
type Span interface {
@@ -78,4 +80,6 @@ type Span interface {
TraceID() string
// SpanID returns span id
SpanID() string
// IsRecording returns the recording state of the Span.
IsRecording() bool
}

View File

@@ -67,6 +67,12 @@ func (b *SeekerBuffer) Close() error {
return nil
}
// Reset clears all the data out of the buffer and sets the read position to 0.
func (b *SeekerBuffer) Reset() {
b.data = nil
b.pos = 0
}
// Len returns the length of data remaining to be read.
func (b *SeekerBuffer) Len() int {
return len(b.data[b.pos:])

View File

@@ -137,7 +137,7 @@ type cache struct {
opts Options
sync.RWMutex
mu sync.RWMutex
}
type cacheEntry struct {
@@ -171,7 +171,7 @@ func (c *cache) put(req string, res string) {
ttl = c.opts.MaxCacheTTL
}
c.Lock()
c.mu.Lock()
if c.entries == nil {
c.entries = make(map[string]cacheEntry)
}
@@ -207,7 +207,7 @@ func (c *cache) put(req string, res string) {
}
c.opts.Meter.Counter(semconv.CacheItemsTotal, "type", "dns").Inc()
c.Unlock()
c.mu.Unlock()
}
func (c *cache) get(req string) (res string) {
@@ -219,8 +219,8 @@ func (c *cache) get(req string) (res string) {
return ""
}
c.RLock()
defer c.RUnlock()
c.mu.RLock()
defer c.mu.RUnlock()
if c.entries == nil {
return ""

View File

@@ -20,7 +20,7 @@ type dnsConn struct {
ibuf bytes.Buffer
obuf bytes.Buffer
sync.Mutex
mu sync.Mutex
}
type roundTripper func(ctx context.Context, req string) (res string, err error)
@@ -42,15 +42,15 @@ func (c *dnsConn) Read(b []byte) (n int, err error) {
}
func (c *dnsConn) Write(b []byte) (n int, err error) {
c.Lock()
defer c.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
return c.ibuf.Write(b)
}
func (c *dnsConn) Close() error {
c.Lock()
c.mu.Lock()
cancel := c.cancel
c.Unlock()
c.mu.Unlock()
if cancel != nil {
cancel()
@@ -78,9 +78,9 @@ func (c *dnsConn) SetDeadline(t time.Time) error {
}
func (c *dnsConn) SetReadDeadline(t time.Time) error {
c.Lock()
c.mu.Lock()
c.deadline = t
c.Unlock()
c.mu.Unlock()
return nil
}
@@ -90,8 +90,8 @@ func (c *dnsConn) SetWriteDeadline(_ time.Time) error {
}
func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
c.Lock()
defer c.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
// drain the output buffer
if c.obuf.Len() > 0 {
@@ -119,8 +119,8 @@ func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
}
func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
c.Lock()
defer c.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
c.obuf.WriteByte(byte(len(str) >> 8))
c.obuf.WriteByte(byte(len(str)))
c.obuf.WriteString(str)
@@ -128,8 +128,8 @@ func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
}
func (c *dnsConn) childContext() (context.Context, context.CancelFunc) {
c.Lock()
defer c.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
if c.ctx == nil {
c.ctx, c.cancel = context.WithCancel(context.Background())
}

View File

@@ -52,7 +52,7 @@ type clientTracer struct {
tr tracer.Tracer
activeHooks map[string]context.Context
root tracer.Span
mtx sync.Mutex
mu sync.Mutex
}
func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace {
@@ -83,8 +83,8 @@ func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrac
}
func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
ct.mu.Lock()
defer ct.mu.Unlock()
if hookCtx, found := ct.activeHooks[hook]; !found {
var sp tracer.Span
@@ -104,8 +104,8 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
}
func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
ct.mu.Lock()
defer ct.mu.Unlock()
if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif
if span, ok := tracer.SpanFromContext(ctx); ok {
if err != nil {
@@ -136,8 +136,8 @@ func (ct *clientTracer) getParentContext(hook string) context.Context {
}
func (ct *clientTracer) span(hook string) (tracer.Span, bool) {
ct.mtx.Lock()
defer ct.mtx.Unlock()
ct.mu.Lock()
defer ct.mu.Unlock()
if ctx, ok := ct.activeHooks[hook]; ok {
return tracer.SpanFromContext(ctx)
}

View File

@@ -2,12 +2,8 @@ package id
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"time"
uuidv8 "github.com/ash3in/uuidv8"
"github.com/google/uuid"
nanoid "github.com/matoous/go-nanoid"
)
@@ -25,6 +21,7 @@ type Type int
const (
TypeUnspecified Type = iota
TypeNanoid
TypeUUIDv7
TypeUUIDv8
)
@@ -58,14 +55,14 @@ func (g *Generator) New() (string, error) {
}
return nanoid.Generate(g.opts.NanoidAlphabet, g.opts.NanoidSize)
case TypeUUIDv8:
timestamp := uint64(time.Now().UnixNano())
clockSeq := make([]byte, 2)
if _, err := rand.Read(clockSeq); err != nil {
return "", fmt.Errorf("failed to generate random clock sequence: %w", err)
case TypeUUIDv7:
uid, err := uuid.NewV7()
if err != nil {
return "", err
}
clockSeqValue := binary.BigEndian.Uint16(clockSeq) & 0x0FFF // Mask to 12 bits
return uuidv8.NewWithParams(timestamp, clockSeqValue, g.opts.UUIDNode[:], uuidv8.TimestampBits48)
return uid.String(), nil
case TypeUUIDv8:
return "", errors.New("unsupported uuid version v8")
}
return "", errors.New("invalid option, Type unspecified")
}
@@ -82,16 +79,15 @@ func New(opts ...Option) (string, error) {
if options.NanoidSize <= 0 {
return "", errors.New("invalid option, NanoidSize must be positive integer")
}
return nanoid.Generate(options.NanoidAlphabet, options.NanoidSize)
case TypeUUIDv8:
timestamp := uint64(time.Now().UnixNano())
clockSeq := make([]byte, 2)
if _, err := rand.Read(clockSeq); err != nil {
return "", fmt.Errorf("failed to generate random clock sequence: %w", err)
case TypeUUIDv7:
uid, err := uuid.NewV7()
if err != nil {
return "", err
}
clockSeqValue := binary.BigEndian.Uint16(clockSeq) & 0x0FFF // Mask to 12 bits
return uuidv8.NewWithParams(timestamp, clockSeqValue, options.UUIDNode[:], uuidv8.TimestampBits48)
return uid.String(), nil
case TypeUUIDv8:
return "", errors.New("unsupported uuid version v8")
}
return "", errors.New("invalid option, Type unspecified")
@@ -145,7 +141,7 @@ func WithUUIDNode(node [6]byte) Option {
// NewOptions returns new Options struct filled by opts
func NewOptions(opts ...Option) Options {
options := Options{
Type: TypeUUIDv8,
Type: TypeUUIDv7,
NanoidAlphabet: DefaultNanoidAlphabet,
NanoidSize: DefaultNanoidSize,
UUIDNode: generatedNode,
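
With this change the default generator type becomes TypeUUIDv7. A minimal usage sketch; id.MustNew is the panicking variant already used elsewhere in the tree (e.g. util/ring):

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/util/id"
)

func main() {
	// New now produces a UUIDv7 string by default
	uid, err := id.New()
	if err != nil {
		panic(err)
	}
	fmt.Println(uid)

	// MustNew panics on error instead of returning it
	fmt.Println(id.MustNew())
}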

View File

@@ -489,35 +489,74 @@ func URLMap(query string) (map[string]interface{}, error) {
return mp.(map[string]interface{}), nil
}
// FlattenMap expand key.subkey to nested map
func FlattenMap(a map[string]interface{}) map[string]interface{} {
// preprocess map
nb := make(map[string]interface{}, len(a))
for k, v := range a {
ps := strings.Split(k, ".")
if len(ps) == 1 {
nb[k] = v
// FlattenMap expands keys written in dot notation ("key.subkey") into a nested map.
// In case of key conflicts, all nested levels will be discarded in favor of the first-level key.
//
// Example #1:
//
// Input:
// {
// "user.name": "alex",
// "user.document.id": "document_id",
// "user.document.number": "document_number"
// }
// Output:
// {
// "user": {
// "name": "alex",
// "document": {
// "id": "document_id",
// "number": "document_number"
// }
// }
// }
//
// Example #2 (with conflicts):
//
// Input:
// {
// "user": "alex",
// "user.document.id": "document_id",
// "user.document.number": "document_number"
// }
// Output:
// {
// "user": "alex"
// }
func FlattenMap(input map[string]interface{}) map[string]interface{} {
result := make(map[string]interface{})
for k, v := range input {
parts := strings.Split(k, ".")
if len(parts) == 1 {
result[k] = v
continue
}
em := make(map[string]interface{})
em[ps[len(ps)-1]] = v
for i := len(ps) - 2; i > 0; i-- {
nm := make(map[string]interface{})
nm[ps[i]] = em
em = nm
}
if vm, ok := nb[ps[0]]; ok {
// nested map
nm := vm.(map[string]interface{})
for vk, vv := range em {
nm[vk] = vv
current := result
for i, part := range parts {
// last element in the path
if i == len(parts)-1 {
current[part] = v
break
}
// initialize the map for the current level if it does not exist
if _, ok := current[part]; !ok {
current[part] = make(map[string]interface{})
}
if nested, ok := current[part].(map[string]interface{}); ok {
current = nested // continue to the nested map
} else {
break // if current element is not a map, ignore it
}
nb[ps[0]] = nm
} else {
nb[ps[0]] = em
}
}
return nb
return result
}
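A short usage sketch mirroring the doc comment's first example; the import path and alias follow the test file below:

package main

import (
	"fmt"

	rutil "go.unistack.org/micro/v4/util/reflect"
)

func main() {
	flat := map[string]interface{}{
		"user.name":        "alex",
		"user.document.id": "document_id",
	}
	// dot-notated keys are expanded into nested maps
	nested := rutil.FlattenMap(flat)
	fmt.Printf("%#v\n", nested)
}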
/*

View File

@@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
rutil "go.unistack.org/micro/v4/util/reflect"
)
@@ -319,3 +320,140 @@ func TestIsZero(t *testing.T) {
// t.Logf("XX %#+v\n", ok)
}
func TestFlattenMap(t *testing.T) {
tests := []struct {
name string
input map[string]interface{}
expected map[string]interface{}
}{
{
name: "empty map",
input: map[string]interface{}{},
expected: map[string]interface{}{},
},
{
name: "nil map",
input: nil,
expected: map[string]interface{}{},
},
{
name: "single level",
input: map[string]interface{}{
"username": "username",
"password": "password",
},
expected: map[string]interface{}{
"username": "username",
"password": "password",
},
},
{
name: "two level",
input: map[string]interface{}{
"order_id": "order_id",
"user.name": "username",
"user.password": "password",
},
expected: map[string]interface{}{
"order_id": "order_id",
"user": map[string]interface{}{
"name": "username",
"password": "password",
},
},
},
{
name: "three level",
input: map[string]interface{}{
"order_id": "order_id",
"user.name": "username",
"user.password": "password",
"user.document.id": "document_id",
"user.document.number": "document_number",
},
expected: map[string]interface{}{
"order_id": "order_id",
"user": map[string]interface{}{
"name": "username",
"password": "password",
"document": map[string]interface{}{
"id": "document_id",
"number": "document_number",
},
},
},
},
{
name: "four level",
input: map[string]interface{}{
"order_id": "order_id",
"user.name": "username",
"user.password": "password",
"user.document.id": "document_id",
"user.document.number": "document_number",
"user.info.permissions.read": "available",
"user.info.permissions.write": "available",
},
expected: map[string]interface{}{
"order_id": "order_id",
"user": map[string]interface{}{
"name": "username",
"password": "password",
"document": map[string]interface{}{
"id": "document_id",
"number": "document_number",
},
"info": map[string]interface{}{
"permissions": map[string]interface{}{
"read": "available",
"write": "available",
},
},
},
},
},
{
name: "key conflicts",
input: map[string]interface{}{
"user": "user",
"user.name": "username",
"user.password": "password",
},
expected: map[string]interface{}{
"user": "user",
},
},
{
name: "overwriting conflicts",
input: map[string]interface{}{
"order_id": "order_id",
"user.document.id": "document_id",
"user.document.number": "document_number",
"user.info.address": "address",
"user.info.phone": "phone",
},
expected: map[string]interface{}{
"order_id": "order_id",
"user": map[string]interface{}{
"document": map[string]interface{}{
"id": "document_id",
"number": "document_number",
},
"info": map[string]interface{}{
"address": "address",
"phone": "phone",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for range 100 { // repeat to rule out map iteration order affecting the result
require.Equal(t, tt.expected, rutil.FlattenMap(tt.input))
}
})
}
}

View File

@@ -14,7 +14,7 @@ type Buffer struct {
vals []*Entry
size int
sync.RWMutex
mu sync.RWMutex
}
// Entry is ring buffer data entry
@@ -35,8 +35,8 @@ type Stream struct {
// Put adds a new value to ring buffer
func (b *Buffer) Put(v interface{}) {
b.Lock()
defer b.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
// append to values
entry := &Entry{
@@ -63,8 +63,8 @@ func (b *Buffer) Put(v interface{}) {
// Get returns the last n entries
func (b *Buffer) Get(n int) []*Entry {
b.RLock()
defer b.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
// reset any invalid values
if n > len(b.vals) || n < 0 {
@@ -80,8 +80,8 @@ func (b *Buffer) Get(n int) []*Entry {
// Since returns the entries since a specific time
func (b *Buffer) Since(t time.Time) []*Entry {
b.RLock()
defer b.RUnlock()
b.mu.RLock()
defer b.mu.RUnlock()
// return all the values
if t.IsZero() {
@@ -109,8 +109,8 @@ func (b *Buffer) Since(t time.Time) []*Entry {
// Stream logs from the buffer
// Close the channel when you want to stop
func (b *Buffer) Stream() (<-chan *Entry, chan bool) {
b.Lock()
defer b.Unlock()
b.mu.Lock()
defer b.mu.Unlock()
entries := make(chan *Entry, 128)
id := id.MustNew()

View File

@@ -24,7 +24,7 @@ type stream struct {
err error
request *request
sync.RWMutex
mu sync.RWMutex
}
type request struct {
@@ -57,9 +57,9 @@ func (s *stream) Request() server.Request {
func (s *stream) Send(v interface{}) error {
err := s.Stream.SendMsg(v)
if err != nil {
s.Lock()
s.mu.Lock()
s.err = err
s.Unlock()
s.mu.Unlock()
}
return err
}
@@ -68,17 +68,17 @@ func (s *stream) Send(v interface{}) error {
func (s *stream) Recv(v interface{}) error {
err := s.Stream.RecvMsg(v)
if err != nil {
s.Lock()
s.mu.Lock()
s.err = err
s.Unlock()
s.mu.Unlock()
}
return err
}
// Error returns error that stream holds
func (s *stream) Error() error {
s.RLock()
defer s.RUnlock()
s.mu.RLock()
defer s.mu.RUnlock()
return s.err
}

View File

@@ -6,7 +6,7 @@ import (
"strconv"
"time"
"gopkg.in/yaml.v3"
"github.com/goccy/go-yaml"
)
type Duration int64
@@ -58,9 +58,9 @@ func (d Duration) MarshalYAML() (interface{}, error) {
return time.Duration(d).String(), nil
}
func (d *Duration) UnmarshalYAML(n *yaml.Node) error {
func (d *Duration) UnmarshalYAML(data []byte) error {
var v interface{}
if err := yaml.Unmarshal([]byte(n.Value), &v); err != nil {
if err := yaml.Unmarshal(data, &v); err != nil {
return err
}
switch value := v.(type) {
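
A minimal sketch of decoding a Duration with the new github.com/goccy/go-yaml dependency. The Duration package import path is assumed (it is not shown in the hunk), and the printed result assumes the string case parses via time.ParseDuration as before:

package main

import (
	"fmt"
	"time"

	"github.com/goccy/go-yaml"

	mtime "go.unistack.org/micro/v4/util/time" // path assumed
)

type config struct {
	Timeout mtime.Duration `yaml:"timeout"`
}

func main() {
	var c config
	// goccy/go-yaml hands the raw scalar bytes to UnmarshalYAML([]byte)
	if err := yaml.Unmarshal([]byte("timeout: 5s"), &c); err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(c.Timeout)) // 5s, assuming the string case uses time.ParseDuration
}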

View File

@@ -6,7 +6,7 @@ import (
"testing"
"time"
"gopkg.in/yaml.v3"
"github.com/goccy/go-yaml"
)
func TestMarshalYAML(t *testing.T) {