Compare commits


15 Commits

SHA1 Message Date
e6e8bcfa5d update workflows
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2022-03-05 19:08:02 +03:00
11700d0482 update workflows
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-10-27 17:47:11 +03:00
8e582cc1c0 update import paths
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-10-27 00:48:31 +03:00
994882e1b2 update workflow
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-09-21 21:46:31 +03:00
5b28857fcd update workflows
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-09-17 07:47:32 +03:00
f94f94b416 Revert "update workflows"
This reverts commit 8e4b7d1b54.
2021-09-17 07:43:49 +03:00
8e4b7d1b54 update workflows
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-09-17 07:41:27 +03:00
13bb13f432 enable automerge
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-09-16 10:46:35 +03:00
f13d3917d9 Update README.md
2021-09-06 10:27:02 +03:00
7916dafb4d intermediate fixes for subscriber
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-08-04 16:40:48 +03:00
83b037df20 try to fix ack err cases
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-07-29 23:50:26 +03:00
5934963d81 update for latest micro
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-07-26 09:41:11 +03:00
2a935a51f3 improve consumer speed (#41)
* improve consumer speed

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-07-20 15:05:42 +03:00
f39888f6d4 support sync and async commit offsets
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-07-18 13:45:12 +03:00
b15586163e speedup consumer commit offsets
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2021-07-18 13:28:13 +03:00
16 changed files with 828 additions and 206 deletions

19
.github/renovate.json

@@ -1,19 +0,0 @@
{
  "extends": [
    "config:base"
  ],
  "packageRules": [
    {
      "matchUpdateTypes": ["minor", "patch", "pin", "digest"],
      "automerge": true
    },
    {
      "groupName": "all deps",
      "separateMajorMinor": true,
      "groupSlug": "all",
      "packagePatterns": [
        "*"
      ]
    }
  ]
}

13
.github/stale.sh

@@ -1,13 +0,0 @@
#!/bin/bash -ex
export PATH=$PATH:$(pwd)/bin
export GO111MODULE=on
export GOBIN=$(pwd)/bin
#go get github.com/rvflash/goup@v0.4.1
#goup -v ./...
#go get github.com/psampaz/go-mod-outdated@v0.6.0
go list -u -m -mod=mod -json all | go-mod-outdated -update -direct -ci || true
#go list -u -m -json all | go-mod-outdated -update

20
.github/workflows/autoapprove.yml

@@ -0,0 +1,20 @@
name: "autoapprove"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  autoapprove:
    runs-on: ubuntu-latest
    steps:
      - name: approve
        uses: hmarr/auto-approve-action@v2
        if: github.actor == 'vtolstov' || github.actor == 'dependabot[bot]'
        id: approve
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

21
.github/workflows/automerge.yml

@@ -0,0 +1,21 @@
name: "automerge"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  automerge:
    runs-on: ubuntu-latest
    if: github.actor == 'vtolstov'
    steps:
      - name: merge
        id: merge
        run: gh pr merge --auto --merge "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.TOKEN}}


@@ -3,6 +3,7 @@ on:
push:
branches:
- master
- v3
jobs:
test:
name: test
@@ -13,7 +14,7 @@ jobs:
with:
go-version: 1.16
- name: checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: cache
uses: actions/cache@v2
with:
@@ -31,9 +32,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: lint
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3.1.0
continue-on-error: true
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.

78
.github/workflows/codeql-analysis.yml

@@ -0,0 +1,78 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "codeql"

on:
  workflow_run:
    workflows: ["prbuild"]
    types:
      - completed
  push:
    branches: [ master, v3 ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master, v3 ]
  schedule:
    - cron: '34 1 * * 0'

jobs:
  analyze:
    name: analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: setup
        uses: actions/setup-go@v2
        with:
          go-version: 1.16

      # Initializes the CodeQL tools for scanning.
      - name: init
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: autobuild
        uses: github/codeql-action/autobuild@v1

      # Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      # and modify them (or add more) to build your code if your project
      # uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: analyze
        uses: github/codeql-action/analyze@v1


@@ -0,0 +1,27 @@
name: "dependabot-automerge"

on:
  pull_request_target:
    types: [assigned, opened, synchronize, reopened]

permissions:
  pull-requests: write
  contents: write

jobs:
  automerge:
    runs-on: ubuntu-latest
    if: github.actor == 'dependabot[bot]'
    steps:
      - name: metadata
        id: metadata
        uses: dependabot/fetch-metadata@v1.3.0
        with:
          github-token: "${{ secrets.TOKEN }}"
      - name: merge
        id: merge
        if: ${{contains(steps.metadata.outputs.dependency-names, 'go.unistack.org')}}
        run: gh pr merge --auto --merge "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.TOKEN}}


@@ -3,6 +3,7 @@ on:
pull_request:
branches:
- master
- v3
jobs:
test:
name: test
@@ -13,7 +14,7 @@ jobs:
with:
go-version: 1.16
- name: checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: cache
uses: actions/cache@v2
with:
@@ -31,9 +32,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: lint
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3.1.0
continue-on-error: true
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.


@@ -1,7 +1,9 @@
# Micro Broker Segmentio [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/unistack-org/micro-broker-segmentio?tab=overview) [![Status](https://github.com/unistack-org/micro-broker-segmentio/workflows/build/badge.svg?branch=master)](https://github.com/unistack-org/micro-broker-segmentio/actions?query=workflow%3Abuild+branch%3Amaster+event%3Apush) [![Lint](https://goreportcard.com/badge/github.com/unistack-org/micro-broker-segmentio)](https://goreportcard.com/report/github.com/unistack-org/micro-broker-segmentio)
# Micro Broker Segmentio [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro-broker-segmentio?tab=overview) [![Status](https://go.unistack.org/micro-broker-segmentio/workflows/build/badge.svg?branch=master)](https://go.unistack.org/micro-broker-segmentio/actions?query=workflow%3Abuild+branch%3Amaster+event%3Apush) [![Lint](https://goreportcard.com/badge/go.unistack.org/micro-broker-segmentio)](https://goreportcard.com/report/go.unistack.org/micro-broker-segmentio)
Micro Broker Segmentio implementation.
DEPRECATION WARNING: not maintained, use go.unistack.org/micro-broker-kgo instead
## License
Apache 2.0 licensed.


@@ -6,24 +6,25 @@ import (
"strings"
"testing"
segmentio "github.com/unistack-org/micro-broker-segmentio/v3"
"github.com/unistack-org/micro/v3/broker"
"github.com/unistack-org/micro/v3/logger"
segmentio "go.unistack.org/micro-broker-segmentio/v3"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
)
var (
bm = &broker.Message{
Header: map[string]string{"hkey": "hval"},
Body: []byte(`"body"`),
}
)
var bm = &broker.Message{
Header: map[string]string{"hkey": "hval"},
Body: []byte(`"body"`),
}
func TestPubSub(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
logger.DefaultLogger.Init(logger.WithLevel(logger.TraceLevel))
if err := logger.DefaultLogger.Init(logger.WithLevel(logger.TraceLevel)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
var addrs []string
@@ -33,7 +34,7 @@ func TestPubSub(t *testing.T) {
addrs = strings.Split(addr, ",")
}
b := segmentio.NewBroker(broker.Addrs(addrs...))
b := segmentio.NewBroker(broker.Addrs(addrs...), segmentio.ClientID("test"))
if err := b.Init(); err != nil {
t.Fatal(err)
}

7
go.mod

@@ -1,9 +1,8 @@
module github.com/unistack-org/micro-broker-segmentio/v3
module go.unistack.org/micro-broker-segmentio/v3
go 1.16
require (
github.com/google/uuid v1.2.0
github.com/segmentio/kafka-go v0.4.16
github.com/unistack-org/micro/v3 v3.4.7
github.com/segmentio/kafka-go v0.4.22
go.unistack.org/micro/v3 v3.8.5
)

30
go.sum

@@ -1,17 +1,17 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z4tg=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
@@ -25,24 +25,26 @@ github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDm
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/segmentio/kafka-go v0.4.16 h1:9dt78ehM9qzAkekA60D6A96RlqDzC3hnYYa8y5Szd+U=
github.com/segmentio/kafka-go v0.4.16/go.mod h1:19+Eg7KwrNKy/PFhiIthEPkO8k+ac7/ZYXwYM9Df10w=
github.com/silas/dag v0.0.0-20210121180416-41cf55125c34/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/segmentio/kafka-go v0.4.22 h1:F4k2OTm9Y4+zliuoXgNKJZTktE0miQioZZzofsjhRdk=
github.com/segmentio/kafka-go v0.4.22/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
github.com/silas/dag v0.0.0-20210626123444-3804bac2d6d4/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/unistack-org/micro/v3 v3.4.7 h1:zmGFx2J6tIbmr4IGLcc+LNtbftQFZI42bfuNV5xNYM0=
github.com/unistack-org/micro/v3 v3.4.7/go.mod h1:LXmPfbJnJNvL0kQs8HfnkV3Wya2Wb+C7keVq++RCZnk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
go.unistack.org/micro-proto/v3 v3.1.0 h1:q39FwjFiRZn+Ux/tt+d3bJTmDtsQQWa+3SLYVo1vLfA=
go.unistack.org/micro-proto/v3 v3.1.0/go.mod h1:DpRhYCBXlmSJ/AAXTmntvlh7kQkYU6eFvlmYAx4BQS8=
go.unistack.org/micro/v3 v3.8.5 h1:DIYWRsQF+NPhKZP45sCtNsUhaRw6u2+Ps7U+pKU7i3s=
go.unistack.org/micro/v3 v3.8.5/go.mod h1:KMMmOmbgo/D52/rCAbqeKbBsgEEbSKM69he54J3ZIuA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed h1:p9UgmWI9wKpfYmgaV/IZKGdXc5qEK45tDwwwDyjS26I=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b h1:eB48h3HiRycXNy8E0Gf5e0hv7YT6Kt14L/D73G1fuwo=
golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -52,8 +54,12 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=


@@ -5,18 +5,22 @@ import (
"time"
kafka "github.com/segmentio/kafka-go"
"github.com/unistack-org/micro/v3/broker"
"github.com/unistack-org/micro/v3/client"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
)
var (
DefaultReaderConfig = kafka.ReaderConfig{}
DefaultWriterConfig = kafka.WriterConfig{}
DefaultStatsInterval = time.Second * 10
DefaultReaderConfig = kafka.ReaderConfig{}
DefaultWriterConfig = kafka.WriterConfig{}
DefaultStatsInterval = time.Second * 10
DefaultCommitInterval = time.Second * 0
DefaultCommitQueueSize = 2000
)
type readerConfigKey struct{}
type writerConfigKey struct{}
type (
readerConfigKey struct{}
writerConfigKey struct{}
)
func ReaderConfig(c kafka.ReaderConfig) broker.Option {
return broker.SetOption(readerConfigKey{}, c)
@@ -66,3 +70,15 @@ type writerCompletionFunc struct{}
func WriterCompletionFunc(fn func([]kafka.Message, error)) broker.Option {
return broker.SetOption(writerCompletionFunc{}, fn)
}
type clientIDKey struct{}
func ClientID(id string) broker.Option {
return broker.SetOption(clientIDKey{}, id)
}
type commitIntervalKey struct{}
func CommitInterval(td time.Duration) broker.Option {
return broker.SetOption(commitIntervalKey{}, td)
}
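
Taken together with ReaderConfig/WriterConfig above, the new options go through the usual broker option chain. A minimal usage sketch, assuming the NewBroker/Init flow used in the tests (address, client id and interval are illustrative):

b := segmentio.NewBroker(
	broker.Addrs("127.0.0.1:9092"),
	segmentio.ClientID("my-service"),        // copied into the reader and writer dialers
	segmentio.CommitInterval(5*time.Second), // 0 keeps synchronous commits on every Ack
)
if err := b.Init(); err != nil {
	log.Fatal(err)
}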


@@ -1,16 +1,19 @@
// Package kafka provides a kafka broker using segmentio
package segmentio
package segmentio // import "go.unistack.org/micro-broker-segmentio/v3"
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
kafka "github.com/segmentio/kafka-go"
"github.com/unistack-org/micro/v3/broker"
"github.com/unistack-org/micro/v3/logger"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/util/id"
)
type kBroker struct {
@@ -28,16 +31,9 @@ type kBroker struct {
}
type subscriber struct {
k *kBroker
topic string
opts broker.SubscribeOptions
offset int64
gen *kafka.Generation
partition int
handler broker.Handler
reader *kafka.Reader
closed bool
done chan struct{}
group *kafka.ConsumerGroup
cgcfg kafka.ConsumerGroupConfig
brokerOpts broker.Options
@@ -46,14 +42,13 @@ type subscriber struct {
type publication struct {
topic string
partition int
offset int64
err error
m *broker.Message
opts broker.Options
ctx context.Context
generation *kafka.Generation
reader *kafka.Reader
km kafka.Message
offsets map[string]map[int]int64 // for commit offsets
ackErr atomic.Value
msg *broker.Message
ackCh chan map[string]map[int]int64
readerDone *int32
}
func (p *publication) Topic() string {
@@ -61,20 +56,28 @@ func (p *publication) Topic() string {
}
func (p *publication) Message() *broker.Message {
return p.m
return p.msg
}
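// NOTE: Ack no longer calls generation.CommitOffsets directly: it surfaces any
// stored commit error, refuses the ack with kafka.ErrGroupClosed once the
// partition readers are done, and otherwise enqueues {topic: {partition: offset}}
// on ackCh for the commit loop to process.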
func (p *publication) Ack() error {
if p.opts.Logger.V(logger.TraceLevel) {
p.opts.Logger.Tracef(p.opts.Context, "commit offset %#+v\n", p.offsets)
if cerr := p.ackErr.Load(); cerr != nil {
return cerr.(error)
}
return p.generation.CommitOffsets(p.offsets)
if atomic.LoadInt32(p.readerDone) == 1 {
return kafka.ErrGroupClosed
}
p.ackCh <- map[string]map[int]int64{p.topic: {p.partition: p.offset}}
return nil
}
func (p *publication) Error() error {
return p.err
}
func (p *publication) SetError(err error) {
p.err = err
}
func (s *subscriber) Options() broker.SubscribeOptions {
return s.opts
}
@@ -86,22 +89,21 @@ func (s *subscriber) Topic() string {
func (s *subscriber) Unsubscribe(ctx context.Context) error {
var err error
s.Lock()
s.closed = true
group := s.group
close(s.done)
s.Unlock()
if group != nil {
err = group.Close()
}
if err == nil {
s.Lock()
s.closed = true
s.Unlock()
}
return err
}
func (k *kBroker) Address() string {
if len(k.addrs) > 0 {
return k.addrs[0]
}
return "127.0.0.1:9092"
return strings.Join(k.addrs, ",")
}
func (k *kBroker) Name() string {
@@ -220,6 +222,46 @@ func (k *kBroker) Options() broker.Options {
return k.opts
}
func (k *kBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
var val []byte
var err error
options := broker.NewPublishOptions(opts...)
kmsgs := make([]kafka.Message, 0, len(msgs))
for _, msg := range msgs {
if options.BodyOnly {
val = msg.Body
} else {
val, err = k.opts.Codec.Marshal(msg)
if err != nil {
return err
}
}
topic, _ := msg.Header.Get(metadata.HeaderTopic)
kmsg := kafka.Message{Topic: topic, Value: val}
if options.Context != nil {
if key, ok := options.Context.Value(publishKey{}).([]byte); ok && len(key) > 0 {
kmsg.Key = key
}
}
kmsgs = append(kmsgs, kmsg)
}
if k.writer.Async {
k.Lock()
k.messages = append(k.messages, kmsgs...)
k.Unlock()
return nil
}
wCtx := k.opts.Context
if ctx != nil {
wCtx = ctx
}
return k.writer.WriteMessages(wCtx, kmsgs...)
}
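
Unlike Publish, BatchPublish takes no topic argument: every message names its destination in a metadata header, and with an async writer the converted messages are only queued. A hedged caller sketch (topic value and variable names are illustrative):

msgs := []*broker.Message{
	{Header: map[string]string{metadata.HeaderTopic: "events"}, Body: []byte(`"one"`)},
	{Header: map[string]string{metadata.HeaderTopic: "events"}, Body: []byte(`"two"`)},
}
if err := b.BatchPublish(ctx, msgs); err != nil {
	log.Fatal(err) // with writer.Async the messages were only appended, not yet written
}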
func (k *kBroker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
var val []byte
var err error
@@ -252,18 +294,33 @@ func (k *kBroker) Publish(ctx context.Context, topic string, msg *broker.Message
if ctx != nil {
wCtx = ctx
}
if err = k.writer.WriteMessages(wCtx, kmsg); err == nil {
return nil
}
logger.Debugf(wCtx, "recreate writer because of err: %v", err)
k.Lock()
if err = k.writer.Close(); err != nil {
logger.Errorf(wCtx, "failed to close writer: %v", err)
k.Unlock()
return err
}
k.writer = newWriter(k.writerConfig)
k.Unlock()
return k.writer.WriteMessages(wCtx, kmsg)
}
func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
func (k *kBroker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
opt := broker.NewSubscribeOptions(opts...)
if opt.Group == "" {
id, err := uuid.NewRandom()
gid, err := id.New()
if err != nil {
return nil, err
}
opt.Group = id.String()
opt.Group = gid
}
cgcfg := kafka.ConsumerGroupConfig{
@@ -275,27 +332,42 @@
StartOffset: k.readerConfig.StartOffset,
Logger: k.readerConfig.Logger,
ErrorLogger: k.readerConfig.ErrorLogger,
Dialer: k.readerConfig.Dialer,
}
cgcfg.StartOffset = kafka.LastOffset
if err := cgcfg.Validate(); err != nil {
return nil, err
}
cgroup, err := kafka.NewConsumerGroup(cgcfg)
if err != nil {
return nil, err
gCtx := k.opts.Context
if ctx != nil {
gCtx = ctx
}
sub := &subscriber{brokerOpts: k.opts, opts: opt, topic: topic, group: cgroup, cgcfg: cgcfg, done: make(chan struct{})}
sub := &subscriber{brokerOpts: k.opts, opts: opt, topic: topic, cgcfg: cgcfg}
sub.createGroup(gCtx)
go func() {
defer func() {
sub.RLock()
closed := sub.closed
sub.RUnlock()
if !closed {
if err := sub.group.Close(); err != nil {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] consumer group close error %v", err)
}
}
}()
for {
select {
case <-sub.done:
return
case <-ctx.Done():
// unexpected context closed
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
if k.opts.Context.Err() != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] context closed unexpected %v", k.opts.Context.Err())
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] subscribe context closed %v", k.opts.Context.Err())
}
return
case <-k.opts.Context.Done():
@@ -303,81 +375,269 @@ func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Ha
closed := sub.closed
sub.RUnlock()
if closed {
// unsubscribed and closed
return
}
// unexpected context closed
if k.opts.Context.Err() != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] context closed unexpected %v", k.opts.Context.Err())
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] broker context closed error %v", k.opts.Context.Err())
}
return
default:
sub.RLock()
group := sub.group
closed := sub.closed
sub.RUnlock()
if closed {
return
}
gCtx := k.opts.Context
if ctx != nil {
gCtx = ctx
}
generation, err := group.Next(gCtx)
generation, err := sub.group.Next(gCtx)
switch err {
case nil:
// normal execution
case kafka.ErrGroupClosed:
k.opts.Logger.Debugf(k.opts.Context, "group closed %v", err)
sub.RLock()
closed := sub.closed
sub.RUnlock()
if !closed {
if k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] recreate consumer group, as it closed %v", k.opts.Context.Err())
}
if err = group.Close(); err != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] consumer group close error %v", err)
continue
}
sub.createGroup(gCtx)
continue
if closed {
return
}
return
if k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] recreate consumer group, as it closed by kafka %v", k.opts.Context.Err())
}
sub.createGroup(gCtx)
continue
default:
k.opts.Logger.Debugf(k.opts.Context, "some error: %v", err)
sub.RLock()
closed := sub.closed
sub.RUnlock()
if !closed {
if k.opts.Logger.V(logger.TraceLevel) {
k.opts.Logger.Tracef(k.opts.Context, "[segmentio] recreate consumer group, as unexpected consumer error %v", err)
}
if closed {
return
}
if err = group.Close(); err != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] consumer group close error %v", err)
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debugf(k.opts.Context, "[segmentio] recreate consumer group, as unexpected consumer error %T %v", err, err)
}
sub.createGroup(k.opts.Context)
sub.createGroup(gCtx)
continue
}
for _, t := range cgcfg.Topics {
assignments := generation.Assignments[t]
ackCh := make(chan map[string]map[int]int64, DefaultCommitQueueSize)
errChLen := 0
for _, assignments := range generation.Assignments {
errChLen += len(assignments)
}
errChs := make([]chan error, 0, errChLen)
commitDoneCh := make(chan bool)
readerDone := int32(0)
cntWait := int32(0)
for topic, assignments := range generation.Assignments {
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debugf(k.opts.Context, "topic: %s assignments: %v", topic, assignments)
}
for _, assignment := range assignments {
cfg := k.readerConfig
cfg.Topic = t
cfg.Topic = topic
cfg.Partition = assignment.ID
cfg.GroupID = ""
// break reading
reader := kafka.NewReader(cfg)
if k.opts.Logger.V(logger.TraceLevel) {
k.opts.Logger.Tracef(k.opts.Context, "[segmentio] reader current offset: %v new offset: %v", reader.Offset(), assignment.Offset)
}
reader.SetOffset(assignment.Offset)
cgh := &cgHandler{generation: generation, brokerOpts: k.opts, subOpts: opt, reader: reader, handler: handler}
if err := reader.SetOffset(assignment.Offset); err != nil {
if k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "assignments offset %d can be set by reader: %v", assignment.Offset, err)
}
if err = reader.Close(); err != nil {
if k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "reader close err: %v", err)
}
}
continue
}
errCh := make(chan error)
errChs = append(errChs, errCh)
cgh := &cgHandler{
brokerOpts: k.opts,
subOpts: opt,
reader: reader,
batchhandler: handler,
ackCh: ackCh,
errCh: errCh,
cntWait: &cntWait,
readerDone: &readerDone,
commitDoneCh: commitDoneCh,
}
atomic.AddInt32(cgh.cntWait, 1)
generation.Start(cgh.run)
}
}
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debug(k.opts.Context, "start commit loop")
}
// run async commit loop
go k.commitLoop(generation, k.readerConfig.CommitInterval, ackCh, errChs, &readerDone, commitDoneCh, &cntWait)
}
}
}()
return sub, nil
}
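// Subscribe below mirrors BatchSubscribe above: join the consumer group, spawn
// one kafka.Reader per assigned partition in each generation, and wire every
// reader's acks and errors into the shared commitLoop.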
func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
opt := broker.NewSubscribeOptions(opts...)
if opt.Group == "" {
gid, err := id.New()
if err != nil {
return nil, err
}
opt.Group = gid
}
cgcfg := kafka.ConsumerGroupConfig{
ID: opt.Group,
WatchPartitionChanges: true,
Brokers: k.readerConfig.Brokers,
Topics: []string{topic},
GroupBalancers: k.readerConfig.GroupBalancers,
StartOffset: k.readerConfig.StartOffset,
Logger: k.readerConfig.Logger,
ErrorLogger: k.readerConfig.ErrorLogger,
Dialer: k.readerConfig.Dialer,
}
if err := cgcfg.Validate(); err != nil {
return nil, err
}
gCtx := k.opts.Context
if ctx != nil {
gCtx = ctx
}
sub := &subscriber{brokerOpts: k.opts, opts: opt, topic: topic, cgcfg: cgcfg}
sub.createGroup(gCtx)
go func() {
defer func() {
sub.RLock()
closed := sub.closed
sub.RUnlock()
if !closed {
if err := sub.group.Close(); err != nil {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] consumer group close error %v", err)
}
}
}()
for {
select {
case <-ctx.Done():
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
if k.opts.Context.Err() != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] subscribe context closed %v", k.opts.Context.Err())
}
return
case <-k.opts.Context.Done():
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
if k.opts.Context.Err() != nil && k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] broker context closed error %v", k.opts.Context.Err())
}
return
default:
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
generation, err := sub.group.Next(gCtx)
switch err {
case nil:
// normal execution
case kafka.ErrGroupClosed:
k.opts.Logger.Debugf(k.opts.Context, "group closed %v", err)
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
if k.opts.Logger.V(logger.ErrorLevel) {
k.opts.Logger.Errorf(k.opts.Context, "[segmentio] recreate consumer group, as it closed by kafka %v", k.opts.Context.Err())
}
sub.createGroup(gCtx)
continue
default:
k.opts.Logger.Debugf(k.opts.Context, "some error: %v", err)
sub.RLock()
closed := sub.closed
sub.RUnlock()
if closed {
return
}
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debugf(k.opts.Context, "[segmentio] recreate consumer group, as unexpected consumer error %T %v", err, err)
}
sub.createGroup(gCtx)
continue
}
// k.opts.Meter.Counter("broker_reader_partitions", "topic", topic).Set(uint64(0))
ackCh := make(chan map[string]map[int]int64, DefaultCommitQueueSize)
errChLen := 0
for _, assignments := range generation.Assignments {
errChLen += len(assignments)
}
errChs := make([]chan error, 0, errChLen)
commitDoneCh := make(chan bool)
readerDone := int32(0)
cntWait := int32(0)
for topic, assignments := range generation.Assignments {
// k.opts.Meter.Counter("broker_reader_partitions", "topic", topic).Set(uint64(len(assignments)))
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debugf(k.opts.Context, "topic: %s assignments: %v", topic, assignments)
}
for _, assignment := range assignments {
cfg := k.readerConfig
cfg.Topic = topic
cfg.Partition = assignment.ID
cfg.GroupID = ""
reader := kafka.NewReader(cfg)
// since we don't use a consumer group in the reader, the reader is not started before the actual fetch, so we can ignore all errors here
_ = reader.SetOffset(assignment.Offset)
errCh := make(chan error)
errChs = append(errChs, errCh)
cgh := &cgHandler{
brokerOpts: k.opts,
subOpts: opt,
reader: reader,
handler: handler,
ackCh: ackCh,
errCh: errCh,
cntWait: &cntWait,
readerDone: &readerDone,
commitDoneCh: commitDoneCh,
}
atomic.AddInt32(cgh.cntWait, 1)
generation.Start(cgh.run)
}
}
// run async commit loop
go k.commitLoop(generation, k.readerConfig.CommitInterval, ackCh, errChs, &readerDone, commitDoneCh, &cntWait)
}
}
}()
@@ -386,30 +646,189 @@ func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Ha
}
type cgHandler struct {
topic string
generation *kafka.Generation
brokerOpts broker.Options
subOpts broker.SubscribeOptions
reader *kafka.Reader
handler broker.Handler
brokerOpts broker.Options
subOpts broker.SubscribeOptions
reader *kafka.Reader
handler broker.Handler
batchhandler broker.BatchHandler
ackCh chan map[string]map[int]int64
errCh chan error
readerDone *int32
commitDoneCh chan bool
cntWait *int32
}
func (k *kBroker) commitLoop(generation *kafka.Generation, commitInterval time.Duration, ackCh chan map[string]map[int]int64, errChs []chan error, readerDone *int32, commitDoneCh chan bool, cntWait *int32) {
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debug(k.opts.Context, "start async commit loop")
}
td := DefaultCommitInterval
if commitInterval > 0 {
td = commitInterval
}
if v, ok := k.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && td > 0 {
td = v
}
var mapMu sync.Mutex
offsets := make(map[string]map[int]int64, 4)
go func() {
defer func() {
k.opts.Logger.Debug(k.opts.Context, "return from commitLoop and close commitDoneCh")
close(commitDoneCh)
}()
checkTicker := time.NewTicker(300 * time.Millisecond)
defer checkTicker.Stop()
for {
select {
case <-checkTicker.C:
if atomic.LoadInt32(cntWait) != 0 {
continue
}
mapMu.Lock()
if err := generation.CommitOffsets(offsets); err != nil {
for _, errCh := range errChs {
errCh <- err
}
mapMu.Unlock()
return
}
mapMu.Unlock()
if k.opts.Logger.V(logger.DebugLevel) {
k.opts.Logger.Debug(k.opts.Context, "stop commit filling loop")
}
return
case ack := <-ackCh:
switch td {
case 0: // sync commits as CommitInterval == 0
if len(ack) > 0 {
err := generation.CommitOffsets(ack)
if err != nil {
for _, errCh := range errChs {
errCh <- err
}
return
}
}
default: // async commits as CommitInterval > 0
mapMu.Lock()
for t, p := range ack {
if _, ok := offsets[t]; !ok {
offsets[t] = make(map[int]int64, 4)
}
for k, v := range p {
offsets[t][k] = v
}
}
mapMu.Unlock()
}
}
}
}()
if td == 0 {
// sync commit loop
for {
if atomic.LoadInt32(readerDone) == 1 && atomic.LoadInt32(cntWait) == 0 {
break
}
time.Sleep(1 * time.Second)
}
}
// async commit loop
if td > 0 {
ticker := time.NewTicker(td)
doneTicker := time.NewTicker(300 * time.Millisecond)
defer doneTicker.Stop()
for {
select {
case <-doneTicker.C:
if atomic.LoadInt32(readerDone) == 1 && atomic.LoadInt32(cntWait) == 0 {
// fire immediate commit offsets
ticker.Stop()
}
case <-ticker.C:
mapMu.Lock()
if k.opts.Logger.V(logger.DebugLevel) && len(offsets) > 0 {
k.opts.Logger.Debugf(k.opts.Context, "async commit offsets: %v", offsets)
}
err := generation.CommitOffsets(offsets)
if err != nil {
for _, errCh := range errChs {
errCh <- err
}
mapMu.Unlock()
return
}
offsets = make(map[string]map[int]int64, 4)
mapMu.Unlock()
if atomic.LoadInt32(readerDone) == 1 && atomic.LoadInt32(cntWait) == 0 {
return
}
}
}
}
}
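
commitLoop is what CommitInterval ultimately selects between: at 0, every ack arriving on ackCh is committed synchronously through the generation; above 0, acks are merged into the offsets map and flushed by the ticker, with commit errors fanned back out over errChs. A hedged subscriber-side sketch (topic and handler shape as in the tests):

sub, err := b.Subscribe(ctx, "test_topic", func(msg broker.Event) error {
	// Ack either commits synchronously (CommitInterval == 0) or
	// enqueues the offset for the async commit loop above.
	return msg.Ack()
})
if err != nil {
	log.Fatal(err)
}
defer sub.Unsubscribe(ctx)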
func (h *cgHandler) run(ctx context.Context) {
offsets := make(map[string]map[int]int64)
offsets[h.reader.Config().Topic] = make(map[int]int64)
if h.brokerOpts.Logger.V(logger.DebugLevel) {
h.brokerOpts.Logger.Debugf(ctx, "start partition reader topic: %s partition: %d", h.reader.Config().Topic, h.reader.Config().Partition)
}
td := DefaultStatsInterval
if v, ok := h.brokerOpts.Context.Value(statsIntervalKey{}).(time.Duration); ok && td > 0 {
td = v
}
// start stats loop
go readerStats(ctx, h.reader, td, h.brokerOpts.Meter)
var commitErr atomic.Value
defer func() {
atomic.AddInt32(h.cntWait, -1)
atomic.CompareAndSwapInt32(h.readerDone, 0, 1)
if err := h.reader.Close(); err != nil && h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] reader close error: %v", err)
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] reader for topic %s partition %d close error: %v", h.reader.Config().Topic, h.reader.Config().Partition, err)
}
h.brokerOpts.Logger.Debug(h.brokerOpts.Context, "wait start for commitDoneCh channel closing")
<-h.commitDoneCh
h.brokerOpts.Logger.Debug(h.brokerOpts.Context, "wait stop for commitDoneCh channel closing")
if h.brokerOpts.Logger.V(logger.DebugLevel) {
h.brokerOpts.Logger.Debugf(ctx, "stop partition reader topic: %s partition: %d", h.reader.Config().Topic, h.reader.Config().Partition)
}
}()
/*
tc := time.NewTicker(3 * time.Second)
defer tc.Stop()
*/
go func() {
for {
select {
// case <-tc.C:
// commitErr.Store(errors.New("my err"))
// return
case err := <-h.errCh:
if err != nil {
commitErr.Store(err)
return
}
case <-ctx.Done():
return
}
}
}()
for {
select {
case <-ctx.Done():
@@ -418,35 +837,47 @@ func (h *cgHandler) run(ctx context.Context) {
msg, err := h.reader.ReadMessage(ctx)
switch err {
default:
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] unexpected error: %v", err)
switch kerr := err.(type) {
case kafka.Error:
if h.brokerOpts.Logger.V(logger.DebugLevel) {
h.brokerOpts.Logger.Debugf(h.brokerOpts.Context, "[segmentio] kafka error %T err: %v", kerr, kerr)
}
return
default:
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] unexpected error type: %T err: %v", err, err)
}
return
}
return
case kafka.ErrGenerationEnded:
// generation has ended
if h.brokerOpts.Logger.V(logger.TraceLevel) {
h.brokerOpts.Logger.Trace(h.brokerOpts.Context, "[segmentio] generation ended")
if h.brokerOpts.Logger.V(logger.DebugLevel) {
h.brokerOpts.Logger.Debug(h.brokerOpts.Context, "[segmentio] generation ended, rebalance or close")
}
return
case nil:
if cerr := commitErr.Load(); cerr != nil {
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] commit error: %v", cerr)
}
return
}
eh := h.brokerOpts.ErrorHandler
if h.subOpts.ErrorHandler != nil {
eh = h.subOpts.ErrorHandler
}
offsets[msg.Topic][msg.Partition] = msg.Offset + 1
// github.com/segmentio/kafka-go/commit.go makeCommit builds the commit message with offset + 1
// because zookeeper stores the offset that a new consumer starts from, i.e. current + 1
p := &publication{topic: msg.Topic, opts: h.brokerOpts, generation: h.generation, m: &broker.Message{}, offsets: offsets}
p := &publication{ackCh: h.ackCh, partition: msg.Partition, offset: msg.Offset + 1, topic: msg.Topic, msg: &broker.Message{}, readerDone: h.readerDone}
if h.subOpts.BodyOnly {
p.m.Body = msg.Value
p.msg.Body = msg.Value
} else {
if err := h.brokerOpts.Codec.Unmarshal(msg.Value, p.m); err != nil {
if err := h.brokerOpts.Codec.Unmarshal(msg.Value, p.msg); err != nil {
p.err = err
p.m.Body = msg.Value
p.msg.Body = msg.Value
if eh != nil {
eh(p)
_ = eh(p)
} else {
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio]: failed to unmarshal: %v", err)
@@ -455,17 +886,24 @@ func (h *cgHandler) run(ctx context.Context) {
continue
}
}
if cerr := commitErr.Load(); cerr != nil {
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio] commit error: %v", cerr)
}
return
}
err = h.handler(p)
if err == nil && h.subOpts.AutoAck {
if err = p.Ack(); err != nil {
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio]: unable to commit msg: %v", err)
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio]: message ack error: %v", err)
}
return
}
} else if err != nil {
p.err = err
if eh != nil {
eh(p)
_ = eh(p)
} else {
if h.brokerOpts.Logger.V(logger.ErrorLevel) {
h.brokerOpts.Logger.Errorf(h.brokerOpts.Context, "[segmentio]: subscriber error: %v", err)
@@ -478,17 +916,28 @@ func (h *cgHandler) run(ctx context.Context) {
}
func (sub *subscriber) createGroup(ctx context.Context) {
sub.RLock()
cgcfg := sub.cgcfg
sub.RUnlock()
var err error
for {
select {
case <-ctx.Done():
// closed
return
default:
cgroup, err := kafka.NewConsumerGroup(cgcfg)
sub.RLock()
cgcfg := sub.cgcfg
closed := sub.closed
cgroup := sub.group
sub.RUnlock()
if closed {
return
}
if cgroup != nil {
if err = cgroup.Close(); err != nil {
if sub.brokerOpts.Logger.V(logger.ErrorLevel) {
sub.brokerOpts.Logger.Errorf(sub.brokerOpts.Context, "[segmentio]: consumer group close error %v", err)
}
}
}
cgroup, err = kafka.NewConsumerGroup(cgcfg)
if err != nil {
if sub.brokerOpts.Logger.V(logger.ErrorLevel) {
sub.brokerOpts.Logger.Errorf(sub.brokerOpts.Context, "[segmentio]: consumer group error %v", err)
@@ -498,14 +947,13 @@ func (sub *subscriber) createGroup(ctx context.Context) {
sub.Lock()
sub.group = cgroup
sub.Unlock()
// return
return
}
}
}
func (k *kBroker) String() string {
return "kafka"
return "segmentio"
}
func (k *kBroker) configure(opts ...broker.Option) error {
@@ -555,8 +1003,32 @@ func (k *kBroker) configure(opts ...broker.Option) error {
}
k.addrs = cAddrs
k.readerConfig = readerConfig
k.writer = &kafka.Writer{
Addr: kafka.TCP(k.addrs...),
k.writerConfig = writerConfig
k.writerConfig.Brokers = k.addrs
if k.readerConfig.Dialer == nil {
k.readerConfig.Dialer = kafka.DefaultDialer
}
if k.writerConfig.Dialer == nil {
k.writerConfig.Dialer = kafka.DefaultDialer
}
if id, ok := k.opts.Context.Value(clientIDKey{}).(string); ok {
k.writerConfig.Dialer.ClientID = id
k.readerConfig.Dialer.ClientID = id
}
k.writer = newWriter(k.writerConfig)
if fn, ok := k.opts.Context.Value(writerCompletionFunc{}).(func([]kafka.Message, error)); ok {
k.writer.Completion = fn
}
k.init = true
return nil
}
func newWriter(writerConfig kafka.WriterConfig) *kafka.Writer {
return &kafka.Writer{
Addr: kafka.TCP(writerConfig.Brokers...),
Balancer: writerConfig.Balancer,
MaxAttempts: writerConfig.MaxAttempts,
BatchSize: writerConfig.BatchSize,
@@ -566,19 +1038,16 @@ func (k *kBroker) configure(opts ...broker.Option) error {
WriteTimeout: writerConfig.WriteTimeout,
RequiredAcks: kafka.RequiredAcks(writerConfig.RequiredAcks),
Async: writerConfig.Async,
//Completion: writerConfig.Completion,
//Compression: writerConfig.Compression,
Logger: writerConfig.Logger,
ErrorLogger: writerConfig.ErrorLogger,
//Transport: writerConfig.Transport,
Logger: writerConfig.Logger,
ErrorLogger: writerConfig.ErrorLogger,
Transport: &kafka.Transport{
Dial: writerConfig.Dialer.DialFunc,
ClientID: writerConfig.Dialer.ClientID,
IdleTimeout: time.Second * 5,
MetadataTTL: time.Second * 9,
SASL: writerConfig.Dialer.SASLMechanism,
},
}
if fn, ok := k.opts.Context.Value(writerCompletionFunc{}).(func([]kafka.Message, error)); ok {
k.writer.Completion = fn
}
k.init = true
return nil
}
func NewBroker(opts ...broker.Option) broker.Broker {


@@ -7,14 +7,17 @@ import (
"sync/atomic"
"testing"
segmentio "github.com/unistack-org/micro-broker-segmentio/v3"
"github.com/unistack-org/micro/v3/broker"
"github.com/unistack-org/micro/v3/logger"
segmentio "go.unistack.org/micro-broker-segmentio/v3"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
)
func TestSegmentioSubscribe(t *testing.T) {
ctx := context.Background()
logger.DefaultLogger.Init(logger.WithLevel(logger.TraceLevel))
if err := logger.DefaultLogger.Init(logger.WithLevel(logger.TraceLevel)); err != nil {
t.Fatal(err)
}
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
@@ -43,7 +46,6 @@ func TestSegmentioSubscribe(t *testing.T) {
done := make(chan struct{}, 100)
fn := func(msg broker.Event) error {
if err := msg.Ack(); err != nil {
panic(err)
return err
}
done <- struct{}{}
@@ -179,7 +181,7 @@ func BenchmarkSegmentioCodecJsonSubscribe(b *testing.B) {
return
}
if err := brk.Publish(ctx, "test_topic", bm); err != nil {
b.Fatal(err)
panic(err)
}
}
}()


@@ -2,15 +2,23 @@ package segmentio
import (
"context"
"sync"
"time"
kafka "github.com/segmentio/kafka-go"
"github.com/unistack-org/micro/v3/meter"
"go.unistack.org/micro/v3/meter"
)
func readerStats(ctx context.Context, r *kafka.Reader, td time.Duration, m meter.Meter) {
ticker := time.NewTicker(td)
defer ticker.Stop()
var once sync.Once
onceLabels := make([]string, 0, 4)
defer func() {
ticker.Stop()
m.Counter("broker_reader_count", onceLabels...).Add(int(-1))
}()
for {
select {
@@ -22,7 +30,10 @@ func readerStats(ctx context.Context, r *kafka.Reader, td time.Duration, m meter
}
rstats := r.Stats()
labels := []string{"topic", rstats.Topic, "partition", rstats.Partition, "client_id", rstats.ClientID}
once.Do(func() {
onceLabels = []string{"topic", rstats.Topic, "client_id", rstats.ClientID}
m.Counter("broker_reader_count", onceLabels...).Add(int(1))
})
m.Counter("broker_reader_dial_count", labels...).Add(int(rstats.Dials))
m.Counter("broker_reader_fetch_count", labels...).Add(int(rstats.Fetches))
m.Counter("broker_reader_message_count", labels...).Add(int(rstats.Messages))