Compare commits
5 Commits
Author | SHA1 | Date
--- | --- | ---
 | 8090e90683 | 
 | 146a458b7a | 
 | 4d3b054dfd | 
 | a99096d5ce | 
 | d50289370e | 
@@ -3,10 +3,10 @@ name: lint
on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches: [ master, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
    branches:
      - master
      - v3
      - v4

jobs:
  lint:
@@ -24,6 +24,6 @@ jobs:
      - name: setup deps
        run: go get -v ./...
      - name: run lint
        uses: golangci/golangci-lint-action@v6
        uses: https://github.com/golangci/golangci-lint-action@v6
        with:
          version: 'latest'
17  .gitea/workflows/job_notify.yml  Normal file
@@ -0,0 +1,17 @@
name: notify
on: [push]
jobs:
  notify:
    name: notify
    runs-on: ubuntu-latest
    steps:
      - name: send
        uses: appleboy/telegram-action@master
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          message: |
            New commit by ${{ github.actor }}:
            Commit: ${{ github.event.commits[0].message }}
            Repository: ${{ github.repository }}
            Changes: https://git.unistack.org/${{ github.repository }}/commit/${{github.sha}}
@@ -3,12 +3,15 @@ name: test
on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches: [ master, v3, v4 ]
    branches:
      - master
      - v3
      - v4
  push:
    branches: [ master, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
    branches:
      - master
      - v3
      - v4

jobs:
  test:
@@ -32,19 +35,19 @@ jobs:
          go-version: 'stable'
      - name: setup go work
        env:
          GOWORK: ${{ github.workspace }}/go.work
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: |
          go work init
          go work use .
          go work use micro-tests
      - name: setup deps
        env:
          GOWORK: ${{ github.workspace }}/go.work
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: go get -v ./...
      - name: run tests
        env:
          INTEGRATION_TESTS: yes
          GOWORK: ${{ github.workspace }}/go.work
          GOWORK: /workspace/${{ github.repository_owner }}/go.work
        run: |
          cd micro-tests
          go test -mod readonly -v ./... || true
53  .github/workflows/job_coverage.yml  vendored
@@ -1,53 +0,0 @@
name: coverage

on:
  push:
    branches: [ main, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
  pull_request:
    branches: [ main, v3, v4 ]

jobs:

  build:
    if: github.server_url != 'https://github.com'
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'

      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'

      - name: test coverage
        run: |
          go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./...
          go tool cover -func coverage.out -o coverage.out

      - name: coverage badge
        uses: tj-actions/coverage-badge-go@v2
        with:
          green: 80
          filename: coverage.out

      - uses: stefanzweifel/git-auto-commit-action@v4
        name: autocommit
        with:
          commit_message: Apply Code Coverage Badge
          skip_fetch: false
          skip_checkout: false
          file_pattern: ./README.md

      - name: push
        if: steps.auto-commit-action.outputs.changes_detected == 'true'
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ github.token }}
          branch: ${{ github.ref }}
94  .github/workflows/job_sync.yml  vendored
@@ -1,94 +0,0 @@
name: sync

on:
  schedule:
    - cron: '*/5 * * * *'
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync:
    if: github.server_url != 'https://github.com'
    runs-on: ubuntu-latest
    steps:
      - name: init
        run: |
          git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
          git config --global user.name "github-actions[bot]"
          echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
          echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc

      - name: check master
        id: check_master
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi

      - name: sync master
        if: steps.check_master.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream master
          git push upstream master --progress
          git push origin master --progress
          cd ../
          rm -rf repo

      - name: check v3
        id: check_v3
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi

      - name: sync v3
        if: steps.check_v3.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream v3
          git push upstream v3 --progress
          git push origin v3 --progress
          cd ../
          rm -rf repo

      - name: check v4
        id: check_v4
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi

      - name: sync v4
        if: steps.check_v4.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream v4
          git push upstream v4 --progress
          git push origin v4 --progress
          cd ../
          rm -rf repo
@@ -1,5 +1,5 @@
run:
  concurrency: 8
  timeout: 5m
  deadline: 5m
  issues-exit-code: 1
  tests: true
11  README.md
@@ -1,2 +1,9 @@
# micro-broker-kgo
![Coverage](https://img.shields.io/badge/Coverage-46.9%25-yellow)
# broker-kgo

TODO:

1) export the current offset for each partition in the group
2) export the lag for the group
3) monitor:
   1) whether the lag is greater than zero
   2) whether the offset delta over the time window we need does not
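The TODO above maps directly onto franz-go's admin client. As a rough sketch of how per-partition lag could be computed and exported — assuming kadm's FetchOffsets and ListEndOffsets behave as in current franz-go releases; printGroupLag is a hypothetical helper for illustration, not code from this repository:

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
)

// printGroupLag fetches the group's committed offsets, lists the log end
// offsets for the given topics, and prints lag = end offset - committed
// offset per partition (hypothetical helper, illustration only).
func printGroupLag(ctx context.Context, adm *kadm.Client, group string, topics ...string) error {
	commits, err := adm.FetchOffsets(ctx, group)
	if err != nil {
		return err
	}
	ends, err := adm.ListEndOffsets(ctx, topics...)
	if err != nil {
		return err
	}
	for topic, parts := range commits {
		for p, c := range parts {
			if end, ok := ends[topic][p]; ok {
				fmt.Printf("topic=%s partition=%d lag=%d\n", topic, p, end.Offset-c.At)
			}
		}
	}
	return nil
}

func main() {
	client, err := kgo.NewClient(kgo.SeedBrokers("127.0.0.1:9092"))
	if err != nil {
		panic(err)
	}
	defer client.Close()

	if err := printGroupLag(context.Background(), kadm.NewClient(client), "mygroup", "mytopic"); err != nil {
		panic(err)
	}
}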
75  broker.go
@@ -1,75 +0,0 @@
package kgo

import (
	"context"
	"net"
	"sync/atomic"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
	"go.unistack.org/micro/v4/logger"
)

type hookEvent struct {
	log          logger.Logger
	fatalOnError bool
	connected    *atomic.Uint32
}

var (
	_ kgo.HookBrokerConnect           = &hookEvent{}
	_ kgo.HookBrokerDisconnect        = &hookEvent{}
	_ kgo.HookBrokerRead              = &hookEvent{}
	_ kgo.HookBrokerWrite             = &hookEvent{}
	_ kgo.HookGroupManageError        = &hookEvent{}
	_ kgo.HookProduceRecordUnbuffered = &hookEvent{}
)

func (m *hookEvent) OnGroupManageError(err error) {
	if err != nil && !kgo.IsRetryableBrokerErr(err) {
		m.connected.Store(0)
		if m.fatalOnError {
			m.log.Fatal(context.TODO(), "kgo.OnGroupManageError", err)
		}
	}
}

func (m *hookEvent) OnBrokerConnect(_ kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
	if err != nil && !kgo.IsRetryableBrokerErr(err) {
		m.connected.Store(0)
		if m.fatalOnError {
			m.log.Fatal(context.TODO(), "kgo.OnBrokerConnect", err)
		}
	}
}

func (m *hookEvent) OnBrokerDisconnect(_ kgo.BrokerMetadata, _ net.Conn) {
	// m.connected.Store(0)
}

func (m *hookEvent) OnBrokerWrite(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
	if err != nil && !kgo.IsRetryableBrokerErr(err) {
		m.connected.Store(0)
		if m.fatalOnError {
			m.log.Fatal(context.TODO(), "kgo.OnBrokerWrite", err)
		}
	}
}

func (m *hookEvent) OnBrokerRead(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
	if err != nil && !kgo.IsRetryableBrokerErr(err) {
		m.connected.Store(0)
		if m.fatalOnError {
			m.log.Fatal(context.TODO(), "kgo.OnBrokerRead", err)
		}
	}
}

func (m *hookEvent) OnProduceRecordUnbuffered(_ *kgo.Record, err error) {
	if err != nil && !kgo.IsRetryableBrokerErr(err) {
		m.connected.Store(0)
		if m.fatalOnError {
			m.log.Fatal(context.TODO(), "kgo.OnProduceRecordUnbuffered", err)
		}
	}
}
41  carrier.go
@@ -1,12 +1,8 @@
package kgo

import (
	"net/http"
	"slices"
	"strings"

	"github.com/twmb/franz-go/pkg/kgo"
	"go.unistack.org/micro/v4/metadata"
	"go.unistack.org/micro/v3/metadata"
)

// RecordCarrier injects and extracts traces from a kgo.Record.
@@ -57,37 +53,24 @@ func (c RecordCarrier) Keys() []string {
	return out
}

func setHeaders(r *kgo.Record, md metadata.Metadata, exclude ...string) {
func setHeaders(r *kgo.Record, md metadata.Metadata) {
	seen := make(map[string]struct{})

loop:
	for k, v := range md {
		k = http.CanonicalHeaderKey(k)

		if _, ok := seen[k]; ok {
			continue loop
		}

		if slices.ContainsFunc(exclude, func(s string) bool {
			return strings.EqualFold(s, k)
		}) {
			continue loop
		}

		for i := 0; i < len(r.Headers); i++ {
			if strings.EqualFold(r.Headers[i].Key, k) {
			if r.Headers[i].Key == k {
				// Key exist, update the value.
				r.Headers[i].Value = []byte(strings.Join(v, ","))
				r.Headers[i].Value = []byte(v)
				continue loop
			} else if _, ok := seen[k]; ok {
				continue loop
			}
			// Key does not exist, append new header.
			r.Headers = append(r.Headers, kgo.RecordHeader{
				Key:   k,
				Value: []byte(v),
			})
			seen[k] = struct{}{}
		}

		// Key does not exist, append new header.
		r.Headers = append(r.Headers, kgo.RecordHeader{
			Key:   k,
			Value: []byte(strings.Join(v, ",")),
		})

		seen[k] = struct{}{}
	}
}
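Since the removed v4 lines and the surviving v3 lines are interleaved above, here is the v3-side setHeaders logic reassembled as a standalone sketch (assuming v3 metadata values are plain strings; illustrative only, not the committed code):

package kgo

import (
	"net/http"

	"github.com/twmb/franz-go/pkg/kgo"
)

// setHeadersSketch canonicalizes each metadata key, updates an existing
// record header in place, otherwise appends a new one, and writes each
// key at most once.
func setHeadersSketch(r *kgo.Record, md map[string]string) {
	seen := make(map[string]struct{})
loop:
	for k, v := range md {
		k = http.CanonicalHeaderKey(k)
		if _, ok := seen[k]; ok {
			continue loop
		}
		for i := 0; i < len(r.Headers); i++ {
			if r.Headers[i].Key == k {
				// Key exists: update the value in place.
				r.Headers[i].Value = []byte(v)
				seen[k] = struct{}{}
				continue loop
			}
		}
		// Key does not exist: append a new header.
		r.Headers = append(r.Headers, kgo.RecordHeader{Key: k, Value: []byte(v)})
		seen[k] = struct{}{}
	}
}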
48  event.go  Normal file
@@ -0,0 +1,48 @@
package kgo

import (
	"context"
	"sync"

	"go.unistack.org/micro/v3/broker"
)

type event struct {
	ctx   context.Context
	topic string
	err   error
	sync.RWMutex
	msg *broker.Message
	ack bool
}

func (p *event) Context() context.Context {
	return p.ctx
}

func (p *event) Topic() string {
	return p.topic
}

func (p *event) Message() *broker.Message {
	return p.msg
}

func (p *event) Ack() error {
	p.ack = true
	return nil
}

func (p *event) Error() error {
	return p.err
}

func (p *event) SetError(err error) {
	p.err = err
}

var eventPool = sync.Pool{
	New: func() interface{} {
		return &event{msg: &broker.Message{}}
	},
}
35  go.mod
@@ -1,23 +1,28 @@
module go.unistack.org/micro-broker-kgo/v4
module go.unistack.org/micro-broker-kgo/v3

go 1.23.8
go 1.22.7

toolchain go1.23.3

require (
	github.com/twmb/franz-go v1.19.5
	github.com/twmb/franz-go/pkg/kadm v1.16.0
	github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3
	github.com/twmb/franz-go/pkg/kmsg v1.11.2
	go.opentelemetry.io/otel v1.36.0
	go.unistack.org/micro/v4 v4.1.17
	github.com/google/uuid v1.6.0
	github.com/twmb/franz-go v1.18.0
	github.com/twmb/franz-go/pkg/kadm v1.14.0
	github.com/twmb/franz-go/pkg/kmsg v1.9.0
	go.opentelemetry.io/otel v1.33.0
	go.unistack.org/micro/v3 v3.11.22
)

require (
	github.com/google/uuid v1.6.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/matoous/go-nanoid v1.5.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/pierrec/lz4/v4 v4.1.22 // indirect
	github.com/spf13/cast v1.9.2 // indirect
	go.unistack.org/micro-proto/v4 v4.1.0 // indirect
	golang.org/x/crypto v0.39.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	go.unistack.org/micro-proto/v3 v3.4.1 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/net v0.32.0 // indirect
	golang.org/x/sys v0.28.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
	google.golang.org/grpc v1.69.2 // indirect
	google.golang.org/protobuf v1.36.0 // indirect
)
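Note on the module line change: under Go semantic import versioning, the /vN suffix on the module path must match the suffix in every import path, which is why the micro imports throughout the Go files below flip from /v4 to /v3 in lockstep with go.mod. A minimal illustration with a hypothetical module (not from this repo):

module example.org/mybroker/v3 // packages must then be imported as example.org/mybroker/v3/...

go 1.22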
74  go.sum
@@ -1,46 +1,48 @@
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4=
github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/twmb/franz-go v1.19.5 h1:W7+o8D0RsQsedqib71OVlLeZ0zI6CbFra7yTYhZTs5Y=
github.com/twmb/franz-go v1.19.5/go.mod h1:4kFJ5tmbbl7asgwAGVuyG1ZMx0NNpYk7EqflvWfPCpM=
github.com/twmb/franz-go/pkg/kadm v1.16.0 h1:STMs1t5lYR5mR974PSiwNzE5TvsosByTp+rKXLOhAjE=
github.com/twmb/franz-go/pkg/kadm v1.16.0/go.mod h1:MUdcUtnf9ph4SFBLLA/XxE29rvLhWYLM9Ygb8dfSCvw=
github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3 h1:p24opKWPySAy8xSl8NqRgOv7Q+bX7kdrQirBVRJzQfo=
github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3/go.mod h1:7uQs3Ae6HkWT1Y9elMbqtAcNFCI0y6+iS+Phw49L49U=
github.com/twmb/franz-go/pkg/kmsg v1.11.2 h1:hIw75FpwcAjgeyfIGFqivAvwC5uNIOWRGvQgZhH4mhg=
github.com/twmb/franz-go/pkg/kmsg v1.11.2/go.mod h1:CFfkkLysDNmukPYhGzuUcDtf46gQSqCZHMW1T4Z+wDE=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk=
go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec=
go.unistack.org/micro/v4 v4.1.17 h1:26QDtRSYVpozYuassyvLP4sEQRo3dxgD3sVILRXmIPo=
go.unistack.org/micro/v4 v4.1.17/go.mod h1:xleO2M5Yxh4s6I+RUcLrEpUjobefh+71ctrdIfn7TUs=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
github.com/twmb/franz-go v1.18.0 h1:25FjMZfdozBywVX+5xrWC2W+W76i0xykKjTdEeD2ejw=
github.com/twmb/franz-go v1.18.0/go.mod h1:zXCGy74M0p5FbXsLeASdyvfLFsBvTubVqctIaa5wQ+I=
github.com/twmb/franz-go/pkg/kadm v1.14.0 h1:nAn1co1lXzJQocpzyIyOFOjUBf4WHWs5/fTprXy2IZs=
github.com/twmb/franz-go/pkg/kadm v1.14.0/go.mod h1:XjOPz6ZaXXjrW2jVCfLuucP8H1w2TvD6y3PT2M+aAM4=
github.com/twmb/franz-go/pkg/kmsg v1.9.0 h1:JojYUph2TKAau6SBtErXpXGC7E3gg4vGZMv9xFU/B6M=
github.com/twmb/franz-go/pkg/kmsg v1.9.0/go.mod h1:CMbfazviCyY6HM0SXuG5t9vOwYDHRCSrJJyBAe5paqg=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
go.unistack.org/micro/v3 v3.11.0 h1:usQ+8wQuOWpQd4+DGhFXSgZ+e+wOBjuT3W5GJZ02bSs=
go.unistack.org/micro/v3 v3.11.0/go.mod h1:YzMldzHN9Ei+zy5t/Psu7RUWDZwUfrNYiStSQtTz90g=
go.unistack.org/micro/v3 v3.11.22 h1:VPtp/+rp/baKlNb6WVlx4ZzufYuwHrfABoftnDi1uek=
go.unistack.org/micro/v3 v3.11.22/go.mod h1:TjF2+KJ2RG+IB4d0wnXtaF5KgqwAqy/AMh+w9gDpRHg=
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
65  kadmtest.go  Normal file
@@ -0,0 +1,65 @@
//go:build ignore

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kversion"

	//"github.com/twmb/franz-go/pkg/sasl/scram"
	"github.com/twmb/franz-go/pkg/sasl/plain"
)

func die(msg string, args ...any) {
	fmt.Fprintf(os.Stderr, msg, args...)
	os.Exit(1)
}

func main() {
	seeds := []string{"vm-kafka-ump01tn.mbrd.ru:9092", "vm-kafka-ump02tn.mbrd.ru:9092", "vm-kafka-ump03tn.mbrd.ru:9092"}

	pass := "XXXXX"
	user := "XXXXX"

	var adminClient *kadm.Client
	{
		client, err := kgo.NewClient(
			kgo.SeedBrokers(seeds...),
			// kgo.SASL((scram.Auth{User: user, Pass: pass}).AsSha512Mechanism()),
			kgo.SASL((plain.Auth{User: user, Pass: pass}).AsMechanism()),

			// Do not try to send requests newer than 2.4.0 to avoid breaking changes in the request struct.
			// Sometimes there are breaking changes for newer versions where more properties are required to set.
			kgo.MaxVersions(kversion.V2_4_0()),
		)
		if err != nil {
			panic(err)
		}
		defer client.Close()

		adminClient = kadm.NewClient(client)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	dg, err := adminClient.DescribeGroups(ctx, "interestrate_loader")
	if err != nil {
		die("failed to describe group: %v", err)
	}

	for _, m := range dg["interestrate_loader"].Members {
		mc, _ := m.Assigned.AsConsumer()
		for _, mt := range mc.Topics {
			for _, p := range mt.Partitions {
				fmt.Printf("client:%s\tpartitions: %d\n", m.ClientID, p)
			}
		}
	}
}
372  kgo.go
@@ -5,37 +5,31 @@ import (
	"context"
	"errors"
	"fmt"
	"math/rand/v2"
	"math/rand"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/uuid"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v4/codec"
	"go.unistack.org/micro/v4/logger"
	"go.unistack.org/micro/v4/metadata"
	"go.unistack.org/micro/v4/options"
	"go.unistack.org/micro/v4/semconv"
	"go.unistack.org/micro/v4/tracer"
	"go.unistack.org/micro/v4/util/id"
	mrand "go.unistack.org/micro/v4/util/rand"
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/metadata"
	"go.unistack.org/micro/v3/semconv"
	"go.unistack.org/micro/v3/tracer"
	mrand "go.unistack.org/micro/v3/util/rand"
)

var _ broker.Broker = (*Broker)(nil)

var messagePool = sync.Pool{
	New: func() interface{} {
		return &kgoMessage{}
	},
}

var ErrLostMessage = errors.New("message not marked for offsets commit and will be lost in next iteration")

var DefaultRetryBackoffFn = func() func(int) time.Duration {
	var rngMu sync.Mutex
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return func(fails int) time.Duration {
		const (
			min = 100 * time.Millisecond
@@ -51,7 +45,7 @@ var DefaultRetryBackoffFn = func() func(int) time.Duration {
		backoff := min * time.Duration(1<<(fails-1))

		rngMu.Lock()
		jitter := 0.8 + 0.4*rand.Float64()
		jitter := 0.8 + 0.4*rng.Float64()
		rngMu.Unlock()

		backoff = time.Duration(float64(backoff) * jitter)
@@ -64,18 +58,13 @@ var DefaultRetryBackoffFn = func() func(int) time.Duration {
}()

type Broker struct {
	funcPublish   broker.FuncPublish
	funcSubscribe broker.FuncSubscribe
	c             *kgo.Client
	connected     *atomic.Uint32

	kopts []kgo.Opt
	subs  []*Subscriber

	init bool
	c         *kgo.Client
	kopts     []kgo.Opt
	connected *atomic.Uint32
	sync.RWMutex
	opts broker.Options

	mu   sync.RWMutex
	init bool
	subs []*Subscriber
}

func (r *Broker) Live() bool {
@@ -102,74 +91,6 @@ func (k *Broker) Client() *kgo.Client {
	return k.c
}

type kgoMessage struct {
	c     codec.Codec
	topic string
	ctx   context.Context
	body  []byte
	hdr   metadata.Metadata
	opts  broker.MessageOptions
	ack   bool
}

func (m *kgoMessage) Ack() error {
	m.ack = true
	return nil
}

func (m *kgoMessage) Body() []byte {
	return m.body
}

func (m *kgoMessage) Header() metadata.Metadata {
	return m.hdr
}

func (m *kgoMessage) Context() context.Context {
	return m.ctx
}

func (m *kgoMessage) Topic() string {
	return ""
}

func (m *kgoMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
	return m.c.Unmarshal(m.body, dst)
}

func (b *Broker) newCodec(ct string) (codec.Codec, error) {
	if idx := strings.IndexRune(ct, ';'); idx >= 0 {
		ct = ct[:idx]
	}
	b.mu.RLock()
	c, ok := b.opts.Codecs[ct]
	b.mu.RUnlock()
	if ok {
		return c, nil
	}
	return nil, codec.ErrUnknownContentType
}

func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
	options := broker.NewMessageOptions(opts...)
	if options.ContentType == "" {
		options.ContentType = b.opts.ContentType
	}

	m := &kgoMessage{ctx: ctx, hdr: hdr.Copy(), opts: options}
	c, err := b.newCodec(m.opts.ContentType)
	if err == nil {
		m.body, err = c.Marshal(body)
	}
	if err != nil {
		return nil, err
	}

	m.hdr.Set(metadata.HeaderContentType, m.opts.ContentType)

	return m, nil
}

func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *hookTracer, error) {
	var c *kgo.Client
	var err error
@@ -187,18 +108,10 @@ func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *ho
		}
	}

	var fatalOnError bool
	if k.opts.Context != nil {
		if v, ok := k.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
			fatalOnError = v
		}
	}

	htracer := &hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer}
	opts = append(opts,
		kgo.WithHooks(&hookMeter{meter: k.opts.Meter}),
		kgo.WithHooks(htracer),
		kgo.WithHooks(&hookEvent{log: k.opts.Logger, fatalOnError: fatalOnError, connected: k.connected}),
	)

	select {
@@ -220,9 +133,8 @@ func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *ho
			}
			return nil, nil, err
		}
		k.connected.Store(1)
		return c, htracer, nil
	}
	return c, htracer, nil
}

func (k *Broker) Connect(ctx context.Context) error {
@@ -240,10 +152,10 @@ func (k *Broker) Connect(ctx context.Context) error {
		return err
	}

	k.mu.Lock()
	k.Lock()
	k.c = c
	k.connected.Store(1)
	k.mu.Unlock()
	k.Unlock()

	return nil
}
@@ -261,8 +173,8 @@ func (k *Broker) Disconnect(ctx context.Context) error {
	ctx, span = k.opts.Tracer.Start(ctx, "Disconnect")
	defer span.Finish()

	k.mu.Lock()
	defer k.mu.Unlock()
	k.Lock()
	defer k.Unlock()
	select {
	case <-nctx.Done():
		return nctx.Err()
@@ -286,8 +198,8 @@ func (k *Broker) Disconnect(ctx context.Context) error {
}

func (k *Broker) Init(opts ...broker.Option) error {
	k.mu.Lock()
	defer k.mu.Unlock()
	k.Lock()
	defer k.Unlock()

	if len(opts) == 0 && k.init {
		return nil
@@ -316,18 +228,6 @@ func (k *Broker) Init(opts ...broker.Option) error {
		}
	}

	k.funcPublish = k.fnPublish
	k.funcSubscribe = k.fnSubscribe

	k.opts.Hooks.EachPrev(func(hook options.Hook) {
		switch h := hook.(type) {
		case broker.HookPublish:
			k.funcPublish = h(k.funcPublish)
		case broker.HookSubscribe:
			k.funcSubscribe = h(k.funcSubscribe)
		}
	})

	k.init = true

	return nil
@@ -337,92 +237,98 @@ func (k *Broker) Options() broker.Options {
	return k.opts
}

func (b *Broker) Publish(ctx context.Context, topic string, messages ...broker.Message) error {
	return b.funcPublish(ctx, topic, messages...)
func (k *Broker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
	return k.publish(ctx, msgs, opts...)
}

func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker.Message) error {
	return b.publish(ctx, topic, messages...)
func (k *Broker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
	msg.Header.Set(metadata.HeaderTopic, topic)
	return k.publish(ctx, []*broker.Message{msg}, opts...)
}

func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
	var records []*kgo.Record

	for _, msg := range messages {

		rec := &kgo.Record{
			Context: msg.Context(),
			Topic:   topic,
			Value:   msg.Body(),
func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
	k.Lock()
	if k.connected.Load() == 0 {
		c, _, err := k.connect(ctx, k.kopts...)
		if err != nil {
			k.Unlock()
			return err
		}
		k.c = c
		k.connected.Store(1)
	}
	k.Unlock()

	var promise func(*kgo.Record, error)
	if rec.Context != nil {
		if k, ok := rec.Context.Value(messageKey{}).([]byte); ok && k != nil {
			rec.Key = k
		}
		if p, ok := rec.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil {
			promise = p
		}
	options := broker.NewPublishOptions(opts...)
	records := make([]*kgo.Record, 0, len(msgs))
	var errs []string
	var err error
	var key []byte
	var promise func(*kgo.Record, error)

	if options.Context != nil {
		if k, ok := options.Context.Value(publishKey{}).([]byte); ok && k != nil {
			key = k
		}

		kmsg, ok := msg.(*kgoMessage)
		if !ok {
			continue
		}
		if kmsg.opts.Context != nil {
			if k, ok := kmsg.opts.Context.Value(messageKey{}).([]byte); ok && k != nil {
				rec.Key = k
			}
			if p, ok := kmsg.opts.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil {
				promise = p
			}
		}

		setHeaders(rec, msg.Header())

		if promise != nil {
			ts := time.Now()
			b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc()
			b.c.Produce(ctx, rec, func(r *kgo.Record, err error) {
				te := time.Since(ts)
				b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec()
				b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
				b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
				if err != nil {
					b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc()
				} else {
					b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc()
				}
				promise(r, err)
			})
			continue
		} else {
			records = append(records, rec)
		if p, ok := options.Context.Value(publishPromiseKey{}).(func(*kgo.Record, error)); ok && p != nil {
			promise = p
		}
	}

	if len(records) > 0 {
		var errs []string
		ts := time.Now()
		b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", topic, "topic", topic).Set(uint64(len(records)))
		results := b.c.ProduceSync(ctx, records...)
		te := time.Since(ts)
		for _, result := range results {
			b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
			b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
			b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec()
			if result.Err != nil {
				b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc()
				errs = append(errs, result.Err.Error())
			} else {
				b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc()
	for _, msg := range msgs {
		rec := &kgo.Record{Context: ctx, Key: key}
		rec.Topic, _ = msg.Header.Get(metadata.HeaderTopic)
		msg.Header.Del(metadata.HeaderTopic)
		k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc()
		if options.BodyOnly || k.opts.Codec.String() == "noop" {
			rec.Value = msg.Body
			for k, v := range msg.Header {
				rec.Headers = append(rec.Headers, kgo.RecordHeader{Key: http.CanonicalHeaderKey(k), Value: []byte(v)})
			}
		} else {
			rec.Value, err = k.opts.Codec.Marshal(msg)
			if err != nil {
				return err
			}
		}
		records = append(records, rec)
	}

	if len(errs) > 0 {
		return fmt.Errorf("publish error: %s", strings.Join(errs, "\n"))
	if promise != nil {
		ts := time.Now()
		for _, rec := range records {
			k.c.Produce(ctx, rec, func(r *kgo.Record, err error) {
				te := time.Since(ts)
				k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec()
				k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
				k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
				if err != nil {
					k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc()
				} else {
					k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc()
				}
				promise(r, err)
			})
		}
		return nil
	}
	ts := time.Now()
	results := k.c.ProduceSync(ctx, records...)
	te := time.Since(ts)
	for _, result := range results {
		k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
		k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
		k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec()
		if result.Err != nil {
			k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc()
			errs = append(errs, result.Err.Error())
		} else {
			k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc()
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("publish error: %s", strings.Join(errs, "\n"))
	}

	return nil
@@ -444,71 +350,38 @@ func (k *Broker) TopicExists(ctx context.Context, topic string) error {
	return nil
}

func (b *Broker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
	return b.funcSubscribe(ctx, topic, handler, opts...)
func (k *Broker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
	return nil, nil
}

func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
	if err := broker.IsValidHandler(handler); err != nil {
		return nil, err
	}

func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
	options := broker.NewSubscribeOptions(opts...)

	switch handler.(type) {
	default:
		return nil, broker.ErrInvalidHandler
	case func(broker.Message) error:
		break
	case func([]broker.Message) error:
		break
	}

	if options.Group == "" {
		uid, err := id.New()
		uid, err := uuid.NewRandom()
		if err != nil {
			return nil, err
		}
		options.Group = uid
		options.Group = uid.String()
	}

	commitInterval := DefaultCommitInterval
	if b.opts.Context != nil {
		if v, ok := b.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 {
	if k.opts.Context != nil {
		if v, ok := k.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 {
			commitInterval = v
		}
	}

	var messagePool bool
	var fatalOnError bool
	if b.opts.Context != nil {
		if v, ok := b.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
			fatalOnError = v
		}
		if v, ok := b.opts.Context.Value(subscribeMessagePoolKey{}).(bool); ok && v {
			messagePool = v
		}
	}

	if options.Context != nil {
		if v, ok := options.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
			fatalOnError = v
		}
	}

	sub := &Subscriber{
		topic:        topic,
		opts:         options,
		handler:      handler,
		kopts:        b.opts,
		consumers:    make(map[tp]*consumer),
		done:         make(chan struct{}),
		fatalOnError: fatalOnError,
		connected:    b.connected,
		messagePool:  messagePool,
		topic:     topic,
		opts:      options,
		handler:   handler,
		kopts:     k.opts,
		consumers: make(map[tp]*consumer),
		done:      make(chan struct{}),
	}

	kopts := append(b.kopts,
	kopts := append(k.kopts,
		kgo.ConsumerGroup(options.Group),
		kgo.ConsumeTopics(topic),
		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
@@ -516,9 +389,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
		kgo.AutoCommitInterval(commitInterval),
		kgo.OnPartitionsAssigned(sub.assigned),
		kgo.OnPartitionsRevoked(sub.revoked),
		kgo.StopProducerOnDataLossDetected(),
		kgo.OnPartitionsLost(sub.lost),
		kgo.AutoCommitCallback(sub.autocommit),
		kgo.AutoCommitMarks(),
	)

@@ -528,7 +399,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
		}
	}

	c, htracer, err := b.connect(ctx, kopts...)
	c, htracer, err := k.connect(ctx, kopts...)
	if err != nil {
		return nil, err
	}
@@ -550,10 +421,9 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac

	go sub.poll(ctx)

	b.mu.Lock()
	b.subs = append(b.subs, sub)
	b.mu.Unlock()

	k.Lock()
	k.subs = append(k.subs, sub)
	k.Unlock()
	return sub, nil
}

@@ -580,7 +450,7 @@ func NewBroker(opts ...broker.Option) *Broker {
		kgo.BlockRebalanceOnPoll(),
		kgo.Balancers(kgo.CooperativeStickyBalancer()),
		kgo.FetchIsolationLevel(kgo.ReadUncommitted()),
		kgo.UnknownTopicRetries(1),
		kgo.UnknownTopicRetries(0),
	}

	if options.Context != nil {
172  kgo_test.go
@@ -2,140 +2,39 @@ package kgo_test

import (
	"context"
	"fmt"
	"os"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/twmb/franz-go/pkg/kfake"
	kg "github.com/twmb/franz-go/pkg/kgo"
	kgo "go.unistack.org/micro-broker-kgo/v4"
	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v4/codec"
	"go.unistack.org/micro/v4/logger"
	"go.unistack.org/micro/v4/logger/slog"
	"go.unistack.org/micro/v4/metadata"
	kgo "go.unistack.org/micro-broker-kgo/v3"
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/metadata"
)

var (
	msgcnt   = int64(1200)
	msgcnt   = int64(12000000)
	group    = "38"
	prefill  = true
	loglevel = logger.ErrorLevel
	cluster  *kfake.Cluster
	prefill  = false
	loglevel = logger.InfoLevel
)

func TestMain(m *testing.M) {
	cluster = kfake.MustCluster(
		kfake.AllowAutoTopicCreation(),
	)
	defer cluster.Close()
	m.Run()
}

func TestFail(t *testing.T) {
	logger.DefaultLogger = slog.NewLogger()
	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()

	b := kgo.NewBroker(
		broker.ContentType("application/octet-stream"),
		broker.Codec("application/octet-stream", codec.NewCodec()),
		broker.Addrs(cluster.ListenAddrs()...),
		kgo.CommitInterval(5*time.Second),
		kgo.Options(
			kg.ClientID("test"),
			kg.FetchMaxBytes(10*1024*1024),
			kg.AllowAutoTopicCreation(),
		),
	)

	t.Logf("broker init")
	if err := b.Init(); err != nil {
		t.Fatal(err)
	}

	t.Logf("broker connect")
	if err := b.Connect(ctx); err != nil {
		t.Fatal(err)
	}

	defer func() {
		t.Logf("broker disconnect")
		if err := b.Disconnect(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	t.Logf("broker health %v", b.Health())
	msgs := make([]broker.Message, 0, msgcnt)
	for i := int64(0); i < msgcnt; i++ {
		m, err := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`))
		if err != nil {
			t.Fatal(err)
		}
		msgs = append(msgs, m)
	}

	go func() {
		for _, msg := range msgs {
			// t.Logf("broker publish")
			if err := b.Publish(ctx, "test.fail", msg); err != nil {
				t.Fatal(err)
			}
		}
	}()
	// t.Skip()

	idx := int64(0)
	fn := func(msg broker.Message) error {
		atomic.AddInt64(&idx, 1)
		time.Sleep(100 * time.Millisecond)
		// t.Logf("ack")
		return msg.Ack()
	}

	sub, err := b.Subscribe(ctx, "test.fail", fn,
		broker.SubscribeAutoAck(true),
		broker.SubscribeGroup(group),
		broker.SubscribeBodyOnly(true))
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := sub.Unsubscribe(ctx); err != nil {
			t.Fatal(err)
		}
	}()

	for {
		t.Logf("health check")
		if !b.Health() {
			t.Logf("health works")
			break
		}
		t.Logf("health sleep")
		time.Sleep(100 * time.Millisecond)
		if err := b.Disconnect(ctx); err != nil {
			t.Fatal(err)
		}
	}
var bm = &broker.Message{
	Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test"},
	Body:   []byte(`"body"`),
}

func TestConnect(t *testing.T) {
	var addrs []string
	ctx := context.TODO()
	b := kgo.NewBroker(
		broker.ContentType("application/octet-stream"),
		broker.Codec("application/octet-stream", codec.NewCodec()),
		broker.Addrs(cluster.ListenAddrs()...),
		broker.Addrs(addrs...),
		kgo.CommitInterval(5*time.Second),
		kgo.Options(
			kg.ClientID("test"),
			kg.FetchMaxBytes(10*1024*1024),
			kg.AllowAutoTopicCreation(),
		),
		kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)),
	)
	if err := b.Init(); err != nil {
		t.Fatal(err)
@@ -147,23 +46,27 @@ func TestConnect(t *testing.T) {
}

func TestPubSub(t *testing.T) {
	if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
		t.Skip()
	}

	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()

	b := kgo.NewBroker(
		broker.ContentType("application/octet-stream"),
		broker.Codec("application/octet-stream", codec.NewCodec()),
		broker.Addrs(cluster.ListenAddrs()...),
		kgo.CommitInterval(5*time.Second),
		kgo.Options(
			kg.ClientID("test"),
			kg.FetchMaxBytes(10*1024*1024),
			kg.AllowAutoTopicCreation(),
		),
	)
	var addrs []string
	if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
		addrs = []string{"127.0.0.1:29091", "127.0.0.2:29092", "127.0.0.3:29093"}
	} else {
		addrs = strings.Split(addr, ",")
	}

	b := kgo.NewBroker(
		broker.Addrs(addrs...),
		kgo.CommitInterval(5*time.Second),
		kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)),
	)
	if err := b.Init(); err != nil {
		t.Fatal(err)
	}
@@ -178,26 +81,25 @@ func TestPubSub(t *testing.T) {
		}
	}()
	if prefill {
		msgs := make([]broker.Message, 0, msgcnt)
		msgs := make([]*broker.Message, 0, msgcnt)
		for i := int64(0); i < msgcnt; i++ {
			m, _ := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`))
			msgs = append(msgs, m)
			msgs = append(msgs, bm)
		}

		if err := b.Publish(ctx, "test.pubsub", msgs...); err != nil {
		if err := b.BatchPublish(ctx, msgs); err != nil {
			t.Fatal(err)
		}
		// t.Skip()
	}
	done := make(chan bool, 1)
	idx := int64(0)
	fn := func(msg broker.Message) error {
	fn := func(msg broker.Event) error {
		atomic.AddInt64(&idx, 1)
		// time.Sleep(200 * time.Millisecond)
		return msg.Ack()
	}

	sub, err := b.Subscribe(ctx, "test.pubsub", fn,
	sub, err := b.Subscribe(ctx, "test", fn,
		broker.SubscribeAutoAck(true),
		broker.SubscribeGroup(group),
		broker.SubscribeBodyOnly(true))
@@ -222,7 +124,7 @@ func TestPubSub(t *testing.T) {
			if prc := atomic.LoadInt64(&idx); prc == msgcnt {
				close(done)
			} else {
				t.Logf("processed %v of %v\n", prc, msgcnt)
				fmt.Printf("processed %v\n", prc)
			}
		case <-ticker.C:
			close(done)
|
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/twmb/franz-go/pkg/kgo"
|
||||
"go.unistack.org/micro/v4/logger"
|
||||
"go.unistack.org/micro/v3/logger"
|
||||
)
|
||||
|
||||
type mlogger struct {
|
||||
|
4  meter.go
@@ -6,7 +6,7 @@ import (
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
	"go.unistack.org/micro/v4/meter"
	"go.unistack.org/micro/v3/meter"
)

type hookMeter struct {
@@ -61,7 +61,7 @@ const (
	labelTopic = "topic"
)

func (m *hookMeter) OnGroupManageError(_ error) {
func (m *hookMeter) OnGroupManageError(err error) {
	m.meter.Counter(metricBrokerGroupErrors).Inc()
}
45  options.go
@@ -5,7 +5,8 @@ import (
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/client"
)

var (
@@ -27,11 +28,16 @@ func SubscribeContext(ctx context.Context) broker.SubscribeOption {
	return broker.SetSubscribeOption(subscribeContextKey{}, ctx)
}

type messageKey struct{}
type publishKey struct{}

// MessageKey set the kafka message key (broker option)
func MessageKey(key []byte) broker.MessageOption {
	return broker.SetMessageOption(messageKey{}, key)
// PublishKey set the kafka message key (broker option)
func PublishKey(key []byte) broker.PublishOption {
	return broker.SetPublishOption(publishKey{}, key)
}

// ClientPublishKey set the kafka message key (client option)
func ClientPublishKey(key []byte) client.PublishOption {
	return client.SetPublishOption(publishKey{}, key)
}

type optionsKey struct{}
@@ -66,12 +72,6 @@ func SubscribeOptions(opts ...kgo.Opt) broker.SubscribeOption {
	}
}

type fatalOnErrorKey struct{}

func FatalOnError(b bool) broker.Option {
	return broker.SetOption(fatalOnErrorKey{}, b)
}

type clientIDKey struct{}

func ClientID(id string) broker.Option {
@@ -98,21 +98,14 @@ func SubscribeMaxInFlight(n int) broker.SubscribeOption {
	return broker.SetSubscribeOption(subscribeMaxInflightKey{}, n)
}

// SubscribeMaxInFlight max queued messages
func SubscribeFatalOnError(b bool) broker.SubscribeOption {
	return broker.SetSubscribeOption(fatalOnErrorKey{}, b)
type publishPromiseKey struct{}

// PublishPromise set the kafka promise func for Produce
func PublishPromise(fn func(*kgo.Record, error)) broker.PublishOption {
	return broker.SetPublishOption(publishPromiseKey{}, fn)
}

type messagePromiseKey struct{}

// MessagePromise set the kafka promise func for Produce
func MessagePromise(fn func(*kgo.Record, error)) broker.MessageOption {
	return broker.SetMessageOption(messagePromiseKey{}, fn)
}

type subscribeMessagePoolKey struct{}

// SubscribeMessagePool optionaly enabled/disable message pool
func SubscribeMessagePool(b bool) broker.SubscribeOption {
	return broker.SetSubscribeOption(subscribeMessagePoolKey{}, b)
// ClientPublishKey set the kafka message key (client option)
func ClientPublishPromise(fn func(*kgo.Record, error)) client.PublishOption {
	return client.SetPublishOption(publishPromiseKey{}, fn)
}
252  subscriber.go
@@ -5,17 +5,15 @@ import (
	"fmt"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v4/logger"
	"go.unistack.org/micro/v4/metadata"
	"go.unistack.org/micro/v4/semconv"
	"go.unistack.org/micro/v4/tracer"
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/metadata"
	"go.unistack.org/micro/v3/semconv"
	"go.unistack.org/micro/v3/tracer"
)

type tp struct {
@@ -24,34 +22,29 @@ type tp struct {
}

type consumer struct {
	topic       string
	c           *kgo.Client
	htracer     *hookTracer
	quit        chan struct{}
	done        chan struct{}
	recs        chan kgo.FetchTopicPartition
	kopts       broker.Options
	partition   int32
	opts        broker.SubscribeOptions
	handler     interface{}
	connected   *atomic.Uint32
	messagePool bool
	c         *kgo.Client
	topic     string
	partition int32
	htracer   *hookTracer
	opts      broker.SubscribeOptions
	kopts     broker.Options
	handler   broker.Handler
	quit      chan struct{}
	done      chan struct{}
	recs      chan kgo.FetchTopicPartition
}

type Subscriber struct {
	consumers    map[tp]*consumer
	c            *kgo.Client
	htracer      *hookTracer
	topic        string
	messagePool  bool
	handler      interface{}
	done         chan struct{}
	kopts        broker.Options
	opts         broker.SubscribeOptions
	connected    *atomic.Uint32
	mu           sync.RWMutex
	closed       bool
	fatalOnError bool
	c         *kgo.Client
	topic     string
	htracer   *hookTracer
	opts      broker.SubscribeOptions
	kopts     broker.Options
	handler   broker.Handler
	closed    bool
	done      chan struct{}
	consumers map[tp]*consumer
	sync.RWMutex
}

func (s *Subscriber) Client() *kgo.Client {
@@ -117,11 +110,11 @@ func (s *Subscriber) poll(ctx context.Context) {
				continue
			}

			s.mu.Lock()
			s.Lock()
			for p, l := range lmap {
				s.kopts.Meter.Counter(semconv.BrokerGroupLag, "topic", s.topic, "group", s.opts.Group, "partition", strconv.Itoa(int(p))).Set(uint64(l.Lag))
				s.kopts.Meter.Counter(semconv.BrokerGroupLag, "topic", s.topic, "group", s.opts.Group, "partition", strconv.Itoa(int(p)), "lag", strconv.Itoa(int(l.Lag)))
			}
			s.mu.Unlock()
			s.Unlock()

		}
	}
@@ -145,13 +138,8 @@ func (s *Subscriber) poll(ctx context.Context) {
			})

			fetches.EachPartition(func(p kgo.FetchTopicPartition) {
				tps := tp{p.Topic, p.Partition}
				s.mu.Lock()
				c := s.consumers[tps]
				s.mu.Unlock()
				if c != nil {
					c.recs <- p
				}
				tp := tp{p.Topic, p.Partition}
				s.consumers[tp].recs <- p
			})
			s.c.AllowRebalance()
		}
@@ -164,16 +152,9 @@ func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32)

	for topic, partitions := range lost {
		for _, partition := range partitions {
			tps := tp{topic, partition}
			s.mu.Lock()
			pc, ok := s.consumers[tps]
			s.mu.Unlock()
			if !ok {
				continue
			}
			s.mu.Lock()
			delete(s.consumers, tps)
			s.mu.Unlock()
			tp := tp{topic, partition}
			pc := s.consumers[tp]
			delete(s.consumers, tp)
			close(pc.quit)
			if s.kopts.Logger.V(logger.DebugLevel) {
				s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] waiting for work to finish topic %s partition %d", topic, partition))
@@ -184,21 +165,11 @@ func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32)
	}
}

func (s *Subscriber) autocommit(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, _ *kmsg.OffsetCommitResponse, err error) {
	if err != nil {
		// s.connected.Store(0)
		if s.fatalOnError {
			s.kopts.Logger.Fatal(context.TODO(), "kgo.AutoCommitCallback error", err)
		}
	}
}

func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
	if s.kopts.Logger.V(logger.ErrorLevel) {
		s.kopts.Logger.Error(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
	if s.kopts.Logger.V(logger.DebugLevel) {
		s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
	}
	s.killConsumers(ctx, lost)
	// s.connected.Store(0)
}

func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) {
@@ -208,7 +179,6 @@ func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[str
||||
s.killConsumers(ctx, revoked)
|
||||
if err := c.CommitMarkedOffsets(ctx); err != nil {
|
||||
s.kopts.Logger.Error(ctx, "[kgo] revoked CommitMarkedOffsets error", err)
|
||||
// s.connected.Store(0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -216,37 +186,36 @@ func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[str
|
||||
for topic, partitions := range assigned {
|
||||
for _, partition := range partitions {
|
||||
pc := &consumer{
|
||||
c: c,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
htracer: s.htracer,
|
||||
quit: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
recs: make(chan kgo.FetchTopicPartition, 100),
|
||||
handler: s.handler,
|
||||
messagePool: s.messagePool,
|
||||
kopts: s.kopts,
|
||||
opts: s.opts,
|
||||
connected: s.connected,
|
||||
c: c,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
htracer: s.htracer,
|
||||
quit: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
recs: make(chan kgo.FetchTopicPartition, 100),
|
||||
handler: s.handler,
|
||||
kopts: s.kopts,
|
||||
opts: s.opts,
|
||||
}
|
||||
s.mu.Lock()
|
||||
s.Lock()
|
||||
s.consumers[tp{topic, partition}] = pc
|
||||
s.mu.Unlock()
|
||||
s.Unlock()
|
||||
go pc.consume()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *consumer) consume() {
|
||||
var err error
|
||||
|
||||
defer close(pc.done)
|
||||
if pc.kopts.Logger.V(logger.DebugLevel) {
|
||||
pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("starting, topic %s partition %d", pc.topic, pc.partition))
|
||||
defer pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("killing, topic %s partition %d", pc.topic, pc.partition))
|
||||
}
|
||||
|
||||
var pm *kgoMessage
|
||||
eh := pc.kopts.ErrorHandler
|
||||
if pc.opts.ErrorHandler != nil {
|
||||
eh = pc.opts.ErrorHandler
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -257,59 +226,96 @@ func (pc *consumer) consume() {
|
||||
ctx, sp := pc.htracer.WithProcessSpan(record)
|
||||
ts := time.Now()
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Inc()
|
||||
|
||||
if pc.messagePool {
|
||||
pm = messagePool.Get().(*kgoMessage)
|
||||
} else {
|
||||
pm = &kgoMessage{}
|
||||
}
|
||||
pm.body = record.Value
|
||||
pm.topic = record.Topic
|
||||
pm.ack = false
|
||||
pm.hdr = metadata.New(len(record.Headers))
|
||||
pm.ctx = ctx
|
||||
p := eventPool.Get().(*event)
|
||||
p.msg.Header = nil
|
||||
p.msg.Body = nil
|
||||
p.topic = record.Topic
|
||||
p.err = nil
|
||||
p.ack = false
|
||||
p.msg.Header = metadata.New(len(record.Headers))
|
||||
p.ctx = ctx
|
||||
for _, hdr := range record.Headers {
|
||||
pm.hdr.Set(hdr.Key, string(hdr.Value))
|
||||
p.msg.Header.Set(hdr.Key, string(hdr.Value))
|
||||
}
|
||||
|
||||
switch h := pc.handler.(type) {
|
||||
case func(broker.Message) error:
|
||||
err = h(pm)
|
||||
case func([]broker.Message) error:
|
||||
err = h([]broker.Message{pm})
|
||||
}
|
||||
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
|
||||
if err != nil {
|
||||
if sp != nil {
|
||||
if pc.kopts.Codec.String() == "noop" {
|
||||
p.msg.Body = record.Value
|
||||
} else if pc.opts.BodyOnly {
|
||||
p.msg.Body = record.Value
|
||||
} else {
|
||||
sp.AddEvent("codec unmarshal start")
|
||||
err := pc.kopts.Codec.Unmarshal(record.Value, p.msg)
|
||||
sp.AddEvent("codec unmarshal stop")
|
||||
if err != nil {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
|
||||
p.err = err
|
||||
p.msg.Body = record.Value
|
||||
if eh != nil {
|
||||
_ = eh(p)
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
|
||||
if p.ack {
|
||||
pc.c.MarkCommitRecords(record)
|
||||
} else {
|
||||
eventPool.Put(p)
|
||||
pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?")
|
||||
return
|
||||
}
|
||||
eventPool.Put(p)
|
||||
te := time.Since(ts)
|
||||
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
continue
|
||||
} else {
|
||||
pc.kopts.Logger.Error(pc.kopts.Context, "[kgo]: unmarshal error", err)
|
||||
}
|
||||
te := time.Since(ts)
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
|
||||
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
eventPool.Put(p)
|
||||
pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] Unmarshal err not handled wtf?")
|
||||
sp.Finish()
|
||||
return
|
||||
}
|
||||
}
|
||||
sp.AddEvent("handler start")
|
||||
err := pc.handler(p)
|
||||
sp.AddEvent("handler stop")
|
||||
if err == nil {
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "success").Inc()
|
||||
} else {
|
||||
sp.SetStatus(tracer.SpanStatusError, err.Error())
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
|
||||
}
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
|
||||
if err == nil && pc.opts.AutoAck {
|
||||
p.ack = true
|
||||
} else if err != nil {
|
||||
p.err = err
|
||||
if eh != nil {
|
||||
sp.AddEvent("error handler start")
|
||||
_ = eh(p)
|
||||
sp.AddEvent("error handler stop")
|
||||
} else {
|
||||
if pc.kopts.Logger.V(logger.ErrorLevel) {
|
||||
pc.kopts.Logger.Error(pc.kopts.Context, "[kgo]: subscriber error", err)
|
||||
}
|
||||
}
|
||||
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
|
||||
} else if pc.opts.AutoAck {
|
||||
pm.ack = true
|
||||
}
|
||||
|
||||
te := time.Since(ts)
|
||||
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
|
||||
|
||||
ack := pm.ack
|
||||
if pc.messagePool {
|
||||
messagePool.Put(p)
|
||||
}
|
||||
if ack {
|
||||
if p.ack {
|
||||
eventPool.Put(p)
|
||||
pc.c.MarkCommitRecords(record)
|
||||
} else {
|
||||
if sp != nil {
|
||||
sp.Finish()
|
||||
}
|
||||
// pc.connected.Store(0)
|
||||
pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] message not commited")
|
||||
eventPool.Put(p)
|
||||
pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?")
|
||||
sp.SetStatus(tracer.SpanStatusError, "ErrLostMessage")
|
||||
sp.Finish()
|
||||
return
|
||||
}
|
||||
if sp != nil {
|
||||
sp.Finish()
|
||||
}
|
||||
sp.Finish()
|
||||
}
|
||||
}
|
||||
}
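The poll/killConsumers/assigned/consume flow above runs one worker goroutine per assigned (topic, partition), feeds it fetched records over a buffered channel, and tears it down with a quit/done handshake during rebalances. A dependency-free sketch of the same pattern (all names here are illustrative, not this package's API):

package main

import "fmt"

// tp keys a worker by topic and partition, mirroring the diff above.
type tp struct {
    t string
    p int32
}

// worker stands in for the per-partition consumer: records arrive on recs,
// quit asks it to stop, and done is closed once in-flight work is drained.
type worker struct {
    recs chan string // stand-in for kgo.FetchTopicPartition
    quit chan struct{}
    done chan struct{}
}

func (w *worker) run() {
    defer close(w.done) // signals killConsumers that work has drained
    for {
        select {
        case <-w.quit:
            return
        case rec := <-w.recs:
            fmt.Println("handle", rec)
        }
    }
}

// killConsumers mirrors the rebalance path: drop the worker from the map,
// close quit, then block on done so no record is processed after revocation.
func killConsumers(workers map[tp]*worker, lost map[string][]int32) {
    for topic, parts := range lost {
        for _, p := range parts {
            k := tp{topic, p}
            if w, ok := workers[k]; ok {
                delete(workers, k)
                close(w.quit)
                <-w.done
            }
        }
    }
}

func main() {
    workers := map[tp]*worker{}
    w := &worker{recs: make(chan string), quit: make(chan struct{}), done: make(chan struct{})}
    workers[tp{"orders", 0}] = w
    go w.run()
    w.recs <- "record-1"
    killConsumers(workers, map[string][]int32{"orders": {0}})
}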
29 tracer.go
@@ -6,14 +6,14 @@ import (

    "github.com/twmb/franz-go/pkg/kgo"
    semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
    "go.unistack.org/micro/v4/metadata"
    "go.unistack.org/micro/v4/tracer"
    "go.unistack.org/micro/v3/metadata"
    "go.unistack.org/micro/v3/tracer"
)

type hookTracer struct {
    tracer   tracer.Tracer
    clientID string
    group    string
    tracer   tracer.Tracer
}

var messagingSystem = semconv.MessagingSystemKey.String("kafka")
@@ -32,9 +32,6 @@ var (
// the record's context, so it can be ended in the OnProduceRecordUnbuffered
// hook.
func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
    if !m.tracer.Enabled() {
        return
    }
    // Set up span options.
    attrs := []interface{}{
        messagingSystem,
@@ -71,7 +68,7 @@ func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
        r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
    }

    setHeaders(r, omd, metadata.HeaderContentType)
    setHeaders(r, omd)
}

// OnProduceRecordUnbuffered continues and ends the "publish" span for an
@@ -80,9 +77,6 @@ func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
// It sets attributes with values unset when producing and records any error
// that occurred during the publish operation.
func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) {
    if !m.tracer.Enabled() {
        return
    }
    if span, ok := tracer.SpanFromContext(r.Context); ok {
        span.AddLabels(
            semconv.MessagingKafkaDestinationPartition(int(r.Partition)),
@@ -102,9 +96,6 @@ func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) {
// OnFetchRecordUnbuffered hook and can be used in downstream consumer
// processing.
func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
    if !m.tracer.Enabled() {
        return
    }
    // Set up the span options.
    attrs := []interface{}{
        messagingSystem,
@@ -144,15 +135,12 @@ func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
        r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
    }

    setHeaders(r, omd, metadata.HeaderContentType)
    setHeaders(r, omd)
}

// OnFetchRecordUnbuffered continues and ends the "receive" span for an
// unbuffered record.
func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) {
    if !m.tracer.Enabled() {
        return
    }
    span, _ := tracer.SpanFromContext(r.Context)
    span.Finish()
}
@@ -167,13 +155,6 @@ func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) {
// not a record which has been created for producing, so call this at the start of each
// iteration of your processing for the record.
func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Span) {
    if r.Context == nil {
        r.Context = context.Background()
    }

    if !m.tracer.Enabled() {
        return r.Context, nil
    }
    // Set up the span options.
    attrs := []interface{}{
        messagingSystem,
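For orientation: hookTracer plugs into franz-go's hook mechanism. A type implementing the kgo.Hook* interfaces (HookProduceRecordBuffered, HookProduceRecordUnbuffered, HookFetchRecordBuffered, HookFetchRecordUnbuffered) can be registered with kgo.WithHooks; since hookTracer is unexported, the broker does this wiring internally. A hedged in-package sketch, with placeholder seed address and field values:

import (
    "github.com/twmb/franz-go/pkg/kgo"
    "go.unistack.org/micro/v3/tracer"
)

// newTracedClient attaches the tracing hooks to a franz-go client.
// Everything here except kgo.NewClient/SeedBrokers/WithHooks is assumed.
func newTracedClient(t tracer.Tracer) (*kgo.Client, error) {
    return kgo.NewClient(
        kgo.SeedBrokers("localhost:9092"), // placeholder broker address
        kgo.WithHooks(&hookTracer{
            clientID: "example-client",
            group:    "example-group",
            tracer:   t,
        }),
    )
}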