Compare commits


1 Commit

Author: dependabot[bot]
SHA1: ab9af5852b
Message: Bump github.com/twmb/franz-go/pkg/kmsg from 1.3.0 to 1.4.0
Bumps [github.com/twmb/franz-go/pkg/kmsg](https://github.com/twmb/franz-go) from 1.3.0 to 1.4.0.
- [Release notes](https://github.com/twmb/franz-go/releases)
- [Changelog](https://github.com/twmb/franz-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/twmb/franz-go/compare/v1.3.0...v1.4.0)

---
updated-dependencies:
- dependency-name: github.com/twmb/franz-go/pkg/kmsg
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2023-02-27 12:19:08 +00:00
29 changed files with 928 additions and 2276 deletions

.github/dependabot.yml

@@ -0,0 +1,19 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
  # Maintain dependencies for Golang
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "daily"

.github/workflows/autoapprove.yml

@@ -0,0 +1,20 @@
name: "autoapprove"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
autoapprove:
runs-on: ubuntu-latest
steps:
- name: approve
uses: hmarr/auto-approve-action@v3
if: github.actor == 'vtolstov' || github.actor == 'dependabot[bot]'
id: approve
with:
github-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/automerge.yml

@@ -0,0 +1,21 @@
name: "automerge"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
automerge:
runs-on: ubuntu-latest
if: github.actor == 'vtolstov'
steps:
- name: merge
id: merge
run: gh pr merge --auto --merge "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.TOKEN}}

.github/workflows/build.yml

@@ -0,0 +1,47 @@
name: build
on:
  push:
    branches:
      - master
      - v3
jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: setup
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
      - name: checkout
        uses: actions/checkout@v3
      - name: cache
        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-go-
      - name: deps
        run: go get -v -t -d ./...
      - name: test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...
  lint:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: lint
        uses: golangci/golangci-lint-action@v3.4.0
        continue-on-error: true
        with:
          # Required: specify the golangci-lint version without the patch component; the latest patch release is always used.
          version: v1.30
          # Optional: working directory, useful for monorepos
          # working-directory: somedir
          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true

.github/workflows/codeql-analysis.yml

@@ -0,0 +1,78 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "codeql"
on:
workflow_run:
workflows: ["prbuild"]
types:
- completed
push:
branches: [ master, v3 ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master, v3 ]
schedule:
- cron: '34 1 * * 0'
jobs:
analyze:
name: analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: checkout
uses: actions/checkout@v3
- name: setup
uses: actions/setup-go@v3
with:
go-version: 1.17
# Initializes the CodeQL tools for scanning.
- name: init
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: analyze
uses: github/codeql-action/analyze@v2


@@ -0,0 +1,27 @@
name: "dependabot-automerge"
on:
pull_request_target:
types: [assigned, opened, synchronize, reopened]
permissions:
pull-requests: write
contents: write
jobs:
automerge:
runs-on: ubuntu-latest
if: github.actor == 'dependabot[bot]'
steps:
- name: metadata
id: metadata
uses: dependabot/fetch-metadata@v1.3.6
with:
github-token: "${{ secrets.TOKEN }}"
- name: merge
id: merge
if: ${{contains(steps.metadata.outputs.dependency-names, 'go.unistack.org')}}
run: gh pr merge --auto --merge "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.TOKEN}}


@@ -1,53 +0,0 @@
name: coverage
on:
  push:
    branches: [ main, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
  pull_request:
    branches: [ main, v3, v4 ]
jobs:
  build:
    if: github.server_url != 'https://github.com'
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: test coverage
        run: |
          go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./...
          go tool cover -func coverage.out -o coverage.out
      - name: coverage badge
        uses: tj-actions/coverage-badge-go@v2
        with:
          green: 80
          filename: coverage.out
      - uses: stefanzweifel/git-auto-commit-action@v4
        name: autocommit
        with:
          commit_message: Apply Code Coverage Badge
          skip_fetch: false
          skip_checkout: false
          file_pattern: ./README.md
      - name: push
        if: steps.auto-commit-action.outputs.changes_detected == 'true'
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ github.token }}
          branch: ${{ github.ref }}


@@ -1,29 +0,0 @@
name: lint
on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches: [ master, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup deps
        run: go get -v ./...
      - name: run lint
        uses: golangci/golangci-lint-action@v6
        with:
          version: 'latest'


@@ -1,94 +0,0 @@
name: sync
on:
  schedule:
    - cron: '*/5 * * * *'
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
jobs:
  sync:
    if: github.server_url != 'https://github.com'
    runs-on: ubuntu-latest
    steps:
      - name: init
        run: |
          git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
          git config --global user.name "github-actions[bot]"
          echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
          echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
      - name: check master
        id: check_master
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi
      - name: sync master
        if: steps.check_master.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream master
          git push upstream master --progress
          git push origin master --progress
          cd ../
          rm -rf repo
      - name: check v3
        id: check_v3
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi
      - name: sync v3
        if: steps.check_v3.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream v3
          git push upstream v3 --progress
          git push origin v3 --progress
          cd ../
          rm -rf repo
      - name: check v4
        id: check_v4
        run: |
          src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
          dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
          echo "src_hash=$src_hash"
          echo "dst_hash=$dst_hash"
          if [ "$src_hash" != "$dst_hash" ]; then
            echo "sync_needed=true" >> $GITHUB_OUTPUT
          else
            echo "sync_needed=false" >> $GITHUB_OUTPUT
          fi
      - name: sync v4
        if: steps.check_v4.outputs.sync_needed == 'true'
        run: |
          git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
          cd repo
          git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
          git pull --rebase upstream v4
          git push upstream v4 --progress
          git push origin v4 --progress
          cd ../
          rm -rf repo


@@ -1,31 +0,0 @@
name: test
on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches: [ master, v3, v4 ]
  push:
    branches: [ master, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup deps
        run: go get -v ./...
      - name: run test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...


@@ -1,50 +0,0 @@
name: test
on:
  pull_request:
    types: [opened, reopened, synchronize]
    branches: [ master, v3, v4 ]
  push:
    branches: [ master, v3, v4 ]
    paths-ignore:
      - '.github/**'
      - '.gitea/**'
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: checkout code
        uses: actions/checkout@v4
        with:
          filter: 'blob:none'
      - name: checkout tests
        uses: actions/checkout@v4
        with:
          ref: master
          filter: 'blob:none'
          repository: unistack-org/micro-tests
          path: micro-tests
      - name: setup go
        uses: actions/setup-go@v5
        with:
          cache-dependency-path: "**/*.sum"
          go-version: 'stable'
      - name: setup go work
        env:
          GOWORK: ${{ github.workspace }}/go.work
        run: |
          go work init
          go work use .
          go work use micro-tests
      - name: setup deps
        env:
          GOWORK: ${{ github.workspace }}/go.work
        run: go get -v ./...
      - name: run tests
        env:
          INTEGRATION_TESTS: yes
          GOWORK: ${{ github.workspace }}/go.work
        run: |
          cd micro-tests
          go test -mod readonly -v ./... || true

.github/workflows/pr.yml

@@ -0,0 +1,47 @@
name: prbuild
on:
  pull_request:
    branches:
      - master
      - v3
jobs:
  test:
    name: test
    runs-on: ubuntu-latest
    steps:
      - name: setup
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
      - name: checkout
        uses: actions/checkout@v3
      - name: cache
        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-go-
      - name: deps
        run: go get -v -t -d ./...
      - name: test
        env:
          INTEGRATION_TESTS: yes
        run: go test -mod readonly -v ./...
  lint:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v3
      - name: lint
        uses: golangci/golangci-lint-action@v3.4.0
        continue-on-error: true
        with:
          # Required: specify the golangci-lint version without the patch component; the latest patch release is always used.
          version: v1.30
          # Optional: working directory, useful for monorepos
          # working-directory: somedir
          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true

.gitignore

@@ -13,9 +13,3 @@
# Dependency directories (remove the comment below to include it)
# vendor/
# General
.DS_Store
.idea
.vscode
bin/


@@ -1,5 +0,0 @@
run:
  concurrency: 8
  timeout: 5m
  issues-exit-code: 1
  tests: true


@@ -1,2 +1,9 @@
# micro-broker-kgo
![Coverage](https://img.shields.io/badge/Coverage-63.1%25-yellow)
yet another micro kafka broker alternative
TODO:
* don't always append options from context on Init and New
* add SubscriberOptions(...kgo.Opt)
* add ServerSubscribeOptions(...kgo.Opt)
* check PublisherOptions(...kgo.Opt)
* check ClientPublisherOptions(...kgo.Opt)


@@ -1,93 +0,0 @@
package kgo
import (
"net/http"
"slices"
"strings"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/metadata"
)
// RecordCarrier injects and extracts traces from a kgo.Record.
//
// This type exists to satisfy the otel/propagation.TextMapCarrier interface.
type RecordCarrier struct {
record *kgo.Record
}
// NewRecordCarrier creates a new RecordCarrier.
func NewRecordCarrier(record *kgo.Record) RecordCarrier {
return RecordCarrier{record: record}
}
// Get retrieves a single value for a given key if it exists.
func (c RecordCarrier) Get(key string) string {
for _, h := range c.record.Headers {
if h.Key == key {
return string(h.Value)
}
}
return ""
}
// Set sets a header.
func (c RecordCarrier) Set(key, val string) {
// Check if key already exists.
for i, h := range c.record.Headers {
if h.Key == key {
// Key exist, update the value.
c.record.Headers[i].Value = []byte(val)
return
}
}
// Key does not exist, append new header.
c.record.Headers = append(c.record.Headers, kgo.RecordHeader{
Key: key,
Value: []byte(val),
})
}
// Keys returns a slice of all key identifiers in the carrier.
func (c RecordCarrier) Keys() []string {
out := make([]string, len(c.record.Headers))
for i, h := range c.record.Headers {
out[i] = h.Key
}
return out
}
func setHeaders(r *kgo.Record, md metadata.Metadata, exclude ...string) {
seen := make(map[string]struct{})
loop:
for k, v := range md {
k = http.CanonicalHeaderKey(k)
if _, ok := seen[k]; ok {
continue loop
}
if slices.ContainsFunc(exclude, func(s string) bool {
return strings.EqualFold(s, k)
}) {
continue loop
}
for i := 0; i < len(r.Headers); i++ {
if strings.EqualFold(r.Headers[i].Key, k) {
// Key exist, update the value.
r.Headers[i].Value = []byte(strings.Join(v, ","))
continue loop
}
}
// Key does not exist, append new header.
r.Headers = append(r.Headers, kgo.RecordHeader{
Key: k,
Value: []byte(strings.Join(v, ",")),
})
seen[k] = struct{}{}
}
}
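A minimal usage sketch (illustrative, not part of this diff; the helper names are hypothetical and it assumes go.opentelemetry.io/otel/propagation). RecordCarrier satisfies otel's propagation.TextMapCarrier, so a propagator can inject the current trace context into a record's headers before producing and extract it again on the consumer side:

package kgo

import (
	"context"

	"github.com/twmb/franz-go/pkg/kgo"
	"go.opentelemetry.io/otel/propagation"
)

// injectTraceContext writes the W3C traceparent header into the record.
func injectTraceContext(ctx context.Context, rec *kgo.Record) {
	propagation.TraceContext{}.Inject(ctx, NewRecordCarrier(rec))
}

// extractTraceContext returns a context carrying any span context found in the record headers.
func extractTraceContext(ctx context.Context, rec *kgo.Record) context.Context {
	return propagation.TraceContext{}.Extract(ctx, NewRecordCarrier(rec))
}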


@@ -1,10 +0,0 @@
package kgo
import (
"context"
"errors"
)
func isContextError(err error) bool {
return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
}

go.mod

@@ -1,27 +1,14 @@
module go.unistack.org/micro-broker-kgo/v4
module go.unistack.org/micro-broker-kgo/v3
go 1.23.8
go 1.17
require (
github.com/stretchr/testify v1.10.0
github.com/twmb/franz-go v1.19.5
github.com/twmb/franz-go/pkg/kadm v1.16.0
github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3
github.com/twmb/franz-go/pkg/kmsg v1.11.2
go.opentelemetry.io/otel v1.36.0
go.unistack.org/micro/v4 v4.1.17
github.com/twmb/franz-go v1.11.5
github.com/twmb/franz-go/pkg/kmsg v1.4.0
go.unistack.org/micro/v3 v3.10.14
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/matoous/go-nanoid v1.5.1 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/spf13/cast v1.9.2 // indirect
go.unistack.org/micro-proto/v4 v4.1.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
)

go.sum

@@ -1,49 +1,25 @@
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4=
github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/twmb/franz-go v1.19.5 h1:W7+o8D0RsQsedqib71OVlLeZ0zI6CbFra7yTYhZTs5Y=
github.com/twmb/franz-go v1.19.5/go.mod h1:4kFJ5tmbbl7asgwAGVuyG1ZMx0NNpYk7EqflvWfPCpM=
github.com/twmb/franz-go/pkg/kadm v1.16.0 h1:STMs1t5lYR5mR974PSiwNzE5TvsosByTp+rKXLOhAjE=
github.com/twmb/franz-go/pkg/kadm v1.16.0/go.mod h1:MUdcUtnf9ph4SFBLLA/XxE29rvLhWYLM9Ygb8dfSCvw=
github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3 h1:p24opKWPySAy8xSl8NqRgOv7Q+bX7kdrQirBVRJzQfo=
github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3/go.mod h1:7uQs3Ae6HkWT1Y9elMbqtAcNFCI0y6+iS+Phw49L49U=
github.com/twmb/franz-go/pkg/kmsg v1.11.2 h1:hIw75FpwcAjgeyfIGFqivAvwC5uNIOWRGvQgZhH4mhg=
github.com/twmb/franz-go/pkg/kmsg v1.11.2/go.mod h1:CFfkkLysDNmukPYhGzuUcDtf46gQSqCZHMW1T4Z+wDE=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk=
go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec=
go.unistack.org/micro/v4 v4.1.17 h1:26QDtRSYVpozYuassyvLP4sEQRo3dxgD3sVILRXmIPo=
go.unistack.org/micro/v4 v4.1.17/go.mod h1:xleO2M5Yxh4s6I+RUcLrEpUjobefh+71ctrdIfn7TUs=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/silas/dag v0.0.0-20211117232152-9d50aa809f35/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/twmb/franz-go v1.11.5 h1:TTv5lVJd+87XkmP9dWN9Jgpf7IUUr7a7jee+byR8LBE=
github.com/twmb/franz-go v1.11.5/go.mod h1:FvaHNlpT6woVYIl6LAuIeL7yHol1Fp6Gv2Dn21AvH78=
github.com/twmb/franz-go/pkg/kmsg v1.3.0/go.mod h1:SxG/xJKhgPu25SamAq0rrucfp7lbzCpEXOC+vH/ELrY=
github.com/twmb/franz-go/pkg/kmsg v1.4.0 h1:tbp9hxU6m8qZhQTlpGiaIJOm4BXix5lsuEZ7K00dF0s=
github.com/twmb/franz-go/pkg/kmsg v1.4.0/go.mod h1:SxG/xJKhgPu25SamAq0rrucfp7lbzCpEXOC+vH/ELrY=
go.unistack.org/micro/v3 v3.10.14 h1:7fgLpwGlCN67twhwtngJDEQvrMkUBDSA5vzZqxIDqNE=
go.unistack.org/micro/v3 v3.10.14/go.mod h1:uMAc0U/x7dmtICCrblGf0ZLgYegu3VwQAquu+OFCw1Q=
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -1,108 +0,0 @@
package kgo
import (
"context"
"net"
"sync/atomic"
"time"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/logger"
)
type hookEvent struct {
log logger.Logger
fatalOnError bool
connected *atomic.Uint32
}
var (
_ kgo.HookBrokerConnect = &hookEvent{}
_ kgo.HookBrokerDisconnect = &hookEvent{}
_ kgo.HookBrokerRead = &hookEvent{}
_ kgo.HookBrokerWrite = &hookEvent{}
_ kgo.HookGroupManageError = &hookEvent{}
_ kgo.HookProduceRecordUnbuffered = &hookEvent{}
)
func (m *hookEvent) OnGroupManageError(err error) {
switch {
case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err):
return
default:
ctx := context.TODO()
logMsg := "kgo.OnGroupManageError"
if m.fatalOnError {
m.log.Fatal(ctx, logMsg, err)
} else {
m.log.Error(ctx, logMsg, err)
}
}
}
func (m *hookEvent) OnBrokerConnect(_ kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
switch {
case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err):
return
default:
ctx := context.TODO()
logMsg := "kgo.OnBrokerConnect"
if m.fatalOnError {
m.log.Fatal(ctx, logMsg, err)
} else {
m.log.Error(ctx, logMsg, err)
}
}
}
func (m *hookEvent) OnBrokerDisconnect(_ kgo.BrokerMetadata, _ net.Conn) {}
func (m *hookEvent) OnBrokerWrite(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
switch {
case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err):
return
default:
ctx := context.TODO()
logMsg := "kgo.OnBrokerWrite"
if m.fatalOnError {
m.log.Fatal(ctx, logMsg, err)
} else {
m.log.Error(ctx, logMsg, err)
}
}
}
func (m *hookEvent) OnBrokerRead(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
switch {
case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err):
return
default:
ctx := context.TODO()
logMsg := "kgo.OnBrokerRead"
if m.fatalOnError {
m.log.Fatal(ctx, logMsg, err)
} else {
m.log.Error(ctx, logMsg, err)
}
}
}
func (m *hookEvent) OnProduceRecordUnbuffered(_ *kgo.Record, err error) {
switch {
case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err):
return
default:
ctx := context.TODO()
logMsg := "kgo.OnProduceRecordUnbuffered"
if m.fatalOnError {
m.log.Fatal(ctx, logMsg, err)
} else {
m.log.Error(ctx, logMsg, err)
}
}
}
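A minimal wiring sketch (illustrative, not part of this diff; the seed address is an assumption). These hooks attach to a franz-go client through kgo.WithHooks, which is how Broker.connect in kgo.go registers them:

// As if written inside this package, reusing its imports.
func newClientWithEventHooks(log logger.Logger) (*kgo.Client, error) {
	connected := &atomic.Uint32{}
	return kgo.NewClient(
		kgo.SeedBrokers("127.0.0.1:9092"), // assumed local broker address
		kgo.WithHooks(&hookEvent{log: log, fatalOnError: false, connected: connected}),
	)
}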


@@ -1,471 +0,0 @@
package kgo
import (
"context"
"errors"
"io"
"net"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/logger"
)
func TestHookEvent_OnGroupManageError(t *testing.T) {
tests := []struct {
name string
inputErr error
fatalOnError bool
expectedErrorIsCalled bool
expectedErrorMsg string
expectedFatalIsCalled bool
expectedFatalMsg string
}{
{
name: "error is nil",
inputErr: nil,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context canceled",
inputErr: context.Canceled,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context deadline exceeded",
inputErr: context.DeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: deadline exceeded (os package)",
inputErr: os.ErrDeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: EOF (io package)",
inputErr: io.EOF,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: closed network connection (net package)",
inputErr: net.ErrClosed,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "some error (non-fatal)",
inputErr: errors.New("some error"),
fatalOnError: false,
expectedErrorIsCalled: true,
expectedErrorMsg: "kgo.OnGroupManageError",
expectedFatalIsCalled: false,
},
{
name: "some error (fatal)",
inputErr: errors.New("some error"),
fatalOnError: true,
expectedErrorIsCalled: false,
expectedFatalIsCalled: true,
expectedFatalMsg: "kgo.OnGroupManageError",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
log := &mockLogger{}
he := &hookEvent{log: log, fatalOnError: tt.fatalOnError}
he.OnGroupManageError(tt.inputErr)
require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled)
require.Equal(t, tt.expectedErrorMsg, log.errorMsg)
require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled)
require.Equal(t, tt.expectedFatalMsg, log.fatalMsg)
})
}
}
func TestHookEvent_OnBrokerConnect(t *testing.T) {
tests := []struct {
name string
inputErr error
fatalOnError bool
expectedErrorIsCalled bool
expectedErrorMsg string
expectedFatalIsCalled bool
expectedFatalMsg string
}{
{
name: "error is nil",
inputErr: nil,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context canceled",
inputErr: context.Canceled,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context deadline exceeded",
inputErr: context.DeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: deadline exceeded (os package)",
inputErr: os.ErrDeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: EOF (io package)",
inputErr: io.EOF,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: closed network connection (net package)",
inputErr: net.ErrClosed,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "some error (non-fatal)",
inputErr: errors.New("some error"),
fatalOnError: false,
expectedErrorIsCalled: true,
expectedErrorMsg: "kgo.OnBrokerConnect",
expectedFatalIsCalled: false,
},
{
name: "some error (fatal)",
inputErr: errors.New("some error"),
fatalOnError: true,
expectedErrorIsCalled: false,
expectedFatalIsCalled: true,
expectedFatalMsg: "kgo.OnBrokerConnect",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
log := &mockLogger{}
he := &hookEvent{log: log, fatalOnError: tt.fatalOnError}
he.OnBrokerConnect(kgo.BrokerMetadata{}, 0, nil, tt.inputErr)
require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled)
require.Equal(t, tt.expectedErrorMsg, log.errorMsg)
require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled)
require.Equal(t, tt.expectedFatalMsg, log.fatalMsg)
})
}
}
func TestHookEvent_OnBrokerWrite(t *testing.T) {
tests := []struct {
name string
inputErr error
fatalOnError bool
expectedErrorIsCalled bool
expectedErrorMsg string
expectedFatalIsCalled bool
expectedFatalMsg string
}{
{
name: "error is nil",
inputErr: nil,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context canceled",
inputErr: context.Canceled,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context deadline exceeded",
inputErr: context.DeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: deadline exceeded (os package)",
inputErr: os.ErrDeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: EOF (io package)",
inputErr: io.EOF,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: closed network connection (net package)",
inputErr: net.ErrClosed,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "some error (non-fatal)",
inputErr: errors.New("some error"),
fatalOnError: false,
expectedErrorIsCalled: true,
expectedErrorMsg: "kgo.OnBrokerWrite",
expectedFatalIsCalled: false,
},
{
name: "some error (fatal)",
inputErr: errors.New("some error"),
fatalOnError: true,
expectedErrorIsCalled: false,
expectedFatalIsCalled: true,
expectedFatalMsg: "kgo.OnBrokerWrite",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
log := &mockLogger{}
he := &hookEvent{log: log, fatalOnError: tt.fatalOnError}
he.OnBrokerWrite(kgo.BrokerMetadata{}, 0, 0, 0, 0, tt.inputErr)
require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled)
require.Equal(t, tt.expectedErrorMsg, log.errorMsg)
require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled)
require.Equal(t, tt.expectedFatalMsg, log.fatalMsg)
})
}
}
func TestHookEvent_OnBrokerRead(t *testing.T) {
tests := []struct {
name string
inputErr error
fatalOnError bool
expectedErrorIsCalled bool
expectedErrorMsg string
expectedFatalIsCalled bool
expectedFatalMsg string
}{
{
name: "error is nil",
inputErr: nil,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context canceled",
inputErr: context.Canceled,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context deadline exceeded",
inputErr: context.DeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: deadline exceeded (os package)",
inputErr: os.ErrDeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: EOF (io package)",
inputErr: io.EOF,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: closed network connection (net package)",
inputErr: net.ErrClosed,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "some error (non-fatal)",
inputErr: errors.New("some error"),
fatalOnError: false,
expectedErrorIsCalled: true,
expectedErrorMsg: "kgo.OnBrokerRead",
expectedFatalIsCalled: false,
},
{
name: "some error (fatal)",
inputErr: errors.New("some error"),
fatalOnError: true,
expectedErrorIsCalled: false,
expectedFatalIsCalled: true,
expectedFatalMsg: "kgo.OnBrokerRead",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
log := &mockLogger{}
he := &hookEvent{log: log, fatalOnError: tt.fatalOnError}
he.OnBrokerRead(kgo.BrokerMetadata{}, 0, 0, 0, 0, tt.inputErr)
require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled)
require.Equal(t, tt.expectedErrorMsg, log.errorMsg)
require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled)
require.Equal(t, tt.expectedFatalMsg, log.fatalMsg)
})
}
}
func TestHookEvent_OnProduceRecordUnbuffered(t *testing.T) {
tests := []struct {
name string
inputErr error
fatalOnError bool
expectedErrorIsCalled bool
expectedErrorMsg string
expectedFatalIsCalled bool
expectedFatalMsg string
}{
{
name: "error is nil",
inputErr: nil,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context canceled",
inputErr: context.Canceled,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "context deadline exceeded",
inputErr: context.DeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: deadline exceeded (os package)",
inputErr: os.ErrDeadlineExceeded,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: EOF (io package)",
inputErr: io.EOF,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "retryable error: closed network connection (net package)",
inputErr: net.ErrClosed,
expectedErrorIsCalled: false,
expectedFatalIsCalled: false,
},
{
name: "some error (non-fatal)",
inputErr: errors.New("some error"),
fatalOnError: false,
expectedErrorIsCalled: true,
expectedErrorMsg: "kgo.OnProduceRecordUnbuffered",
expectedFatalIsCalled: false,
},
{
name: "some error (fatal)",
inputErr: errors.New("some error"),
fatalOnError: true,
expectedErrorIsCalled: false,
expectedFatalIsCalled: true,
expectedFatalMsg: "kgo.OnProduceRecordUnbuffered",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
log := &mockLogger{}
he := &hookEvent{log: log, fatalOnError: tt.fatalOnError}
he.OnProduceRecordUnbuffered(&kgo.Record{}, tt.inputErr)
require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled)
require.Equal(t, tt.expectedErrorMsg, log.errorMsg)
require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled)
require.Equal(t, tt.expectedFatalMsg, log.fatalMsg)
})
}
}
// Mocks
type mockLogger struct {
errorIsCalled bool
errorMsg string
fatalIsCalled bool
fatalMsg string
}
func (m *mockLogger) Init(...logger.Option) error {
panic("implement me")
}
func (m *mockLogger) Clone(...logger.Option) logger.Logger {
panic("implement me")
}
func (m *mockLogger) V(logger.Level) bool {
panic("implement me")
}
func (m *mockLogger) Level(logger.Level) {
panic("implement me")
}
func (m *mockLogger) Options() logger.Options {
panic("implement me")
}
func (m *mockLogger) Fields(...interface{}) logger.Logger {
panic("implement me")
}
func (m *mockLogger) Info(context.Context, string, ...interface{}) {
panic("implement me")
}
func (m *mockLogger) Trace(context.Context, string, ...interface{}) {
panic("implement me")
}
func (m *mockLogger) Debug(context.Context, string, ...interface{}) {
panic("implement me")
}
func (m *mockLogger) Warn(context.Context, string, ...interface{}) {
panic("implement me")
}
func (m *mockLogger) Error(ctx context.Context, msg string, args ...interface{}) {
m.errorIsCalled = true
m.errorMsg = msg
}
func (m *mockLogger) Fatal(ctx context.Context, msg string, args ...interface{}) {
m.fatalIsCalled = true
m.fatalMsg = msg
}
func (m *mockLogger) Log(context.Context, logger.Level, string, ...interface{}) {
panic("implement me")
}
func (m *mockLogger) Name() string {
panic("implement me")
}
func (m *mockLogger) String() string {
panic("implement me")
}

kgo.go

@@ -1,298 +1,204 @@
// Package kgo provides a kafka broker using kgo
package kgo
package kgo // import "go.unistack.org/micro-broker-kgo/v3"
import (
"context"
"errors"
"fmt"
"math/rand/v2"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kerr"
kgo "github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v4/util/id"
mrand "go.unistack.org/micro/v4/util/rand"
"github.com/twmb/franz-go/pkg/kversion"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/util/id"
mrand "go.unistack.org/micro/v3/util/rand"
)
var _ broker.Broker = (*Broker)(nil)
var messagePool = sync.Pool{
New: func() interface{} {
return &kgoMessage{}
},
}
var ErrLostMessage = errors.New("message not marked for offset commit and will be lost on the next iteration")
var DefaultRetryBackoffFn = func() func(int) time.Duration {
var rngMu sync.Mutex
return func(fails int) time.Duration {
const (
min = 100 * time.Millisecond
max = time.Second
)
if fails <= 0 {
return min
}
if fails > 10 {
return max
}
backoff := min * time.Duration(1<<(fails-1))
rngMu.Lock()
jitter := 0.8 + 0.4*rand.Float64()
rngMu.Unlock()
backoff = time.Duration(float64(backoff) * jitter)
if backoff > max {
return max
}
return backoff
}
}()
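// Illustrative note (not part of this diff): with min=100ms and max=1s the
// pre-jitter backoff doubles per consecutive failure (100ms, 200ms, 400ms,
// 800ms, 1.6s, ...); each value is then scaled by a random jitter factor in
// [0.8, 1.2] and capped at max, so from the fifth failure on the delay
// settles at 1s.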
type Broker struct {
funcPublish broker.FuncPublish
funcSubscribe broker.FuncSubscribe
c *kgo.Client
connected *atomic.Uint32
kopts []kgo.Opt
subs []*Subscriber
var _ broker.Broker = &kBroker{}
type kBroker struct {
writer *kgo.Client // used only to push messages
kopts []kgo.Opt
connected bool
init bool
sync.RWMutex
opts broker.Options
mu sync.RWMutex
init bool
subs []*subscriber
}
func (r *Broker) Live() bool {
return r.connected.Load() == 1
type subscriber struct {
reader *kgo.Client // used only to pull messages
topic string
opts broker.SubscribeOptions
kopts broker.Options
handler broker.Handler
batchhandler broker.BatchHandler
closed bool
done chan struct{}
consumers map[string]map[int32]worker
sync.RWMutex
}
func (r *Broker) Ready() bool {
return r.connected.Load() == 1
type publication struct {
topic string
err error
sync.RWMutex
msg *broker.Message
ack bool
}
func (r *Broker) Health() bool {
return r.connected.Load() == 1
func (p *publication) Topic() string {
return p.topic
}
func (k *Broker) Address() string {
func (p *publication) Message() *broker.Message {
return p.msg
}
func (p *publication) Ack() error {
p.ack = true
return nil
}
func (p *publication) Error() error {
return p.err
}
func (p *publication) SetError(err error) {
p.err = err
}
func (s *subscriber) Options() broker.SubscribeOptions {
return s.opts
}
func (s *subscriber) Topic() string {
return s.topic
}
func (s *subscriber) Unsubscribe(ctx context.Context) error {
if s.closed {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
default:
close(s.done)
s.closed = true
}
return nil
}
func (k *kBroker) Address() string {
return strings.Join(k.opts.Addrs, ",")
}
func (k *Broker) Name() string {
func (k *kBroker) Name() string {
return k.opts.Name
}
func (k *Broker) Client() *kgo.Client {
return k.c
}
type kgoMessage struct {
c codec.Codec
topic string
ctx context.Context
body []byte
hdr metadata.Metadata
opts broker.MessageOptions
ack bool
}
func (m *kgoMessage) Ack() error {
m.ack = true
return nil
}
func (m *kgoMessage) Body() []byte {
return m.body
}
func (m *kgoMessage) Header() metadata.Metadata {
return m.hdr
}
func (m *kgoMessage) Context() context.Context {
return m.ctx
}
func (m *kgoMessage) Topic() string {
return ""
}
func (m *kgoMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
func (b *Broker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
func (k *kBroker) Connect(ctx context.Context) error {
k.RLock()
if k.connected {
k.RUnlock()
return nil
}
b.mu.RLock()
c, ok := b.opts.Codecs[ct]
b.mu.RUnlock()
if ok {
return c, nil
}
return nil, codec.ErrUnknownContentType
}
k.RUnlock()
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
options := broker.NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
nctx := k.opts.Context
if ctx != nil {
nctx = ctx
}
m := &kgoMessage{ctx: ctx, hdr: hdr.Copy(), opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
m.body, err = c.Marshal(body)
}
if err != nil {
return nil, err
}
kaddrs := k.opts.Addrs
m.hdr.Set(metadata.HeaderContentType, m.opts.ContentType)
// shuffle addrs
var rng mrand.Rand
rng.Shuffle(len(kaddrs), func(i, j int) {
kaddrs[i], kaddrs[j] = kaddrs[j], kaddrs[i]
})
return m, nil
}
func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *hookTracer, error) {
var c *kgo.Client
var err error
sp, _ := tracer.SpanFromContext(ctx)
clientID := "kgo"
group := ""
if k.opts.Context != nil {
if id, ok := k.opts.Context.Value(clientIDKey{}).(string); ok {
clientID = id
}
if id, ok := k.opts.Context.Value(groupKey{}).(string); ok {
group = id
}
}
var fatalOnError bool
if k.opts.Context != nil {
if v, ok := k.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
fatalOnError = v
}
}
htracer := &hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer}
opts = append(opts,
kgo.WithHooks(&hookMeter{meter: k.opts.Meter}),
kgo.WithHooks(htracer),
kgo.WithHooks(&hookEvent{log: k.opts.Logger, fatalOnError: fatalOnError, connected: k.connected}),
)
kopts := append(k.kopts, kgo.SeedBrokers(kaddrs...))
select {
case <-ctx.Done():
if ctx.Err() != nil {
if sp != nil {
sp.SetStatus(tracer.SpanStatusError, ctx.Err().Error())
}
}
return nil, nil, ctx.Err()
case <-nctx.Done():
return nctx.Err()
default:
c, err = kgo.NewClient(opts...)
if err == nil {
err = c.Ping(ctx) // check connectivity to cluster
}
c, err := kgo.NewClient(kopts...)
if err != nil {
if sp != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
return nil, nil, err
return err
}
k.connected.Store(1)
return c, htracer, nil
}
}
func (k *Broker) Connect(ctx context.Context) error {
if k.connected.Load() == 1 {
return nil
}
// Request versions in order to guess Kafka Cluster version
versionsReq := kmsg.NewApiVersionsRequest()
versionsRes, err := versionsReq.RequestWith(ctx, c)
if err != nil {
return fmt.Errorf("failed to request api versions: %w", err)
}
err = kerr.ErrorForCode(versionsRes.ErrorCode)
if err != nil {
return fmt.Errorf("failed to request api versions. Inner kafka error: %w", err)
}
versions := kversion.FromApiVersionsResponse(versionsRes)
nctx := k.opts.Context
if ctx != nil {
nctx = ctx
}
if k.opts.Logger.V(logger.InfoLevel) {
logger.Infof(ctx, "[kgo] connected to kafka cluster version %v", versions.VersionGuess())
}
c, _, err := k.connect(nctx, k.kopts...)
if err != nil {
return err
k.Lock()
k.connected = true
k.writer = c
k.Unlock()
}
k.mu.Lock()
k.c = c
k.connected.Store(1)
k.mu.Unlock()
return nil
}
func (k *Broker) Disconnect(ctx context.Context) error {
if k.connected.Load() == 0 {
func (k *kBroker) Disconnect(ctx context.Context) error {
k.RLock()
if !k.connected {
k.RUnlock()
return nil
}
k.RUnlock()
k.Lock()
defer k.Unlock()
nctx := k.opts.Context
if ctx != nil {
nctx = ctx
}
var span tracer.Span
ctx, span = k.opts.Tracer.Start(ctx, "Disconnect")
defer span.Finish()
k.mu.Lock()
defer k.mu.Unlock()
select {
case <-nctx.Done():
return nctx.Err()
default:
for _, sub := range k.subs {
if sub.closed {
continue
}
if err := sub.Unsubscribe(ctx); err != nil {
return err
}
}
if k.c != nil {
k.c.CloseAllowingRebalance()
// k.c.Close()
}
k.writer.Close()
}
k.connected.Store(0)
k.connected = false
return nil
}
func (k *Broker) Init(opts ...broker.Option) error {
k.mu.Lock()
defer k.mu.Unlock()
func (k *kBroker) Init(opts ...broker.Option) error {
k.Lock()
defer k.Unlock()
if len(opts) == 0 && k.init {
return nil
}
for _, o := range opts {
o(&k.opts)
}
@@ -316,153 +222,126 @@ func (k *Broker) Init(opts ...broker.Option) error {
}
}
k.funcPublish = k.fnPublish
k.funcSubscribe = k.fnSubscribe
k.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case broker.HookPublish:
k.funcPublish = h(k.funcPublish)
case broker.HookSubscribe:
k.funcSubscribe = h(k.funcSubscribe)
}
})
// kgo.RecordPartitioner(),
k.init = true
return nil
}
func (k *Broker) Options() broker.Options {
func (k *kBroker) Options() broker.Options {
return k.opts
}
func (b *Broker) Publish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.funcPublish(ctx, topic, messages...)
func (k *kBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
return k.publish(ctx, msgs, opts...)
}
func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.publish(ctx, topic, messages...)
func (k *kBroker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
msg.Header.Set(metadata.HeaderTopic, topic)
return k.publish(ctx, []*broker.Message{msg}, opts...)
}
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
var records []*kgo.Record
func (k *kBroker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
k.RLock()
if !k.connected {
k.RUnlock()
return broker.ErrNotConnected
}
k.RUnlock()
options := broker.NewPublishOptions(opts...)
records := make([]*kgo.Record, 0, len(msgs))
var errs []string
var err error
var key []byte
for _, msg := range messages {
rec := &kgo.Record{
Context: msg.Context(),
Topic: topic,
Value: msg.Body(),
if options.Context != nil {
if k, ok := options.Context.Value(publishKey{}).([]byte); ok && k != nil {
key = k
}
}
var promise func(*kgo.Record, error)
if rec.Context != nil {
if k, ok := rec.Context.Value(messageKey{}).([]byte); ok && k != nil {
rec.Key = k
for _, msg := range msgs {
rec := &kgo.Record{Context: ctx, Key: key}
rec.Topic, _ = msg.Header.Get(metadata.HeaderTopic)
if k.opts.Codec.String() == "noop" {
rec.Value = msg.Body
for k, v := range msg.Header {
rec.Headers = append(rec.Headers, kgo.RecordHeader{Key: k, Value: []byte(v)})
}
if p, ok := rec.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil {
promise = p
}
}
kmsg, ok := msg.(*kgoMessage)
if !ok {
continue
}
if kmsg.opts.Context != nil {
if k, ok := kmsg.opts.Context.Value(messageKey{}).([]byte); ok && k != nil {
rec.Key = k
}
if p, ok := kmsg.opts.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil {
promise = p
}
}
setHeaders(rec, msg.Header())
if promise != nil {
ts := time.Now()
b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc()
b.c.Produce(ctx, rec, func(r *kgo.Record, err error) {
te := time.Since(ts)
b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec()
b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
if err != nil {
b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc()
} else {
b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc()
}
promise(r, err)
})
continue
} else if options.BodyOnly {
rec.Value = msg.Body
} else {
records = append(records, rec)
}
}
if len(records) > 0 {
var errs []string
ts := time.Now()
b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", topic, "topic", topic).Set(uint64(len(records)))
results := b.c.ProduceSync(ctx, records...)
te := time.Since(ts)
for _, result := range results {
b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec()
if result.Err != nil {
b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc()
errs = append(errs, result.Err.Error())
} else {
b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc()
rec.Value, err = k.opts.Codec.Marshal(msg)
if err != nil {
return err
}
}
records = append(records, rec)
}
if len(errs) > 0 {
return fmt.Errorf("publish error: %s", strings.Join(errs, "\n"))
results := k.writer.ProduceSync(ctx, records...)
for _, result := range results {
if result.Err != nil {
errs = append(errs, result.Err.Error())
}
}
return nil
}
func (k *Broker) TopicExists(ctx context.Context, topic string) error {
mdreq := kmsg.NewMetadataRequest()
mdreq.Topics = []kmsg.MetadataRequestTopic{
{Topic: &topic},
}
mdrsp, err := mdreq.RequestWith(ctx, k.c)
if err != nil {
return err
} else if mdrsp.Topics[0].ErrorCode != 0 {
return fmt.Errorf("topic %s not exists or permission error", topic)
if len(errs) > 0 {
return fmt.Errorf("publish error: %s", strings.Join(errs, "\n"))
}
return nil
}
func (b *Broker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return b.funcSubscribe(ctx, topic, handler, opts...)
type mlogger struct {
l logger.Logger
ctx context.Context
}
func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
if err := broker.IsValidHandler(handler); err != nil {
return nil, err
}
options := broker.NewSubscribeOptions(opts...)
switch handler.(type) {
func (l *mlogger) Log(lvl kgo.LogLevel, msg string, args ...interface{}) {
var mlvl logger.Level
switch lvl {
case kgo.LogLevelNone:
return
case kgo.LogLevelError:
mlvl = logger.ErrorLevel
case kgo.LogLevelWarn:
mlvl = logger.WarnLevel
case kgo.LogLevelInfo:
mlvl = logger.InfoLevel
case kgo.LogLevelDebug:
mlvl = logger.DebugLevel
default:
return nil, broker.ErrInvalidHandler
case func(broker.Message) error:
break
case func([]broker.Message) error:
break
return
}
fields := make(map[string]interface{}, len(args)/2)
for i := 0; i+1 < len(args); i += 2 {
fields[fmt.Sprintf("%v", args[i])] = args[i+1]
}
l.l.Fields(fields).Log(l.ctx, mlvl, msg)
}
func (l *mlogger) Level() kgo.LogLevel {
switch l.l.Options().Level {
case logger.ErrorLevel:
return kgo.LogLevelError
case logger.WarnLevel:
return kgo.LogLevelWarn
case logger.InfoLevel:
return kgo.LogLevelInfo
case logger.DebugLevel, logger.TraceLevel:
return kgo.LogLevelDebug
}
return kgo.LogLevelNone
}
func (k *kBroker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}
func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
options := broker.NewSubscribeOptions(opts...)
if options.Group == "" {
uid, err := id.New()
@@ -472,115 +351,100 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
options.Group = uid
}
commitInterval := DefaultCommitInterval
if b.opts.Context != nil {
if v, ok := b.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 {
commitInterval = v
}
}
kaddrs := k.opts.Addrs
var messagePool bool
var fatalOnError bool
if b.opts.Context != nil {
if v, ok := b.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
fatalOnError = v
}
if v, ok := b.opts.Context.Value(subscribeMessagePoolKey{}).(bool); ok && v {
messagePool = v
}
}
if options.Context != nil {
if v, ok := options.Context.Value(fatalOnErrorKey{}).(bool); ok && v {
fatalOnError = v
}
}
sub := &Subscriber{
topic: topic,
opts: options,
handler: handler,
kopts: b.opts,
consumers: make(map[tp]*consumer),
done: make(chan struct{}),
fatalOnError: fatalOnError,
connected: b.connected,
messagePool: messagePool,
}
kopts := append(b.kopts,
kgo.ConsumerGroup(options.Group),
kgo.ConsumeTopics(topic),
kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
kgo.FetchMaxWait(1*time.Second),
kgo.AutoCommitInterval(commitInterval),
kgo.OnPartitionsAssigned(sub.assigned),
kgo.OnPartitionsRevoked(sub.revoked),
kgo.StopProducerOnDataLossDetected(),
kgo.OnPartitionsLost(sub.lost),
kgo.AutoCommitCallback(sub.autocommit),
kgo.AutoCommitMarks(),
)
if options.Context != nil {
if v, ok := options.Context.Value(optionsKey{}).([]kgo.Opt); ok && len(v) > 0 {
kopts = append(kopts, v...)
}
}
c, htracer, err := b.connect(ctx, kopts...)
if err != nil {
return nil, err
}
mdreq := kmsg.NewMetadataRequest()
mdreq.Topics = []kmsg.MetadataRequestTopic{
{Topic: &topic},
}
mdrsp, err := mdreq.RequestWith(ctx, c)
if err != nil {
return nil, err
} else if mdrsp.Topics[0].ErrorCode != 0 {
return nil, fmt.Errorf("topic %s not exists or permission error", topic)
}
sub.c = c
sub.htracer = htracer
go sub.poll(ctx)
b.mu.Lock()
b.subs = append(b.subs, sub)
b.mu.Unlock()
return sub, nil
}
func (k *Broker) String() string {
return "kgo"
}
func NewBroker(opts ...broker.Option) *Broker {
options := broker.NewOptions(opts...)
kaddrs := options.Addrs
// shuffle addrs
var rng mrand.Rand
rng.Shuffle(len(kaddrs), func(i, j int) {
kaddrs[i], kaddrs[j] = kaddrs[j], kaddrs[i]
})
td := DefaultCommitInterval
if k.opts.Context != nil {
if v, ok := k.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 {
td = v
}
}
sub := &subscriber{
topic: topic,
done: make(chan struct{}),
opts: options,
handler: handler,
kopts: k.opts,
consumers: make(map[string]map[int32]worker),
}
kopts := append(k.kopts,
kgo.SeedBrokers(kaddrs...),
kgo.ConsumerGroup(options.Group),
kgo.ConsumeTopics(topic),
kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()),
kgo.FetchMaxWait(1*time.Second),
// kgo.KeepControlRecords(),
kgo.Balancers(kgo.CooperativeStickyBalancer(), kgo.StickyBalancer()),
kgo.FetchIsolationLevel(kgo.ReadUncommitted()),
kgo.WithHooks(&metrics{meter: k.opts.Meter}),
kgo.AutoCommitMarks(),
kgo.AutoCommitInterval(td),
kgo.OnPartitionsAssigned(sub.assigned),
kgo.OnPartitionsRevoked(sub.revoked),
kgo.OnPartitionsLost(sub.revoked),
)
reader, err := kgo.NewClient(kopts...)
if err != nil {
return nil, err
}
sub.reader = reader
go sub.run(ctx)
k.Lock()
k.subs = append(k.subs, sub)
k.Unlock()
return sub, nil
}
func (k *kBroker) String() string {
return "kgo"
}
func NewBroker(opts ...broker.Option) *kBroker {
options := broker.NewOptions(opts...)
if options.Codec.String() != "noop" {
options.Logger.Infof(options.Context, "broker codec is not noop, disabling plain kafka headers usage")
}
kopts := []kgo.Opt{
kgo.DialTimeout(3 * time.Second),
kgo.DisableIdempotentWrite(),
kgo.ProducerBatchCompression(kgo.NoCompression()),
kgo.WithLogger(&mlogger{l: options.Logger.Clone(logger.WithAddCallerSkipCount(2)), ctx: options.Context}),
kgo.SeedBrokers(kaddrs...),
kgo.RetryBackoffFn(DefaultRetryBackoffFn),
kgo.BlockRebalanceOnPoll(),
kgo.Balancers(kgo.CooperativeStickyBalancer()),
kgo.FetchIsolationLevel(kgo.ReadUncommitted()),
kgo.UnknownTopicRetries(1),
kgo.WithLogger(&mlogger{l: options.Logger, ctx: options.Context}),
kgo.RetryBackoffFn(
func() func(int) time.Duration {
rng := mrand.New(mrand.NewSource(time.Now().UnixNano()))
return func(fails int) time.Duration {
const (
min = 250 * time.Millisecond
max = 2 * time.Second
)
if fails <= 0 {
return min
}
if fails > 10 {
return max
}
backoff := min * time.Duration(1<<(fails-1))
jitter := 0.8 + 0.4*rng.Float64()
backoff = time.Duration(float64(backoff) * jitter)
if backoff > max {
return max
}
return backoff
}
}(),
),
}
if options.Context != nil {
@@ -589,9 +453,8 @@ func NewBroker(opts ...broker.Option) *Broker {
}
}
return &Broker{
connected: &atomic.Uint32{},
opts: options,
kopts: kopts,
return &kBroker{
opts: options,
kopts: kopts,
}
}
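Without jitter, the retry backoff above yields 250ms, 500ms, 1s, then the 2s cap from the fourth failure onward; the 0.8–1.2 jitter factor spreads simultaneous retries apart. A self-contained sketch of the same curve:

package main

import (
	"fmt"
	mrand "math/rand"
	"time"
)

// backoff mirrors the RetryBackoffFn above: exponential from 250ms,
// scaled by a jitter factor in [0.8, 1.2), capped at 2s.
func backoff(rng *mrand.Rand, fails int) time.Duration {
	const (
		minDelay = 250 * time.Millisecond
		maxDelay = 2 * time.Second
	)
	if fails <= 0 {
		return minDelay
	}
	if fails > 10 {
		return maxDelay
	}
	d := minDelay * time.Duration(1<<(fails-1))
	d = time.Duration(float64(d) * (0.8 + 0.4*rng.Float64()))
	if d > maxDelay {
		return maxDelay
	}
	return d
}

func main() {
	rng := mrand.New(mrand.NewSource(1))
	for fails := 1; fails <= 5; fails++ {
		fmt.Printf("fails=%d backoff=%v\n", fails, backoff(rng, fails))
	}
}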

View File

@@ -2,168 +2,54 @@ package kgo_test
import (
"context"
"fmt"
"os"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/twmb/franz-go/pkg/kfake"
kg "github.com/twmb/franz-go/pkg/kgo"
kgo "go.unistack.org/micro-broker-kgo/v4"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/logger/slog"
"go.unistack.org/micro/v4/metadata"
kgo "go.unistack.org/micro-broker-kgo/v3"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
)
var (
msgcnt = int64(1200)
msgcnt = int64(12000000)
group = "38"
prefill = true
loglevel = logger.ErrorLevel
cluster *kfake.Cluster
prefill = false
loglevel = logger.InfoLevel
)
func TestMain(m *testing.M) {
cluster = kfake.MustCluster(
kfake.AllowAutoTopicCreation(),
)
defer cluster.Close()
m.Run()
}
func TestFail(t *testing.T) {
logger.DefaultLogger = slog.NewLogger()
if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
b := kgo.NewBroker(
broker.ContentType("application/octet-stream"),
broker.Codec("application/octet-stream", codec.NewCodec()),
broker.Addrs(cluster.ListenAddrs()...),
kgo.CommitInterval(5*time.Second),
kgo.Options(
kg.ClientID("test"),
kg.FetchMaxBytes(10*1024*1024),
kg.AllowAutoTopicCreation(),
),
)
t.Logf("broker init")
if err := b.Init(); err != nil {
t.Fatal(err)
}
t.Logf("broker connect")
if err := b.Connect(ctx); err != nil {
t.Fatal(err)
}
defer func() {
t.Logf("broker disconnect")
if err := b.Disconnect(ctx); err != nil {
t.Fatal(err)
}
}()
t.Logf("broker health %v", b.Health())
msgs := make([]broker.Message, 0, msgcnt)
for i := int64(0); i < msgcnt; i++ {
m, err := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`))
if err != nil {
t.Fatal(err)
}
msgs = append(msgs, m)
}
go func() {
for _, msg := range msgs {
// t.Logf("broker publish")
if err := b.Publish(ctx, "test.fail", msg); err != nil {
t.Fatal(err)
}
}
}()
// t.Skip()
idx := int64(0)
fn := func(msg broker.Message) error {
atomic.AddInt64(&idx, 1)
time.Sleep(100 * time.Millisecond)
// t.Logf("ack")
return msg.Ack()
}
sub, err := b.Subscribe(ctx, "test.fail", fn,
broker.SubscribeAutoAck(true),
broker.SubscribeGroup(group),
broker.SubscribeBodyOnly(true))
if err != nil {
t.Fatal(err)
}
defer func() {
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatal(err)
}
}()
for {
t.Logf("health check")
if !b.Health() {
t.Logf("health works")
break
}
t.Logf("health sleep")
time.Sleep(100 * time.Millisecond)
if err := b.Disconnect(ctx); err != nil {
t.Fatal(err)
}
}
}
func TestConnect(t *testing.T) {
ctx := context.TODO()
b := kgo.NewBroker(
broker.ContentType("application/octet-stream"),
broker.Codec("application/octet-stream", codec.NewCodec()),
broker.Addrs(cluster.ListenAddrs()...),
kgo.CommitInterval(5*time.Second),
kgo.Options(
kg.ClientID("test"),
kg.FetchMaxBytes(10*1024*1024),
kg.AllowAutoTopicCreation(),
),
)
if err := b.Init(); err != nil {
t.Fatal(err)
}
if err := b.Connect(ctx); err != nil {
t.Fatal(err)
}
var bm = &broker.Message{
Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test"},
Body: []byte(`"body"`),
}
func TestPubSub(t *testing.T) {
if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel), logger.WithCallerSkipCount(3)); err != nil {
t.Fatal(err)
}
ctx := context.Background()
b := kgo.NewBroker(
broker.ContentType("application/octet-stream"),
broker.Codec("application/octet-stream", codec.NewCodec()),
broker.Addrs(cluster.ListenAddrs()...),
kgo.CommitInterval(5*time.Second),
kgo.Options(
kg.ClientID("test"),
kg.FetchMaxBytes(10*1024*1024),
kg.AllowAutoTopicCreation(),
),
)
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"127.0.0.1:29091", "127.0.0.2:29092", "127.0.0.3:29093"}
} else {
addrs = strings.Split(addr, ",")
}
b := kgo.NewBroker(
broker.Addrs(addrs...),
kgo.CommitInterval(5*time.Second),
kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)),
)
if err := b.Init(); err != nil {
t.Fatal(err)
}
@@ -178,26 +64,25 @@ func TestPubSub(t *testing.T) {
}
}()
if prefill {
msgs := make([]broker.Message, 0, msgcnt)
msgs := make([]*broker.Message, 0, msgcnt)
for i := int64(0); i < msgcnt; i++ {
m, _ := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`))
msgs = append(msgs, m)
msgs = append(msgs, bm)
}
if err := b.Publish(ctx, "test.pubsub", msgs...); err != nil {
if err := b.BatchPublish(ctx, msgs); err != nil {
t.Fatal(err)
}
// t.Skip()
}
done := make(chan bool, 1)
idx := int64(0)
fn := func(msg broker.Message) error {
fn := func(msg broker.Event) error {
atomic.AddInt64(&idx, 1)
// time.Sleep(200 * time.Millisecond)
return msg.Ack()
}
sub, err := b.Subscribe(ctx, "test.pubsub", fn,
sub, err := b.Subscribe(ctx, "test", fn,
broker.SubscribeAutoAck(true),
broker.SubscribeGroup(group),
broker.SubscribeBodyOnly(true))
@@ -222,7 +107,7 @@ func TestPubSub(t *testing.T) {
if prc := atomic.LoadInt64(&idx); prc == msgcnt {
close(done)
} else {
t.Logf("processed %v of %v\n", prc, msgcnt)
fmt.Printf("processed %v\n", prc)
}
case <-ticker.C:
close(done)

View File

@@ -1,47 +0,0 @@
package kgo
import (
"context"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/logger"
)
type mlogger struct {
l logger.Logger
ctx context.Context
}
func (l *mlogger) Log(lvl kgo.LogLevel, msg string, args ...interface{}) {
var mlvl logger.Level
switch lvl {
case kgo.LogLevelNone:
return
case kgo.LogLevelError:
mlvl = logger.ErrorLevel
case kgo.LogLevelWarn:
mlvl = logger.WarnLevel
case kgo.LogLevelInfo:
mlvl = logger.InfoLevel
case kgo.LogLevelDebug:
mlvl = logger.DebugLevel
default:
return
}
l.l.Log(l.ctx, mlvl, msg, args...)
}
func (l *mlogger) Level() kgo.LogLevel {
switch l.l.Options().Level {
case logger.ErrorLevel:
return kgo.LogLevelError
case logger.WarnLevel:
return kgo.LogLevelWarn
case logger.InfoLevel:
return kgo.LogLevelInfo
case logger.DebugLevel, logger.TraceLevel:
return kgo.LogLevelDebug
}
return kgo.LogLevelNone
}
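This adapter satisfies franz-go's kgo.Logger interface, which pairs Level() (consulted to decide whether a message should be emitted at all) with Log(). A sketch of wiring it in from inside this package, assuming a configured micro logger l:

cl, err := kgo.NewClient(
	kgo.SeedBrokers("127.0.0.1:9092"),
	kgo.WithLogger(&mlogger{l: l, ctx: context.Background()}),
)
if err != nil {
	// handle client construction error
}
defer cl.Close()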

View File

@@ -6,29 +6,22 @@ import (
"time"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v3/meter"
)
type hookMeter struct {
type metrics struct {
meter meter.Meter
}
var (
_ kgo.HookBrokerConnect = &hookMeter{}
_ kgo.HookBrokerDisconnect = &hookMeter{}
// HookBrokerE2E
_ kgo.HookBrokerRead = &hookMeter{}
_ kgo.HookBrokerThrottle = &hookMeter{}
_ kgo.HookBrokerWrite = &hookMeter{}
_ kgo.HookFetchBatchRead = &hookMeter{}
// HookFetchRecordBuffered
// HookFetchRecordUnbuffered
_ kgo.HookGroupManageError = &hookMeter{}
// HookNewClient
_ kgo.HookProduceBatchWritten = &hookMeter{}
// HookProduceRecordBuffered
// HookProduceRecordPartitioned
// HookProduceRecordUnbuffered
_ kgo.HookBrokerConnect = &metrics{}
_ kgo.HookBrokerDisconnect = &metrics{}
_ kgo.HookBrokerRead = &metrics{}
_ kgo.HookBrokerThrottle = &metrics{}
_ kgo.HookBrokerWrite = &metrics{}
_ kgo.HookFetchBatchRead = &metrics{}
_ kgo.HookProduceBatchWritten = &metrics{}
_ kgo.HookGroupManageError = &metrics{}
)
const (
@@ -61,11 +54,11 @@ const (
labelTopic = "topic"
)
func (m *hookMeter) OnGroupManageError(_ error) {
func (m *metrics) OnGroupManageError(err error) {
m.meter.Counter(metricBrokerGroupErrors).Inc()
}
func (m *hookMeter) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
func (m *metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
node := strconv.Itoa(int(meta.NodeID))
if err != nil {
m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelFaulure).Inc()
@@ -74,12 +67,12 @@ func (m *hookMeter) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _
m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelSuccess).Inc()
}
func (m *hookMeter) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) {
func (m *metrics) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) {
node := strconv.Itoa(int(meta.NodeID))
m.meter.Counter(metricBrokerDisconnects, labelNode, node).Inc()
}
func (m *hookMeter) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) {
func (m *metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) {
node := strconv.Itoa(int(meta.NodeID))
if err != nil {
m.meter.Counter(metricBrokerWriteErrors, labelNode, node).Inc()
@@ -90,7 +83,7 @@ func (m *hookMeter) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten
m.meter.Histogram(metricBrokerWriteLatencies, labelNode, node).Update(timeToWrite.Seconds())
}
func (m *hookMeter) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) {
func (m *metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) {
node := strconv.Itoa(int(meta.NodeID))
if err != nil {
m.meter.Counter(metricBrokerReadErrors, labelNode, node).Inc()
@@ -102,18 +95,18 @@ func (m *hookMeter) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int
m.meter.Histogram(metricBrokerReadLatencies, labelNode, node).Update(timeToRead.Seconds())
}
func (m *hookMeter) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) {
func (m *metrics) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) {
node := strconv.Itoa(int(meta.NodeID))
m.meter.Histogram(metricBrokerThrottleLatencies, labelNode, node).Update(throttleInterval.Seconds())
}
func (m *hookMeter) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) {
func (m *metrics) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) {
node := strconv.Itoa(int(meta.NodeID))
m.meter.Counter(metricBrokerProduceBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes)
m.meter.Counter(metricBrokerProduceBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes)
}
func (m *hookMeter) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) {
func (m *metrics) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) {
node := strconv.Itoa(int(meta.NodeID))
m.meter.Counter(metricBrokerFetchBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes)
m.meter.Counter(metricBrokerFetchBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes)
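Hooks are attached at client construction; franz-go detects which Hook* interfaces the value implements and calls only those. A sketch from inside this package, assuming a meter.Meter m (the broker itself does this via kgo.WithHooks, as in the NewBroker diff above):

cl, err := kgo.NewClient(
	kgo.SeedBrokers("127.0.0.1:9092"),
	kgo.WithHooks(&hookMeter{meter: m}),
)
if err != nil {
	// handle client construction error
}
defer cl.Close()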

View File

@@ -4,21 +4,14 @@ import (
"context"
"time"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v4/broker"
kgo "github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/server"
)
var (
// DefaultCommitInterval specifies how often commit offsets are sent to kafka
DefaultCommitInterval = 5 * time.Second
// DefaultStatsInterval specifies how often consumer lag is checked
DefaultStatsInterval = 5 * time.Second
// DefaultSubscribeMaxInflight specifies how many messages are kept in flight
DefaultSubscribeMaxInflight = 100
)
// DefaultCommitInterval specifies how often commit offsets are sent to kafka
var DefaultCommitInterval = 5 * time.Second
type subscribeContextKey struct{}
@@ -27,11 +20,16 @@ func SubscribeContext(ctx context.Context) broker.SubscribeOption {
return broker.SetSubscribeOption(subscribeContextKey{}, ctx)
}
type messageKey struct{}
type publishKey struct{}
// MessageKey set the kafka message key (broker option)
func MessageKey(key []byte) broker.MessageOption {
return broker.SetMessageOption(messageKey{}, key)
// PublishKey set the kafka message key (broker option)
func PublishKey(key []byte) broker.PublishOption {
return broker.SetPublishOption(publishKey{}, key)
}
// ClientPublishKey set the kafka message key (client option)
func ClientPublishKey(key []byte) client.PublishOption {
return client.SetPublishOption(publishKey{}, key)
}
type optionsKey struct{}
@@ -51,7 +49,7 @@ func Options(opts ...kgo.Opt) broker.Option {
}
}
// SubscribeOptions pass additional options to broker in Subscribe
// SubscribeOptions pass additional options to broker
func SubscribeOptions(opts ...kgo.Opt) broker.SubscribeOption {
return func(o *broker.SubscribeOptions) {
if o.Context == nil {
@@ -66,22 +64,19 @@ func SubscribeOptions(opts ...kgo.Opt) broker.SubscribeOption {
}
}
type fatalOnErrorKey struct{}
func FatalOnError(b bool) broker.Option {
return broker.SetOption(fatalOnErrorKey{}, b)
}
type clientIDKey struct{}
func ClientID(id string) broker.Option {
return broker.SetOption(clientIDKey{}, id)
}
type groupKey struct{}
func Group(id string) broker.Option {
return broker.SetOption(groupKey{}, id)
// SubscriberOptions pass additional options to broker
func SubscriberOptions(opts ...kgo.Opt) server.SubscriberOption {
return func(o *server.SubscriberOptions) {
if o.Context == nil {
o.Context = context.Background()
}
options, ok := o.Context.Value(optionsKey{}).([]kgo.Opt)
if !ok {
options = make([]kgo.Opt, 0, len(opts))
}
options = append(options, opts...)
o.Context = context.WithValue(o.Context, optionsKey{}, options)
}
}
type commitIntervalKey struct{}
@@ -91,28 +86,11 @@ func CommitInterval(td time.Duration) broker.Option {
return broker.SetOption(commitIntervalKey{}, td)
}
var DefaultSubscribeMaxInflight = 1000
type subscribeMaxInflightKey struct{}
// SubscribeMaxInFlight max queued messages
func SubscribeMaxInFlight(n int) broker.SubscribeOption {
return broker.SetSubscribeOption(subscribeMaxInflightKey{}, n)
}
// SubscribeFatalOnError makes handler errors fatal for the subscriber
func SubscribeFatalOnError(b bool) broker.SubscribeOption {
return broker.SetSubscribeOption(fatalOnErrorKey{}, b)
}
type messagePromiseKey struct{}
// MessagePromise set the kafka promise func for Produce
func MessagePromise(fn func(*kgo.Record, error)) broker.MessageOption {
return broker.SetMessageOption(messagePromiseKey{}, fn)
}
type subscribeMessagePoolKey struct{}
// SubscribeMessagePool optionally enables/disables the message pool
func SubscribeMessagePool(b bool) broker.SubscribeOption {
return broker.SetSubscribeOption(subscribeMessagePoolKey{}, b)
}
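All of these options share one mechanism: the setter stores a value under an unexported key type in the options context, and the broker later reads it back with a type assertion. A minimal sketch of the round trip using the commit-interval key above (opt and opts here are illustrative names):

// setter side
opt := broker.SetOption(commitIntervalKey{}, 10*time.Second)

// reader side, as in Subscribe/NewBroker
if v, ok := opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 {
	commitInterval = v
}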

View File

@@ -1,320 +0,0 @@
package kgo
import (
"context"
"fmt"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/twmb/franz-go/pkg/kadm"
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
)
type tp struct {
t string
p int32
}
type consumer struct {
topic string
c *kgo.Client
htracer *hookTracer
quit chan struct{}
done chan struct{}
recs chan kgo.FetchTopicPartition
kopts broker.Options
partition int32
opts broker.SubscribeOptions
handler interface{}
connected *atomic.Uint32
messagePool bool
}
type Subscriber struct {
consumers map[tp]*consumer
c *kgo.Client
htracer *hookTracer
topic string
messagePool bool
handler interface{}
done chan struct{}
kopts broker.Options
opts broker.SubscribeOptions
connected *atomic.Uint32
mu sync.RWMutex
closed bool
fatalOnError bool
}
func (s *Subscriber) Client() *kgo.Client {
return s.c
}
func (s *Subscriber) Options() broker.SubscribeOptions {
return s.opts
}
func (s *Subscriber) Topic() string {
return s.topic
}
func (s *Subscriber) Unsubscribe(ctx context.Context) error {
if s.closed {
return nil
}
s.c.PauseFetchTopics(s.topic)
s.c.CloseAllowingRebalance()
kc := make(map[string][]int32)
for ctp := range s.consumers {
kc[ctp.t] = append(kc[ctp.t], ctp.p)
}
s.killConsumers(ctx, kc)
close(s.done)
s.closed = true
s.c.ResumeFetchTopics(s.topic)
return nil
}
func (s *Subscriber) poll(ctx context.Context) {
maxInflight := DefaultSubscribeMaxInflight
if s.opts.Context != nil {
if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); ok && n > 0 {
maxInflight = n
}
}
go func() {
ac := kadm.NewClient(s.c)
ticker := time.NewTicker(DefaultStatsInterval)
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
dgls, err := ac.Lag(ctx, s.opts.Group)
if err != nil || !dgls.Ok() {
continue
}
dgl, ok := dgls[s.opts.Group]
if !ok {
continue
}
lmap, ok := dgl.Lag[s.topic]
if !ok {
continue
}
s.mu.Lock()
for p, l := range lmap {
s.kopts.Meter.Counter(semconv.BrokerGroupLag, "topic", s.topic, "group", s.opts.Group, "partition", strconv.Itoa(int(p))).Set(uint64(l.Lag))
}
s.mu.Unlock()
}
}
}()
for {
select {
case <-ctx.Done():
s.c.CloseAllowingRebalance()
return
case <-s.done:
return
default:
fetches := s.c.PollRecords(ctx, maxInflight)
if !s.closed && fetches.IsClientClosed() {
s.closed = true
return
}
fetches.EachError(func(t string, p int32, err error) {
s.kopts.Logger.Fatal(ctx, fmt.Sprintf("[kgo] fetch topic %s partition %d error", t, p), err)
})
fetches.EachPartition(func(p kgo.FetchTopicPartition) {
tps := tp{p.Topic, p.Partition}
s.mu.Lock()
c := s.consumers[tps]
s.mu.Unlock()
if c != nil {
c.recs <- p
}
})
s.c.AllowRebalance()
}
}
}
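The lag probe in the goroutine above can be run standalone against any *kgo.Client; a sketch assuming an existing client cl and the group "example-group":

adm := kadm.NewClient(cl)
lags, err := adm.Lag(ctx, "example-group")
if err == nil && lags.Ok() {
	for topic, partitions := range lags["example-group"].Lag {
		for p, l := range partitions {
			fmt.Printf("topic=%s partition=%d lag=%d\n", topic, p, l.Lag)
		}
	}
}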
func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32) {
var wg sync.WaitGroup
defer wg.Wait()
for topic, partitions := range lost {
for _, partition := range partitions {
tps := tp{topic, partition}
s.mu.Lock()
pc, ok := s.consumers[tps]
s.mu.Unlock()
if !ok {
continue
}
s.mu.Lock()
delete(s.consumers, tps)
s.mu.Unlock()
close(pc.quit)
if s.kopts.Logger.V(logger.DebugLevel) {
s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] waiting for work to finish topic %s partition %d", topic, partition))
}
wg.Add(1)
go func() { <-pc.done; wg.Done() }()
}
}
}
func (s *Subscriber) autocommit(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, _ *kmsg.OffsetCommitResponse, err error) {
if err != nil {
// s.connected.Store(0)
if s.fatalOnError {
s.kopts.Logger.Fatal(context.TODO(), "kgo.AutoCommitCallback error", err)
}
}
}
func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
if s.kopts.Logger.V(logger.ErrorLevel) {
s.kopts.Logger.Error(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
}
s.killConsumers(ctx, lost)
// s.connected.Store(0)
}
func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) {
if s.kopts.Logger.V(logger.DebugLevel) {
s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] revoked %#+v", revoked))
}
s.killConsumers(ctx, revoked)
if err := c.CommitMarkedOffsets(ctx); err != nil {
s.kopts.Logger.Error(ctx, "[kgo] revoked CommitMarkedOffsets error", err)
// s.connected.Store(0)
}
}
func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) {
for topic, partitions := range assigned {
for _, partition := range partitions {
pc := &consumer{
c: c,
topic: topic,
partition: partition,
htracer: s.htracer,
quit: make(chan struct{}),
done: make(chan struct{}),
recs: make(chan kgo.FetchTopicPartition, 100),
handler: s.handler,
messagePool: s.messagePool,
kopts: s.kopts,
opts: s.opts,
connected: s.connected,
}
s.mu.Lock()
s.consumers[tp{topic, partition}] = pc
s.mu.Unlock()
go pc.consume()
}
}
}
func (pc *consumer) consume() {
var err error
defer close(pc.done)
if pc.kopts.Logger.V(logger.DebugLevel) {
pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("starting, topic %s partition %d", pc.topic, pc.partition))
defer pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("killing, topic %s partition %d", pc.topic, pc.partition))
}
var pm *kgoMessage
for {
select {
case <-pc.quit:
return
case p := <-pc.recs:
for _, record := range p.Records {
ctx, sp := pc.htracer.WithProcessSpan(record)
ts := time.Now()
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Inc()
if pc.messagePool {
pm = messagePool.Get().(*kgoMessage)
} else {
pm = &kgoMessage{}
}
pm.body = record.Value
pm.topic = record.Topic
pm.ack = false
pm.hdr = metadata.New(len(record.Headers))
pm.ctx = ctx
for _, hdr := range record.Headers {
pm.hdr.Set(hdr.Key, string(hdr.Value))
}
pm.hdr.Set("Micro-Offset", strconv.FormatInt(record.Offset, 10))
pm.hdr.Set("Micro-Partition", strconv.FormatInt(int64(record.Partition), 10))
pm.hdr.Set("Micro-Topic", record.Topic)
pm.hdr.Set("Micro-Key", string(record.Key))
pm.hdr.Set("Micro-Timestamp", strconv.FormatInt(record.Timestamp.Unix(), 10))
switch h := pc.handler.(type) {
case func(broker.Message) error:
err = h(pm)
case func([]broker.Message) error:
err = h([]broker.Message{pm})
}
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
if err != nil {
if sp != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
} else if pc.opts.AutoAck {
pm.ack = true
}
te := time.Since(ts)
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
ack := pm.ack
if pc.messagePool {
messagePool.Put(pm)
}
if ack {
pc.c.MarkCommitRecords(record)
} else {
if sp != nil {
sp.Finish()
}
// pc.connected.Store(0)
pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] message not committed")
return
}
if sp != nil {
sp.Finish()
}
}
}
}
}
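The ack path above relies on franz-go's mark-then-commit flow: with kgo.AutoCommitMarks(), only records passed to MarkCommitRecords become eligible for the next autocommit. A raw-franz-go sketch of that flow (address, topic and group are illustrative):

cl, err := kgo.NewClient(
	kgo.SeedBrokers("127.0.0.1:9092"),
	kgo.ConsumerGroup("example-group"),
	kgo.ConsumeTopics("example-topic"),
	kgo.AutoCommitMarks(),
)
if err != nil {
	// handle client construction error
}
for {
	fetches := cl.PollRecords(context.Background(), 100)
	if fetches.IsClientClosed() {
		break
	}
	fetches.EachRecord(func(r *kgo.Record) {
		// process r, then mark it so its offset is committed
		cl.MarkCommitRecords(r)
	})
}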

223
tracer.go
View File

@@ -1,223 +0,0 @@
package kgo
import (
"context"
"unicode/utf8"
"github.com/twmb/franz-go/pkg/kgo"
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/tracer"
)
type hookTracer struct {
tracer tracer.Tracer
clientID string
group string
}
var messagingSystem = semconv.MessagingSystemKey.String("kafka")
var (
_ kgo.HookProduceRecordBuffered = (*hookTracer)(nil)
_ kgo.HookProduceRecordUnbuffered = (*hookTracer)(nil)
_ kgo.HookFetchRecordBuffered = (*hookTracer)(nil)
_ kgo.HookFetchRecordUnbuffered = (*hookTracer)(nil)
)
// OnProduceRecordBuffered starts a new span for the "publish" operation on a
// buffered record.
//
// It sets span options and injects the span context into record and updates
// the record's context, so it can be ended in the OnProduceRecordUnbuffered
// hook.
func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
if !m.tracer.Enabled() {
return
}
// Set up span options.
attrs := []interface{}{
messagingSystem,
semconv.MessagingDestinationKindTopic,
semconv.MessagingDestinationName(r.Topic),
semconv.MessagingOperationPublish,
}
attrs = maybeKeyAttr(attrs, r)
if m.clientID != "" {
attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindProducer),
}
if r.Context == nil {
r.Context = context.Background()
}
omd, ok := metadata.FromOutgoingContext(r.Context)
if !ok {
omd = metadata.New(len(r.Headers))
}
md := metadata.Copy(omd)
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
if !ok {
r.Context, _ = m.tracer.Start(metadata.NewOutgoingContext(r.Context, md), "sdk.broker", opts...)
} else {
r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
}
setHeaders(r, omd, metadata.HeaderContentType)
}
// OnProduceRecordUnbuffered continues and ends the "publish" span for an
// unbuffered record.
//
// It sets attributes with values unset when producing and records any error
// that occurred during the publish operation.
func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) {
if !m.tracer.Enabled() {
return
}
if span, ok := tracer.SpanFromContext(r.Context); ok {
span.AddLabels(
semconv.MessagingKafkaDestinationPartition(int(r.Partition)),
)
if err != nil {
span.SetStatus(tracer.SpanStatusError, err.Error())
}
span.Finish()
}
}
// OnFetchRecordBuffered starts a new span for the "receive" operation on a
// buffered record.
//
// It sets the span options and extracts the span context from the record,
// updates the record's context to ensure it can be ended in the
// OnFetchRecordUnbuffered hook and can be used in downstream consumer
// processing.
func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
if !m.tracer.Enabled() {
return
}
// Set up the span options.
attrs := []interface{}{
messagingSystem,
semconv.MessagingSourceKindTopic,
semconv.MessagingSourceName(r.Topic),
semconv.MessagingOperationReceive,
semconv.MessagingKafkaSourcePartition(int(r.Partition)),
}
attrs = maybeKeyAttr(attrs, r)
if m.clientID != "" {
attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
}
if m.group != "" {
attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindConsumer),
}
if r.Context == nil {
r.Context = context.Background()
}
omd, ok := metadata.FromIncomingContext(r.Context)
if !ok {
omd = metadata.New(len(r.Headers))
}
md := metadata.Copy(omd)
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
if !ok {
r.Context, _ = m.tracer.Start(metadata.NewIncomingContext(r.Context, md), "sdk.broker", opts...)
} else {
r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
}
setHeaders(r, omd, metadata.HeaderContentType)
}
// OnFetchRecordUnbuffered continues and ends the "receive" span for an
// unbuffered record.
func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) {
if !m.tracer.Enabled() {
return
}
span, _ := tracer.SpanFromContext(r.Context)
span.Finish()
}
// WithProcessSpan starts a new span for the "process" operation on a consumer
// record.
//
// It sets up the span options. The user's application code is responsible for
// ending the span.
//
// Call this only for consumed records inside your polling loop, never for
// records created for producing, and do so at the start of each iteration
// of processing for the record.
func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Span) {
if r.Context == nil {
r.Context = context.Background()
}
if !m.tracer.Enabled() {
return r.Context, nil
}
// Set up the span options.
attrs := []interface{}{
messagingSystem,
semconv.MessagingSourceKindTopic,
semconv.MessagingSourceName(r.Topic),
semconv.MessagingOperationProcess,
semconv.MessagingKafkaSourcePartition(int(r.Partition)),
semconv.MessagingKafkaMessageOffset(int(r.Offset)),
}
attrs = maybeKeyAttr(attrs, r)
if m.clientID != "" {
attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
}
if m.group != "" {
attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindConsumer),
}
md, ok := metadata.FromIncomingContext(r.Context)
if !ok {
md = metadata.New(len(r.Headers))
}
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
// Start a new span using the provided context and options.
return m.tracer.Start(r.Context, "sdk.broker", opts...)
}
func maybeKeyAttr(attrs []interface{}, r *kgo.Record) []interface{} {
if r.Key == nil || !utf8.Valid(r.Key) {
return attrs
}
return append(attrs, semconv.MessagingKafkaMessageKeyKey.String(string(r.Key)))
}
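Publish and receive spans are created automatically once these hooks are registered; only the process span is manual. A sketch from inside this package, assuming a tracer.Tracer t and a consumed *kgo.Record record:

ht := &hookTracer{tracer: t, clientID: "example", group: "example-group"}
cl, err := kgo.NewClient(
	kgo.SeedBrokers("127.0.0.1:9092"),
	kgo.WithHooks(ht),
)
if err != nil {
	// handle client construction error
}
defer cl.Close()

// per consumed record: start a process span and end it yourself
ctx, sp := ht.WithProcessSpan(record)
_ = ctx // pass to the handler
if sp != nil {
	sp.Finish()
}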

244
util.go Normal file
View File

@@ -0,0 +1,244 @@
package kgo
import (
"context"
"errors"
"sync"
kgo "github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
)
var ErrLostMessage = errors.New("message not marked for offset commit and will be lost on the next iteration")
var pPool = sync.Pool{
New: func() interface{} {
return &publication{msg: broker.NewMessage("")}
},
}
type worker struct {
done chan struct{}
recs chan []*kgo.Record
cherr chan error
handler broker.Handler
batchHandler broker.BatchHandler
opts broker.SubscribeOptions
kopts broker.Options
tpmap map[string][]int32
maxInflight int
reader *kgo.Client
ctx context.Context
}
func (s *subscriber) run(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case <-s.kopts.Context.Done():
return
default:
fetches := s.reader.PollFetches(ctx)
if fetches.IsClientClosed() {
// TODO: fatal ?
return
}
if len(fetches.Errors()) > 0 {
for _, err := range fetches.Errors() {
s.kopts.Logger.Fatalf(ctx, "fetch err topic %s partition %d: %v", err.Topic, err.Partition, err.Err)
}
// TODO: fatal ?
return
}
fetches.EachPartition(func(p kgo.FetchTopicPartition) {
s.Lock()
consumers := s.consumers[p.Topic]
s.Unlock()
if consumers == nil {
return
}
w, ok := consumers[p.Partition]
if !ok {
return
}
select {
case err := <-w.cherr:
s.kopts.Logger.Fatalf(ctx, "handle err: %v", err)
return
case w.recs <- p.Records:
case <-w.done:
}
})
}
}
}
func (s *subscriber) assigned(ctx context.Context, _ *kgo.Client, assigned map[string][]int32) {
maxInflight := DefaultSubscribeMaxInflight
if s.opts.Context != nil {
if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); ok && n > 0 {
maxInflight = n
}
}
s.Lock()
for topic, partitions := range assigned {
if s.consumers[topic] == nil {
s.consumers[topic] = make(map[int32]worker)
}
for _, partition := range partitions {
w := worker{
done: make(chan struct{}),
recs: make(chan []*kgo.Record),
cherr: make(chan error),
kopts: s.kopts,
opts: s.opts,
ctx: ctx,
tpmap: map[string][]int32{topic: {partition}},
reader: s.reader,
handler: s.handler,
batchHandler: s.batchhandler,
maxInflight: maxInflight,
}
s.consumers[topic][partition] = w
go w.handle()
}
}
s.Unlock()
}
func (s *subscriber) revoked(_ context.Context, _ *kgo.Client, revoked map[string][]int32) {
s.Lock()
for topic, partitions := range revoked {
ptopics := s.consumers[topic]
for _, partition := range partitions {
w := ptopics[partition]
delete(ptopics, partition)
if len(ptopics) == 0 {
delete(s.consumers, topic)
}
close(w.done)
}
}
s.Unlock()
}
func (w *worker) handle() {
var err error
eh := w.kopts.ErrorHandler
if w.opts.ErrorHandler != nil {
eh = w.opts.ErrorHandler
}
paused := false
for {
select {
case <-w.ctx.Done():
w.cherr <- w.ctx.Err()
return
case <-w.done:
return
case recs := <-w.recs:
if len(recs) >= w.maxInflight {
paused = true
w.reader.PauseFetchPartitions(w.tpmap)
}
for _, record := range recs {
p := pPool.Get().(*publication)
p.msg.Header = nil
p.msg.Body = nil
p.topic = record.Topic
p.err = nil
p.ack = false
if w.opts.BodyOnly {
p.msg.Body = record.Value
if l := len(record.Headers); l > 0 {
if p.msg.Header == nil {
p.msg.Header = metadata.New(l)
}
for _, h := range record.Headers {
p.msg.Header.Set(h.Key, string(h.Value))
}
}
} else if w.kopts.Codec.String() == "noop" {
p.msg.Body = record.Value
p.msg.Header = metadata.New(len(record.Headers))
for _, h := range record.Headers {
p.msg.Header.Set(h.Key, string(h.Value))
}
} else {
if err := w.kopts.Codec.Unmarshal(record.Value, p.msg); err != nil {
p.err = err
p.msg.Body = record.Value
if l := len(record.Headers); l > 0 {
if p.msg.Header == nil {
p.msg.Header = metadata.New(l)
}
for _, h := range record.Headers {
p.msg.Header.Set(h.Key, string(h.Value))
}
}
if eh != nil {
_ = eh(p)
if p.ack {
w.reader.MarkCommitRecords(record)
} else {
w.cherr <- ErrLostMessage
pPool.Put(p)
return
}
pPool.Put(p)
continue
} else {
if w.kopts.Logger.V(logger.ErrorLevel) {
w.kopts.Logger.Errorf(w.kopts.Context, "[kgo]: failed to unmarshal: %v", err)
}
}
pPool.Put(p)
w.cherr <- err
return
}
if l := len(record.Headers); l > 0 {
if p.msg.Header == nil {
p.msg.Header = metadata.New(l)
}
for _, h := range record.Headers {
p.msg.Header.Set(h.Key, string(h.Value))
}
}
}
err = w.handler(p)
if err == nil && w.opts.AutoAck {
p.ack = true
} else if err != nil {
p.err = err
if eh != nil {
_ = eh(p)
} else {
if w.kopts.Logger.V(logger.ErrorLevel) {
w.kopts.Logger.Errorf(w.kopts.Context, "[kgo]: subscriber error: %v", err)
}
}
}
if p.ack {
pPool.Put(p)
w.reader.MarkCommitRecords(record)
} else {
pPool.Put(p)
w.cherr <- ErrLostMessage
return
}
}
if paused {
paused = false
w.reader.ResumeFetchPartitions(w.tpmap)
}
}
}
}
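handle() applies backpressure by pausing fetches for the partition once a delivered batch reaches maxInflight and resuming once it drains (via franz-go's PauseFetchPartitions/ResumeFetchPartitions). From application code the threshold is tuned with the subscribe option defined in options.go; a sketch in the v3 test style (topic and group are illustrative):

sub, err := b.Subscribe(ctx, "example-topic",
	func(evt broker.Event) error { return evt.Ack() },
	broker.SubscribeGroup("example-group"),
	kgo.SubscribeMaxInFlight(500), // pause the partition once 500 records are queued
)
if err != nil {
	// handle subscribe error
}
defer sub.Unsubscribe(ctx)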