Compare commits
	
		
			66 Commits
		
	
	
		
			v4.0.1
			...
			0d2d0fe774
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 0d2d0fe774 | |||
| c71478ee9a | |||
| 2ca4e5c74e | |||
| 2809ebcaeb | |||
| 3f04403319 | |||
| 8edfc11eef | |||
| cb5ef75e09 | |||
| d5bd105cc6 | |||
| 1ba02ed8ad | |||
| 7c5da60556 | |||
| 07fa36d704 | |||
| 849bbd7a09 | |||
| f461bb6876 | |||
| 201d22d1c4 | |||
| ff6a272594 | |||
| 1018abe7bd | |||
| 398c3c81cb | |||
|  | 786d03b521 | ||
| 937c9d5720 | |||
|  | 951fba55fa | ||
|  | ec5238ed14 | ||
| d8f44a924e | |||
|  | ffe9e5d952 | ||
| 8c362fd6ae | |||
| 90365a455c | |||
| 6a218ca7b2 | |||
| ba9b88c650 | |||
| aedd60ea87 | |||
| ea2ac477be | |||
| c1fa2f639d | |||
| 8e3f2c67d7 | |||
| e66194695e | |||
| 894d6f4f20 | |||
| d404fa31ab | |||
| 88777a29ad | |||
| 23c2903c21 | |||
| 8fcc23f639 | |||
| 25dda1f34c | |||
| fe66086c40 | |||
| 7329bc23bc | |||
| c240631cdb | |||
|  | 6a68533824 | ||
| 058b6354c0 | |||
| 1f4cf11afe | |||
| 39177da1d0 | |||
| d559db4050 | |||
| aa946c469a | |||
| 9c4d88bb69 | |||
| 56288f46b1 | |||
| 81dcef8b28 | |||
| ec7a22b2dc | |||
| d2ac0c1360 | |||
| 69dd8c4eea | |||
| 27a6a923cd | |||
| 0a395235d6 | |||
| 23f0ad0f2f | |||
| 4fcbe0a770 | |||
| 28c9865121 | |||
| 697413d829 | |||
| 8a64e8c5cc | |||
| 2c8ca8d14f | |||
| 769ac6322f | |||
| 52318d68b8 | |||
| 5c4332ffc4 | |||
| 3a86d4c0f4 | |||
| 8bbcc30d04 | 
							
								
								
									
										29
									
								
								.gitea/workflows/job_lint.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								.gitea/workflows/job_lint.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| name: lint | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
|     - v4 | ||||
|  | ||||
| jobs: | ||||
|   lint: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable'  | ||||
|     - name: setup deps | ||||
|       run: go get -v ./... | ||||
|     - name: run lint | ||||
|       uses: https://github.com/golangci/golangci-lint-action@v6 | ||||
|       with: | ||||
|         version: 'latest' | ||||
							
								
								
									
										34
									
								
								.gitea/workflows/job_test.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								.gitea/workflows/job_test.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | ||||
| name: test | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
|     - v4 | ||||
|   push: | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
|     - v4 | ||||
|  | ||||
| jobs: | ||||
|   test: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|     - name: setup deps | ||||
|       run: go get -v ./... | ||||
|     - name: run test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
							
								
								
									
										53
									
								
								.gitea/workflows/job_tests.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										53
									
								
								.gitea/workflows/job_tests.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,53 @@ | ||||
| name: test | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
|     - v4 | ||||
|   push: | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
|     - v4 | ||||
|  | ||||
| jobs: | ||||
|   test: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: checkout tests | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         ref: master | ||||
|         filter: 'blob:none' | ||||
|         repository: unistack-org/micro-tests | ||||
|         path: micro-tests | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|     - name: setup go work | ||||
|       env: | ||||
|         GOWORK: /workspace/${{ github.repository_owner }}/go.work | ||||
|       run: | | ||||
|         go work init | ||||
|         go work use . | ||||
|         go work use micro-tests         | ||||
|     - name: setup deps | ||||
|       env: | ||||
|         GOWORK: /workspace/${{ github.repository_owner }}/go.work | ||||
|       run: go get -v ./... | ||||
|     - name: run tests | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|         GOWORK: /workspace/${{ github.repository_owner }}/go.work | ||||
|       run: | | ||||
|         cd micro-tests | ||||
|         go test -mod readonly -v ./... || true         | ||||
							
								
								
									
										19
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										19
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,19 +0,0 @@ | ||||
| # To get started with Dependabot version updates, you'll need to specify which | ||||
| # package ecosystems to update and where the package manifests are located. | ||||
| # Please see the documentation for all configuration options: | ||||
| # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates | ||||
|  | ||||
| version: 2 | ||||
| updates: | ||||
|  | ||||
|   # Maintain dependencies for GitHub Actions | ||||
|   - package-ecosystem: "github-actions" | ||||
|     directory: "/" | ||||
|     schedule: | ||||
|       interval: "daily" | ||||
|  | ||||
|   # Maintain dependencies for Golang | ||||
|   - package-ecosystem: "gomod" | ||||
|     directory: "/" | ||||
|     schedule: | ||||
|       interval: "daily" | ||||
							
								
								
									
										20
									
								
								.github/workflows/autoapprove.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										20
									
								
								.github/workflows/autoapprove.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,20 +0,0 @@ | ||||
| name: "autoapprove" | ||||
|  | ||||
| on: | ||||
|   pull_request_target: | ||||
|     types: [assigned, opened, synchronize, reopened] | ||||
|  | ||||
| permissions: | ||||
|   pull-requests: write | ||||
|   contents: write | ||||
|  | ||||
| jobs: | ||||
|   autoapprove: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: approve | ||||
|         uses: hmarr/auto-approve-action@v3 | ||||
|         if: github.actor == 'vtolstov' || github.actor == 'dependabot[bot]' | ||||
|         id: approve | ||||
|         with: | ||||
|           github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
							
								
								
									
										21
									
								
								.github/workflows/automerge.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										21
									
								
								.github/workflows/automerge.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,21 +0,0 @@ | ||||
| name: "automerge" | ||||
|  | ||||
| on: | ||||
|   pull_request_target: | ||||
|     types: [assigned, opened, synchronize, reopened] | ||||
|  | ||||
| permissions: | ||||
|   pull-requests: write | ||||
|   contents: write | ||||
|  | ||||
| jobs: | ||||
|   automerge: | ||||
|     runs-on: ubuntu-latest | ||||
|     if: github.actor == 'vtolstov' | ||||
|     steps: | ||||
|       - name: merge | ||||
|         id: merge | ||||
|         run: gh pr merge --auto --merge "$PR_URL" | ||||
|         env: | ||||
|           PR_URL: ${{github.event.pull_request.html_url}} | ||||
|           GITHUB_TOKEN: ${{secrets.TOKEN}} | ||||
							
								
								
									
										47
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										47
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,47 +0,0 @@ | ||||
| name: build | ||||
| on: | ||||
|  push: | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
| jobs: | ||||
|   test: | ||||
|     name: test | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: setup | ||||
|       uses: actions/setup-go@v3 | ||||
|       with: | ||||
|         go-version: 1.17 | ||||
|     - name: checkout | ||||
|       uses: actions/checkout@v3 | ||||
|     - name: cache | ||||
|       uses: actions/cache@v3 | ||||
|       with: | ||||
|         path: ~/go/pkg/mod | ||||
|         key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} | ||||
|         restore-keys: ${{ runner.os }}-go- | ||||
|     - name: deps | ||||
|       run: go get -v -t -d ./... | ||||
|     - name: test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
|   lint: | ||||
|     name: lint | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: checkout | ||||
|         uses: actions/checkout@v3 | ||||
|       - name: lint | ||||
|         uses: golangci/golangci-lint-action@v3.4.0 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. | ||||
|           version: v1.30 | ||||
|           # Optional: working directory, useful for monorepos | ||||
|           # working-directory: somedir | ||||
|           # Optional: golangci-lint command line arguments. | ||||
|           # args: --issues-exit-code=0 | ||||
|           # Optional: show only new issues if it's a pull request. The default value is `false`. | ||||
|           # only-new-issues: true | ||||
							
								
								
									
										78
									
								
								.github/workflows/codeql-analysis.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										78
									
								
								.github/workflows/codeql-analysis.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,78 +0,0 @@ | ||||
| # For most projects, this workflow file will not need changing; you simply need | ||||
| # to commit it to your repository. | ||||
| # | ||||
| # You may wish to alter this file to override the set of languages analyzed, | ||||
| # or to provide custom queries or build logic. | ||||
| # | ||||
| # ******** NOTE ******** | ||||
| # We have attempted to detect the languages in your repository. Please check | ||||
| # the `language` matrix defined below to confirm you have the correct set of | ||||
| # supported CodeQL languages. | ||||
| # | ||||
| name: "codeql" | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["prbuild"] | ||||
|     types: | ||||
|       - completed | ||||
|   push: | ||||
|     branches: [ master, v3 ] | ||||
|   pull_request: | ||||
|     # The branches below must be a subset of the branches above | ||||
|     branches: [ master, v3 ] | ||||
|   schedule: | ||||
|     - cron: '34 1 * * 0' | ||||
|  | ||||
| jobs: | ||||
|   analyze: | ||||
|     name: analyze | ||||
|     runs-on: ubuntu-latest | ||||
|     permissions: | ||||
|       actions: read | ||||
|       contents: read | ||||
|       security-events: write | ||||
|  | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         language: [ 'go' ] | ||||
|         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] | ||||
|         # Learn more: | ||||
|         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed | ||||
|  | ||||
|     steps: | ||||
|     - name: checkout | ||||
|       uses: actions/checkout@v3 | ||||
|     - name: setup | ||||
|       uses: actions/setup-go@v3 | ||||
|       with: | ||||
|         go-version: 1.17 | ||||
|     # Initializes the CodeQL tools for scanning. | ||||
|     - name: init | ||||
|       uses: github/codeql-action/init@v2 | ||||
|       with: | ||||
|         languages: ${{ matrix.language }} | ||||
|         # If you wish to specify custom queries, you can do so here or in a config file. | ||||
|         # By default, queries listed here will override any specified in a config file. | ||||
|         # Prefix the list here with "+" to use these queries and those in the config file. | ||||
|         # queries: ./path/to/local/query, your-org/your-repo/queries@main | ||||
|  | ||||
|     # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java). | ||||
|     # If this step fails, then you should remove it and run the build manually (see below) | ||||
|     - name: autobuild | ||||
|       uses: github/codeql-action/autobuild@v2 | ||||
|  | ||||
|     # ℹ️ Command-line programs to run using the OS shell. | ||||
|     # 📚 https://git.io/JvXDl | ||||
|  | ||||
|     # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines | ||||
|     #    and modify them (or add more) to build your code if your project | ||||
|     #    uses a compiled language | ||||
|  | ||||
|     #- run: | | ||||
|     #   make bootstrap | ||||
|     #   make release | ||||
|  | ||||
|     - name: analyze | ||||
|       uses: github/codeql-action/analyze@v2 | ||||
							
								
								
									
										27
									
								
								.github/workflows/dependabot-automerge.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										27
									
								
								.github/workflows/dependabot-automerge.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,27 +0,0 @@ | ||||
| name: "dependabot-automerge" | ||||
|  | ||||
| on: | ||||
|   pull_request_target: | ||||
|     types: [assigned, opened, synchronize, reopened] | ||||
|  | ||||
| permissions: | ||||
|   pull-requests: write | ||||
|   contents: write | ||||
|  | ||||
| jobs: | ||||
|   automerge: | ||||
|     runs-on: ubuntu-latest | ||||
|     if: github.actor == 'dependabot[bot]' | ||||
|     steps: | ||||
|       - name: metadata | ||||
|         id: metadata | ||||
|         uses: dependabot/fetch-metadata@v1.3.6 | ||||
|         with: | ||||
|           github-token: "${{ secrets.TOKEN }}" | ||||
|       - name: merge | ||||
|         id: merge | ||||
|         if: ${{contains(steps.metadata.outputs.dependency-names, 'go.unistack.org')}} | ||||
|         run: gh pr merge --auto --merge "$PR_URL" | ||||
|         env: | ||||
|           PR_URL: ${{github.event.pull_request.html_url}} | ||||
|           GITHUB_TOKEN: ${{secrets.TOKEN}} | ||||
							
								
								
									
										47
									
								
								.github/workflows/pr.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										47
									
								
								.github/workflows/pr.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,47 +0,0 @@ | ||||
| name: prbuild | ||||
| on: | ||||
|   pull_request: | ||||
|     branches: | ||||
|     - master | ||||
|     - v3 | ||||
| jobs: | ||||
|   test: | ||||
|     name: test | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: setup | ||||
|       uses: actions/setup-go@v3 | ||||
|       with: | ||||
|         go-version: 1.17 | ||||
|     - name: checkout | ||||
|       uses: actions/checkout@v3 | ||||
|     - name: cache | ||||
|       uses: actions/cache@v3 | ||||
|       with: | ||||
|         path: ~/go/pkg/mod | ||||
|         key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} | ||||
|         restore-keys: ${{ runner.os }}-go- | ||||
|     - name: deps | ||||
|       run: go get -v -t -d ./... | ||||
|     - name: test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
|   lint: | ||||
|     name: lint | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: checkout | ||||
|         uses: actions/checkout@v3 | ||||
|       - name: lint | ||||
|         uses: golangci/golangci-lint-action@v3.4.0 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. | ||||
|           version: v1.30 | ||||
|           # Optional: working directory, useful for monorepos | ||||
|           # working-directory: somedir | ||||
|           # Optional: golangci-lint command line arguments. | ||||
|           # args: --issues-exit-code=0 | ||||
|           # Optional: show only new issues if it's a pull request. The default value is `false`. | ||||
|           # only-new-issues: true | ||||
							
								
								
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -13,3 +13,9 @@ | ||||
|  | ||||
| # Dependency directories (remove the comment below to include it) | ||||
| # vendor/ | ||||
|  | ||||
| # General | ||||
| .DS_Store | ||||
| .idea | ||||
| .vscode | ||||
| bin/ | ||||
|   | ||||
							
								
								
									
										21
									
								
								.golangci.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								.golangci.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| run: | ||||
|   concurrency: 8 | ||||
|   deadline: 5m | ||||
|   issues-exit-code: 1 | ||||
|   tests: true | ||||
|  | ||||
| linters: | ||||
|   enable: | ||||
|     - staticcheck | ||||
|     - unused | ||||
|     - gosimple | ||||
|     - govet | ||||
|     - prealloc | ||||
|     - unconvert | ||||
|     - nakedret | ||||
|  | ||||
| linters-settings: | ||||
|   govet: | ||||
|     check-all: true | ||||
|     enable: | ||||
|       - fieldalignment | ||||
							
								
								
									
										14
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										14
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,9 +1,9 @@ | ||||
| # micro-broker-kgo | ||||
| yet another micro kafka broker alternative | ||||
| # broker-kgo | ||||
|  | ||||
| TODO: | ||||
| * dont always append options from context on Init and New | ||||
| * add SubscriberOptions(...kgo.Opt) | ||||
| * add ServerSubscribeOptions(...kgo.Opt) | ||||
| * check PublisherOptions(...kgo.Opt) | ||||
| * check ClientPublisherOptions(...kgo.Opt) | ||||
|  | ||||
| 1) экспортируем текущий оффсет для каждой партиции в группе | ||||
| 2) экспортируем лаг для группы | ||||
| 3) мониторим | ||||
|    1) если есть лаг больше нуля | ||||
|    2) если дельта оффсета за нужное нам время не  | ||||
							
								
								
									
										93
									
								
								carrier.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										93
									
								
								carrier.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,93 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"slices" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v3/metadata" | ||||
| ) | ||||
|  | ||||
| // RecordCarrier injects and extracts traces from a kgo.Record. | ||||
| // | ||||
| // This type exists to satisfy the otel/propagation.TextMapCarrier interface. | ||||
| type RecordCarrier struct { | ||||
| 	record *kgo.Record | ||||
| } | ||||
|  | ||||
| // NewRecordCarrier creates a new RecordCarrier. | ||||
| func NewRecordCarrier(record *kgo.Record) RecordCarrier { | ||||
| 	return RecordCarrier{record: record} | ||||
| } | ||||
|  | ||||
| // Get retrieves a single value for a given key if it exists. | ||||
| func (c RecordCarrier) Get(key string) string { | ||||
| 	for _, h := range c.record.Headers { | ||||
| 		if h.Key == key { | ||||
| 			return string(h.Value) | ||||
| 		} | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // Set sets a header. | ||||
| func (c RecordCarrier) Set(key, val string) { | ||||
| 	// Check if key already exists. | ||||
| 	for i, h := range c.record.Headers { | ||||
| 		if h.Key == key { | ||||
| 			// Key exist, update the value. | ||||
| 			c.record.Headers[i].Value = []byte(val) | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	// Key does not exist, append new header. | ||||
| 	c.record.Headers = append(c.record.Headers, kgo.RecordHeader{ | ||||
| 		Key:   key, | ||||
| 		Value: []byte(val), | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // Keys returns a slice of all key identifiers in the carrier. | ||||
| func (c RecordCarrier) Keys() []string { | ||||
| 	out := make([]string, len(c.record.Headers)) | ||||
| 	for i, h := range c.record.Headers { | ||||
| 		out[i] = h.Key | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| func setHeaders(r *kgo.Record, md metadata.Metadata, exclude ...string) { | ||||
| 	seen := make(map[string]struct{}) | ||||
|  | ||||
| loop: | ||||
| 	for k, v := range md { | ||||
| 		k = http.CanonicalHeaderKey(k) | ||||
|  | ||||
| 		if _, ok := seen[k]; ok { | ||||
| 			continue loop | ||||
| 		} | ||||
|  | ||||
| 		if slices.ContainsFunc(exclude, func(s string) bool { | ||||
| 			return strings.EqualFold(s, k) | ||||
| 		}) { | ||||
| 			continue loop | ||||
| 		} | ||||
|  | ||||
| 		for i := 0; i < len(r.Headers); i++ { | ||||
| 			if strings.EqualFold(r.Headers[i].Key, k) { | ||||
| 				// Key exist, update the value. | ||||
| 				r.Headers[i].Value = []byte(v) | ||||
| 				continue loop | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Key does not exist, append new header. | ||||
| 		r.Headers = append(r.Headers, kgo.RecordHeader{ | ||||
| 			Key:   k, | ||||
| 			Value: []byte(v), | ||||
| 		}) | ||||
|  | ||||
| 		seen[k] = struct{}{} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										14
									
								
								event.go
									
									
									
									
									
								
							
							
						
						
									
										14
									
								
								event.go
									
									
									
									
									
								
							| @@ -1,19 +1,27 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
|  | ||||
| 	"go.unistack.org/micro/v3/broker" | ||||
| ) | ||||
|  | ||||
| type event struct { | ||||
| 	topic string | ||||
| 	err   error | ||||
| 	sync.RWMutex | ||||
| 	msg *broker.Message | ||||
| 	err error | ||||
| 	ctx context.Context | ||||
|  | ||||
| 	topic string | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	ack bool | ||||
| } | ||||
|  | ||||
| func (p *event) Context() context.Context { | ||||
| 	return p.ctx | ||||
| } | ||||
|  | ||||
| func (p *event) Topic() string { | ||||
| 	return p.topic | ||||
| } | ||||
|   | ||||
							
								
								
									
										25
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										25
									
								
								go.mod
									
									
									
									
									
								
							| @@ -1,14 +1,27 @@ | ||||
| module go.unistack.org/micro-broker-kgo/v3 | ||||
|  | ||||
| go 1.17 | ||||
| go 1.22.0 | ||||
|  | ||||
| require ( | ||||
| 	github.com/twmb/franz-go v1.11.5 | ||||
| 	github.com/twmb/franz-go/pkg/kmsg v1.3.0 | ||||
| 	go.unistack.org/micro/v3 v3.10.14 | ||||
| 	github.com/google/uuid v1.6.0 | ||||
| 	github.com/twmb/franz-go v1.18.0 | ||||
| 	github.com/twmb/franz-go/pkg/kadm v1.14.0 | ||||
| 	github.com/twmb/franz-go/pkg/kmsg v1.9.0 | ||||
| 	go.opentelemetry.io/otel v1.33.0 | ||||
| 	go.unistack.org/micro/v3 v3.11.37 | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/klauspost/compress v1.15.9 // indirect | ||||
| 	github.com/pierrec/lz4/v4 v4.1.15 // indirect | ||||
| 	github.com/ash3in/uuidv8 v1.2.0 // indirect | ||||
| 	github.com/klauspost/compress v1.17.11 // indirect | ||||
| 	github.com/kr/pretty v0.3.1 // indirect | ||||
| 	github.com/matoous/go-nanoid v1.5.1 // indirect | ||||
| 	github.com/pierrec/lz4/v4 v4.1.22 // indirect | ||||
| 	go.unistack.org/micro-proto/v3 v3.4.1 // indirect | ||||
| 	golang.org/x/crypto v0.31.0 // indirect | ||||
| 	golang.org/x/sys v0.28.0 // indirect | ||||
| 	google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect | ||||
| 	google.golang.org/grpc v1.69.2 // indirect | ||||
| 	google.golang.org/protobuf v1.36.1 // indirect | ||||
| 	gopkg.in/yaml.v3 v3.0.1 // indirect | ||||
| ) | ||||
|   | ||||
							
								
								
									
										82
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										82
									
								
								go.sum
									
									
									
									
									
								
							| @@ -1,24 +1,62 @@ | ||||
| github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= | ||||
| github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= | ||||
| github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | ||||
| github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= | ||||
| github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= | ||||
| github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= | ||||
| github.com/silas/dag v0.0.0-20211117232152-9d50aa809f35/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I= | ||||
| github.com/twmb/franz-go v1.11.5 h1:TTv5lVJd+87XkmP9dWN9Jgpf7IUUr7a7jee+byR8LBE= | ||||
| github.com/twmb/franz-go v1.11.5/go.mod h1:FvaHNlpT6woVYIl6LAuIeL7yHol1Fp6Gv2Dn21AvH78= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.3.0 h1:ouBETB7nTqRxiO5E8/pySoFZtVEW2VWw55z3/bsUzTw= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.3.0/go.mod h1:SxG/xJKhgPu25SamAq0rrucfp7lbzCpEXOC+vH/ELrY= | ||||
| go.unistack.org/micro/v3 v3.10.14 h1:7fgLpwGlCN67twhwtngJDEQvrMkUBDSA5vzZqxIDqNE= | ||||
| go.unistack.org/micro/v3 v3.10.14/go.mod h1:uMAc0U/x7dmtICCrblGf0ZLgYegu3VwQAquu+OFCw1Q= | ||||
| golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= | ||||
| golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= | ||||
| golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= | ||||
| github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= | ||||
| github.com/ash3in/uuidv8 v1.2.0 h1:2oogGdtCPwaVtyvPPGin4TfZLtOGE5F+W++E880G6SI= | ||||
| github.com/ash3in/uuidv8 v1.2.0/go.mod h1:BnU0wJBxnzdEKmVg4xckBkD+VZuecTFTUP3M0dWgyY4= | ||||
| github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= | ||||
| github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= | ||||
| github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= | ||||
| github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||
| github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= | ||||
| github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= | ||||
| github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= | ||||
| github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= | ||||
| github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4= | ||||
| github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U= | ||||
| github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= | ||||
| github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= | ||||
| github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= | ||||
| github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= | ||||
| github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= | ||||
| github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= | ||||
| github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= | ||||
| github.com/twmb/franz-go v1.18.0 h1:25FjMZfdozBywVX+5xrWC2W+W76i0xykKjTdEeD2ejw= | ||||
| github.com/twmb/franz-go v1.18.0/go.mod h1:zXCGy74M0p5FbXsLeASdyvfLFsBvTubVqctIaa5wQ+I= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.14.0 h1:nAn1co1lXzJQocpzyIyOFOjUBf4WHWs5/fTprXy2IZs= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.14.0/go.mod h1:XjOPz6ZaXXjrW2jVCfLuucP8H1w2TvD6y3PT2M+aAM4= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.9.0 h1:JojYUph2TKAau6SBtErXpXGC7E3gg4vGZMv9xFU/B6M= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.9.0/go.mod h1:CMbfazviCyY6HM0SXuG5t9vOwYDHRCSrJJyBAe5paqg= | ||||
| go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= | ||||
| go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= | ||||
| go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q= | ||||
| go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo= | ||||
| go.unistack.org/micro/v3 v3.11.37 h1:ZcpnXAYEMcAwmnVb5b7o8/PylGnILxXMHaUlRrPmRI0= | ||||
| go.unistack.org/micro/v3 v3.11.37/go.mod h1:POGU5hstnAT9LH70m8FalyQSNi2GfIew71K75JenIZk= | ||||
| golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= | ||||
| golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= | ||||
| golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= | ||||
| golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= | ||||
| golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= | ||||
| golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= | ||||
| golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= | ||||
| google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= | ||||
| google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= | ||||
| google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= | ||||
| google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= | ||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
|   | ||||
							
								
								
									
										65
									
								
								kadmtest.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										65
									
								
								kadmtest.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,65 @@ | ||||
| //go:build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kadm" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/twmb/franz-go/pkg/kversion" | ||||
|  | ||||
| 	//"github.com/twmb/franz-go/pkg/sasl/scram" | ||||
| 	"github.com/twmb/franz-go/pkg/sasl/plain" | ||||
| ) | ||||
|  | ||||
| func die(msg string, args ...any) { | ||||
| 	fmt.Fprintf(os.Stderr, msg, args...) | ||||
| 	os.Exit(1) | ||||
| } | ||||
|  | ||||
| func main() { | ||||
| 	seeds := []string{"vm-kafka-ump01tn.mbrd.ru:9092", "vm-kafka-ump02tn.mbrd.ru:9092", "vm-kafka-ump03tn.mbrd.ru:9092"} | ||||
|  | ||||
| 	pass := "XXXXX" | ||||
| 	user := "XXXXX" | ||||
|  | ||||
| 	var adminClient *kadm.Client | ||||
| 	{ | ||||
| 		client, err := kgo.NewClient( | ||||
| 			kgo.SeedBrokers(seeds...), | ||||
| 			// kgo.SASL((scram.Auth{User: user, Pass: pass}).AsSha512Mechanism()), | ||||
| 			kgo.SASL((plain.Auth{User: user, Pass: pass}).AsMechanism()), | ||||
|  | ||||
| 			// Do not try to send requests newer than 2.4.0 to avoid breaking changes in the request struct. | ||||
| 			// Sometimes there are breaking changes for newer versions where more properties are required to set. | ||||
| 			kgo.MaxVersions(kversion.V2_4_0()), | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 		defer client.Close() | ||||
|  | ||||
| 		adminClient = kadm.NewClient(client) | ||||
| 	} | ||||
|  | ||||
| 	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	dg, err := adminClient.DescribeGroups(ctx, "interestrate_loader") | ||||
| 	if err != nil { | ||||
| 		die("failed to describe group: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	for _, m := range dg["interestrate_loader"].Members { | ||||
| 		mc, _ := m.Assigned.AsConsumer() | ||||
| 		for _, mt := range mc.Topics { | ||||
| 			for _, p := range mt.Partitions { | ||||
| 				fmt.Printf("client:%s\tpartitions: %d\n", m.ClientID, p) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										214
									
								
								kgo.go
									
									
									
									
									
								
							
							
						
						
									
										214
									
								
								kgo.go
									
									
									
									
									
								
							| @@ -1,29 +1,33 @@ | ||||
| // Package kgo provides a kafka broker using kgo | ||||
| package kgo // import "go.unistack.org/micro-broker-kgo/v3" | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/rand" | ||||
| 	"math/rand/v2" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/twmb/franz-go/pkg/kmsg" | ||||
| 	"go.unistack.org/micro/v3/broker" | ||||
| 	"go.unistack.org/micro/v3/logger" | ||||
| 	"go.unistack.org/micro/v3/metadata" | ||||
| 	id "go.unistack.org/micro/v3/util/id" | ||||
| 	"go.unistack.org/micro/v3/semconv" | ||||
| 	"go.unistack.org/micro/v3/tracer" | ||||
| 	mrand "go.unistack.org/micro/v3/util/rand" | ||||
| ) | ||||
|  | ||||
| var _ broker.Broker = &Broker{} | ||||
| var _ broker.Broker = (*Broker)(nil) | ||||
|  | ||||
| var ErrLostMessage = errors.New("message not marked for offsets commit and will be lost in next iteration") | ||||
|  | ||||
| var DefaultRetryBackoffFn = func() func(int) time.Duration { | ||||
| 	var rngMu sync.Mutex | ||||
| 	rng := rand.New(rand.NewSource(time.Now().UnixNano())) | ||||
| 	return func(fails int) time.Duration { | ||||
| 		const ( | ||||
| 			min = 100 * time.Millisecond | ||||
| @@ -39,7 +43,7 @@ var DefaultRetryBackoffFn = func() func(int) time.Duration { | ||||
| 		backoff := min * time.Duration(1<<(fails-1)) | ||||
|  | ||||
| 		rngMu.Lock() | ||||
| 		jitter := 0.8 + 0.4*rng.Float64() | ||||
| 		jitter := 0.8 + 0.4*rand.Float64() | ||||
| 		rngMu.Unlock() | ||||
|  | ||||
| 		backoff = time.Duration(float64(backoff) * jitter) | ||||
| @@ -53,12 +57,27 @@ var DefaultRetryBackoffFn = func() func(int) time.Duration { | ||||
|  | ||||
| type Broker struct { | ||||
| 	c         *kgo.Client | ||||
| 	kopts     []kgo.Opt | ||||
| 	connected bool | ||||
| 	init      bool | ||||
| 	sync.RWMutex | ||||
| 	connected *atomic.Uint32 | ||||
|  | ||||
| 	kopts []kgo.Opt | ||||
| 	subs  []*Subscriber | ||||
|  | ||||
| 	opts broker.Options | ||||
| 	subs []*subscriber | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	init bool | ||||
| } | ||||
|  | ||||
| func (r *Broker) Live() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| func (r *Broker) Ready() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| func (r *Broker) Health() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| func (k *Broker) Address() string { | ||||
| @@ -69,63 +88,91 @@ func (k *Broker) Name() string { | ||||
| 	return k.opts.Name | ||||
| } | ||||
|  | ||||
| func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, error) { | ||||
| func (k *Broker) Client() *kgo.Client { | ||||
| 	return k.c | ||||
| } | ||||
|  | ||||
| func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *hookTracer, error) { | ||||
| 	var c *kgo.Client | ||||
| 	var err error | ||||
|  | ||||
| 	sp, _ := tracer.SpanFromContext(ctx) | ||||
|  | ||||
| 	clientID := "kgo" | ||||
| 	group := "" | ||||
| 	if k.opts.Context != nil { | ||||
| 		if id, ok := k.opts.Context.Value(clientIDKey{}).(string); ok { | ||||
| 			clientID = id | ||||
| 		} | ||||
| 		if id, ok := k.opts.Context.Value(groupKey{}).(string); ok { | ||||
| 			group = id | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	htracer := &hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer} | ||||
| 	opts = append(opts, | ||||
| 		kgo.WithHooks(&hookMeter{meter: k.opts.Meter}), | ||||
| 		kgo.WithHooks(htracer), | ||||
| 	) | ||||
|  | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| 		return nil, ctx.Err() | ||||
| 		if ctx.Err() != nil { | ||||
| 			if sp != nil { | ||||
| 				sp.SetStatus(tracer.SpanStatusError, ctx.Err().Error()) | ||||
| 			} | ||||
| 		} | ||||
| 		return nil, nil, ctx.Err() | ||||
| 	default: | ||||
| 		c, err = kgo.NewClient(opts...) | ||||
| 		if err == nil { | ||||
| 			err = c.Ping(ctx) // check connectivity to cluster | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 			if sp != nil { | ||||
| 				sp.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 			} | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	return c, nil | ||||
| 	return c, htracer, nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) Connect(ctx context.Context) error { | ||||
| 	k.RLock() | ||||
| 	if k.connected { | ||||
| 		k.RUnlock() | ||||
| 	if k.connected.Load() == 1 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	k.RUnlock() | ||||
|  | ||||
| 	nctx := k.opts.Context | ||||
| 	if ctx != nil { | ||||
| 		nctx = ctx | ||||
| 	} | ||||
|  | ||||
| 	c, err := k.connect(nctx, k.kopts...) | ||||
| 	c, _, err := k.connect(nctx, k.kopts...) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	k.Lock() | ||||
| 	k.c = c | ||||
| 	k.connected = true | ||||
| 	k.connected.Store(1) | ||||
| 	k.Unlock() | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) Disconnect(ctx context.Context) error { | ||||
| 	k.RLock() | ||||
| 	if !k.connected { | ||||
| 		k.RUnlock() | ||||
| 	if k.connected.Load() == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	k.RUnlock() | ||||
|  | ||||
| 	nctx := k.opts.Context | ||||
| 	if ctx != nil { | ||||
| 		nctx = ctx | ||||
| 	} | ||||
| 	var span tracer.Span | ||||
| 	ctx, span = k.opts.Tracer.Start(ctx, "Disconnect") | ||||
| 	defer span.Finish() | ||||
|  | ||||
| 	k.Lock() | ||||
| 	defer k.Unlock() | ||||
| @@ -134,6 +181,9 @@ func (k *Broker) Disconnect(ctx context.Context) error { | ||||
| 		return nctx.Err() | ||||
| 	default: | ||||
| 		for _, sub := range k.subs { | ||||
| 			if sub.closed { | ||||
| 				continue | ||||
| 			} | ||||
| 			if err := sub.Unsubscribe(ctx); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| @@ -144,7 +194,7 @@ func (k *Broker) Disconnect(ctx context.Context) error { | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	k.connected = false | ||||
| 	k.connected.Store(0) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @@ -152,6 +202,10 @@ func (k *Broker) Init(opts ...broker.Option) error { | ||||
| 	k.Lock() | ||||
| 	defer k.Unlock() | ||||
|  | ||||
| 	if len(opts) == 0 && k.init { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	for _, o := range opts { | ||||
| 		o(&k.opts) | ||||
| 	} | ||||
| @@ -175,6 +229,8 @@ func (k *Broker) Init(opts ...broker.Option) error { | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	k.init = true | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @@ -192,43 +248,44 @@ func (k *Broker) Publish(ctx context.Context, topic string, msg *broker.Message, | ||||
| } | ||||
|  | ||||
| func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error { | ||||
| 	k.RLock() | ||||
| 	if !k.connected { | ||||
| 		k.RUnlock() | ||||
| 		k.Lock() | ||||
| 		c, err := k.connect(ctx, k.kopts...) | ||||
| 	k.Lock() | ||||
| 	if k.connected.Load() == 0 { | ||||
| 		c, _, err := k.connect(ctx, k.kopts...) | ||||
| 		if err != nil { | ||||
| 			k.Unlock() | ||||
| 			return err | ||||
| 		} | ||||
| 		k.c = c | ||||
| 		k.connected = true | ||||
| 		k.Unlock() | ||||
| 		k.connected.Store(1) | ||||
| 	} | ||||
| 	k.RUnlock() | ||||
| 	k.Unlock() | ||||
|  | ||||
| 	options := broker.NewPublishOptions(opts...) | ||||
| 	records := make([]*kgo.Record, 0, len(msgs)) | ||||
| 	var errs []string | ||||
| 	var err error | ||||
| 	var key []byte | ||||
| 	var promise func(*kgo.Record, error) | ||||
|  | ||||
| 	if options.Context != nil { | ||||
| 		if k, ok := options.Context.Value(publishKey{}).([]byte); ok && k != nil { | ||||
| 			key = k | ||||
| 		} | ||||
| 		if p, ok := options.Context.Value(publishPromiseKey{}).(func(*kgo.Record, error)); ok && p != nil { | ||||
| 			promise = p | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, msg := range msgs { | ||||
| 		rec := &kgo.Record{Context: ctx, Key: key} | ||||
|  | ||||
| 		rec.Topic, _ = msg.Header.Get(metadata.HeaderTopic) | ||||
| 		if options.BodyOnly { | ||||
| 		msg.Header.Del(metadata.HeaderTopic) | ||||
|  | ||||
| 		k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc() | ||||
| 		if options.BodyOnly || k.opts.Codec.String() == "noop" { | ||||
| 			rec.Value = msg.Body | ||||
| 		} else if k.opts.Codec.String() == "noop" { | ||||
| 			rec.Value = msg.Body | ||||
| 			for k, v := range msg.Header { | ||||
| 				rec.Headers = append(rec.Headers, kgo.RecordHeader{Key: k, Value: []byte(v)}) | ||||
| 			} | ||||
| 			setHeaders(rec, msg.Header) | ||||
| 		} else { | ||||
| 			rec.Value, err = k.opts.Codec.Marshal(msg) | ||||
| 			if err != nil { | ||||
| @@ -238,10 +295,36 @@ func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...br | ||||
| 		records = append(records, rec) | ||||
| 	} | ||||
|  | ||||
| 	if promise != nil { | ||||
| 		ts := time.Now() | ||||
| 		for _, rec := range records { | ||||
| 			k.c.Produce(ctx, rec, func(r *kgo.Record, err error) { | ||||
| 				te := time.Since(ts) | ||||
| 				k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec() | ||||
| 				k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds()) | ||||
| 				k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds()) | ||||
| 				if err != nil { | ||||
| 					k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc() | ||||
| 				} else { | ||||
| 					k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc() | ||||
| 				} | ||||
| 				promise(r, err) | ||||
| 			}) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	ts := time.Now() | ||||
| 	results := k.c.ProduceSync(ctx, records...) | ||||
| 	te := time.Since(ts) | ||||
| 	for _, result := range results { | ||||
| 		k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds()) | ||||
| 		k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds()) | ||||
| 		k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec() | ||||
| 		if result.Err != nil { | ||||
| 			k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc() | ||||
| 			errs = append(errs, result.Err.Error()) | ||||
| 		} else { | ||||
| 			k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @@ -252,7 +335,23 @@ func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...br | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| func (k *Broker) TopicExists(ctx context.Context, topic string) error { | ||||
| 	mdreq := kmsg.NewMetadataRequest() | ||||
| 	mdreq.Topics = []kmsg.MetadataRequestTopic{ | ||||
| 		{Topic: &topic}, | ||||
| 	} | ||||
|  | ||||
| 	mdrsp, err := mdreq.RequestWith(ctx, k.c) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} else if mdrsp.Topics[0].ErrorCode != 0 { | ||||
| 		return fmt.Errorf("topic %s not exists or permission error", topic) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) BatchSubscribe(_ context.Context, _ string, _ broker.BatchHandler, _ ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
| @@ -260,11 +359,11 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han | ||||
| 	options := broker.NewSubscribeOptions(opts...) | ||||
|  | ||||
| 	if options.Group == "" { | ||||
| 		uid, err := id.New() | ||||
| 		uid, err := uuid.NewRandom() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		options.Group = uid | ||||
| 		options.Group = uid.String() | ||||
| 	} | ||||
|  | ||||
| 	commitInterval := DefaultCommitInterval | ||||
| @@ -274,7 +373,7 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	sub := &subscriber{ | ||||
| 	sub := &Subscriber{ | ||||
| 		topic:     topic, | ||||
| 		opts:      options, | ||||
| 		handler:   handler, | ||||
| @@ -301,12 +400,26 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	c, err := k.connect(ctx, kopts...) | ||||
| 	c, htracer, err := k.connect(ctx, kopts...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	mdreq := kmsg.NewMetadataRequest() | ||||
| 	mdreq.Topics = []kmsg.MetadataRequestTopic{ | ||||
| 		{Topic: &topic}, | ||||
| 	} | ||||
|  | ||||
| 	mdrsp, err := mdreq.RequestWith(ctx, c) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} else if mdrsp.Topics[0].ErrorCode != 0 { | ||||
| 		return nil, fmt.Errorf("topic %s not exists or permission error", topic) | ||||
| 	} | ||||
|  | ||||
| 	sub.c = c | ||||
| 	sub.htracer = htracer | ||||
|  | ||||
| 	go sub.poll(ctx) | ||||
|  | ||||
| 	k.Lock() | ||||
| @@ -320,7 +433,6 @@ func (k *Broker) String() string { | ||||
| } | ||||
|  | ||||
| func NewBroker(opts ...broker.Option) *Broker { | ||||
| 	rand.Seed(time.Now().Unix()) | ||||
| 	options := broker.NewOptions(opts...) | ||||
|  | ||||
| 	kaddrs := options.Addrs | ||||
| @@ -333,14 +445,13 @@ func NewBroker(opts ...broker.Option) *Broker { | ||||
| 		kgo.DialTimeout(3 * time.Second), | ||||
| 		kgo.DisableIdempotentWrite(), | ||||
| 		kgo.ProducerBatchCompression(kgo.NoCompression()), | ||||
| 		kgo.WithLogger(&mlogger{l: options.Logger, ctx: options.Context}), | ||||
| 		// kgo.WithLogger(kgo.BasicLogger(os.Stderr, kgo.LogLevelDebug, func() string { return time.Now().Format(time.StampMilli) })), | ||||
| 		kgo.WithHooks(&metrics{meter: options.Meter}), | ||||
| 		kgo.WithLogger(&mlogger{l: options.Logger.Clone(logger.WithAddCallerSkipCount(2)), ctx: options.Context}), | ||||
| 		kgo.SeedBrokers(kaddrs...), | ||||
| 		kgo.RetryBackoffFn(DefaultRetryBackoffFn), | ||||
| 		kgo.BlockRebalanceOnPoll(), | ||||
| 		kgo.Balancers(kgo.CooperativeStickyBalancer()), | ||||
| 		kgo.FetchIsolationLevel(kgo.ReadUncommitted()), | ||||
| 		kgo.UnknownTopicRetries(0), | ||||
| 	} | ||||
|  | ||||
| 	if options.Context != nil { | ||||
| @@ -350,7 +461,8 @@ func NewBroker(opts ...broker.Option) *Broker { | ||||
| 	} | ||||
|  | ||||
| 	return &Broker{ | ||||
| 		opts:  options, | ||||
| 		kopts: kopts, | ||||
| 		connected: &atomic.Uint32{}, | ||||
| 		opts:      options, | ||||
| 		kopts:     kopts, | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -50,7 +50,7 @@ func TestPubSub(t *testing.T) { | ||||
| 		t.Skip() | ||||
| 	} | ||||
|  | ||||
| 	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel), logger.WithCallerSkipCount(3)); err != nil { | ||||
| 	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	ctx := context.Background() | ||||
|   | ||||
							
								
								
									
										12
									
								
								logger.go
									
									
									
									
									
								
							
							
						
						
									
										12
									
								
								logger.go
									
									
									
									
									
								
							| @@ -2,7 +2,6 @@ package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v3/logger" | ||||
| @@ -29,15 +28,8 @@ func (l *mlogger) Log(lvl kgo.LogLevel, msg string, args ...interface{}) { | ||||
| 	default: | ||||
| 		return | ||||
| 	} | ||||
| 	if len(args) > 0 { | ||||
| 		fields := make(map[string]interface{}, int(len(args)/2)) | ||||
| 		for i := 0; i <= len(args)/2; i += 2 { | ||||
| 			fields[fmt.Sprintf("%v", args[i])] = args[i+1] | ||||
| 		} | ||||
| 		l.l.Fields(fields).Log(l.ctx, mlvl, msg) | ||||
| 	} else { | ||||
| 		l.l.Log(l.ctx, mlvl, msg) | ||||
| 	} | ||||
|  | ||||
| 	l.l.Log(l.ctx, mlvl, msg, args...) | ||||
| } | ||||
|  | ||||
| func (l *mlogger) Level() kgo.LogLevel { | ||||
|   | ||||
| @@ -9,19 +9,26 @@ import ( | ||||
| 	"go.unistack.org/micro/v3/meter" | ||||
| ) | ||||
| 
 | ||||
| type metrics struct { | ||||
| type hookMeter struct { | ||||
| 	meter meter.Meter | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	_ kgo.HookBrokerConnect       = &metrics{} | ||||
| 	_ kgo.HookBrokerDisconnect    = &metrics{} | ||||
| 	_ kgo.HookBrokerRead          = &metrics{} | ||||
| 	_ kgo.HookBrokerThrottle      = &metrics{} | ||||
| 	_ kgo.HookBrokerWrite         = &metrics{} | ||||
| 	_ kgo.HookFetchBatchRead      = &metrics{} | ||||
| 	_ kgo.HookProduceBatchWritten = &metrics{} | ||||
| 	_ kgo.HookGroupManageError    = &metrics{} | ||||
| 	_ kgo.HookBrokerConnect    = &hookMeter{} | ||||
| 	_ kgo.HookBrokerDisconnect = &hookMeter{} | ||||
| 	// HookBrokerE2E | ||||
| 	_ kgo.HookBrokerRead     = &hookMeter{} | ||||
| 	_ kgo.HookBrokerThrottle = &hookMeter{} | ||||
| 	_ kgo.HookBrokerWrite    = &hookMeter{} | ||||
| 	_ kgo.HookFetchBatchRead = &hookMeter{} | ||||
| 	// HookFetchRecordBuffered | ||||
| 	// HookFetchRecordUnbuffered | ||||
| 	_ kgo.HookGroupManageError = &hookMeter{} | ||||
| 	// HookNewClient | ||||
| 	_ kgo.HookProduceBatchWritten = &hookMeter{} | ||||
| 	// HookProduceRecordBuffered | ||||
| 	// HookProduceRecordPartitioned | ||||
| 	// HookProduceRecordUnbuffered | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| @@ -54,11 +61,11 @@ const ( | ||||
| 	labelTopic   = "topic" | ||||
| ) | ||||
| 
 | ||||
| func (m *metrics) OnGroupManageError(err error) { | ||||
| func (m *hookMeter) OnGroupManageError(_ error) { | ||||
| 	m.meter.Counter(metricBrokerGroupErrors).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { | ||||
| func (m *hookMeter) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelFaulure).Inc() | ||||
| @@ -67,12 +74,12 @@ func (m *metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ ne | ||||
| 	m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelSuccess).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { | ||||
| func (m *hookMeter) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerDisconnects, labelNode, node).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) { | ||||
| func (m *hookMeter) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerWriteErrors, labelNode, node).Inc() | ||||
| @@ -83,7 +90,7 @@ func (m *metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten i | ||||
| 	m.meter.Histogram(metricBrokerWriteLatencies, labelNode, node).Update(timeToWrite.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) { | ||||
| func (m *hookMeter) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerReadErrors, labelNode, node).Inc() | ||||
| @@ -95,18 +102,18 @@ func (m *metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, | ||||
| 	m.meter.Histogram(metricBrokerReadLatencies, labelNode, node).Update(timeToRead.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { | ||||
| func (m *hookMeter) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Histogram(metricBrokerThrottleLatencies, labelNode, node).Update(throttleInterval.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) { | ||||
| func (m *hookMeter) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerProduceBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes) | ||||
| 	m.meter.Counter(metricBrokerProduceBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) { | ||||
| func (m *hookMeter) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerFetchBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes) | ||||
| 	m.meter.Counter(metricBrokerFetchBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes) | ||||
							
								
								
									
										39
									
								
								options.go
									
									
									
									
									
								
							
							
						
						
									
										39
									
								
								options.go
									
									
									
									
									
								
							| @@ -9,8 +9,17 @@ import ( | ||||
| 	"go.unistack.org/micro/v3/client" | ||||
| ) | ||||
|  | ||||
| // DefaultCommitInterval specifies how fast send commit offsets to kafka | ||||
| var DefaultCommitInterval = 5 * time.Second | ||||
| var ( | ||||
|  | ||||
| 	// DefaultCommitInterval specifies how fast send commit offsets to kafka | ||||
| 	DefaultCommitInterval = 5 * time.Second | ||||
|  | ||||
| 	// DefaultStatsInterval specifies how fast check consumer lag | ||||
| 	DefaultStatsInterval = 5 * time.Second | ||||
|  | ||||
| 	// DefaultSubscribeMaxInflight specifies how much messages keep inflight | ||||
| 	DefaultSubscribeMaxInflight = 100 | ||||
| ) | ||||
|  | ||||
| type subscribeContextKey struct{} | ||||
|  | ||||
| @@ -63,6 +72,18 @@ func SubscribeOptions(opts ...kgo.Opt) broker.SubscribeOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type clientIDKey struct{} | ||||
|  | ||||
| func ClientID(id string) broker.Option { | ||||
| 	return broker.SetOption(clientIDKey{}, id) | ||||
| } | ||||
|  | ||||
| type groupKey struct{} | ||||
|  | ||||
| func Group(id string) broker.Option { | ||||
| 	return broker.SetOption(groupKey{}, id) | ||||
| } | ||||
|  | ||||
| type commitIntervalKey struct{} | ||||
|  | ||||
| // CommitInterval specifies interval to send commits | ||||
| @@ -70,11 +91,21 @@ func CommitInterval(td time.Duration) broker.Option { | ||||
| 	return broker.SetOption(commitIntervalKey{}, td) | ||||
| } | ||||
|  | ||||
| var DefaultSubscribeMaxInflight = 10 | ||||
|  | ||||
| type subscribeMaxInflightKey struct{} | ||||
|  | ||||
| // SubscribeMaxInFlight max queued messages | ||||
| func SubscribeMaxInFlight(n int) broker.SubscribeOption { | ||||
| 	return broker.SetSubscribeOption(subscribeMaxInflightKey{}, n) | ||||
| } | ||||
|  | ||||
| type publishPromiseKey struct{} | ||||
|  | ||||
| // PublishPromise set the kafka promise func for Produce | ||||
| func PublishPromise(fn func(*kgo.Record, error)) broker.PublishOption { | ||||
| 	return broker.SetPublishOption(publishPromiseKey{}, fn) | ||||
| } | ||||
|  | ||||
| // ClientPublishKey set the kafka message key (client option) | ||||
| func ClientPublishPromise(fn func(*kgo.Record, error)) client.PublishOption { | ||||
| 	return client.SetPublishOption(publishPromiseKey{}, fn) | ||||
| } | ||||
|   | ||||
							
								
								
									
										229
									
								
								subscriber.go
									
									
									
									
									
								
							
							
						
						
									
										229
									
								
								subscriber.go
									
									
									
									
									
								
							| @@ -2,12 +2,18 @@ package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kadm" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v3/broker" | ||||
| 	"go.unistack.org/micro/v3/logger" | ||||
| 	"go.unistack.org/micro/v3/metadata" | ||||
| 	"go.unistack.org/micro/v3/semconv" | ||||
| 	"go.unistack.org/micro/v3/tracer" | ||||
| ) | ||||
|  | ||||
| type tp struct { | ||||
| @@ -16,132 +22,190 @@ type tp struct { | ||||
| } | ||||
|  | ||||
| type consumer struct { | ||||
| 	c         *kgo.Client | ||||
| 	topic     string | ||||
| 	topic string | ||||
|  | ||||
| 	c *kgo.Client | ||||
| 	htracer   *hookTracer | ||||
|  | ||||
| 	handler broker.Handler | ||||
| 	quit    chan struct{} | ||||
| 	done    chan struct{} | ||||
| 	recs    chan kgo.FetchTopicPartition | ||||
|  | ||||
| 	kopts broker.Options | ||||
| 	opts  broker.SubscribeOptions | ||||
|  | ||||
| 	partition int32 | ||||
| 	opts      broker.SubscribeOptions | ||||
| 	kopts     broker.Options | ||||
| 	handler   broker.Handler | ||||
| 	quit      chan struct{} | ||||
| 	done      chan struct{} | ||||
| 	recs      chan kgo.FetchTopicPartition | ||||
| } | ||||
|  | ||||
| type subscriber struct { | ||||
| 	c         *kgo.Client | ||||
| 	topic     string | ||||
| 	opts      broker.SubscribeOptions | ||||
| 	kopts     broker.Options | ||||
| 	handler   broker.Handler | ||||
| 	closed    bool | ||||
| 	done      chan struct{} | ||||
| type Subscriber struct { | ||||
| 	consumers map[tp]*consumer | ||||
| 	c         *kgo.Client | ||||
| 	htracer   *hookTracer | ||||
| 	topic     string | ||||
|  | ||||
| 	handler broker.Handler | ||||
| 	done    chan struct{} | ||||
| 	kopts   broker.Options | ||||
| 	opts    broker.SubscribeOptions | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	closed bool | ||||
| } | ||||
|  | ||||
| func (s *subscriber) Options() broker.SubscribeOptions { | ||||
// Client returns the underlying kgo.Client used by this subscriber.
func (s *Subscriber) Client() *kgo.Client {
	return s.c
}
|  | ||||
// Options returns the subscribe options this subscriber was created with.
func (s *Subscriber) Options() broker.SubscribeOptions {
	return s.opts
}
|  | ||||
| func (s *subscriber) Topic() string { | ||||
// Topic returns the topic this subscriber consumes from.
func (s *Subscriber) Topic() string {
	return s.topic
}
|  | ||||
| func (s *subscriber) Unsubscribe(ctx context.Context) error { | ||||
| func (s *Subscriber) Unsubscribe(ctx context.Context) error { | ||||
| 	if s.closed { | ||||
| 		return nil | ||||
| 	} | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| 		return ctx.Err() | ||||
| 	default: | ||||
| 		close(s.done) | ||||
| 		s.closed = true | ||||
|  | ||||
| 	s.c.PauseFetchTopics(s.topic) | ||||
| 	s.c.CloseAllowingRebalance() | ||||
| 	kc := make(map[string][]int32) | ||||
| 	for ctp := range s.consumers { | ||||
| 		kc[ctp.t] = append(kc[ctp.t], ctp.p) | ||||
| 	} | ||||
| 	s.killConsumers(ctx, kc) | ||||
| 	close(s.done) | ||||
| 	s.closed = true | ||||
| 	s.c.ResumeFetchTopics(s.topic) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (s *subscriber) poll(ctx context.Context) { | ||||
| func (s *Subscriber) poll(ctx context.Context) { | ||||
| 	maxInflight := DefaultSubscribeMaxInflight | ||||
| 	if s.opts.Context != nil { | ||||
| 		if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); n > 0 && ok { | ||||
| 			maxInflight = n | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	go func() { | ||||
| 		ac := kadm.NewClient(s.c) | ||||
| 		ticker := time.NewTicker(DefaultStatsInterval) | ||||
|  | ||||
| 		for { | ||||
| 			select { | ||||
| 			case <-ctx.Done(): | ||||
| 				ticker.Stop() | ||||
| 				return | ||||
| 			case <-ticker.C: | ||||
| 				dgls, err := ac.Lag(ctx, s.opts.Group) | ||||
| 				if err != nil || !dgls.Ok() { | ||||
| 					continue | ||||
| 				} | ||||
|  | ||||
| 				dgl, ok := dgls[s.opts.Group] | ||||
| 				if !ok { | ||||
| 					continue | ||||
| 				} | ||||
| 				lmap, ok := dgl.Lag[s.topic] | ||||
| 				if !ok { | ||||
| 					continue | ||||
| 				} | ||||
|  | ||||
| 				s.Lock() | ||||
| 				for p, l := range lmap { | ||||
| 					s.kopts.Meter.Counter(semconv.BrokerGroupLag, "topic", s.topic, "group", s.opts.Group, "partition", strconv.Itoa(int(p)), "lag", strconv.Itoa(int(l.Lag))) | ||||
| 				} | ||||
| 				s.Unlock() | ||||
|  | ||||
| 			} | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| 			s.c.Close() | ||||
| 			s.c.CloseAllowingRebalance() | ||||
| 			return | ||||
| 		case <-s.done: | ||||
| 			s.c.Close() | ||||
| 			return | ||||
| 		default: | ||||
| 			fetches := s.c.PollRecords(ctx, maxInflight) | ||||
| 			if fetches.IsClientClosed() { | ||||
| 				s.kopts.Logger.Errorf(ctx, "[kgo] client closed") | ||||
| 			if !s.closed && fetches.IsClientClosed() { | ||||
| 				s.closed = true | ||||
| 				return | ||||
| 			} | ||||
| 			fetches.EachError(func(t string, p int32, err error) { | ||||
| 				s.kopts.Logger.Fatalf(ctx, "[kgo] fetch topic %s partition %d err: %v", t, p, err) | ||||
| 				s.kopts.Logger.Fatal(ctx, fmt.Sprintf("[kgo] fetch topic %s partition %d error", t, p), err) | ||||
| 			}) | ||||
|  | ||||
| 			fetches.EachPartition(func(p kgo.FetchTopicPartition) { | ||||
| 				tp := tp{p.Topic, p.Partition} | ||||
| 				s.consumers[tp].recs <- p | ||||
| 				nTp := tp{p.Topic, p.Partition} | ||||
| 				s.consumers[nTp].recs <- p | ||||
| 			}) | ||||
| 			s.c.AllowRebalance() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *subscriber) killConsumers(ctx context.Context, lost map[string][]int32) { | ||||
| func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32) { | ||||
| 	var wg sync.WaitGroup | ||||
| 	defer wg.Wait() | ||||
|  | ||||
| 	for topic, partitions := range lost { | ||||
| 		for _, partition := range partitions { | ||||
| 			tp := tp{topic, partition} | ||||
| 			pc := s.consumers[tp] | ||||
| 			delete(s.consumers, tp) | ||||
| 			nTp := tp{topic, partition} | ||||
| 			pc := s.consumers[nTp] | ||||
| 			delete(s.consumers, nTp) | ||||
| 			close(pc.quit) | ||||
| 			s.kopts.Logger.Debugf(ctx, "[kgo] waiting for work to finish topic %s partition %d", topic, partition) | ||||
| 			if s.kopts.Logger.V(logger.DebugLevel) { | ||||
| 				s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] waiting for work to finish topic %s partition %d", topic, partition)) | ||||
| 			} | ||||
| 			wg.Add(1) | ||||
| 			go func() { <-pc.done; wg.Done() }() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) { | ||||
| 	s.kopts.Logger.Debugf(ctx, "[kgo] lost %#+v", lost) | ||||
// lost tears down the per-partition consumers for the given topic/partition
// map after logging it at debug level.
// NOTE(review): presumably registered as the kgo OnPartitionsLost rebalance
// callback — confirm at client construction.
func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
	if s.kopts.Logger.V(logger.DebugLevel) {
		s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
	}
	s.killConsumers(ctx, lost)
}
|  | ||||
| func (s *subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) { | ||||
| 	s.kopts.Logger.Debugf(ctx, "[kgo] revoked %#+v", revoked) | ||||
| func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) { | ||||
| 	if s.kopts.Logger.V(logger.DebugLevel) { | ||||
| 		s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] revoked %#+v", revoked)) | ||||
| 	} | ||||
| 	s.killConsumers(ctx, revoked) | ||||
| 	if err := c.CommitMarkedOffsets(ctx); err != nil { | ||||
| 		s.kopts.Logger.Errorf(ctx, "[kgo] revoked CommitMarkedOffsets err: %v", err) | ||||
| 		s.kopts.Logger.Error(ctx, "[kgo] revoked CommitMarkedOffsets error", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) { | ||||
| func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) { | ||||
| 	for topic, partitions := range assigned { | ||||
| 		for _, partition := range partitions { | ||||
| 			pc := &consumer{ | ||||
| 				c:         c, | ||||
| 				topic:     topic, | ||||
| 				partition: partition, | ||||
|  | ||||
| 				quit:    make(chan struct{}), | ||||
| 				done:    make(chan struct{}), | ||||
| 				recs:    make(chan kgo.FetchTopicPartition, 4), | ||||
| 				handler: s.handler, | ||||
| 				kopts:   s.kopts, | ||||
| 				opts:    s.opts, | ||||
| 				htracer:   s.htracer, | ||||
| 				quit:      make(chan struct{}), | ||||
| 				done:      make(chan struct{}), | ||||
| 				recs:      make(chan kgo.FetchTopicPartition, 100), | ||||
| 				handler:   s.handler, | ||||
| 				kopts:     s.kopts, | ||||
| 				opts:      s.opts, | ||||
| 			} | ||||
| 			s.Lock() | ||||
| 			s.consumers[tp{topic, partition}] = pc | ||||
| 			s.Unlock() | ||||
| 			go pc.consume() | ||||
| 		} | ||||
| 	} | ||||
| @@ -149,8 +213,10 @@ func (s *subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[str | ||||
|  | ||||
| func (pc *consumer) consume() { | ||||
| 	defer close(pc.done) | ||||
| 	pc.kopts.Logger.Debugf(pc.kopts.Context, "starting, topic %s partition %d", pc.topic, pc.partition) | ||||
| 	defer pc.kopts.Logger.Debugf(pc.kopts.Context, "killing, topic %s partition %d", pc.topic, pc.partition) | ||||
| 	if pc.kopts.Logger.V(logger.DebugLevel) { | ||||
| 		pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("starting, topic %s partition %d", pc.topic, pc.partition)) | ||||
| 		defer pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("killing, topic %s partition %d", pc.topic, pc.partition)) | ||||
| 	} | ||||
|  | ||||
| 	eh := pc.kopts.ErrorHandler | ||||
| 	if pc.opts.ErrorHandler != nil { | ||||
| @@ -163,66 +229,99 @@ func (pc *consumer) consume() { | ||||
| 			return | ||||
| 		case p := <-pc.recs: | ||||
| 			for _, record := range p.Records { | ||||
| 				ctx, sp := pc.htracer.WithProcessSpan(record) | ||||
| 				ts := time.Now() | ||||
| 				pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Inc() | ||||
| 				p := eventPool.Get().(*event) | ||||
| 				p.msg.Header = nil | ||||
| 				p.msg.Body = nil | ||||
| 				p.topic = record.Topic | ||||
| 				p.err = nil | ||||
| 				p.ack = false | ||||
| 				p.msg.Header = metadata.New(len(record.Headers)) | ||||
| 				p.ctx = ctx | ||||
| 				for _, hdr := range record.Headers { | ||||
| 					p.msg.Header.Set(hdr.Key, string(hdr.Value)) | ||||
| 				} | ||||
| 				if pc.kopts.Codec.String() == "noop" { | ||||
| 					p.msg.Header = metadata.New(len(record.Headers)) | ||||
| 					for _, hdr := range record.Headers { | ||||
| 						p.msg.Header.Set(hdr.Key, string(hdr.Value)) | ||||
| 					} | ||||
| 					p.msg.Body = record.Value | ||||
| 				} else if pc.opts.BodyOnly { | ||||
| 					p.msg.Body = record.Value | ||||
| 				} else { | ||||
| 					if err := pc.kopts.Codec.Unmarshal(record.Value, p.msg); err != nil { | ||||
| 					sp.AddEvent("codec unmarshal start") | ||||
| 					err := pc.kopts.Codec.Unmarshal(record.Value, p.msg) | ||||
| 					sp.AddEvent("codec unmarshal stop") | ||||
| 					if err != nil { | ||||
| 						sp.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 						pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc() | ||||
| 						p.err = err | ||||
| 						p.msg.Body = record.Value | ||||
| 						if eh != nil { | ||||
| 							_ = eh(p) | ||||
| 							pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec() | ||||
| 							if p.ack { | ||||
| 								pc.c.MarkCommitRecords(record) | ||||
| 							} else { | ||||
| 								eventPool.Put(p) | ||||
| 								pc.kopts.Logger.Infof(pc.kopts.Context, "[kgo] ErrLostMessage wtf?") | ||||
| 								pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?") | ||||
| 								return | ||||
| 							} | ||||
| 							eventPool.Put(p) | ||||
| 							te := time.Since(ts) | ||||
| 							pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 							pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 							continue | ||||
| 						} else { | ||||
| 							if pc.kopts.Logger.V(logger.ErrorLevel) { | ||||
| 								pc.kopts.Logger.Errorf(pc.kopts.Context, "[kgo]: failed to unmarshal: %v", err) | ||||
| 							} | ||||
| 							pc.kopts.Logger.Error(pc.kopts.Context, "[kgo]: unmarshal error", err) | ||||
| 						} | ||||
| 						te := time.Since(ts) | ||||
| 						pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec() | ||||
| 						pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 						pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 						eventPool.Put(p) | ||||
| 						pc.kopts.Logger.Infof(pc.kopts.Context, "[kgo] Unmarshal err not handled wtf?") | ||||
| 						pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] Unmarshal err not handled wtf?") | ||||
| 						sp.Finish() | ||||
| 						return | ||||
| 					} | ||||
| 				} | ||||
| 				sp.AddEvent("handler start") | ||||
| 				err := pc.handler(p) | ||||
| 				sp.AddEvent("handler stop") | ||||
| 				if err == nil { | ||||
| 					pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "success").Inc() | ||||
| 				} else { | ||||
| 					sp.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 					pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc() | ||||
| 				} | ||||
| 				pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec() | ||||
| 				if err == nil && pc.opts.AutoAck { | ||||
| 					p.ack = true | ||||
| 				} else if err != nil { | ||||
| 					p.err = err | ||||
| 					if eh != nil { | ||||
| 						sp.AddEvent("error handler start") | ||||
| 						_ = eh(p) | ||||
| 						sp.AddEvent("error handler stop") | ||||
| 					} else { | ||||
| 						if pc.kopts.Logger.V(logger.ErrorLevel) { | ||||
| 							pc.kopts.Logger.Errorf(pc.kopts.Context, "[kgo]: subscriber error: %v", err) | ||||
| 							pc.kopts.Logger.Error(pc.kopts.Context, "[kgo]: subscriber error", err) | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 				te := time.Since(ts) | ||||
| 				pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 				pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 				if p.ack { | ||||
| 					eventPool.Put(p) | ||||
| 					pc.c.MarkCommitRecords(record) | ||||
| 				} else { | ||||
| 					eventPool.Put(p) | ||||
| 					pc.kopts.Logger.Fatalf(pc.kopts.Context, "[kgo] ErrLostMessage wtf?") | ||||
| 					pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?") | ||||
| 					sp.SetStatus(tracer.SpanStatusError, "ErrLostMessage") | ||||
| 					sp.Finish() | ||||
| 					return | ||||
| 				} | ||||
| 				sp.Finish() | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|   | ||||
							
								
								
									
										204
									
								
								tracer.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										204
									
								
								tracer.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,204 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	semconv "go.opentelemetry.io/otel/semconv/v1.18.0" | ||||
| 	"go.unistack.org/micro/v3/metadata" | ||||
| 	"go.unistack.org/micro/v3/tracer" | ||||
| ) | ||||
|  | ||||
// hookTracer implements franz-go client hooks that open and close tracer
// spans around produced and fetched records.
type hookTracer struct {
	tracer   tracer.Tracer
	clientID string // reported as the kafka client id span attribute when non-empty
	group    string // reported as the consumer group span attribute when non-empty
}

// messagingSystem labels every span produced here as belonging to kafka.
var messagingSystem = semconv.MessagingSystemKey.String("kafka")

// Compile-time assertions that hookTracer satisfies the kgo hook interfaces.
var (
	_ kgo.HookProduceRecordBuffered   = (*hookTracer)(nil)
	_ kgo.HookProduceRecordUnbuffered = (*hookTracer)(nil)
	_ kgo.HookFetchRecordBuffered     = (*hookTracer)(nil)
	_ kgo.HookFetchRecordUnbuffered   = (*hookTracer)(nil)
)
|  | ||||
// OnProduceRecordBuffered starts a new span for the "publish" operation on a
// buffered record.
//
// It sets span options and injects the span context into record and updates
// the record's context, so it can be ended in the OnProduceRecordUnbuffered
// hook.
func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
	// Set up span options: system, destination and operation attributes,
	// plus the record key and client id when available.
	attrs := []interface{}{
		messagingSystem,
		semconv.MessagingDestinationKindTopic,
		semconv.MessagingDestinationName(r.Topic),
		semconv.MessagingOperationPublish,
	}
	attrs = maybeKeyAttr(attrs, r)
	if m.clientID != "" {
		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
	}
	opts := []tracer.SpanOption{
		tracer.WithSpanLabels(attrs...),
		tracer.WithSpanKind(tracer.SpanKindProducer),
	}

	// Hooks may see records created without a context.
	if r.Context == nil {
		r.Context = context.Background()
	}

	omd, ok := metadata.FromOutgoingContext(r.Context)
	if !ok {
		omd = metadata.New(len(r.Headers))
	}

	// Merge record headers on top of a copy of the outgoing metadata.
	md := metadata.Copy(omd)
	for _, h := range r.Headers {
		md.Set(h.Key, string(h.Value))
	}

	// Only when no outgoing metadata existed is the merged md attached to the
	// context before starting the span.
	// NOTE(review): when ok == true the merged md is discarded — confirm that
	// dropping the header-derived entries is intended.
	if !ok {
		r.Context, _ = m.tracer.Start(metadata.NewOutgoingContext(r.Context, md), "sdk.broker", opts...)
	} else {
		r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
	}

	// NOTE(review): setHeaders receives omd (pre-merge), not md — verify
	// against setHeaders' contract elsewhere in this package.
	setHeaders(r, omd, metadata.HeaderContentType)
}
|  | ||||
| // OnProduceRecordUnbuffered continues and ends the "publish" span for an | ||||
| // unbuffered record. | ||||
| // | ||||
| // It sets attributes with values unset when producing and records any error | ||||
| // that occurred during the publish operation. | ||||
| func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) { | ||||
| 	if span, ok := tracer.SpanFromContext(r.Context); ok { | ||||
| 		span.AddLabels( | ||||
| 			semconv.MessagingKafkaDestinationPartition(int(r.Partition)), | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			span.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 		} | ||||
| 		span.Finish() | ||||
| 	} | ||||
| } | ||||
|  | ||||
// OnFetchRecordBuffered starts a new span for the "receive" operation on a
// buffered record.
//
// It sets the span options and extracts the span context from the record,
// updates the record's context to ensure it can be ended in the
// OnFetchRecordUnbuffered hook and can be used in downstream consumer
// processing.
func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
	// Set up the span options: system, source and operation attributes, plus
	// key, client id and consumer group when available.
	attrs := []interface{}{
		messagingSystem,
		semconv.MessagingSourceKindTopic,
		semconv.MessagingSourceName(r.Topic),
		semconv.MessagingOperationReceive,
		semconv.MessagingKafkaSourcePartition(int(r.Partition)),
	}
	attrs = maybeKeyAttr(attrs, r)
	if m.clientID != "" {
		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
	}
	if m.group != "" {
		attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
	}
	opts := []tracer.SpanOption{
		tracer.WithSpanLabels(attrs...),
		tracer.WithSpanKind(tracer.SpanKindConsumer),
	}

	// Fetched records may arrive without a context.
	if r.Context == nil {
		r.Context = context.Background()
	}
	omd, ok := metadata.FromIncomingContext(r.Context)
	if !ok {
		omd = metadata.New(len(r.Headers))
	}

	// Merge record headers on top of a copy of the incoming metadata.
	md := metadata.Copy(omd)
	for _, h := range r.Headers {
		md.Set(h.Key, string(h.Value))
	}

	// Mirrors OnProduceRecordBuffered: the merged md is attached only when no
	// incoming metadata existed.
	// NOTE(review): when ok == true the merged md is discarded — confirm intended.
	if !ok {
		r.Context, _ = m.tracer.Start(metadata.NewIncomingContext(r.Context, md), "sdk.broker", opts...)
	} else {
		r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
	}

	// NOTE(review): setHeaders receives omd (pre-merge), not md — verify
	// against setHeaders' contract elsewhere in this package.
	setHeaders(r, omd, metadata.HeaderContentType)
}
|  | ||||
| // OnFetchRecordUnbuffered continues and ends the "receive" span for an | ||||
| // unbuffered record. | ||||
| func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) { | ||||
| 	span, _ := tracer.SpanFromContext(r.Context) | ||||
| 	span.Finish() | ||||
| } | ||||
|  | ||||
// WithProcessSpan starts a new span for the "process" operation on a consumer
// record.
//
// It sets up the span options. The user's application code is responsible for
// ending the span.
//
// This should only ever be called within a polling loop of a consumed record and
// not a record which has been created for producing, so call this at the start of each
// iteration of your processing for the record.
func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Span) {
	// Set up the span options: system, source, operation, partition and
	// offset attributes, plus key, client id and consumer group when available.
	attrs := []interface{}{
		messagingSystem,
		semconv.MessagingSourceKindTopic,
		semconv.MessagingSourceName(r.Topic),
		semconv.MessagingOperationProcess,
		semconv.MessagingKafkaSourcePartition(int(r.Partition)),
		semconv.MessagingKafkaMessageOffset(int(r.Offset)),
	}
	attrs = maybeKeyAttr(attrs, r)
	if m.clientID != "" {
		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
	}
	if m.group != "" {
		attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
	}
	opts := []tracer.SpanOption{
		tracer.WithSpanLabels(attrs...),
		tracer.WithSpanKind(tracer.SpanKindConsumer),
	}

	if r.Context == nil {
		r.Context = context.Background()
	}
	md, ok := metadata.FromIncomingContext(r.Context)
	if !ok {
		md = metadata.New(len(r.Headers))
	}
	// NOTE(review): md is populated from the headers but, when !ok, is never
	// attached to r.Context (compare OnFetchRecordBuffered, which calls
	// metadata.NewIncomingContext). When ok, md aliases the context's metadata
	// map, so Set mutates shared state in place. Confirm both are intended.
	for _, h := range r.Headers {
		md.Set(h.Key, string(h.Value))
	}

	// Start a new span using the provided context and options.
	return m.tracer.Start(r.Context, "sdk.broker", opts...)
}
|  | ||||
| func maybeKeyAttr(attrs []interface{}, r *kgo.Record) []interface{} { | ||||
| 	if r.Key == nil { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	var keykey string | ||||
| 	if !utf8.Valid(r.Key) { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	keykey = string(r.Key) | ||||
| 	return append(attrs, semconv.MessagingKafkaMessageKeyKey.String(keykey)) | ||||
| } | ||||
		Reference in New Issue
	
	Block a user