Compare commits
	
		
			245 Commits
		
	
	
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 9dcdef57f4 | |||
| f8a115595b | |||
|  | 52ea1ef8c0 | ||
| ffd10d7ea9 | |||
|  | c4f8de4aca | ||
| 63d82be92a | |||
| cced3b84c1 | |||
|  | 82c1bda50f | ||
| 4b0b70e7d0 | |||
|  | da88718e03 | ||
| aed304b7e3 | |||
| b796c43257 | |||
| b0691edf82 | |||
| d43b59f2f3 | |||
| f976516d6b | |||
| 8f5c03ad60 | |||
|  | 9e9c1434ff | ||
| fe81a14413 | |||
| eb56a48dda | |||
| 71e8a13c70 | |||
| b538ef82b5 | |||
| 44e2d5f9a4 | |||
|  | 9426208e1c | ||
| d6e73a3419 | |||
| 969e459e3d | |||
|  | b2cd7d8b8e | ||
| 6d85d3ee41 | |||
| d0978fb314 | |||
|  | 0bd81d1253 | ||
| e34f57a515 | |||
| ccd912adb2 | |||
| db97b24904 | |||
| 7410408fc4 | |||
| 4d28a6ebb2 | |||
| f73d9d7347 | |||
| 53f72f1246 | |||
| ac454693a6 | |||
| f4f8793686 | |||
| ec4922ad8b | |||
| eaea14e5a8 | |||
| f6f7139d2f | |||
| 51e4118dfc | |||
| aaf8c43e04 | |||
| cfecb4afd0 | |||
| ae4ae64694 | |||
| 2ca4e5c74e | |||
| 2809ebcaeb | |||
| 3f04403319 | |||
| 8edfc11eef | |||
| cb5ef75e09 | |||
| d5bd105cc6 | |||
| 1ba02ed8ad | |||
| 7c5da60556 | |||
| 07fa36d704 | |||
| 849bbd7a09 | |||
| f461bb6876 | |||
| 201d22d1c4 | |||
| ff6a272594 | |||
| 1018abe7bd | |||
| 398c3c81cb | |||
|  | 786d03b521 | ||
| 937c9d5720 | |||
|  | 951fba55fa | ||
|  | ec5238ed14 | ||
| d8f44a924e | |||
|  | ffe9e5d952 | ||
| 8c362fd6ae | |||
| 90365a455c | |||
| 6a218ca7b2 | |||
| ba9b88c650 | |||
| aedd60ea87 | |||
| ea2ac477be | |||
| c1fa2f639d | |||
| 8e3f2c67d7 | |||
| e66194695e | |||
| 894d6f4f20 | |||
| d404fa31ab | |||
| 88777a29ad | |||
| 23c2903c21 | |||
| 8fcc23f639 | |||
| 25dda1f34c | |||
| fe66086c40 | |||
| 7329bc23bc | |||
| c240631cdb | |||
|  | 6a68533824 | ||
| 058b6354c0 | |||
| 1f4cf11afe | |||
| 39177da1d0 | |||
| d559db4050 | |||
| aa946c469a | |||
| 9c4d88bb69 | |||
| 56288f46b1 | |||
| 81dcef8b28 | |||
| ec7a22b2dc | |||
| d2ac0c1360 | |||
| 69dd8c4eea | |||
| 27a6a923cd | |||
| 0a395235d6 | |||
| 23f0ad0f2f | |||
| 4fcbe0a770 | |||
| 28c9865121 | |||
| 697413d829 | |||
| 8a64e8c5cc | |||
| 2c8ca8d14f | |||
| 769ac6322f | |||
| 52318d68b8 | |||
| 5c4332ffc4 | |||
| 3a86d4c0f4 | |||
| 8bbcc30d04 | |||
| 221b248b1f | |||
| a9b13378f3 | |||
| adb5c9bfc9 | |||
|  | 92bf44e543 | ||
| e48ded17ee | |||
|  | 7753ecd14f | ||
| 7766fbf312 | |||
|  | 6916724a8b | ||
| e584100c18 | |||
|  | 63cc0c1cc6 | ||
| 396e71e9f7 | |||
|  | 4709eaf769 | ||
| 366bdea3d0 | |||
|  | 947fca7cd5 | ||
| 97c55fa16b | |||
|  | c845a00697 | ||
|  | dbee3ac120 | ||
|  | 740926d262 | ||
|  | 03d78ef2ff | ||
|  | 90063ef841 | ||
| 7faf1dfbc8 | |||
|  | 5a80d798c6 | ||
| 0323c5fccc | |||
|  | 80e7f6b19f | ||
| 9a4cc96640 | |||
|  | 4a67d63e4e | ||
| f69d96756e | |||
|  | 67dbeb2c57 | ||
| 328d15311b | |||
|  | 2899b963c4 | ||
|  | 78f07ac959 | ||
| dc1ebd45be | |||
|  | 6925f0291d | ||
| 5d8a72fa86 | |||
|  | f7096e71cf | ||
|  | ab10ca9658 | ||
|  | bb269b5879 | ||
| 24a6b4045e | |||
|  | 09b1b526b8 | ||
| af9bca00a1 | |||
|  | 1c2cc611ef | ||
|  | d6e18b5163 | ||
|  | 37d571076f | ||
|  | ddba046c77 | ||
| 7390973672 | |||
| 6360f5e78f | |||
|  | 9a69314c25 | ||
|  | a4eb047a58 | ||
|  | 7ad74ea1fc | ||
| 821b12484e | |||
| 893794dbf4 | |||
| ea629d6e34 | |||
| 585c94ad65 | |||
| 1d100aee63 | |||
| 6ccc996fe6 | |||
| 0d8c5d3a86 | |||
| 203266aaea | |||
|  | a7fed3241d | ||
|  | d0dbb58f4b | ||
| 71de5058a7 | |||
|  | 0e0762180d | ||
| 80523cf9bb | |||
|  | 9c4f7d65d2 | ||
|  | 4b4add52a6 | ||
|  | 61003dd396 | ||
|  | 12bee708a0 | ||
|  | f4a0c3e5de | ||
|  | 2e065f49c0 | ||
| 2a91aba7bf | |||
|  | fe4cf8a492 | ||
|  | cbf92e21e0 | ||
|  | ce096a23c0 | ||
|  | 105faed19f | ||
| d6ff088e1f | |||
|  | a33b7c1ccb | ||
| 87b5b3dd9f | |||
| 3081e902d2 | |||
|  | 204cd9608c | ||
| caefafeea7 | |||
|  | 05b0ede365 | ||
| b3e8eeb6ff | |||
|  | 1a1e359a4e | ||
| b7a77d4038 | |||
|  | d1b6d2187a | ||
| a4a88c1aad | |||
|  | 3a9c961474 | ||
| 0f0e095629 | |||
|  | 70a4aa1982 | ||
| 7cfe5bcd98 | |||
|  | 7ad07bd79e | ||
| 9d4aa6ce53 | |||
|  | 8f500ef92c | ||
| f6bad32a6a | |||
|  | dc30c68388 | ||
|  | b099c2d0e0 | ||
|  | 389736e131 | ||
|  | e774a716c5 | ||
|  | f208bf9272 | ||
|  | a0a86ef4f6 | ||
|  | 3a5716419d | ||
|  | 10188f073b | ||
|  | df5999629c | ||
|  | 6b051b3fd8 | ||
|  | d856988a16 | ||
| bc0d2026ad | |||
| 1fc59df95b | |||
| ff82f9f960 | |||
|  | 221f07ca97 | ||
| 44b9a59aab | |||
|  | 81cd4c1ec6 | ||
| 50d1343adb | |||
|  | 4c6b057a90 | ||
| 06acad41fe | |||
|  | 0ac121099f | ||
| d30f3d200b | |||
| 4373f485ce | |||
|  | 9faac6a407 | ||
| 02134eacc2 | |||
|  | be302e0816 | ||
| cfd4fb611e | |||
|  | 619d9d0d70 | ||
| ebad3e1369 | |||
|  | 485de5136f | ||
| 6ce9ed8e84 | |||
| 2c74b3232b | |||
| 69fb765b09 | |||
| abfa9da1a9 | |||
|  | a3185dc5b0 | ||
|  | f0dd8ac4bf | ||
|  | 7aef148699 | ||
|  | f34130c30c | ||
|  | a3920dbb36 | ||
|  | e428af2800 | ||
|  | b76d17b510 | ||
|  | ce55b40818 | ||
| be1c68e6a5 | 
							
								
								
									
										19
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										19
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,19 +0,0 @@ | ||||
| # To get started with Dependabot version updates, you'll need to specify which | ||||
| # package ecosystems to update and where the package manifests are located. | ||||
| # Please see the documentation for all configuration options: | ||||
| # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates | ||||
|  | ||||
| version: 2 | ||||
| updates: | ||||
|  | ||||
|   # Maintain dependencies for GitHub Actions | ||||
|   - package-ecosystem: "github-actions" | ||||
|     directory: "/" | ||||
|     schedule: | ||||
|       interval: "daily" | ||||
|  | ||||
|   # Maintain dependencies for Golang | ||||
|   - package-ecosystem: "gomod" | ||||
|     directory: "/" | ||||
|     schedule: | ||||
|       interval: "daily" | ||||
							
								
								
									
										46
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										46
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,46 +0,0 @@ | ||||
| name: build | ||||
| on: | ||||
|  push: | ||||
|     branches: | ||||
|     - master | ||||
| jobs: | ||||
|   test: | ||||
|     name: test | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: setup | ||||
|       uses: actions/setup-go@v2 | ||||
|       with: | ||||
|         go-version: 1.16 | ||||
|     - name: checkout | ||||
|       uses: actions/checkout@v2 | ||||
|     - name: cache | ||||
|       uses: actions/cache@v2 | ||||
|       with: | ||||
|         path: ~/go/pkg/mod | ||||
|         key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} | ||||
|         restore-keys: ${{ runner.os }}-go- | ||||
|     - name: deps | ||||
|       run: go get -v -t -d ./... | ||||
|     - name: test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
|   lint: | ||||
|     name: lint | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: checkout | ||||
|         uses: actions/checkout@v2 | ||||
|       - name: lint | ||||
|         uses: golangci/golangci-lint-action@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. | ||||
|           version: v1.30 | ||||
|           # Optional: working directory, useful for monorepos | ||||
|           # working-directory: somedir | ||||
|           # Optional: golangci-lint command line arguments. | ||||
|           # args: --issues-exit-code=0 | ||||
|           # Optional: show only new issues if it's a pull request. The default value is `false`. | ||||
|           # only-new-issues: true | ||||
							
								
								
									
										75
									
								
								.github/workflows/codeql-analysis.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										75
									
								
								.github/workflows/codeql-analysis.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,75 +0,0 @@ | ||||
| # For most projects, this workflow file will not need changing; you simply need | ||||
| # to commit it to your repository. | ||||
| # | ||||
| # You may wish to alter this file to override the set of languages analyzed, | ||||
| # or to provide custom queries or build logic. | ||||
| # | ||||
| # ******** NOTE ******** | ||||
| # We have attempted to detect the languages in your repository. Please check | ||||
| # the `language` matrix defined below to confirm you have the correct set of | ||||
| # supported CodeQL languages. | ||||
| # | ||||
| name: "CodeQL" | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["prbuild"] | ||||
|     types: | ||||
|       - completed | ||||
|   push: | ||||
|     branches: [ master ] | ||||
|   pull_request: | ||||
|     # The branches below must be a subset of the branches above | ||||
|     branches: [ master ] | ||||
|   schedule: | ||||
|     - cron: '34 1 * * 0' | ||||
|  | ||||
| jobs: | ||||
|   analyze: | ||||
|     name: Analyze | ||||
|     runs-on: ubuntu-latest | ||||
|     permissions: | ||||
|       actions: read | ||||
|       contents: read | ||||
|       security-events: write | ||||
|  | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         language: [ 'go' ] | ||||
|         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] | ||||
|         # Learn more: | ||||
|         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed | ||||
|  | ||||
|     steps: | ||||
|     - name: Checkout repository | ||||
|       uses: actions/checkout@v2 | ||||
|  | ||||
|     # Initializes the CodeQL tools for scanning. | ||||
|     - name: Initialize CodeQL | ||||
|       uses: github/codeql-action/init@v1 | ||||
|       with: | ||||
|         languages: ${{ matrix.language }} | ||||
|         # If you wish to specify custom queries, you can do so here or in a config file. | ||||
|         # By default, queries listed here will override any specified in a config file. | ||||
|         # Prefix the list here with "+" to use these queries and those in the config file. | ||||
|         # queries: ./path/to/local/query, your-org/your-repo/queries@main | ||||
|  | ||||
|     # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java). | ||||
|     # If this step fails, then you should remove it and run the build manually (see below) | ||||
|     - name: Autobuild | ||||
|       uses: github/codeql-action/autobuild@v1 | ||||
|  | ||||
|     # ℹ️ Command-line programs to run using the OS shell. | ||||
|     # 📚 https://git.io/JvXDl | ||||
|  | ||||
|     # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines | ||||
|     #    and modify them (or add more) to build your code if your project | ||||
|     #    uses a compiled language | ||||
|  | ||||
|     #- run: | | ||||
|     #   make bootstrap | ||||
|     #   make release | ||||
|  | ||||
|     - name: Perform CodeQL Analysis | ||||
|       uses: github/codeql-action/analyze@v1 | ||||
							
								
								
									
										66
									
								
								.github/workflows/dependabot-automerge.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										66
									
								
								.github/workflows/dependabot-automerge.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,66 +0,0 @@ | ||||
| name: "prautomerge" | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["prbuild"] | ||||
|     types: | ||||
|       - completed | ||||
|  | ||||
| permissions: | ||||
|   contents: write | ||||
|   pull-requests: write | ||||
|  | ||||
| jobs: | ||||
|   Dependabot-Automerge: | ||||
|     runs-on: ubuntu-latest | ||||
|     # Contains workaround to execute if dependabot updates the PR by checking for the base branch in the linked PR | ||||
|     # The the github.event.workflow_run.event value is 'push' and not 'pull_request' | ||||
|     # dont work with multiple workflows when last returns success | ||||
|     if: >- | ||||
|       github.event.workflow_run.conclusion == 'success' | ||||
|       && github.actor == 'dependabot[bot]' | ||||
|       && github.event.sender.login == 'dependabot[bot]' | ||||
|       && github.event.sender.type == 'Bot' | ||||
|       && (github.event.workflow_run.event == 'pull_request' | ||||
|           || (github.event.workflow_run.event == 'push' && github.event.workflow_run.pull_requests[0].base.ref == github.event.repository.default_branch )) | ||||
|     steps: | ||||
|       - name: Approve Changes and Merge changes if label 'dependencies' is set | ||||
|         uses: actions/github-script@v4 | ||||
|         with: | ||||
|           github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|           script: | | ||||
|             console.log(context.payload.workflow_run); | ||||
|              | ||||
|             var labelNames = await github.paginate( | ||||
|               github.issues.listLabelsOnIssue, | ||||
|               { | ||||
|                 repo: context.repo.repo, | ||||
|                 owner: context.repo.owner, | ||||
|                 issue_number: context.payload.workflow_run.pull_requests[0].number, | ||||
|               }, | ||||
|               (response) => response.data.map( | ||||
|                 (label) => label.name | ||||
|               ) | ||||
|             ); | ||||
|  | ||||
|             console.log(labelNames); | ||||
|  | ||||
|             if (labelNames.includes('dependencies')) { | ||||
|               console.log('Found label'); | ||||
|  | ||||
|               await github.pulls.createReview({ | ||||
|                 repo: context.repo.repo, | ||||
|                 owner: context.repo.owner, | ||||
|                 pull_number: context.payload.workflow_run.pull_requests[0].number, | ||||
|                 event: 'APPROVE' | ||||
|               }); | ||||
|               console.log('Approved PR'); | ||||
|  | ||||
|               await github.pulls.merge({ | ||||
|                 repo: context.repo.repo, | ||||
|                 owner: context.repo.owner, | ||||
|                 pull_number: context.payload.workflow_run.pull_requests[0].number, | ||||
|               }); | ||||
|  | ||||
|               console.log('Merged PR'); | ||||
|             } | ||||
							
								
								
									
										53
									
								
								.github/workflows/job_coverage.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										53
									
								
								.github/workflows/job_coverage.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,53 @@ | ||||
| name: coverage | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     branches: [ main, v3, v4 ] | ||||
|     paths-ignore: | ||||
|       - '.github/**' | ||||
|       - '.gitea/**' | ||||
|   pull_request: | ||||
|     branches: [ main, v3, v4 ] | ||||
|  | ||||
| jobs: | ||||
|  | ||||
|   build: | ||||
|     if: github.server_url != 'https://github.com' | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|  | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|  | ||||
|     - name: test coverage | ||||
|       run: | | ||||
|         go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./... | ||||
|         go tool cover -func coverage.out -o coverage.out | ||||
|  | ||||
|     - name: coverage badge | ||||
|       uses: tj-actions/coverage-badge-go@v2 | ||||
|       with: | ||||
|         green: 80 | ||||
|         filename: coverage.out | ||||
|  | ||||
|     - uses: stefanzweifel/git-auto-commit-action@v4 | ||||
|       name: autocommit | ||||
|       with: | ||||
|         commit_message: Apply Code Coverage Badge | ||||
|         skip_fetch: false | ||||
|         skip_checkout: false | ||||
|         file_pattern: ./README.md | ||||
|  | ||||
|     - name: push | ||||
|       if: steps.auto-commit-action.outputs.changes_detected == 'true' | ||||
|       uses: ad-m/github-push-action@master | ||||
|       with: | ||||
|         github_token: ${{ github.token }} | ||||
|         branch: ${{ github.ref }} | ||||
							
								
								
									
										29
									
								
								.github/workflows/job_lint.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								.github/workflows/job_lint.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| name: lint | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: [ master, v3, v4 ] | ||||
|     paths-ignore: | ||||
|       - '.github/**' | ||||
|       - '.gitea/**' | ||||
|  | ||||
| jobs: | ||||
|   lint: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|     - name: setup deps | ||||
|       run: go get -v ./... | ||||
|     - name: run lint | ||||
|       uses: golangci/golangci-lint-action@v6 | ||||
|       with: | ||||
|         version: 'latest' | ||||
							
								
								
									
										94
									
								
								.github/workflows/job_sync.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										94
									
								
								.github/workflows/job_sync.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,94 @@ | ||||
| name: sync | ||||
|  | ||||
| on: | ||||
|   schedule: | ||||
|     - cron: '*/5 * * * *' | ||||
|   # Allows you to run this workflow manually from the Actions tab | ||||
|   workflow_dispatch: | ||||
|  | ||||
| jobs: | ||||
|   sync: | ||||
|     if: github.server_url != 'https://github.com' | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: init | ||||
|       run: | | ||||
|         git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>" | ||||
|         git config --global user.name "github-actions[bot]" | ||||
|         echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc | ||||
|         echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc | ||||
|  | ||||
|     - name: check master | ||||
|       id: check_master | ||||
|       run: | | ||||
|         src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1) | ||||
|         dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1) | ||||
|         echo "src_hash=$src_hash" | ||||
|         echo "dst_hash=$dst_hash" | ||||
|         if [ "$src_hash" != "$dst_hash" -a "$src_hash" != "" -a "" != "$dst_hash" ]; then | ||||
|           echo "sync_needed=true" >> $GITHUB_OUTPUT | ||||
|         else | ||||
|           echo "sync_needed=false" >> $GITHUB_OUTPUT | ||||
|         fi | ||||
|  | ||||
|     - name: sync master | ||||
|       if: steps.check_master.outputs.sync_needed == 'true' | ||||
|       run: | | ||||
|         git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo | ||||
|         cd repo | ||||
|         git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY} | ||||
|         git pull --rebase upstream master | ||||
|         git push upstream master --progress | ||||
|         git push origin master --progress | ||||
|         cd ../ | ||||
|         rm -rf repo | ||||
|  | ||||
|     - name: check v3 | ||||
|       id: check_v3 | ||||
|       run: | | ||||
|         src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1) | ||||
|         dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1) | ||||
|         echo "src_hash=$src_hash" | ||||
|         echo "dst_hash=$dst_hash" | ||||
|         if [ "$src_hash" != "$dst_hash" ]; then | ||||
|           echo "sync_needed=true" >> $GITHUB_OUTPUT | ||||
|         else | ||||
|           echo "sync_needed=false" >> $GITHUB_OUTPUT | ||||
|         fi | ||||
|  | ||||
|     - name: sync v3 | ||||
|       if: steps.check_v3.outputs.sync_needed == 'true' | ||||
|       run: | | ||||
|         git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo | ||||
|         cd repo | ||||
|         git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY} | ||||
|         git pull --rebase upstream v3 | ||||
|         git push upstream v3 --progress | ||||
|         git push origin v3 --progress | ||||
|         cd ../ | ||||
|         rm -rf repo | ||||
|  | ||||
|     - name: check v4 | ||||
|       id: check_v4 | ||||
|       run: | | ||||
|         src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1) | ||||
|         dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1) | ||||
|         echo "src_hash=$src_hash" | ||||
|         echo "dst_hash=$dst_hash" | ||||
|         if [ "$src_hash" != "$dst_hash" ]; then | ||||
|           echo "sync_needed=true" >> $GITHUB_OUTPUT | ||||
|         else | ||||
|           echo "sync_needed=false" >> $GITHUB_OUTPUT | ||||
|         fi | ||||
|  | ||||
|     - name: sync v4 | ||||
|       if: steps.check_v4.outputs.sync_needed == 'true' | ||||
|       run: | | ||||
|         git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo | ||||
|         cd repo | ||||
|         git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY} | ||||
|         git pull --rebase upstream v4 | ||||
|         git push upstream v4 --progress | ||||
|         git push origin v4 --progress | ||||
|         cd ../ | ||||
|         rm -rf repo | ||||
							
								
								
									
										31
									
								
								.github/workflows/job_test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								.github/workflows/job_test.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| name: test | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: [ master, v3, v4 ] | ||||
|   push: | ||||
|     branches: [ master, v3, v4 ] | ||||
|     paths-ignore: | ||||
|       - '.github/**' | ||||
|       - '.gitea/**' | ||||
|  | ||||
| jobs: | ||||
|   test: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|     - name: setup deps | ||||
|       run: go get -v ./... | ||||
|     - name: run test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
							
								
								
									
										50
									
								
								.github/workflows/job_tests.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								.github/workflows/job_tests.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,50 @@ | ||||
| name: test | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     types: [opened, reopened, synchronize] | ||||
|     branches: [ master, v3, v4 ] | ||||
|   push: | ||||
|     branches: [ master, v3, v4 ] | ||||
|     paths-ignore: | ||||
|       - '.github/**' | ||||
|       - '.gitea/**' | ||||
|  | ||||
| jobs: | ||||
|   test: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: checkout code | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         filter: 'blob:none' | ||||
|     - name: checkout tests | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         ref: master | ||||
|         filter: 'blob:none' | ||||
|         repository: unistack-org/micro-tests | ||||
|         path: micro-tests | ||||
|     - name: setup go | ||||
|       uses: actions/setup-go@v5 | ||||
|       with: | ||||
|         cache-dependency-path: "**/*.sum" | ||||
|         go-version: 'stable' | ||||
|     - name: setup go work | ||||
|       env: | ||||
|         GOWORK: ${{ github.workspace }}/go.work | ||||
|       run: | | ||||
|         go work init | ||||
|         go work use . | ||||
|         go work use micro-tests | ||||
|     - name: setup deps | ||||
|       env: | ||||
|         GOWORK: ${{ github.workspace }}/go.work | ||||
|       run: go get -v ./... | ||||
|     - name: run tests | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|         GOWORK: ${{ github.workspace }}/go.work | ||||
|       run: | | ||||
|         cd micro-tests | ||||
|         go test -mod readonly -v ./... || true | ||||
							
								
								
									
										46
									
								
								.github/workflows/pr.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										46
									
								
								.github/workflows/pr.yml
									
									
									
									
										vendored
									
									
								
							| @@ -1,46 +0,0 @@ | ||||
| name: prbuild | ||||
| on: | ||||
|   pull_request: | ||||
|     branches: | ||||
|     - master | ||||
| jobs: | ||||
|   test: | ||||
|     name: test | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|     - name: setup | ||||
|       uses: actions/setup-go@v2 | ||||
|       with: | ||||
|         go-version: 1.16 | ||||
|     - name: checkout | ||||
|       uses: actions/checkout@v2 | ||||
|     - name: cache | ||||
|       uses: actions/cache@v2 | ||||
|       with: | ||||
|         path: ~/go/pkg/mod | ||||
|         key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} | ||||
|         restore-keys: ${{ runner.os }}-go- | ||||
|     - name: deps | ||||
|       run: go get -v -t -d ./... | ||||
|     - name: test | ||||
|       env: | ||||
|         INTEGRATION_TESTS: yes | ||||
|       run: go test -mod readonly -v ./... | ||||
|   lint: | ||||
|     name: lint | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: checkout | ||||
|         uses: actions/checkout@v2 | ||||
|       - name: lint | ||||
|         uses: golangci/golangci-lint-action@v2 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. | ||||
|           version: v1.30 | ||||
|           # Optional: working directory, useful for monorepos | ||||
|           # working-directory: somedir | ||||
|           # Optional: golangci-lint command line arguments. | ||||
|           # args: --issues-exit-code=0 | ||||
|           # Optional: show only new issues if it's a pull request. The default value is `false`. | ||||
|           # only-new-issues: true | ||||
							
								
								
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										6
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -13,3 +13,9 @@ | ||||
|  | ||||
| # Dependency directories (remove the comment below to include it) | ||||
| # vendor/ | ||||
|  | ||||
| # General | ||||
| .DS_Store | ||||
| .idea | ||||
| .vscode | ||||
| bin/ | ||||
|   | ||||
							
								
								
									
										5
									
								
								.golangci.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								.golangci.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| run: | ||||
|   concurrency: 8 | ||||
|   timeout: 5m | ||||
|   issues-exit-code: 1 | ||||
|   tests: true | ||||
| @@ -1,2 +1,2 @@ | ||||
| # micro-broker-kgo | ||||
| yet another micro kafka broker alternative | ||||
|  | ||||
|   | ||||
							
								
								
									
										93
									
								
								carrier.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										93
									
								
								carrier.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,93 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"slices" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v4/metadata" | ||||
| ) | ||||
|  | ||||
| // RecordCarrier injects and extracts traces from a kgo.Record. | ||||
| // | ||||
| // This type exists to satisfy the otel/propagation.TextMapCarrier interface. | ||||
| type RecordCarrier struct { | ||||
| 	record *kgo.Record | ||||
| } | ||||
|  | ||||
| // NewRecordCarrier creates a new RecordCarrier. | ||||
| func NewRecordCarrier(record *kgo.Record) RecordCarrier { | ||||
| 	return RecordCarrier{record: record} | ||||
| } | ||||
|  | ||||
| // Get retrieves a single value for a given key if it exists. | ||||
| func (c RecordCarrier) Get(key string) string { | ||||
| 	for _, h := range c.record.Headers { | ||||
| 		if h.Key == key { | ||||
| 			return string(h.Value) | ||||
| 		} | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // Set sets a header. | ||||
| func (c RecordCarrier) Set(key, val string) { | ||||
| 	// Check if key already exists. | ||||
| 	for i, h := range c.record.Headers { | ||||
| 		if h.Key == key { | ||||
| 			// Key exist, update the value. | ||||
| 			c.record.Headers[i].Value = []byte(val) | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	// Key does not exist, append new header. | ||||
| 	c.record.Headers = append(c.record.Headers, kgo.RecordHeader{ | ||||
| 		Key:   key, | ||||
| 		Value: []byte(val), | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // Keys returns a slice of all key identifiers in the carrier. | ||||
| func (c RecordCarrier) Keys() []string { | ||||
| 	out := make([]string, len(c.record.Headers)) | ||||
| 	for i, h := range c.record.Headers { | ||||
| 		out[i] = h.Key | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| func setHeaders(r *kgo.Record, md metadata.Metadata, exclude ...string) { | ||||
| 	seen := make(map[string]struct{}) | ||||
|  | ||||
| loop: | ||||
| 	for k, v := range md { | ||||
| 		k = http.CanonicalHeaderKey(k) | ||||
|  | ||||
| 		if _, ok := seen[k]; ok { | ||||
| 			continue loop | ||||
| 		} | ||||
|  | ||||
| 		if slices.ContainsFunc(exclude, func(s string) bool { | ||||
| 			return strings.EqualFold(s, k) | ||||
| 		}) { | ||||
| 			continue loop | ||||
| 		} | ||||
|  | ||||
| 		for i := 0; i < len(r.Headers); i++ { | ||||
| 			if strings.EqualFold(r.Headers[i].Key, k) { | ||||
| 				// Key exist, update the value. | ||||
| 				r.Headers[i].Value = []byte(strings.Join(v, ",")) | ||||
| 				continue loop | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Key does not exist, append new header. | ||||
| 		r.Headers = append(r.Headers, kgo.RecordHeader{ | ||||
| 			Key:   k, | ||||
| 			Value: []byte(strings.Join(v, ",")), | ||||
| 		}) | ||||
|  | ||||
| 		seen[k] = struct{}{} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										10
									
								
								errors.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								errors.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| ) | ||||
|  | ||||
| func isContextError(err error) bool { | ||||
| 	return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) | ||||
| } | ||||
							
								
								
									
										30
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										30
									
								
								go.mod
									
									
									
									
									
								
							| @@ -1,11 +1,27 @@ | ||||
| module github.com/unistack-org/micro-broker-kgo/v3 | ||||
| module go.unistack.org/micro-broker-kgo/v4 | ||||
|  | ||||
| go 1.16 | ||||
| go 1.24.0 | ||||
|  | ||||
| require ( | ||||
| 	github.com/klauspost/compress v1.13.6 // indirect | ||||
| 	github.com/twmb/franz-go v1.1.0 | ||||
| 	github.com/twmb/franz-go/pkg/kmsg v0.0.0-20210914174821-2f676c0a574b | ||||
| 	github.com/unistack-org/micro-codec-json/v3 v3.7.7 | ||||
| 	github.com/unistack-org/micro/v3 v3.7.3 | ||||
| 	github.com/stretchr/testify v1.11.1 | ||||
| 	github.com/twmb/franz-go v1.20.2 | ||||
| 	github.com/twmb/franz-go/pkg/kadm v1.17.1 | ||||
| 	github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3 | ||||
| 	github.com/twmb/franz-go/pkg/kmsg v1.12.0 | ||||
| 	go.opentelemetry.io/otel v1.38.0 | ||||
| 	go.unistack.org/micro/v4 v4.1.24 | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | ||||
| 	github.com/google/uuid v1.6.0 // indirect | ||||
| 	github.com/klauspost/compress v1.18.1 // indirect | ||||
| 	github.com/matoous/go-nanoid v1.5.1 // indirect | ||||
| 	github.com/pierrec/lz4/v4 v4.1.22 // indirect | ||||
| 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect | ||||
| 	github.com/spf13/cast v1.10.0 // indirect | ||||
| 	go.unistack.org/micro-proto/v4 v4.1.0 // indirect | ||||
| 	golang.org/x/crypto v0.43.0 // indirect | ||||
| 	google.golang.org/protobuf v1.36.10 // indirect | ||||
| 	gopkg.in/yaml.v3 v3.0.1 // indirect | ||||
| ) | ||||
|   | ||||
							
								
								
									
										125
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										125
									
								
								go.sum
									
									
									
									
									
								
							| @@ -1,68 +1,59 @@ | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z4tg= | ||||
| github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= | ||||
| github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | ||||
| github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | ||||
| github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= | ||||
| github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= | ||||
| github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= | ||||
| github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= | ||||
| github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= | ||||
| github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= | ||||
| github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= | ||||
| github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= | ||||
| github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= | ||||
| github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= | ||||
| github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= | ||||
| github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= | ||||
| github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= | ||||
| github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= | ||||
| github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= | ||||
| github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= | ||||
| github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/silas/dag v0.0.0-20210121180416-41cf55125c34/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I= | ||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||
| github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= | ||||
| github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/twmb/franz-go v1.1.0 h1:WR4EbNx7L4eLyTgQmbp3Bg9fX3eW6jHWYj087n07rRc= | ||||
| github.com/twmb/franz-go v1.1.0/go.mod h1:KerrVhzNpasYrWJLr2Yj6Cui43f1BxH4U9SJEDVOjqQ= | ||||
| github.com/twmb/franz-go/pkg/kmsg v0.0.0-20210914042331-106aef61b693/go.mod h1:SxG/xJKhgPu25SamAq0rrucfp7lbzCpEXOC+vH/ELrY= | ||||
| github.com/twmb/franz-go/pkg/kmsg v0.0.0-20210914174821-2f676c0a574b h1:7d6eRt9HEqXVxMzD2fry9qtJ0kRkgeJ5olqW9K+aXv8= | ||||
| github.com/twmb/franz-go/pkg/kmsg v0.0.0-20210914174821-2f676c0a574b/go.mod h1:SxG/xJKhgPu25SamAq0rrucfp7lbzCpEXOC+vH/ELrY= | ||||
| github.com/twmb/go-rbtree v1.0.0 h1:KxN7dXJ8XaZ4cvmHV1qqXTshxX3EBvX/toG5+UR49Mg= | ||||
| github.com/twmb/go-rbtree v1.0.0/go.mod h1:UlIAI8gu3KRPkXSobZnmJfVwCJgEhD/liWzT5ppzIyc= | ||||
| github.com/unistack-org/micro-codec-json/v3 v3.7.7 h1:H8ALEfE3LQsRy6F/oKh5TbPettxSDbpECiOu7AHmE5E= | ||||
| github.com/unistack-org/micro-codec-json/v3 v3.7.7/go.mod h1:Gk0pKcPu1nf9jmSjuyn8yehaIpJsx8AqD+hh69OzWlI= | ||||
| github.com/unistack-org/micro-proto v0.0.9 h1:KrWLS4FUX7UAWNAilQf70uad6ZPf/0EudeddCXllRVc= | ||||
| github.com/unistack-org/micro-proto v0.0.9/go.mod h1:Cckwmzd89gvS7ThxzZp9kQR/EOdksFQcsTAtDDyKwrg= | ||||
| github.com/unistack-org/micro/v3 v3.7.3 h1:7dCv7WuTYp47jZ/doVCXDNMnY75imprbmwSnmchTSJY= | ||||
| github.com/unistack-org/micro/v3 v3.7.3/go.mod h1:78vy7ggElJD+ayx8ruhJsY+SkGUupfR32LXrg1UObvg= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= | ||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||
| golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= | ||||
| golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= | ||||
| google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= | ||||
| google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= | ||||
| github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= | ||||
| github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= | ||||
| github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= | ||||
| github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= | ||||
| github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= | ||||
| github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= | ||||
| github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= | ||||
| github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= | ||||
| github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= | ||||
| github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4= | ||||
| github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U= | ||||
| github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= | ||||
| github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= | ||||
| github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= | ||||
| github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= | ||||
| github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= | ||||
| github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= | ||||
| github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= | ||||
| github.com/twmb/franz-go v1.19.5 h1:W7+o8D0RsQsedqib71OVlLeZ0zI6CbFra7yTYhZTs5Y= | ||||
| github.com/twmb/franz-go v1.19.5/go.mod h1:4kFJ5tmbbl7asgwAGVuyG1ZMx0NNpYk7EqflvWfPCpM= | ||||
| github.com/twmb/franz-go v1.20.2 h1:CiwhyKZHW6vqSHJkh+RTxFAJkio0jBjM/JQhx/HZ72A= | ||||
| github.com/twmb/franz-go v1.20.2/go.mod h1:YCnepDd4gl6vdzG03I5Wa57RnCTIC6DVEyMpDX/J8UA= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.16.1 h1:IEkrhTljgLHJ0/hT/InhXGjPdmWfFvxp7o/MR7vJ8cw= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.16.1/go.mod h1:Ue/ye1cc9ipsQFg7udFbbGiFNzQMqiH73fGC2y0rwyc= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.17.1 h1:Bt02Y/RLgnFO2NP2HVP1kd2TFtGRiJZx+fSArjZDtpw= | ||||
| github.com/twmb/franz-go/pkg/kadm v1.17.1/go.mod h1:s4duQmrDbloVW9QTMXhs6mViTepze7JLG43xwPcAeTg= | ||||
| github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3 h1:p24opKWPySAy8xSl8NqRgOv7Q+bX7kdrQirBVRJzQfo= | ||||
| github.com/twmb/franz-go/pkg/kfake v0.0.0-20250508175730-72e1646135e3/go.mod h1:7uQs3Ae6HkWT1Y9elMbqtAcNFCI0y6+iS+Phw49L49U= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.11.2 h1:hIw75FpwcAjgeyfIGFqivAvwC5uNIOWRGvQgZhH4mhg= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.11.2/go.mod h1:CFfkkLysDNmukPYhGzuUcDtf46gQSqCZHMW1T4Z+wDE= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.12.0 h1:CbatD7ers1KzDNgJqPbKOq0Bz/WLBdsTH75wgzeVaPc= | ||||
| github.com/twmb/franz-go/pkg/kmsg v1.12.0/go.mod h1:+DPt4NC8RmI6hqb8G09+3giKObE6uD2Eya6CfqBpeJY= | ||||
| go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= | ||||
| go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= | ||||
| go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk= | ||||
| go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec= | ||||
| go.unistack.org/micro/v4 v4.1.21 h1:F9PrbI1BhXSDS0FopwcO5wWrT2Xh38w9VhVZZBHjfg8= | ||||
| go.unistack.org/micro/v4 v4.1.21/go.mod h1:nlBXTbx0rQrSZX4HPp2m57PHmpuGPWUd0O+jpUIiPto= | ||||
| go.unistack.org/micro/v4 v4.1.24 h1:PbkSWJS3ssB5A0y0tOdOw6u9e2Mk4yombF4yR0Jshvo= | ||||
| go.unistack.org/micro/v4 v4.1.24/go.mod h1:nlBXTbx0rQrSZX4HPp2m57PHmpuGPWUd0O+jpUIiPto= | ||||
| golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= | ||||
| golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= | ||||
| google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= | ||||
| google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= | ||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
|   | ||||
							
								
								
									
										108
									
								
								hook_event.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										108
									
								
								hook_event.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,108 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| ) | ||||
|  | ||||
| type hookEvent struct { | ||||
| 	log          logger.Logger | ||||
| 	fatalOnError bool | ||||
| 	connected    *atomic.Uint32 | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	_ kgo.HookBrokerConnect           = &hookEvent{} | ||||
| 	_ kgo.HookBrokerDisconnect        = &hookEvent{} | ||||
| 	_ kgo.HookBrokerRead              = &hookEvent{} | ||||
| 	_ kgo.HookBrokerWrite             = &hookEvent{} | ||||
| 	_ kgo.HookGroupManageError        = &hookEvent{} | ||||
| 	_ kgo.HookProduceRecordUnbuffered = &hookEvent{} | ||||
| ) | ||||
|  | ||||
| func (m *hookEvent) OnGroupManageError(err error) { | ||||
| 	switch { | ||||
| 	case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err): | ||||
| 		return | ||||
| 	default: | ||||
| 		ctx := context.TODO() | ||||
| 		logMsg := "kgo.OnGroupManageError" | ||||
|  | ||||
| 		if m.fatalOnError { | ||||
| 			m.log.Fatal(ctx, logMsg, err) | ||||
| 		} else { | ||||
| 			m.log.Error(ctx, logMsg, err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *hookEvent) OnBrokerConnect(_ kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { | ||||
| 	switch { | ||||
| 	case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err): | ||||
| 		return | ||||
| 	default: | ||||
| 		ctx := context.TODO() | ||||
| 		logMsg := "kgo.OnBrokerConnect" | ||||
|  | ||||
| 		if m.fatalOnError { | ||||
| 			m.log.Fatal(ctx, logMsg, err) | ||||
| 		} else { | ||||
| 			m.log.Error(ctx, logMsg, err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *hookEvent) OnBrokerDisconnect(_ kgo.BrokerMetadata, _ net.Conn) {} | ||||
|  | ||||
| func (m *hookEvent) OnBrokerWrite(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) { | ||||
| 	switch { | ||||
| 	case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err): | ||||
| 		return | ||||
| 	default: | ||||
| 		ctx := context.TODO() | ||||
| 		logMsg := "kgo.OnBrokerWrite" | ||||
|  | ||||
| 		if m.fatalOnError { | ||||
| 			m.log.Fatal(ctx, logMsg, err) | ||||
| 		} else { | ||||
| 			m.log.Error(ctx, logMsg, err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *hookEvent) OnBrokerRead(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) { | ||||
| 	switch { | ||||
| 	case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err): | ||||
| 		return | ||||
| 	default: | ||||
| 		ctx := context.TODO() | ||||
| 		logMsg := "kgo.OnBrokerRead" | ||||
|  | ||||
| 		if m.fatalOnError { | ||||
| 			m.log.Fatal(ctx, logMsg, err) | ||||
| 		} else { | ||||
| 			m.log.Error(ctx, logMsg, err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *hookEvent) OnProduceRecordUnbuffered(_ *kgo.Record, err error) { | ||||
| 	switch { | ||||
| 	case err == nil || isContextError(err) || kgo.IsRetryableBrokerErr(err): | ||||
| 		return | ||||
| 	default: | ||||
| 		ctx := context.TODO() | ||||
| 		logMsg := "kgo.OnProduceRecordUnbuffered" | ||||
|  | ||||
| 		if m.fatalOnError { | ||||
| 			m.log.Fatal(ctx, logMsg, err) | ||||
| 		} else { | ||||
| 			m.log.Error(ctx, logMsg, err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										471
									
								
								hook_event_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										471
									
								
								hook_event_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,471 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"net" | ||||
| 	"os" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/require" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| ) | ||||
|  | ||||
| func TestHookEvent_OnGroupManageError(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name                  string | ||||
| 		inputErr              error | ||||
| 		fatalOnError          bool | ||||
| 		expectedErrorIsCalled bool | ||||
| 		expectedErrorMsg      string | ||||
| 		expectedFatalIsCalled bool | ||||
| 		expectedFatalMsg      string | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:                  "error is nil", | ||||
| 			inputErr:              nil, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context canceled", | ||||
| 			inputErr:              context.Canceled, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context deadline exceeded", | ||||
| 			inputErr:              context.DeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: deadline exceeded (os package)", | ||||
| 			inputErr:              os.ErrDeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: EOF (io package)", | ||||
| 			inputErr:              io.EOF, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: closed network connection (net package)", | ||||
| 			inputErr:              net.ErrClosed, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (non-fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          false, | ||||
| 			expectedErrorIsCalled: true, | ||||
| 			expectedErrorMsg:      "kgo.OnGroupManageError", | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          true, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: true, | ||||
| 			expectedFatalMsg:      "kgo.OnGroupManageError", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			log := &mockLogger{} | ||||
| 			he := &hookEvent{log: log, fatalOnError: tt.fatalOnError} | ||||
| 			he.OnGroupManageError(tt.inputErr) | ||||
| 			require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled) | ||||
| 			require.Equal(t, tt.expectedErrorMsg, log.errorMsg) | ||||
| 			require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled) | ||||
| 			require.Equal(t, tt.expectedFatalMsg, log.fatalMsg) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestHookEvent_OnBrokerConnect(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name                  string | ||||
| 		inputErr              error | ||||
| 		fatalOnError          bool | ||||
| 		expectedErrorIsCalled bool | ||||
| 		expectedErrorMsg      string | ||||
| 		expectedFatalIsCalled bool | ||||
| 		expectedFatalMsg      string | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:                  "error is nil", | ||||
| 			inputErr:              nil, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context canceled", | ||||
| 			inputErr:              context.Canceled, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context deadline exceeded", | ||||
| 			inputErr:              context.DeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: deadline exceeded (os package)", | ||||
| 			inputErr:              os.ErrDeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: EOF (io package)", | ||||
| 			inputErr:              io.EOF, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: closed network connection (net package)", | ||||
| 			inputErr:              net.ErrClosed, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (non-fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          false, | ||||
| 			expectedErrorIsCalled: true, | ||||
| 			expectedErrorMsg:      "kgo.OnBrokerConnect", | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          true, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: true, | ||||
| 			expectedFatalMsg:      "kgo.OnBrokerConnect", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			log := &mockLogger{} | ||||
| 			he := &hookEvent{log: log, fatalOnError: tt.fatalOnError} | ||||
| 			he.OnBrokerConnect(kgo.BrokerMetadata{}, 0, nil, tt.inputErr) | ||||
| 			require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled) | ||||
| 			require.Equal(t, tt.expectedErrorMsg, log.errorMsg) | ||||
| 			require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled) | ||||
| 			require.Equal(t, tt.expectedFatalMsg, log.fatalMsg) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestHookEvent_OnBrokerWrite(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name                  string | ||||
| 		inputErr              error | ||||
| 		fatalOnError          bool | ||||
| 		expectedErrorIsCalled bool | ||||
| 		expectedErrorMsg      string | ||||
| 		expectedFatalIsCalled bool | ||||
| 		expectedFatalMsg      string | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:                  "error is nil", | ||||
| 			inputErr:              nil, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context canceled", | ||||
| 			inputErr:              context.Canceled, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context deadline exceeded", | ||||
| 			inputErr:              context.DeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: deadline exceeded (os package)", | ||||
| 			inputErr:              os.ErrDeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: EOF (io package)", | ||||
| 			inputErr:              io.EOF, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: closed network connection (net package)", | ||||
| 			inputErr:              net.ErrClosed, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (non-fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          false, | ||||
| 			expectedErrorIsCalled: true, | ||||
| 			expectedErrorMsg:      "kgo.OnBrokerWrite", | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          true, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: true, | ||||
| 			expectedFatalMsg:      "kgo.OnBrokerWrite", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			log := &mockLogger{} | ||||
| 			he := &hookEvent{log: log, fatalOnError: tt.fatalOnError} | ||||
| 			he.OnBrokerWrite(kgo.BrokerMetadata{}, 0, 0, 0, 0, tt.inputErr) | ||||
| 			require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled) | ||||
| 			require.Equal(t, tt.expectedErrorMsg, log.errorMsg) | ||||
| 			require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled) | ||||
| 			require.Equal(t, tt.expectedFatalMsg, log.fatalMsg) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestHookEvent_OnBrokerRead(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name                  string | ||||
| 		inputErr              error | ||||
| 		fatalOnError          bool | ||||
| 		expectedErrorIsCalled bool | ||||
| 		expectedErrorMsg      string | ||||
| 		expectedFatalIsCalled bool | ||||
| 		expectedFatalMsg      string | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:                  "error is nil", | ||||
| 			inputErr:              nil, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context canceled", | ||||
| 			inputErr:              context.Canceled, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context deadline exceeded", | ||||
| 			inputErr:              context.DeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: deadline exceeded (os package)", | ||||
| 			inputErr:              os.ErrDeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: EOF (io package)", | ||||
| 			inputErr:              io.EOF, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: closed network connection (net package)", | ||||
| 			inputErr:              net.ErrClosed, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (non-fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          false, | ||||
| 			expectedErrorIsCalled: true, | ||||
| 			expectedErrorMsg:      "kgo.OnBrokerRead", | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          true, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: true, | ||||
| 			expectedFatalMsg:      "kgo.OnBrokerRead", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			log := &mockLogger{} | ||||
| 			he := &hookEvent{log: log, fatalOnError: tt.fatalOnError} | ||||
| 			he.OnBrokerRead(kgo.BrokerMetadata{}, 0, 0, 0, 0, tt.inputErr) | ||||
| 			require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled) | ||||
| 			require.Equal(t, tt.expectedErrorMsg, log.errorMsg) | ||||
| 			require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled) | ||||
| 			require.Equal(t, tt.expectedFatalMsg, log.fatalMsg) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestHookEvent_OnProduceRecordUnbuffered(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name                  string | ||||
| 		inputErr              error | ||||
| 		fatalOnError          bool | ||||
| 		expectedErrorIsCalled bool | ||||
| 		expectedErrorMsg      string | ||||
| 		expectedFatalIsCalled bool | ||||
| 		expectedFatalMsg      string | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:                  "error is nil", | ||||
| 			inputErr:              nil, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context canceled", | ||||
| 			inputErr:              context.Canceled, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "context deadline exceeded", | ||||
| 			inputErr:              context.DeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: deadline exceeded (os package)", | ||||
| 			inputErr:              os.ErrDeadlineExceeded, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: EOF (io package)", | ||||
| 			inputErr:              io.EOF, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "retryable error: closed network connection (net package)", | ||||
| 			inputErr:              net.ErrClosed, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (non-fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          false, | ||||
| 			expectedErrorIsCalled: true, | ||||
| 			expectedErrorMsg:      "kgo.OnProduceRecordUnbuffered", | ||||
| 			expectedFatalIsCalled: false, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                  "some error (fatal)", | ||||
| 			inputErr:              errors.New("some error"), | ||||
| 			fatalOnError:          true, | ||||
| 			expectedErrorIsCalled: false, | ||||
| 			expectedFatalIsCalled: true, | ||||
| 			expectedFatalMsg:      "kgo.OnProduceRecordUnbuffered", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			log := &mockLogger{} | ||||
| 			he := &hookEvent{log: log, fatalOnError: tt.fatalOnError} | ||||
| 			he.OnProduceRecordUnbuffered(&kgo.Record{}, tt.inputErr) | ||||
| 			require.Equal(t, tt.expectedErrorIsCalled, log.errorIsCalled) | ||||
| 			require.Equal(t, tt.expectedErrorMsg, log.errorMsg) | ||||
| 			require.Equal(t, tt.expectedFatalIsCalled, log.fatalIsCalled) | ||||
| 			require.Equal(t, tt.expectedFatalMsg, log.fatalMsg) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Mocks | ||||
|  | ||||
| type mockLogger struct { | ||||
| 	errorIsCalled bool | ||||
| 	errorMsg      string | ||||
|  | ||||
| 	fatalIsCalled bool | ||||
| 	fatalMsg      string | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Init(...logger.Option) error { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Clone(...logger.Option) logger.Logger { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) V(logger.Level) bool { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Level(logger.Level) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Options() logger.Options { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Fields(...interface{}) logger.Logger { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Info(context.Context, string, ...interface{}) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Trace(context.Context, string, ...interface{}) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Debug(context.Context, string, ...interface{}) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Warn(context.Context, string, ...interface{}) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Error(ctx context.Context, msg string, args ...interface{}) { | ||||
| 	m.errorIsCalled = true | ||||
| 	m.errorMsg = msg | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Fatal(ctx context.Context, msg string, args ...interface{}) { | ||||
| 	m.fatalIsCalled = true | ||||
| 	m.fatalMsg = msg | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Log(context.Context, logger.Level, string, ...interface{}) { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) Name() string { | ||||
| 	panic("implement me") | ||||
| } | ||||
|  | ||||
| func (m *mockLogger) String() string { | ||||
| 	panic("implement me") | ||||
| } | ||||
							
								
								
									
										771
									
								
								kgo.go
									
									
									
									
									
								
							
							
						
						
									
										771
									
								
								kgo.go
									
									
									
									
									
								
							| @@ -3,202 +3,360 @@ package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/rand/v2" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kerr" | ||||
| 	kgo "github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/twmb/franz-go/pkg/kadm" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/twmb/franz-go/pkg/kmsg" | ||||
| 	"github.com/twmb/franz-go/pkg/kversion" | ||||
| 	"github.com/unistack-org/micro/v3/broker" | ||||
| 	"github.com/unistack-org/micro/v3/logger" | ||||
| 	"github.com/unistack-org/micro/v3/metadata" | ||||
| 	"github.com/unistack-org/micro/v3/util/id" | ||||
| 	mrand "github.com/unistack-org/micro/v3/util/rand" | ||||
| 	"go.unistack.org/micro/v4/broker" | ||||
| 	"go.unistack.org/micro/v4/codec" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| 	"go.unistack.org/micro/v4/metadata" | ||||
| 	"go.unistack.org/micro/v4/options" | ||||
| 	"go.unistack.org/micro/v4/semconv" | ||||
| 	"go.unistack.org/micro/v4/tracer" | ||||
| 	"go.unistack.org/micro/v4/util/id" | ||||
| 	mrand "go.unistack.org/micro/v4/util/rand" | ||||
| ) | ||||
|  | ||||
| var _ broker.Broker = &kBroker{} | ||||
| var _ broker.Broker = (*Broker)(nil) | ||||
|  | ||||
| var messagePool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		return &kgoMessage{} | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| var ErrLostMessage = errors.New("message not marked for offsets commit and will be lost in next iteration") | ||||
|  | ||||
| var DefaultRetryBackoffFn = func() func(int) time.Duration { | ||||
| 	var rngMu sync.Mutex | ||||
| 	return func(fails int) time.Duration { | ||||
| 		const ( | ||||
| 			min = 100 * time.Millisecond | ||||
| 			max = time.Second | ||||
| 		) | ||||
| 		if fails <= 0 { | ||||
| 			return min | ||||
| 		} | ||||
| 		if fails > 10 { | ||||
| 			return max | ||||
| 		} | ||||
|  | ||||
| 		backoff := min * time.Duration(1<<(fails-1)) | ||||
|  | ||||
| 		rngMu.Lock() | ||||
| 		jitter := 0.8 + 0.4*rand.Float64() | ||||
| 		rngMu.Unlock() | ||||
|  | ||||
| 		backoff = time.Duration(float64(backoff) * jitter) | ||||
|  | ||||
| 		if backoff > max { | ||||
| 			return max | ||||
| 		} | ||||
| 		return backoff | ||||
| 	} | ||||
| }() | ||||
|  | ||||
| type Broker struct { | ||||
| 	funcPublish   broker.FuncPublish | ||||
| 	funcSubscribe broker.FuncSubscribe | ||||
| 	c             *kgo.Client | ||||
| 	connected     *atomic.Uint32 | ||||
|  | ||||
| type kBroker struct { | ||||
| 	writer    *kgo.Client // used only to push messages | ||||
| 	kopts []kgo.Opt | ||||
| 	connected bool | ||||
| 	init      bool | ||||
| 	sync.RWMutex | ||||
| 	subs  []*Subscriber | ||||
|  | ||||
| 	opts broker.Options | ||||
| 	subs []*subscriber | ||||
|  | ||||
| 	mu   sync.RWMutex | ||||
| 	init bool | ||||
| } | ||||
|  | ||||
| type subscriber struct { | ||||
| 	reader       *kgo.Client // used only to pull messages | ||||
| 	topic        string | ||||
| 	opts         broker.SubscribeOptions | ||||
| 	kopts        broker.Options | ||||
| 	handler      broker.Handler | ||||
| 	batchhandler broker.BatchHandler | ||||
| 	closed       bool | ||||
| 	done         chan struct{} | ||||
| 	consumers    map[string]map[int32]worker | ||||
| 	sync.RWMutex | ||||
| func (r *Broker) Live() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| type publication struct { | ||||
| 	topic string | ||||
| 	err   error | ||||
| 	sync.RWMutex | ||||
| 	msg *broker.Message | ||||
| 	ack bool | ||||
| func (r *Broker) Ready() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| func (p *publication) Topic() string { | ||||
| 	return p.topic | ||||
| func (r *Broker) Health() bool { | ||||
| 	return r.connected.Load() == 1 | ||||
| } | ||||
|  | ||||
| func (p *publication) Message() *broker.Message { | ||||
| 	return p.msg | ||||
| } | ||||
|  | ||||
| func (p *publication) Ack() error { | ||||
| 	p.ack = true | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *publication) Error() error { | ||||
| 	return p.err | ||||
| } | ||||
|  | ||||
| func (p *publication) SetError(err error) { | ||||
| 	p.err = err | ||||
| } | ||||
|  | ||||
| func (s *subscriber) Options() broker.SubscribeOptions { | ||||
| 	return s.opts | ||||
| } | ||||
|  | ||||
| func (s *subscriber) Topic() string { | ||||
| 	return s.topic | ||||
| } | ||||
|  | ||||
| func (s *subscriber) Unsubscribe(ctx context.Context) error { | ||||
| 	if s.closed { | ||||
| 		return nil | ||||
| 	} | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| 		return ctx.Err() | ||||
| 	default: | ||||
| 		close(s.done) | ||||
| 		s.closed = true | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Address() string { | ||||
| func (k *Broker) Address() string { | ||||
| 	return strings.Join(k.opts.Addrs, ",") | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Name() string { | ||||
| func (k *Broker) Name() string { | ||||
| 	return k.opts.Name | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Connect(ctx context.Context) error { | ||||
| 	k.RLock() | ||||
| 	if k.connected { | ||||
| 		k.RUnlock() | ||||
| func (k *Broker) Client() *kgo.Client { | ||||
| 	return k.c | ||||
| } | ||||
|  | ||||
| type kgoMessage struct { | ||||
| 	c     codec.Codec | ||||
| 	topic string | ||||
| 	ctx   context.Context | ||||
| 	body  []byte | ||||
| 	hdr   metadata.Metadata | ||||
| 	opts  broker.MessageOptions | ||||
| 	ack   bool | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Ack() error { | ||||
| 	m.ack = true | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Body() []byte { | ||||
| 	return m.body | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Header() metadata.Metadata { | ||||
| 	return m.hdr | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Context() context.Context { | ||||
| 	return m.ctx | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Topic() string { | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *kgoMessage) Unmarshal(dst interface{}, opts ...codec.Option) error { | ||||
| 	return m.c.Unmarshal(m.body, dst) | ||||
| } | ||||
|  | ||||
| func (b *Broker) newCodec(ct string) (codec.Codec, error) { | ||||
| 	if idx := strings.IndexRune(ct, ';'); idx >= 0 { | ||||
| 		ct = ct[:idx] | ||||
| 	} | ||||
| 	b.mu.RLock() | ||||
| 	c, ok := b.opts.Codecs[ct] | ||||
| 	b.mu.RUnlock() | ||||
| 	if ok { | ||||
| 		return c, nil | ||||
| 	} | ||||
| 	return nil, codec.ErrUnknownContentType | ||||
| } | ||||
|  | ||||
| func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) { | ||||
| 	options := broker.NewMessageOptions(opts...) | ||||
| 	if options.ContentType == "" { | ||||
| 		options.ContentType = b.opts.ContentType | ||||
| 	} | ||||
|  | ||||
| 	m := &kgoMessage{ctx: ctx, hdr: hdr.Copy(), opts: options} | ||||
| 	c, err := b.newCodec(m.opts.ContentType) | ||||
| 	if err == nil { | ||||
| 		m.body, err = c.Marshal(body) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	m.hdr.Set(metadata.HeaderContentType, m.opts.ContentType) | ||||
|  | ||||
| 	return m, nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *hookTracer, error) { | ||||
| 	var c *kgo.Client | ||||
| 	var err error | ||||
|  | ||||
| 	sp, _ := tracer.SpanFromContext(ctx) | ||||
|  | ||||
| 	clientID := "kgo" | ||||
| 	group := "" | ||||
| 	if k.opts.Context != nil { | ||||
| 		if id, ok := k.opts.Context.Value(clientIDKey{}).(string); ok { | ||||
| 			clientID = id | ||||
| 		} | ||||
| 		if id, ok := k.opts.Context.Value(groupKey{}).(string); ok { | ||||
| 			group = id | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var fatalOnError bool | ||||
| 	if k.opts.Context != nil { | ||||
| 		if v, ok := k.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v { | ||||
| 			fatalOnError = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	htracer := &hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer} | ||||
| 	opts = append(opts, | ||||
| 		kgo.WithHooks(&hookMeter{meter: k.opts.Meter}), | ||||
| 		kgo.WithHooks(htracer), | ||||
| 		kgo.WithHooks(&hookEvent{log: k.opts.Logger, fatalOnError: fatalOnError, connected: k.connected}), | ||||
| 	) | ||||
|  | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| 		if ctx.Err() != nil { | ||||
| 			if sp != nil { | ||||
| 				sp.SetStatus(tracer.SpanStatusError, ctx.Err().Error()) | ||||
| 			} | ||||
| 		} | ||||
| 		return nil, nil, ctx.Err() | ||||
| 	default: | ||||
| 		c, err = kgo.NewClient(opts...) | ||||
| 		if err == nil { | ||||
| 			err = c.Ping(ctx) // check connectivity to cluster | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			if sp != nil { | ||||
| 				sp.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 			} | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 		k.connected.Store(1) | ||||
| 		return c, htracer, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (k *Broker) Connect(ctx context.Context) error { | ||||
| 	if k.connected.Load() == 1 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	k.RUnlock() | ||||
|  | ||||
| 	nctx := k.opts.Context | ||||
| 	if ctx != nil { | ||||
| 		nctx = ctx | ||||
| 	} | ||||
|  | ||||
| 	kaddrs := k.opts.Addrs | ||||
|  | ||||
| 	// shuffle addrs | ||||
| 	var rng mrand.Rand | ||||
| 	rng.Shuffle(len(kaddrs), func(i, j int) { | ||||
| 		kaddrs[i], kaddrs[j] = kaddrs[j], kaddrs[i] | ||||
| 	}) | ||||
|  | ||||
| 	kopts := append(k.kopts, kgo.SeedBrokers(kaddrs...)) | ||||
|  | ||||
| 	select { | ||||
| 	case <-nctx.Done(): | ||||
| 		return nctx.Err() | ||||
| 	default: | ||||
| 		c, err := kgo.NewClient(kopts...) | ||||
| 	c, _, err := k.connect(nctx, k.kopts...) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 		// Request versions in order to guess Kafka Cluster version | ||||
| 		versionsReq := kmsg.NewApiVersionsRequest() | ||||
| 		versionsRes, err := versionsReq.RequestWith(ctx, c) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to request api versions: %w", err) | ||||
| 		} | ||||
| 		err = kerr.ErrorForCode(versionsRes.ErrorCode) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to request api versions. Inner kafka error: %w", err) | ||||
| 		} | ||||
| 		versions := kversion.FromApiVersionsResponse(versionsRes) | ||||
| 	k.mu.Lock() | ||||
| 	k.c = c | ||||
| 	k.connected.Store(1) | ||||
| 	k.mu.Unlock() | ||||
|  | ||||
| 		if k.opts.Logger.V(logger.InfoLevel) { | ||||
| 			logger.Infof(ctx, "[kgo] connected to to kafka cluster version %v", versions.VersionGuess()) | ||||
| 	exposeLag := false | ||||
| 	if k.opts.Context != nil { | ||||
| 		if v, ok := k.opts.Context.Value(exposeLagKey{}).(bool); ok && v { | ||||
| 			exposeLag = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if exposeLag { | ||||
| 		var mu sync.Mutex | ||||
| 		var lastUpdate time.Time | ||||
| 		type pl struct { | ||||
| 			p string | ||||
| 			l float64 | ||||
| 		} | ||||
|  | ||||
| 		lag := make(map[string]map[string]pl) // topic => group => partition => lag | ||||
| 		ac := kadm.NewClient(k.c) | ||||
|  | ||||
| 		updateStats := func() { | ||||
| 			mu.Lock() | ||||
| 			if time.Since(lastUpdate) < DefaultStatsInterval { | ||||
| 				return | ||||
| 			} | ||||
| 			mu.Unlock() | ||||
|  | ||||
| 			k.mu.Lock() | ||||
| 			groups := make([]string, 0, len(k.subs)) | ||||
| 			for _, g := range k.subs { | ||||
| 				groups = append(groups, g.opts.Group) | ||||
| 			} | ||||
| 			k.mu.Unlock() | ||||
|  | ||||
| 			dgls, err := ac.Lag(ctx, groups...) | ||||
| 			if err != nil || !dgls.Ok() { | ||||
| 				k.opts.Logger.Error(k.opts.Context, "kgo describe group lag error", err) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			for gn, dgl := range dgls { | ||||
| 				for tn, lmap := range dgl.Lag { | ||||
| 					if _, ok := lag[tn]; !ok { | ||||
| 						lag[tn] = make(map[string]pl) | ||||
| 					} | ||||
| 					for p, l := range lmap { | ||||
| 						lag[tn][gn] = pl{p: strconv.Itoa(int(p)), l: float64(l.Lag)} | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for tn, dg := range lag { | ||||
| 			for gn, gl := range dg { | ||||
| 				k.opts.Meter.Gauge(semconv.BrokerGroupLag, | ||||
| 					func() float64 { updateStats(); return gl.l }, | ||||
| 					"topic", tn, | ||||
| 					"group", gn, | ||||
| 					"partition", gl.p) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		k.Lock() | ||||
| 		k.connected = true | ||||
| 		k.writer = c | ||||
| 		k.Unlock() | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Disconnect(ctx context.Context) error { | ||||
| 	k.RLock() | ||||
| 	if !k.connected { | ||||
| 		k.RUnlock() | ||||
| func (k *Broker) Disconnect(ctx context.Context) error { | ||||
| 	if k.connected.Load() == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	k.RUnlock() | ||||
|  | ||||
| 	k.Lock() | ||||
| 	defer k.Unlock() | ||||
|  | ||||
| 	nctx := k.opts.Context | ||||
| 	if ctx != nil { | ||||
| 		nctx = ctx | ||||
| 	} | ||||
| 	var span tracer.Span | ||||
| 	ctx, span = k.opts.Tracer.Start(ctx, "Disconnect") | ||||
| 	defer span.Finish() | ||||
|  | ||||
| 	k.mu.Lock() | ||||
| 	defer k.mu.Unlock() | ||||
| 	select { | ||||
| 	case <-nctx.Done(): | ||||
| 		return nctx.Err() | ||||
| 	default: | ||||
| 		for _, sub := range k.subs { | ||||
| 			if sub.closed { | ||||
| 				continue | ||||
| 			} | ||||
| 			if err := sub.Unsubscribe(ctx); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 		k.writer.Close() | ||||
| 		if k.c != nil { | ||||
| 			k.c.CloseAllowingRebalance() | ||||
| 			// k.c.Close() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	k.connected = false | ||||
| 	k.connected.Store(0) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Init(opts ...broker.Option) error { | ||||
| 	k.Lock() | ||||
| 	defer k.Unlock() | ||||
| func (k *Broker) Init(opts ...broker.Option) error { | ||||
| 	k.mu.Lock() | ||||
| 	defer k.mu.Unlock() | ||||
|  | ||||
| 	if len(opts) == 0 && k.init { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	for _, o := range opts { | ||||
| 		o(&k.opts) | ||||
| 	} | ||||
| @@ -222,113 +380,154 @@ func (k *kBroker) Init(opts ...broker.Option) error { | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	//	kgo.RecordPartitioner(), | ||||
| 	k.funcPublish = k.fnPublish | ||||
| 	k.funcSubscribe = k.fnSubscribe | ||||
|  | ||||
| 	k.opts.Hooks.EachPrev(func(hook options.Hook) { | ||||
| 		switch h := hook.(type) { | ||||
| 		case broker.HookPublish: | ||||
| 			k.funcPublish = h(k.funcPublish) | ||||
| 		case broker.HookSubscribe: | ||||
| 			k.funcSubscribe = h(k.funcSubscribe) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	k.init = true | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Options() broker.Options { | ||||
| func (k *Broker) Options() broker.Options { | ||||
| 	return k.opts | ||||
| } | ||||
|  | ||||
| func (k *kBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error { | ||||
| 	return k.publish(ctx, msgs, opts...) | ||||
| func (b *Broker) Publish(ctx context.Context, topic string, messages ...broker.Message) error { | ||||
| 	return b.funcPublish(ctx, topic, messages...) | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error { | ||||
| 	msg.Header.Set(metadata.HeaderTopic, topic) | ||||
| 	return k.publish(ctx, []*broker.Message{msg}, opts...) | ||||
| func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker.Message) error { | ||||
| 	return b.publish(ctx, topic, messages...) | ||||
| } | ||||
|  | ||||
| func (k *kBroker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error { | ||||
| 	options := broker.NewPublishOptions(opts...) | ||||
| 	records := make([]*kgo.Record, 0, len(msgs)) | ||||
| 	var errs []string | ||||
| 	var err error | ||||
| func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error { | ||||
| 	var records []*kgo.Record | ||||
|  | ||||
| 	for _, msg := range msgs { | ||||
| 		rec := &kgo.Record{} | ||||
| 		rec.Topic, _ = msg.Header.Get(metadata.HeaderTopic) | ||||
| 		if options.BodyOnly { | ||||
| 			rec.Value = msg.Body | ||||
| 		} else if k.opts.Codec.String() == "noop" { | ||||
| 			rec.Value = msg.Body | ||||
| 			for k, v := range msg.Header { | ||||
| 				rec.Headers = append(rec.Headers, kgo.RecordHeader{Key: k, Value: []byte(v)}) | ||||
| 	for _, msg := range messages { | ||||
|  | ||||
| 		rec := &kgo.Record{ | ||||
| 			Context: msg.Context(), | ||||
| 			Topic:   topic, | ||||
| 			Value:   msg.Body(), | ||||
| 		} | ||||
| 		} else { | ||||
| 			rec.Value, err = k.opts.Codec.Marshal(msg) | ||||
|  | ||||
| 		var promise func(*kgo.Record, error) | ||||
| 		if rec.Context != nil { | ||||
| 			if k, ok := rec.Context.Value(messageKey{}).([]byte); ok && k != nil { | ||||
| 				rec.Key = k | ||||
| 			} | ||||
| 			if p, ok := rec.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil { | ||||
| 				promise = p | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		kmsg, ok := msg.(*kgoMessage) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		if kmsg.opts.Context != nil { | ||||
| 			if k, ok := kmsg.opts.Context.Value(messageKey{}).([]byte); ok && k != nil { | ||||
| 				rec.Key = k | ||||
| 			} | ||||
| 			if p, ok := kmsg.opts.Context.Value(messagePromiseKey{}).(func(*kgo.Record, error)); ok && p != nil { | ||||
| 				promise = p | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		setHeaders(rec, msg.Header()) | ||||
|  | ||||
| 		if promise != nil { | ||||
| 			ts := time.Now() | ||||
| 			b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc() | ||||
| 			b.c.Produce(ctx, rec, func(r *kgo.Record, err error) { | ||||
| 				te := time.Since(ts) | ||||
| 				b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec() | ||||
| 				b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds()) | ||||
| 				b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds()) | ||||
| 				if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 					b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc() | ||||
| 				} else { | ||||
| 					b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc() | ||||
| 				} | ||||
| 				promise(r, err) | ||||
| 			}) | ||||
| 			continue | ||||
| 		} else { | ||||
| 			records = append(records, rec) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	results := k.writer.ProduceSync(ctx, records...) | ||||
| 	if len(records) > 0 { | ||||
| 		var errs []string | ||||
| 		ts := time.Now() | ||||
| 		b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", topic, "topic", topic).Set(uint64(len(records))) | ||||
| 		results := b.c.ProduceSync(ctx, records...) | ||||
| 		te := time.Since(ts) | ||||
| 		for _, result := range results { | ||||
| 			b.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds()) | ||||
| 			b.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds()) | ||||
| 			b.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec() | ||||
| 			if result.Err != nil { | ||||
| 				b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc() | ||||
| 				errs = append(errs, result.Err.Error()) | ||||
| 			} else { | ||||
| 				b.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc() | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if len(errs) > 0 { | ||||
| 			return fmt.Errorf("publish error: %s", strings.Join(errs, "\n")) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type mlogger struct { | ||||
| 	l   logger.Logger | ||||
| 	ctx context.Context | ||||
| } | ||||
|  | ||||
| func (l *mlogger) Log(lvl kgo.LogLevel, msg string, args ...interface{}) { | ||||
| 	var mlvl logger.Level | ||||
| 	switch lvl { | ||||
| 	case kgo.LogLevelNone: | ||||
| 		return | ||||
| 	case kgo.LogLevelError: | ||||
| 		mlvl = logger.ErrorLevel | ||||
| 	case kgo.LogLevelWarn: | ||||
| 		mlvl = logger.WarnLevel | ||||
| 	case kgo.LogLevelInfo: | ||||
| 		mlvl = logger.InfoLevel | ||||
| 	case kgo.LogLevelDebug: | ||||
| 		mlvl = logger.DebugLevel | ||||
| 	default: | ||||
| 		return | ||||
| func (k *Broker) TopicExists(ctx context.Context, topic string) error { | ||||
| 	mdreq := kmsg.NewMetadataRequest() | ||||
| 	mdreq.Topics = []kmsg.MetadataRequestTopic{ | ||||
| 		{Topic: &topic}, | ||||
| 	} | ||||
| 	fields := make(map[string]interface{}, int(len(args)/2)) | ||||
| 	for i := 0; i < len(args)/2; i += 2 { | ||||
| 		fields[fmt.Sprintf("%v", args[i])] = args[i+1] | ||||
|  | ||||
| 	mdrsp, err := mdreq.RequestWith(ctx, k.c) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} else if mdrsp.Topics[0].ErrorCode != 0 { | ||||
| 		return fmt.Errorf("topic %s not exists or permission error", topic) | ||||
| 	} | ||||
| 	l.l.Fields(fields).Log(l.ctx, mlvl, msg) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (l *mlogger) Level() kgo.LogLevel { | ||||
| 	switch l.l.Options().Level { | ||||
| 	case logger.ErrorLevel: | ||||
| 		return kgo.LogLevelError | ||||
| 	case logger.WarnLevel: | ||||
| 		return kgo.LogLevelWarn | ||||
| 	case logger.InfoLevel: | ||||
| 		return kgo.LogLevelInfo | ||||
| 	case logger.DebugLevel, logger.TraceLevel: | ||||
| 		return kgo.LogLevelDebug | ||||
| func (b *Broker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| 	return b.funcSubscribe(ctx, topic, handler, opts...) | ||||
| } | ||||
|  | ||||
| func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| 	if err := broker.IsValidHandler(handler); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return kgo.LogLevelNone | ||||
| } | ||||
|  | ||||
| func (k *kBroker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) { | ||||
| 	options := broker.NewSubscribeOptions(opts...) | ||||
|  | ||||
| 	switch handler.(type) { | ||||
| 	default: | ||||
| 		return nil, broker.ErrInvalidHandler | ||||
| 	case func(broker.Message) error: | ||||
| 		break | ||||
| 	case func([]broker.Message) error: | ||||
| 		break | ||||
| 	} | ||||
|  | ||||
| 	if options.Group == "" { | ||||
| 		uid, err := id.New() | ||||
| 		if err != nil { | ||||
| @@ -337,100 +536,115 @@ func (k *kBroker) Subscribe(ctx context.Context, topic string, handler broker.Ha | ||||
| 		options.Group = uid | ||||
| 	} | ||||
|  | ||||
| 	kaddrs := k.opts.Addrs | ||||
| 	commitInterval := DefaultCommitInterval | ||||
| 	if b.opts.Context != nil { | ||||
| 		if v, ok := b.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 { | ||||
| 			commitInterval = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var messagePool bool | ||||
| 	var fatalOnError bool | ||||
| 	if b.opts.Context != nil { | ||||
| 		if v, ok := b.opts.Context.Value(fatalOnErrorKey{}).(bool); ok && v { | ||||
| 			fatalOnError = v | ||||
| 		} | ||||
| 		if v, ok := b.opts.Context.Value(subscribeMessagePoolKey{}).(bool); ok && v { | ||||
| 			messagePool = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if options.Context != nil { | ||||
| 		if v, ok := options.Context.Value(fatalOnErrorKey{}).(bool); ok && v { | ||||
| 			fatalOnError = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	sub := &Subscriber{ | ||||
| 		topic:        topic, | ||||
| 		opts:         options, | ||||
| 		handler:      handler, | ||||
| 		kopts:        b.opts, | ||||
| 		consumers:    make(map[tp]*consumer), | ||||
| 		done:         make(chan struct{}), | ||||
| 		fatalOnError: fatalOnError, | ||||
| 		connected:    b.connected, | ||||
| 		messagePool:  messagePool, | ||||
| 	} | ||||
|  | ||||
| 	kopts := append(b.kopts, | ||||
| 		kgo.ConsumerGroup(options.Group), | ||||
| 		kgo.ConsumeTopics(topic), | ||||
| 		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), | ||||
| 		kgo.FetchMaxWait(1*time.Second), | ||||
| 		kgo.AutoCommitInterval(commitInterval), | ||||
| 		kgo.OnPartitionsAssigned(sub.assigned), | ||||
| 		kgo.OnPartitionsRevoked(sub.revoked), | ||||
| 		kgo.StopProducerOnDataLossDetected(), | ||||
| 		kgo.OnPartitionsLost(sub.lost), | ||||
| 		kgo.AutoCommitCallback(sub.autocommit), | ||||
| 		kgo.AutoCommitMarks(), | ||||
| 	) | ||||
|  | ||||
| 	if options.Context != nil { | ||||
| 		if v, ok := options.Context.Value(optionsKey{}).([]kgo.Opt); ok && len(v) > 0 { | ||||
| 			kopts = append(kopts, v...) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	c, htracer, err := b.connect(ctx, kopts...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	mdreq := kmsg.NewMetadataRequest() | ||||
| 	mdreq.Topics = []kmsg.MetadataRequestTopic{ | ||||
| 		{Topic: &topic}, | ||||
| 	} | ||||
|  | ||||
| 	mdrsp, err := mdreq.RequestWith(ctx, c) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} else if mdrsp.Topics[0].ErrorCode != 0 { | ||||
| 		return nil, fmt.Errorf("topic %s not exists or permission error", topic) | ||||
| 	} | ||||
|  | ||||
| 	sub.c = c | ||||
| 	sub.htracer = htracer | ||||
|  | ||||
| 	go sub.poll(ctx) | ||||
|  | ||||
| 	b.mu.Lock() | ||||
| 	b.subs = append(b.subs, sub) | ||||
| 	b.mu.Unlock() | ||||
|  | ||||
| 	return sub, nil | ||||
| } | ||||
|  | ||||
| func (k *Broker) String() string { | ||||
| 	return "kgo" | ||||
| } | ||||
|  | ||||
| func NewBroker(opts ...broker.Option) *Broker { | ||||
| 	options := broker.NewOptions(opts...) | ||||
|  | ||||
| 	kaddrs := options.Addrs | ||||
| 	// shuffle addrs | ||||
| 	var rng mrand.Rand | ||||
| 	rng.Shuffle(len(kaddrs), func(i, j int) { | ||||
| 		kaddrs[i], kaddrs[j] = kaddrs[j], kaddrs[i] | ||||
| 	}) | ||||
|  | ||||
| 	td := DefaultCommitInterval | ||||
| 	if k.opts.Context != nil { | ||||
| 		if v, ok := k.opts.Context.Value(commitIntervalKey{}).(time.Duration); ok && v > 0 { | ||||
| 			td = v | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	sub := &subscriber{ | ||||
| 		topic:     topic, | ||||
| 		done:      make(chan struct{}), | ||||
| 		opts:      options, | ||||
| 		handler:   handler, | ||||
| 		kopts:     k.opts, | ||||
| 		consumers: make(map[string]map[int32]worker), | ||||
| 	} | ||||
|  | ||||
| 	kopts := append(k.kopts, | ||||
| 		kgo.SeedBrokers(kaddrs...), | ||||
| 		kgo.ConsumerGroup(options.Group), | ||||
| 		kgo.ConsumeTopics(topic), | ||||
| 		kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), | ||||
| 		kgo.FetchMaxWait(1*time.Second), | ||||
| 		// kgo.KeepControlRecords(), | ||||
| 		kgo.Balancers(kgo.CooperativeStickyBalancer(), kgo.StickyBalancer()), | ||||
| 		kgo.FetchIsolationLevel(kgo.ReadUncommitted()), | ||||
| 		kgo.WithHooks(&metrics{meter: k.opts.Meter}), | ||||
| 		kgo.AutoCommitMarks(), | ||||
| 		kgo.AutoCommitInterval(td), | ||||
| 		kgo.OnPartitionsAssigned(sub.assigned), | ||||
| 		kgo.OnPartitionsRevoked(sub.revoked), | ||||
| 		kgo.OnPartitionsLost(sub.revoked), | ||||
| 	) | ||||
|  | ||||
| 	reader, err := kgo.NewClient(kopts...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	sub.reader = reader | ||||
| 	go sub.run(ctx) | ||||
|  | ||||
| 	k.Lock() | ||||
| 	k.subs = append(k.subs, sub) | ||||
| 	k.Unlock() | ||||
| 	return sub, nil | ||||
| } | ||||
|  | ||||
| func (k *kBroker) String() string { | ||||
| 	return "kgo" | ||||
| } | ||||
|  | ||||
| func NewBroker(opts ...broker.Option) *kBroker { | ||||
| 	options := broker.NewOptions(opts...) | ||||
| 	if options.Codec.String() != "noop" { | ||||
| 		options.Logger.Infof(options.Context, "broker codec not noop, disable plain kafka headers usage") | ||||
| 	} | ||||
| 	kopts := []kgo.Opt{ | ||||
| 		kgo.DialTimeout(3 * time.Second), | ||||
| 		kgo.DisableIdempotentWrite(), | ||||
| 		kgo.ProducerBatchCompression(kgo.NoCompression()), | ||||
| 		kgo.WithLogger(&mlogger{l: options.Logger, ctx: options.Context}), | ||||
| 		kgo.RetryBackoffFn( | ||||
| 			func() func(int) time.Duration { | ||||
| 				var rng mrand.Rand | ||||
| 				return func(fails int) time.Duration { | ||||
| 					const ( | ||||
| 						min = 250 * time.Millisecond | ||||
| 						max = 2 * time.Second | ||||
| 					) | ||||
| 					if fails <= 0 { | ||||
| 						return min | ||||
| 					} | ||||
| 					if fails > 10 { | ||||
| 						return max | ||||
| 					} | ||||
|  | ||||
| 					backoff := min * time.Duration(1<<(fails-1)) | ||||
| 					jitter := 0.8 + 0.4*rng.Float64() | ||||
| 					backoff = time.Duration(float64(backoff) * jitter) | ||||
|  | ||||
| 					if backoff > max { | ||||
| 						return max | ||||
| 					} | ||||
| 					return backoff | ||||
| 				} | ||||
| 			}(), | ||||
| 		), | ||||
| 		kgo.WithLogger(&mlogger{l: options.Logger.Clone(logger.WithAddCallerSkipCount(2)), ctx: options.Context}), | ||||
| 		kgo.SeedBrokers(kaddrs...), | ||||
| 		kgo.RetryBackoffFn(DefaultRetryBackoffFn), | ||||
| 		kgo.BlockRebalanceOnPoll(), | ||||
| 		kgo.Balancers(kgo.CooperativeStickyBalancer()), | ||||
| 		kgo.FetchIsolationLevel(kgo.ReadUncommitted()), | ||||
| 		kgo.UnknownTopicRetries(1), | ||||
| 	} | ||||
|  | ||||
| 	if options.Context != nil { | ||||
| @@ -439,7 +653,8 @@ func NewBroker(opts ...broker.Option) *kBroker { | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return &kBroker{ | ||||
| 	return &Broker{ | ||||
| 		connected: &atomic.Uint32{}, | ||||
| 		opts:      options, | ||||
| 		kopts:     kopts, | ||||
| 	} | ||||
|   | ||||
							
								
								
									
										182
									
								
								kgo_test.go
									
									
									
									
									
								
							
							
						
						
									
										182
									
								
								kgo_test.go
									
									
									
									
									
								
							| @@ -2,56 +2,140 @@ package kgo_test | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"sync/atomic" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kfake" | ||||
| 	kg "github.com/twmb/franz-go/pkg/kgo" | ||||
| 	jsoncodec "github.com/unistack-org/micro-codec-json/v3" | ||||
| 	"github.com/unistack-org/micro/v3/broker" | ||||
| 	"github.com/unistack-org/micro/v3/logger" | ||||
| 	"github.com/unistack-org/micro/v3/metadata" | ||||
|  | ||||
| 	kgo "github.com/unistack-org/micro-broker-kgo/v3" | ||||
| 	kgo "go.unistack.org/micro-broker-kgo/v4" | ||||
| 	"go.unistack.org/micro/v4/broker" | ||||
| 	"go.unistack.org/micro/v4/codec" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| 	"go.unistack.org/micro/v4/logger/slog" | ||||
| 	"go.unistack.org/micro/v4/metadata" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	msgcnt   = int64(12000000) | ||||
| 	msgcnt   = int64(1200) | ||||
| 	group    = "38" | ||||
| 	prefill  = false | ||||
| 	loglevel = logger.InfoLevel | ||||
| 	prefill  = true | ||||
| 	loglevel = logger.ErrorLevel | ||||
| 	cluster  *kfake.Cluster | ||||
| ) | ||||
|  | ||||
| var bm = &broker.Message{ | ||||
| 	Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test"}, | ||||
| 	Body:   []byte(`"body"`), | ||||
| func TestMain(m *testing.M) { | ||||
| 	cluster = kfake.MustCluster( | ||||
| 		kfake.AllowAutoTopicCreation(), | ||||
| 	) | ||||
| 	defer cluster.Close() | ||||
| 	m.Run() | ||||
| } | ||||
|  | ||||
| func TestPubSub(t *testing.T) { | ||||
| 	if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 { | ||||
| 		t.Skip() | ||||
| 	} | ||||
|  | ||||
| 	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel), logger.WithCallerSkipCount(3)); err != nil { | ||||
| func TestFail(t *testing.T) { | ||||
| 	logger.DefaultLogger = slog.NewLogger() | ||||
| 	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	var addrs []string | ||||
| 	if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 { | ||||
| 		addrs = []string{"127.0.0.1:29091", "127.0.0.2:29092", "127.0.0.3:29093"} | ||||
| 	} else { | ||||
| 		addrs = strings.Split(addr, ",") | ||||
| 	b := kgo.NewBroker( | ||||
| 		broker.ContentType("application/octet-stream"), | ||||
| 		broker.Codec("application/octet-stream", codec.NewCodec()), | ||||
| 		broker.Addrs(cluster.ListenAddrs()...), | ||||
| 		kgo.CommitInterval(5*time.Second), | ||||
| 		kgo.Options( | ||||
| 			kg.ClientID("test"), | ||||
| 			kg.FetchMaxBytes(10*1024*1024), | ||||
| 			kg.AllowAutoTopicCreation(), | ||||
| 		), | ||||
| 	) | ||||
|  | ||||
| 	t.Logf("broker init") | ||||
| 	if err := b.Init(); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	t.Logf("broker connect") | ||||
| 	if err := b.Connect(ctx); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	defer func() { | ||||
| 		t.Logf("broker disconnect") | ||||
| 		if err := b.Disconnect(ctx); err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	t.Logf("broker health %v", b.Health()) | ||||
| 	msgs := make([]broker.Message, 0, msgcnt) | ||||
| 	for i := int64(0); i < msgcnt; i++ { | ||||
| 		m, err := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`)) | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 		msgs = append(msgs, m) | ||||
| 	} | ||||
|  | ||||
| 	go func() { | ||||
| 		for _, msg := range msgs { | ||||
| 			//		t.Logf("broker publish") | ||||
| 			if err := b.Publish(ctx, "test.fail", msg); err != nil { | ||||
| 				t.Fatal(err) | ||||
| 			} | ||||
| 		} | ||||
| 	}() | ||||
| 	//	t.Skip() | ||||
|  | ||||
| 	idx := int64(0) | ||||
| 	fn := func(msg broker.Message) error { | ||||
| 		atomic.AddInt64(&idx, 1) | ||||
| 		time.Sleep(100 * time.Millisecond) | ||||
| 		// t.Logf("ack") | ||||
| 		return msg.Ack() | ||||
| 	} | ||||
|  | ||||
| 	sub, err := b.Subscribe(ctx, "test.fail", fn, | ||||
| 		broker.SubscribeAutoAck(true), | ||||
| 		broker.SubscribeGroup(group), | ||||
| 		broker.SubscribeBodyOnly(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	defer func() { | ||||
| 		if err := sub.Unsubscribe(ctx); err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	for { | ||||
| 		t.Logf("health check") | ||||
| 		if !b.Health() { | ||||
| 			t.Logf("health works") | ||||
| 			break | ||||
| 		} | ||||
| 		t.Logf("health sleep") | ||||
| 		time.Sleep(100 * time.Millisecond) | ||||
| 		if err := b.Disconnect(ctx); err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestConnect(t *testing.T) { | ||||
| 	ctx := context.TODO() | ||||
| 	b := kgo.NewBroker( | ||||
| 		broker.Codec(jsoncodec.NewCodec()), | ||||
| 		broker.Addrs(addrs...), | ||||
| 		broker.ContentType("application/octet-stream"), | ||||
| 		broker.Codec("application/octet-stream", codec.NewCodec()), | ||||
| 		broker.Addrs(cluster.ListenAddrs()...), | ||||
| 		kgo.CommitInterval(5*time.Second), | ||||
| 		kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)), | ||||
| 		kgo.Options( | ||||
| 			kg.ClientID("test"), | ||||
| 			kg.FetchMaxBytes(10*1024*1024), | ||||
| 			kg.AllowAutoTopicCreation(), | ||||
| 		), | ||||
| 	) | ||||
| 	if err := b.Init(); err != nil { | ||||
| 		t.Fatal(err) | ||||
| @@ -60,6 +144,33 @@ func TestPubSub(t *testing.T) { | ||||
| 	if err := b.Connect(ctx); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestPubSub(t *testing.T) { | ||||
| 	if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	b := kgo.NewBroker( | ||||
| 		broker.ContentType("application/octet-stream"), | ||||
| 		broker.Codec("application/octet-stream", codec.NewCodec()), | ||||
| 		broker.Addrs(cluster.ListenAddrs()...), | ||||
| 		kgo.CommitInterval(5*time.Second), | ||||
| 		kgo.Options( | ||||
| 			kg.ClientID("test"), | ||||
| 			kg.FetchMaxBytes(10*1024*1024), | ||||
| 			kg.AllowAutoTopicCreation(), | ||||
| 		), | ||||
| 	) | ||||
|  | ||||
| 	if err := b.Init(); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	if err := b.Connect(ctx); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	defer func() { | ||||
| 		if err := b.Disconnect(ctx); err != nil { | ||||
| @@ -67,25 +178,26 @@ func TestPubSub(t *testing.T) { | ||||
| 		} | ||||
| 	}() | ||||
| 	if prefill { | ||||
| 		msgs := make([]*broker.Message, 0, msgcnt) | ||||
| 		msgs := make([]broker.Message, 0, msgcnt) | ||||
| 		for i := int64(0); i < msgcnt; i++ { | ||||
| 			msgs = append(msgs, bm) | ||||
| 			m, _ := b.NewMessage(ctx, metadata.Pairs("hkey", "hval"), []byte(`test`)) | ||||
| 			msgs = append(msgs, m) | ||||
| 		} | ||||
|  | ||||
| 		if err := b.BatchPublish(ctx, msgs); err != nil { | ||||
| 		if err := b.Publish(ctx, "test.pubsub", msgs...); err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 		//	t.Skip() | ||||
| 	} | ||||
| 	done := make(chan bool, 1) | ||||
| 	idx := int64(0) | ||||
| 	fn := func(msg broker.Event) error { | ||||
| 	fn := func(msg broker.Message) error { | ||||
| 		atomic.AddInt64(&idx, 1) | ||||
| 		// time.Sleep(200 * time.Millisecond) | ||||
| 		return msg.Ack() | ||||
| 	} | ||||
|  | ||||
| 	sub, err := b.Subscribe(ctx, "test", fn, | ||||
| 	sub, err := b.Subscribe(ctx, "test.pubsub", fn, | ||||
| 		broker.SubscribeAutoAck(true), | ||||
| 		broker.SubscribeGroup(group), | ||||
| 		broker.SubscribeBodyOnly(true)) | ||||
| @@ -110,7 +222,7 @@ func TestPubSub(t *testing.T) { | ||||
| 				if prc := atomic.LoadInt64(&idx); prc == msgcnt { | ||||
| 					close(done) | ||||
| 				} else { | ||||
| 					fmt.Printf("processed %v\n", prc) | ||||
| 					t.Logf("processed %v of %v\n", prc, msgcnt) | ||||
| 				} | ||||
| 			case <-ticker.C: | ||||
| 				close(done) | ||||
|   | ||||
							
								
								
									
										47
									
								
								logger.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								logger.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,47 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| ) | ||||
|  | ||||
| type mlogger struct { | ||||
| 	l   logger.Logger | ||||
| 	ctx context.Context | ||||
| } | ||||
|  | ||||
| func (l *mlogger) Log(lvl kgo.LogLevel, msg string, args ...interface{}) { | ||||
| 	var mlvl logger.Level | ||||
| 	switch lvl { | ||||
| 	case kgo.LogLevelNone: | ||||
| 		return | ||||
| 	case kgo.LogLevelError: | ||||
| 		mlvl = logger.ErrorLevel | ||||
| 	case kgo.LogLevelWarn: | ||||
| 		mlvl = logger.WarnLevel | ||||
| 	case kgo.LogLevelInfo: | ||||
| 		mlvl = logger.InfoLevel | ||||
| 	case kgo.LogLevelDebug: | ||||
| 		mlvl = logger.DebugLevel | ||||
| 	default: | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	l.l.Log(l.ctx, mlvl, msg, args...) | ||||
| } | ||||
|  | ||||
| func (l *mlogger) Level() kgo.LogLevel { | ||||
| 	switch l.l.Options().Level { | ||||
| 	case logger.ErrorLevel: | ||||
| 		return kgo.LogLevelError | ||||
| 	case logger.WarnLevel: | ||||
| 		return kgo.LogLevelWarn | ||||
| 	case logger.InfoLevel: | ||||
| 		return kgo.LogLevelInfo | ||||
| 	case logger.DebugLevel, logger.TraceLevel: | ||||
| 		return kgo.LogLevelDebug | ||||
| 	} | ||||
| 	return kgo.LogLevelNone | ||||
| } | ||||
| @@ -6,22 +6,29 @@ import ( | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/unistack-org/micro/v3/meter" | ||||
| 	"go.unistack.org/micro/v4/meter" | ||||
| ) | ||||
| 
 | ||||
| type metrics struct { | ||||
| type hookMeter struct { | ||||
| 	meter meter.Meter | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	_ kgo.HookBrokerConnect       = &metrics{} | ||||
| 	_ kgo.HookBrokerDisconnect    = &metrics{} | ||||
| 	_ kgo.HookBrokerRead          = &metrics{} | ||||
| 	_ kgo.HookBrokerThrottle      = &metrics{} | ||||
| 	_ kgo.HookBrokerWrite         = &metrics{} | ||||
| 	_ kgo.HookFetchBatchRead      = &metrics{} | ||||
| 	_ kgo.HookProduceBatchWritten = &metrics{} | ||||
| 	_ kgo.HookGroupManageError    = &metrics{} | ||||
| 	_ kgo.HookBrokerConnect    = &hookMeter{} | ||||
| 	_ kgo.HookBrokerDisconnect = &hookMeter{} | ||||
| 	// HookBrokerE2E | ||||
| 	_ kgo.HookBrokerRead     = &hookMeter{} | ||||
| 	_ kgo.HookBrokerThrottle = &hookMeter{} | ||||
| 	_ kgo.HookBrokerWrite    = &hookMeter{} | ||||
| 	_ kgo.HookFetchBatchRead = &hookMeter{} | ||||
| 	// HookFetchRecordBuffered | ||||
| 	// HookFetchRecordUnbuffered | ||||
| 	_ kgo.HookGroupManageError = &hookMeter{} | ||||
| 	// HookNewClient | ||||
| 	_ kgo.HookProduceBatchWritten = &hookMeter{} | ||||
| 	// HookProduceRecordBuffered | ||||
| 	// HookProduceRecordPartitioned | ||||
| 	// HookProduceRecordUnbuffered | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| @@ -54,11 +61,11 @@ const ( | ||||
| 	labelTopic   = "topic" | ||||
| ) | ||||
| 
 | ||||
| func (m *metrics) OnGroupManageError(err error) { | ||||
| func (m *hookMeter) OnGroupManageError(_ error) { | ||||
| 	m.meter.Counter(metricBrokerGroupErrors).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { | ||||
| func (m *hookMeter) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelFaulure).Inc() | ||||
| @@ -67,12 +74,12 @@ func (m *metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ ne | ||||
| 	m.meter.Counter(metricBrokerConnects, labelNode, node, labelStatus, labelSuccess).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { | ||||
| func (m *hookMeter) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerDisconnects, labelNode, node).Inc() | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) { | ||||
| func (m *hookMeter) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerWriteErrors, labelNode, node).Inc() | ||||
| @@ -83,7 +90,7 @@ func (m *metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten i | ||||
| 	m.meter.Histogram(metricBrokerWriteLatencies, labelNode, node).Update(timeToWrite.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) { | ||||
| func (m *hookMeter) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	if err != nil { | ||||
| 		m.meter.Counter(metricBrokerReadErrors, labelNode, node).Inc() | ||||
| @@ -95,18 +102,18 @@ func (m *metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, | ||||
| 	m.meter.Histogram(metricBrokerReadLatencies, labelNode, node).Update(timeToRead.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { | ||||
| func (m *hookMeter) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Histogram(metricBrokerThrottleLatencies, labelNode, node).Update(throttleInterval.Seconds()) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) { | ||||
| func (m *hookMeter) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerProduceBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes) | ||||
| 	m.meter.Counter(metricBrokerProduceBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes) | ||||
| } | ||||
| 
 | ||||
| func (m *metrics) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) { | ||||
| func (m *hookMeter) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) { | ||||
| 	node := strconv.Itoa(int(meta.NodeID)) | ||||
| 	m.meter.Counter(metricBrokerFetchBytesUncompressed, labelNode, node, labelTopic, topic).Add(kmetrics.UncompressedBytes) | ||||
| 	m.meter.Counter(metricBrokerFetchBytesCompressed, labelNode, node, labelTopic, topic).Add(kmetrics.CompressedBytes) | ||||
							
								
								
									
										94
									
								
								options.go
									
									
									
									
									
								
							
							
						
						
									
										94
									
								
								options.go
									
									
									
									
									
								
							| @@ -4,13 +4,21 @@ import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	kgo "github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/unistack-org/micro/v3/broker" | ||||
| 	"github.com/unistack-org/micro/v3/client" | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"go.unistack.org/micro/v4/broker" | ||||
| ) | ||||
|  | ||||
| // DefaultCommitInterval specifies how fast send commit offsets to kafka | ||||
| var DefaultCommitInterval = 5 * time.Second | ||||
| var ( | ||||
|  | ||||
| 	// DefaultCommitInterval specifies how fast send commit offsets to kafka | ||||
| 	DefaultCommitInterval = 5 * time.Second | ||||
|  | ||||
| 	// DefaultStatsInterval specifies how fast check consumer lag | ||||
| 	DefaultStatsInterval = 30 * time.Second | ||||
|  | ||||
| 	// DefaultSubscribeMaxInflight specifies how much messages keep inflight | ||||
| 	DefaultSubscribeMaxInflight = 100 | ||||
| ) | ||||
|  | ||||
| type subscribeContextKey struct{} | ||||
|  | ||||
| @@ -19,16 +27,11 @@ func SubscribeContext(ctx context.Context) broker.SubscribeOption { | ||||
| 	return broker.SetSubscribeOption(subscribeContextKey{}, ctx) | ||||
| } | ||||
|  | ||||
| type publishKey struct{} | ||||
| type messageKey struct{} | ||||
|  | ||||
| // PublishKey set the kafka message key (broker option) | ||||
| func PublishKey(key []byte) broker.PublishOption { | ||||
| 	return broker.SetPublishOption(publishKey{}, key) | ||||
| } | ||||
|  | ||||
| // ClientPublishKey set the kafka message key (client option) | ||||
| func ClientPublishKey(key []byte) client.PublishOption { | ||||
| 	return client.SetPublishOption(publishKey{}, key) | ||||
| // MessageKey set the kafka message key (broker option) | ||||
| func MessageKey(key []byte) broker.MessageOption { | ||||
| 	return broker.SetMessageOption(messageKey{}, key) | ||||
| } | ||||
|  | ||||
| type optionsKey struct{} | ||||
| @@ -48,6 +51,39 @@ func Options(opts ...kgo.Opt) broker.Option { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // SubscribeOptions pass additional options to broker in Subscribe | ||||
| func SubscribeOptions(opts ...kgo.Opt) broker.SubscribeOption { | ||||
| 	return func(o *broker.SubscribeOptions) { | ||||
| 		if o.Context == nil { | ||||
| 			o.Context = context.Background() | ||||
| 		} | ||||
| 		options, ok := o.Context.Value(optionsKey{}).([]kgo.Opt) | ||||
| 		if !ok { | ||||
| 			options = make([]kgo.Opt, 0, len(opts)) | ||||
| 		} | ||||
| 		options = append(options, opts...) | ||||
| 		o.Context = context.WithValue(o.Context, optionsKey{}, options) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type fatalOnErrorKey struct{} | ||||
|  | ||||
| func FatalOnError(b bool) broker.Option { | ||||
| 	return broker.SetOption(fatalOnErrorKey{}, b) | ||||
| } | ||||
|  | ||||
| type clientIDKey struct{} | ||||
|  | ||||
| func ClientID(id string) broker.Option { | ||||
| 	return broker.SetOption(clientIDKey{}, id) | ||||
| } | ||||
|  | ||||
| type groupKey struct{} | ||||
|  | ||||
| func Group(id string) broker.Option { | ||||
| 	return broker.SetOption(groupKey{}, id) | ||||
| } | ||||
|  | ||||
| type commitIntervalKey struct{} | ||||
|  | ||||
| // CommitInterval specifies interval to send commits | ||||
| @@ -55,11 +91,35 @@ func CommitInterval(td time.Duration) broker.Option { | ||||
| 	return broker.SetOption(commitIntervalKey{}, td) | ||||
| } | ||||
|  | ||||
| var DefaultSubscribeMaxInflight = 1000 | ||||
|  | ||||
| type subscribeMaxInflightKey struct{} | ||||
|  | ||||
| // SubscribeMaxInFlight specifies interval to send commits | ||||
| // SubscribeMaxInFlight max queued messages | ||||
| func SubscribeMaxInFlight(n int) broker.SubscribeOption { | ||||
| 	return broker.SetSubscribeOption(subscribeMaxInflightKey{}, n) | ||||
| } | ||||
|  | ||||
| // SubscribeMaxInFlight max queued messages | ||||
| func SubscribeFatalOnError(b bool) broker.SubscribeOption { | ||||
| 	return broker.SetSubscribeOption(fatalOnErrorKey{}, b) | ||||
| } | ||||
|  | ||||
| type messagePromiseKey struct{} | ||||
|  | ||||
| // MessagePromise set the kafka promise func for Produce | ||||
| func MessagePromise(fn func(*kgo.Record, error)) broker.MessageOption { | ||||
| 	return broker.SetMessageOption(messagePromiseKey{}, fn) | ||||
| } | ||||
|  | ||||
| type subscribeMessagePoolKey struct{} | ||||
|  | ||||
| // SubscribeMessagePool optionaly enabled/disable message pool | ||||
| func SubscribeMessagePool(b bool) broker.SubscribeOption { | ||||
| 	return broker.SetSubscribeOption(subscribeMessagePoolKey{}, b) | ||||
| } | ||||
|  | ||||
| type exposeLagKey struct{} | ||||
|  | ||||
| // ExposeLag enabled subscriber lag via [meter.Meter] | ||||
| func ExposeLag(b bool) broker.Option { | ||||
| 	return broker.SetOption(exposeLagKey{}, b) | ||||
| } | ||||
|   | ||||
							
								
								
									
										290
									
								
								subscriber.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										290
									
								
								subscriber.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,290 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/twmb/franz-go/pkg/kmsg" | ||||
| 	"go.unistack.org/micro/v4/broker" | ||||
| 	"go.unistack.org/micro/v4/logger" | ||||
| 	"go.unistack.org/micro/v4/metadata" | ||||
| 	"go.unistack.org/micro/v4/semconv" | ||||
| 	"go.unistack.org/micro/v4/tracer" | ||||
| ) | ||||
|  | ||||
| type tp struct { | ||||
| 	t string | ||||
| 	p int32 | ||||
| } | ||||
|  | ||||
| type consumer struct { | ||||
| 	topic       string | ||||
| 	c           *kgo.Client | ||||
| 	htracer     *hookTracer | ||||
| 	quit        chan struct{} | ||||
| 	done        chan struct{} | ||||
| 	recs        chan kgo.FetchTopicPartition | ||||
| 	kopts       broker.Options | ||||
| 	partition   int32 | ||||
| 	opts        broker.SubscribeOptions | ||||
| 	handler     interface{} | ||||
| 	connected   *atomic.Uint32 | ||||
| 	messagePool bool | ||||
| } | ||||
|  | ||||
| type Subscriber struct { | ||||
| 	consumers    map[tp]*consumer | ||||
| 	c            *kgo.Client | ||||
| 	htracer      *hookTracer | ||||
| 	topic        string | ||||
| 	messagePool  bool | ||||
| 	handler      interface{} | ||||
| 	done         chan struct{} | ||||
| 	kopts        broker.Options | ||||
| 	opts         broker.SubscribeOptions | ||||
| 	connected    *atomic.Uint32 | ||||
| 	mu           sync.RWMutex | ||||
| 	closed       bool | ||||
| 	fatalOnError bool | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) Client() *kgo.Client { | ||||
| 	return s.c | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) Options() broker.SubscribeOptions { | ||||
| 	return s.opts | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) Topic() string { | ||||
| 	return s.topic | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) Unsubscribe(ctx context.Context) error { | ||||
| 	if s.closed { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	s.c.PauseFetchTopics(s.topic) | ||||
| 	s.c.CloseAllowingRebalance() | ||||
| 	kc := make(map[string][]int32) | ||||
| 	for ctp := range s.consumers { | ||||
| 		kc[ctp.t] = append(kc[ctp.t], ctp.p) | ||||
| 	} | ||||
| 	s.killConsumers(ctx, kc) | ||||
| 	close(s.done) | ||||
| 	s.closed = true | ||||
| 	s.c.ResumeFetchTopics(s.topic) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) poll(ctx context.Context) { | ||||
| 	maxInflight := DefaultSubscribeMaxInflight | ||||
|  | ||||
| 	if s.opts.Context != nil { | ||||
| 		if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); n > 0 && ok { | ||||
| 			maxInflight = n | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| 			s.c.CloseAllowingRebalance() | ||||
| 			return | ||||
| 		case <-s.done: | ||||
| 			return | ||||
| 		default: | ||||
| 			fetches := s.c.PollRecords(ctx, maxInflight) | ||||
| 			if !s.closed && fetches.IsClientClosed() { | ||||
| 				s.closed = true | ||||
| 				return | ||||
| 			} | ||||
| 			fetches.EachError(func(t string, p int32, err error) { | ||||
| 				if kgo.IsRetryableBrokerErr(err) { | ||||
| 					s.kopts.Logger.Error(ctx, fmt.Sprintf("[kgo] fetch topic %s partition %d error", t, p), err) | ||||
| 				} else { | ||||
| 					s.kopts.Logger.Fatal(ctx, fmt.Sprintf("[kgo] fetch topic %s partition %d error", t, p), err) | ||||
| 				} | ||||
| 			}) | ||||
|  | ||||
| 			fetches.EachPartition(func(p kgo.FetchTopicPartition) { | ||||
| 				tps := tp{p.Topic, p.Partition} | ||||
| 				s.mu.Lock() | ||||
| 				c := s.consumers[tps] | ||||
| 				s.mu.Unlock() | ||||
| 				if c != nil { | ||||
| 					c.recs <- p | ||||
| 				} | ||||
| 			}) | ||||
| 			s.c.AllowRebalance() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32) { | ||||
| 	var wg sync.WaitGroup | ||||
| 	defer wg.Wait() | ||||
|  | ||||
| 	for topic, partitions := range lost { | ||||
| 		for _, partition := range partitions { | ||||
| 			tps := tp{topic, partition} | ||||
| 			s.mu.Lock() | ||||
| 			pc, ok := s.consumers[tps] | ||||
| 			s.mu.Unlock() | ||||
| 			if !ok { | ||||
| 				continue | ||||
| 			} | ||||
| 			s.mu.Lock() | ||||
| 			delete(s.consumers, tps) | ||||
| 			s.mu.Unlock() | ||||
| 			close(pc.quit) | ||||
| 			if s.kopts.Logger.V(logger.DebugLevel) { | ||||
| 				s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] waiting for work to finish topic %s partition %d", topic, partition)) | ||||
| 			} | ||||
| 			wg.Add(1) | ||||
| 			go func() { <-pc.done; wg.Done() }() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) autocommit(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, _ *kmsg.OffsetCommitResponse, err error) { | ||||
| 	if err != nil { | ||||
| 		//		s.connected.Store(0) | ||||
| 		if s.fatalOnError { | ||||
| 			s.kopts.Logger.Fatal(context.TODO(), "kgo.AutoCommitCallback error", err) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) { | ||||
| 	if s.kopts.Logger.V(logger.ErrorLevel) { | ||||
| 		s.kopts.Logger.Error(ctx, fmt.Sprintf("[kgo] lost %#+v", lost)) | ||||
| 	} | ||||
| 	s.killConsumers(ctx, lost) | ||||
| 	// s.connected.Store(0) | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) { | ||||
| 	if s.kopts.Logger.V(logger.DebugLevel) { | ||||
| 		s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] revoked %#+v", revoked)) | ||||
| 	} | ||||
| 	s.killConsumers(ctx, revoked) | ||||
| 	if err := c.CommitMarkedOffsets(ctx); err != nil { | ||||
| 		s.kopts.Logger.Error(ctx, "[kgo] revoked CommitMarkedOffsets error", err) | ||||
| 		// s.connected.Store(0) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) { | ||||
| 	for topic, partitions := range assigned { | ||||
| 		for _, partition := range partitions { | ||||
| 			pc := &consumer{ | ||||
| 				c:           c, | ||||
| 				topic:       topic, | ||||
| 				partition:   partition, | ||||
| 				htracer:     s.htracer, | ||||
| 				quit:        make(chan struct{}), | ||||
| 				done:        make(chan struct{}), | ||||
| 				recs:        make(chan kgo.FetchTopicPartition, 100), | ||||
| 				handler:     s.handler, | ||||
| 				messagePool: s.messagePool, | ||||
| 				kopts:       s.kopts, | ||||
| 				opts:        s.opts, | ||||
| 				connected:   s.connected, | ||||
| 			} | ||||
| 			s.mu.Lock() | ||||
| 			s.consumers[tp{topic, partition}] = pc | ||||
| 			s.mu.Unlock() | ||||
| 			go pc.consume() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (pc *consumer) consume() { | ||||
| 	var err error | ||||
|  | ||||
| 	defer close(pc.done) | ||||
| 	if pc.kopts.Logger.V(logger.DebugLevel) { | ||||
| 		pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("starting, topic %s partition %d", pc.topic, pc.partition)) | ||||
| 		defer pc.kopts.Logger.Debug(pc.kopts.Context, fmt.Sprintf("killing, topic %s partition %d", pc.topic, pc.partition)) | ||||
| 	} | ||||
|  | ||||
| 	var pm *kgoMessage | ||||
|  | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-pc.quit: | ||||
| 			return | ||||
| 		case p := <-pc.recs: | ||||
| 			for _, record := range p.Records { | ||||
| 				ctx, sp := pc.htracer.WithProcessSpan(record) | ||||
| 				ts := time.Now() | ||||
| 				pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Inc() | ||||
|  | ||||
| 				if pc.messagePool { | ||||
| 					pm = messagePool.Get().(*kgoMessage) | ||||
| 				} else { | ||||
| 					pm = &kgoMessage{} | ||||
| 				} | ||||
| 				pm.body = record.Value | ||||
| 				pm.topic = record.Topic | ||||
| 				pm.ack = false | ||||
| 				pm.hdr = metadata.New(len(record.Headers)) | ||||
| 				pm.ctx = ctx | ||||
| 				for _, hdr := range record.Headers { | ||||
| 					pm.hdr.Set(hdr.Key, string(hdr.Value)) | ||||
| 				} | ||||
| 				pm.hdr.Set("Micro-Offset", strconv.FormatInt(record.Offset, 10)) | ||||
| 				pm.hdr.Set("Micro-Partition", strconv.FormatInt(int64(record.Partition), 10)) | ||||
| 				pm.hdr.Set("Micro-Topic", record.Topic) | ||||
| 				pm.hdr.Set("Micro-Key", string(record.Key)) | ||||
| 				pm.hdr.Set("Micro-Timestamp", strconv.FormatInt(record.Timestamp.Unix(), 10)) | ||||
| 				switch h := pc.handler.(type) { | ||||
| 				case func(broker.Message) error: | ||||
| 					err = h(pm) | ||||
| 				case func([]broker.Message) error: | ||||
| 					err = h([]broker.Message{pm}) | ||||
| 				} | ||||
|  | ||||
| 				pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec() | ||||
| 				if err != nil { | ||||
| 					if sp != nil { | ||||
| 						sp.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 					} | ||||
| 					pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc() | ||||
| 				} else if pc.opts.AutoAck { | ||||
| 					pm.ack = true | ||||
| 				} | ||||
|  | ||||
| 				te := time.Since(ts) | ||||
| 				pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
| 				pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds()) | ||||
|  | ||||
| 				ack := pm.ack | ||||
| 				if pc.messagePool { | ||||
| 					messagePool.Put(p) | ||||
| 				} | ||||
| 				if ack { | ||||
| 					pc.c.MarkCommitRecords(record) | ||||
| 				} else { | ||||
| 					if sp != nil { | ||||
| 						sp.Finish() | ||||
| 					} | ||||
| 					//					pc.connected.Store(0) | ||||
| 					pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] message not commited") | ||||
| 					return | ||||
| 				} | ||||
| 				if sp != nil { | ||||
| 					sp.Finish() | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										223
									
								
								tracer.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										223
									
								
								tracer.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,223 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/twmb/franz-go/pkg/kgo" | ||||
| 	semconv "go.opentelemetry.io/otel/semconv/v1.18.0" | ||||
| 	"go.unistack.org/micro/v4/metadata" | ||||
| 	"go.unistack.org/micro/v4/tracer" | ||||
| ) | ||||
|  | ||||
// hookTracer implements kgo produce/fetch hooks that open and close tracer
// spans for every record passing through the client. All hooks are no-ops
// when the underlying tracer is disabled.
type hookTracer struct {
	tracer   tracer.Tracer
	clientID string // kafka client id; attached as a span label when non-empty
	group    string // consumer group; attached on fetch/process spans when non-empty
}

// messagingSystem is the constant messaging.system=kafka span attribute
// shared by every span this package creates.
var messagingSystem = semconv.MessagingSystemKey.String("kafka")

// Compile-time assertions that hookTracer satisfies the kgo hook interfaces.
var (
	_ kgo.HookProduceRecordBuffered   = (*hookTracer)(nil)
	_ kgo.HookProduceRecordUnbuffered = (*hookTracer)(nil)
	_ kgo.HookFetchRecordBuffered     = (*hookTracer)(nil)
	_ kgo.HookFetchRecordUnbuffered   = (*hookTracer)(nil)
)
|  | ||||
// OnProduceRecordBuffered starts a new span for the "publish" operation on a
// buffered record.
//
// It sets span options and injects the span context into record and updates
// the record's context, so it can be ended in the OnProduceRecordUnbuffered
// hook.
func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
	if !m.tracer.Enabled() {
		return
	}
	// Set up span options.
	attrs := []interface{}{
		messagingSystem,
		semconv.MessagingDestinationKindTopic,
		semconv.MessagingDestinationName(r.Topic),
		semconv.MessagingOperationPublish,
	}
	attrs = maybeKeyAttr(attrs, r)
	if m.clientID != "" {
		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
	}
	opts := []tracer.SpanOption{
		tracer.WithSpanLabels(attrs...),
		tracer.WithSpanKind(tracer.SpanKindProducer),
	}

	if r.Context == nil {
		r.Context = context.Background()
	}

	// Merge any outgoing-context metadata with the record's kafka headers.
	omd, ok := metadata.FromOutgoingContext(r.Context)
	if !ok {
		omd = metadata.New(len(r.Headers))
	}

	md := metadata.Copy(omd)
	for _, h := range r.Headers {
		md.Set(h.Key, string(h.Value))
	}

	// If the context carried no metadata, attach the merged set before
	// starting the span; otherwise start the span on the context as-is.
	// NOTE(review): in the ok branch the merged `md` is discarded — confirm
	// that is intended.
	if !ok {
		r.Context, _ = m.tracer.Start(metadata.NewOutgoingContext(r.Context, md), "sdk.broker", opts...)
	} else {
		r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
	}

	setHeaders(r, omd, metadata.HeaderContentType)
}
|  | ||||
| // OnProduceRecordUnbuffered continues and ends the "publish" span for an | ||||
| // unbuffered record. | ||||
| // | ||||
| // It sets attributes with values unset when producing and records any error | ||||
| // that occurred during the publish operation. | ||||
| func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) { | ||||
| 	if !m.tracer.Enabled() { | ||||
| 		return | ||||
| 	} | ||||
| 	if span, ok := tracer.SpanFromContext(r.Context); ok { | ||||
| 		span.AddLabels( | ||||
| 			semconv.MessagingKafkaDestinationPartition(int(r.Partition)), | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			span.SetStatus(tracer.SpanStatusError, err.Error()) | ||||
| 		} | ||||
| 		span.Finish() | ||||
| 	} | ||||
| } | ||||
|  | ||||
// OnFetchRecordBuffered starts a new span for the "receive" operation on a
// buffered record.
//
// It sets the span options and extracts the span context from the record,
// updates the record's context to ensure it can be ended in the
// OnFetchRecordUnbuffered hook and can be used in downstream consumer
// processing.
func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
	if !m.tracer.Enabled() {
		return
	}
	// Set up the span options.
	attrs := []interface{}{
		messagingSystem,
		semconv.MessagingSourceKindTopic,
		semconv.MessagingSourceName(r.Topic),
		semconv.MessagingOperationReceive,
		semconv.MessagingKafkaSourcePartition(int(r.Partition)),
	}
	attrs = maybeKeyAttr(attrs, r)
	if m.clientID != "" {
		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
	}
	if m.group != "" {
		attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
	}
	opts := []tracer.SpanOption{
		tracer.WithSpanLabels(attrs...),
		tracer.WithSpanKind(tracer.SpanKindConsumer),
	}

	if r.Context == nil {
		r.Context = context.Background()
	}
	// Merge any incoming-context metadata with the record's kafka headers.
	omd, ok := metadata.FromIncomingContext(r.Context)
	if !ok {
		omd = metadata.New(len(r.Headers))
	}

	md := metadata.Copy(omd)
	for _, h := range r.Headers {
		md.Set(h.Key, string(h.Value))
	}

	// If the context carried no metadata, attach the merged set before
	// starting the span; otherwise start the span on the context as-is.
	// NOTE(review): in the ok branch the merged `md` is discarded — confirm
	// that is intended (mirrors OnProduceRecordBuffered).
	if !ok {
		r.Context, _ = m.tracer.Start(metadata.NewIncomingContext(r.Context, md), "sdk.broker", opts...)
	} else {
		r.Context, _ = m.tracer.Start(r.Context, "sdk.broker", opts...)
	}

	setHeaders(r, omd, metadata.HeaderContentType)
}
|  | ||||
| // OnFetchRecordUnbuffered continues and ends the "receive" span for an | ||||
| // unbuffered record. | ||||
| func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) { | ||||
| 	if !m.tracer.Enabled() { | ||||
| 		return | ||||
| 	} | ||||
| 	span, _ := tracer.SpanFromContext(r.Context) | ||||
| 	span.Finish() | ||||
| } | ||||
|  | ||||
| // WithProcessSpan starts a new span for the "process" operation on a consumer | ||||
| // record. | ||||
| // | ||||
| // It sets up the span options. The user's application code is responsible for | ||||
| // ending the span. | ||||
| // | ||||
| // This should only ever be called within a polling loop of a consumed record and | ||||
| // not a record which has been created for producing, so call this at the start of each | ||||
| // iteration of your processing for the record. | ||||
| func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Span) { | ||||
| 	if r.Context == nil { | ||||
| 		r.Context = context.Background() | ||||
| 	} | ||||
|  | ||||
| 	if !m.tracer.Enabled() { | ||||
| 		return r.Context, nil | ||||
| 	} | ||||
| 	// Set up the span options. | ||||
| 	attrs := []interface{}{ | ||||
| 		messagingSystem, | ||||
| 		semconv.MessagingSourceKindTopic, | ||||
| 		semconv.MessagingSourceName(r.Topic), | ||||
| 		semconv.MessagingOperationProcess, | ||||
| 		semconv.MessagingKafkaSourcePartition(int(r.Partition)), | ||||
| 		semconv.MessagingKafkaMessageOffset(int(r.Offset)), | ||||
| 	} | ||||
| 	attrs = maybeKeyAttr(attrs, r) | ||||
| 	if m.clientID != "" { | ||||
| 		attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID)) | ||||
| 	} | ||||
| 	if m.group != "" { | ||||
| 		attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group)) | ||||
| 	} | ||||
| 	opts := []tracer.SpanOption{ | ||||
| 		tracer.WithSpanLabels(attrs...), | ||||
| 		tracer.WithSpanKind(tracer.SpanKindConsumer), | ||||
| 	} | ||||
|  | ||||
| 	if r.Context == nil { | ||||
| 		r.Context = context.Background() | ||||
| 	} | ||||
| 	md, ok := metadata.FromIncomingContext(r.Context) | ||||
| 	if !ok { | ||||
| 		md = metadata.New(len(r.Headers)) | ||||
| 	} | ||||
| 	for _, h := range r.Headers { | ||||
| 		md.Set(h.Key, string(h.Value)) | ||||
| 	} | ||||
|  | ||||
| 	// Start a new span using the provided context and options. | ||||
| 	return m.tracer.Start(r.Context, "sdk.broker", opts...) | ||||
| } | ||||
|  | ||||
| func maybeKeyAttr(attrs []interface{}, r *kgo.Record) []interface{} { | ||||
| 	if r.Key == nil { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	var keykey string | ||||
| 	if !utf8.Valid(r.Key) { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	keykey = string(r.Key) | ||||
| 	return append(attrs, semconv.MessagingKafkaMessageKeyKey.String(keykey)) | ||||
| } | ||||
							
								
								
									
										220
									
								
								util.go
									
									
									
									
									
								
							
							
						
						
									
										220
									
								
								util.go
									
									
									
									
									
								
							| @@ -1,220 +0,0 @@ | ||||
| package kgo | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"sync" | ||||
|  | ||||
| 	kgo "github.com/twmb/franz-go/pkg/kgo" | ||||
| 	"github.com/unistack-org/micro/v3/broker" | ||||
| 	"github.com/unistack-org/micro/v3/logger" | ||||
| 	"github.com/unistack-org/micro/v3/metadata" | ||||
| ) | ||||
|  | ||||
// ErrLostMessage is sent on a worker's error channel when a handled message
// was not acked and would otherwise be dropped on the next fetch iteration.
var ErrLostMessage = errors.New("message not marked for offsets commit and will be lost in next iteration")

// pPool recycles publication wrappers to avoid allocating one per record.
var pPool = sync.Pool{
	New: func() interface{} {
		return &publication{msg: &broker.Message{}}
	},
}
|  | ||||
// worker consumes the records of a single assigned topic/partition.
type worker struct {
	done         chan struct{} // closed on revoke to stop this worker
	recs         chan []*kgo.Record // record batches fed by the run loop
	cherr        chan error // fatal handler errors reported back to the run loop
	handler      broker.Handler
	batchHandler broker.BatchHandler
	opts         broker.SubscribeOptions
	kopts        broker.Options
	tpmap        map[string][]int32 // this worker's topic->partitions, used for pause/resume
	maxInflight  int // batch size at which fetching is paused until drained
	reader       *kgo.Client
	ctx          context.Context // NOTE(review): storing a ctx in a struct is discouraged in Go
}
|  | ||||
// run is the legacy subscriber fetch loop: it polls fetches from the client
// and hands each partition's record batch to the matching worker, stopping on
// context cancellation, client close, or a fetch/handler error.
func (s *subscriber) run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-s.kopts.Context.Done():
			return
		default:
			fetches := s.reader.PollFetches(ctx)
			if fetches.IsClientClosed() {
				// TODO: fatal ?
				return
			}
			// Any fetch error terminates the process via Fatalf.
			if len(fetches.Errors()) > 0 {
				for _, err := range fetches.Errors() {
					s.kopts.Logger.Fatalf(ctx, "fetch err topic %s partition %d: %v", err.Topic, err.Partition, err.Err)
				}
				// TODO: fatal ?
				return
			}

			fetches.EachPartition(func(p kgo.FetchTopicPartition) {
				// Look up the worker registered for this topic/partition;
				// drop the batch if none exists (e.g. after a revoke).
				s.Lock()
				consumers := s.consumers[p.Topic]
				s.Unlock()
				if consumers == nil {
					return
				}
				w, ok := consumers[p.Partition]
				if !ok {
					return
				}
				// Either deliver the batch, observe a pending worker error,
				// or bail out if the worker was already stopped.
				select {
				case err := <-w.cherr:
					s.kopts.Logger.Fatalf(ctx, "handle err: %v", err)
					return
				case w.recs <- p.Records:
				case <-w.done:
				}
			})
		}
	}
}
|  | ||||
// assigned (legacy) is the rebalance callback for newly assigned partitions:
// it creates and starts one worker per topic/partition under the subscriber
// lock.
func (s *subscriber) assigned(ctx context.Context, _ *kgo.Client, assigned map[string][]int32) {
	maxInflight := DefaultSubscribeMaxInflight

	// A per-subscribe option may override the inflight threshold.
	if s.opts.Context != nil {
		if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); n > 0 && ok {
			maxInflight = n
		}
	}

	s.Lock()
	for topic, partitions := range assigned {
		if s.consumers[topic] == nil {
			s.consumers[topic] = make(map[int32]worker)
		}
		for _, partition := range partitions {
			w := worker{
				done:         make(chan struct{}),
				recs:         make(chan []*kgo.Record),
				cherr:        make(chan error),
				kopts:        s.kopts,
				opts:         s.opts,
				ctx:          ctx,
				tpmap:        map[string][]int32{topic: []int32{partition}},
				reader:       s.reader,
				handler:      s.handler,
				batchHandler: s.batchhandler,
				maxInflight:  maxInflight,
			}
			s.consumers[topic][partition] = w
			go w.handle()
		}
	}
	s.Unlock()
}
|  | ||||
| func (s *subscriber) revoked(_ context.Context, _ *kgo.Client, revoked map[string][]int32) { | ||||
| 	s.Lock() | ||||
| 	for topic, partitions := range revoked { | ||||
| 		ptopics := s.consumers[topic] | ||||
| 		for _, partition := range partitions { | ||||
| 			w := ptopics[partition] | ||||
| 			delete(ptopics, partition) | ||||
| 			if len(ptopics) == 0 { | ||||
| 				delete(s.consumers, topic) | ||||
| 			} | ||||
| 			close(w.done) | ||||
| 		} | ||||
| 	} | ||||
| 	s.Unlock() | ||||
| } | ||||
|  | ||||
// handle (legacy) is the worker loop: it decodes each delivered record batch
// into pooled publications, invokes the subscriber handler, and marks acked
// records for offset commit. Unrecoverable conditions are reported on
// w.cherr and terminate the worker.
func (w *worker) handle() {
	var err error

	// The subscribe-level error handler overrides the broker-level one.
	eh := w.kopts.ErrorHandler
	if w.opts.ErrorHandler != nil {
		eh = w.opts.ErrorHandler
	}

	paused := false
	for {
		select {
		case <-w.ctx.Done():
			w.cherr <- w.ctx.Err()
			return
		case <-w.done:
			return
		case recs := <-w.recs:
			// Backpressure: pause fetching for this partition while a large
			// batch is being worked off; resumed after the loop below.
			if len(recs) >= w.maxInflight {
				paused = true
				w.reader.PauseFetchPartitions(w.tpmap)
			}
			for _, record := range recs {
				// Reset a pooled publication before reuse.
				p := pPool.Get().(*publication)
				p.msg.Header = nil
				p.msg.Body = nil
				p.topic = record.Topic
				p.err = nil
				p.ack = false
				if w.opts.BodyOnly {
					p.msg.Body = record.Value
				} else if w.kopts.Codec.String() == "noop" {
					// noop codec: pass the raw value through and copy kafka
					// headers into the message header.
					p.msg.Body = record.Value
					p.msg.Header = metadata.New(len(record.Headers))
					for _, h := range record.Headers {
						p.msg.Header.Set(h.Key, string(h.Value))
					}
				} else {
					if err := w.kopts.Codec.Unmarshal(record.Value, p.msg); err != nil {
						// Decode failure: give the raw payload to the error
						// handler if one is set; otherwise log and abort.
						p.err = err
						p.msg.Body = record.Value
						if eh != nil {
							_ = eh(p)
							if p.ack {
								w.reader.MarkCommitRecords(record)
							} else {
								w.cherr <- ErrLostMessage
								pPool.Put(p)
								return
							}
							pPool.Put(p)
							continue
						} else {
							if w.kopts.Logger.V(logger.ErrorLevel) {
								w.kopts.Logger.Errorf(w.kopts.Context, "[kgo]: failed to unmarshal: %v", err)
							}
						}
						pPool.Put(p)
						w.cherr <- err
						return
					}
				}
				err = w.handler(p)
				if err == nil && w.opts.AutoAck {
					p.ack = true
				} else if err != nil {
					p.err = err
					if eh != nil {
						_ = eh(p)
					} else {
						if w.kopts.Logger.V(logger.ErrorLevel) {
							w.kopts.Logger.Errorf(w.kopts.Context, "[kgo]: subscriber error: %v", err)
						}
					}
				}
				// An un-acked message is fatal for the worker: report it and
				// stop rather than silently losing the record.
				if p.ack {
					pPool.Put(p)
					w.reader.MarkCommitRecords(record)
				} else {
					pPool.Put(p)
					w.cherr <- ErrLostMessage
					return
				}
			}
			if paused {
				paused = false
				w.reader.ResumeFetchPartitions(w.tpmap)
			}
		}
	}
}
		Reference in New Issue
	
	Block a user