Compare commits

...

18 Commits

Author SHA1 Message Date
894d6f4f20 tracing fixes
Some checks failed
build / lint (push) Successful in 27s
build / test (push) Failing after 29s
codeql / analyze (go) (push) Failing after 50s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-07-22 01:11:33 +03:00
d404fa31ab export Subscriber
Some checks failed
build / test (push) Failing after 1m38s
codeql / analyze (go) (push) Failing after 1m59s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-22 18:28:51 +03:00
88777a29ad add helper funcs
Some checks failed
build / test (push) Failing after 1m39s
codeql / analyze (go) (push) Failing after 2m8s
build / lint (push) Successful in 9m13s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-22 17:49:36 +03:00
23c2903c21 fixup tracing
Some checks failed
build / test (push) Failing after 1m36s
codeql / analyze (go) (push) Failing after 1m42s
build / lint (push) Successful in 9m13s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-06 08:20:27 +03:00
8fcc23f639 fixup tracing
Some checks failed
build / test (push) Failing after 1m46s
codeql / analyze (go) (push) Failing after 1m45s
build / lint (push) Successful in 9m12s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-06 07:30:17 +03:00
25dda1f34c fixup tracing
Some checks failed
build / test (push) Failing after 1m31s
codeql / analyze (go) (push) Failing after 1m49s
build / lint (push) Successful in 9m17s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-05 20:20:34 +03:00
fe66086c40 fixup tracing
Some checks failed
build / test (push) Failing after 2m10s
codeql / analyze (go) (push) Failing after 2m7s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-05 16:20:05 +03:00
7329bc23bc export lag for all partition, not only owned
Some checks failed
build / test (push) Failing after 1m14s
build / lint (push) Successful in 9m28s
codeql / analyze (go) (push) Failing after 14m55s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-02 23:01:04 +03:00
c240631cdb fixup panic
Some checks failed
build / test (push) Failing after 1m32s
codeql / analyze (go) (push) Failing after 2m37s
build / lint (push) Successful in 9m31s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-05-02 10:32:33 +03:00
Кирилл Горбунов
6a68533824 #133 fix race. (#134)
Some checks failed
build / test (push) Has been cancelled
build / lint (push) Has been cancelled
codeql / analyze (go) (push) Has been cancelled
Co-authored-by: Gorbunov Kirill Andreevich <kgorbunov@mtsbank.ru>
Reviewed-on: #134
Co-authored-by: Кирилл Горбунов <kirya_gorbunov_2015@mail.ru>
Co-committed-by: Кирилл Горбунов <kirya_gorbunov_2015@mail.ru>
2024-04-19 19:26:06 +03:00
058b6354c0 fixup tracing
Some checks failed
build / test (push) Failing after 1m27s
codeql / analyze (go) (push) Failing after 1m43s
build / lint (push) Successful in 9m20s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-14 23:17:38 +03:00
1f4cf11afe fix group lag exporter
Some checks failed
build / test (push) Failing after 1m36s
codeql / analyze (go) (push) Failing after 1m37s
build / lint (push) Successful in 9m14s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-13 02:40:45 +03:00
39177da1d0 massive meter usage
Some checks failed
build / test (push) Failing after 1m33s
codeql / analyze (go) (push) Failing after 1m39s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-13 02:24:16 +03:00
d559db4050 fixup logger caller skip count
Some checks failed
build / test (push) Failing after 1m17s
codeql / analyze (go) (push) Failing after 8m52s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-13 00:53:09 +03:00
aa946c469a fixup logger caller skip count
Some checks failed
codeql / analyze (go) (push) Failing after 1m51s
build / test (push) Failing after 1m55s
build / lint (push) Successful in 9m14s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-04-13 00:40:12 +03:00
9c4d88bb69 fixup for attrs
Some checks failed
build / test (push) Failing after 1m31s
codeql / analyze (go) (push) Failing after 1m53s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-15 09:38:31 +03:00
56288f46b1 cleanup tracing
Some checks failed
build / test (push) Failing after 1m30s
codeql / analyze (go) (push) Failing after 1m57s
build / lint (push) Successful in 9m19s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-14 23:25:19 +03:00
81dcef8b28 fixup tracer span labels
Some checks failed
build / test (push) Failing after 1m29s
codeql / analyze (go) (push) Failing after 1m53s
build / lint (push) Successful in 9m15s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2024-03-14 16:47:44 +03:00
8 changed files with 281 additions and 173 deletions


@@ -1,12 +1,14 @@
package kgo
import (
"context"
"sync"
"go.unistack.org/micro/v3/broker"
)
type event struct {
ctx context.Context
topic string
err error
sync.RWMutex
@@ -14,6 +16,10 @@ type event struct {
ack bool
}
func (p *event) Context() context.Context {
return p.ctx
}
func (p *event) Topic() string {
return p.topic
}

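The Context() accessor added above is what lets a subscriber handler reach the per-record context, and with it the tracing span the consumer attaches in the subscriber changes further down. A minimal handler sketch, assuming the standard micro/v3 broker.Event interface (Ack, Context) and using only tracer calls that appear elsewhere in this diff:

```go
package example

import (
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/tracer"
)

// handler reads the span that the consumer attached to the event context
// (see p.ctx = ctx in the consume() changes below) and records an event on it.
func handler(evt broker.Event) error {
	if sp, ok := tracer.SpanFromContext(evt.Context()); ok {
		sp.AddEvent("handler invoked") // AddEvent is used the same way in subscriber.go
	}
	return evt.Ack() // Ack is assumed from the standard broker.Event interface
}
```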
go.mod (19 changes)

@@ -1,21 +1,24 @@
module go.unistack.org/micro-broker-kgo/v3
go 1.19
go 1.21
toolchain go1.22.1
require (
github.com/google/uuid v1.6.0
github.com/twmb/franz-go v1.16.1
github.com/twmb/franz-go/pkg/kadm v1.11.0
github.com/twmb/franz-go/pkg/kmsg v1.7.0
go.opentelemetry.io/otel v1.24.0
go.unistack.org/micro/v3 v3.10.46
go.opentelemetry.io/otel v1.25.0
go.unistack.org/micro/v3 v3.10.59
)
require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
golang.org/x/sys v0.18.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 // indirect
google.golang.org/grpc v1.62.1 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/sys v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240412170617-26222e5d3d56 // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.33.0 // indirect
)

go.sum (42 changes)

@@ -1,32 +1,40 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/twmb/franz-go v1.16.1 h1:rpWc7fB9jd7TgmCyfxzenBI+QbgS8ZfJOUQE+tzPtbE=
github.com/twmb/franz-go v1.16.1/go.mod h1:/pER254UPPGp/4WfGqRi+SIRGE50RSQzVubQp6+N4FA=
github.com/twmb/franz-go/pkg/kadm v1.11.0 h1:FfeWJ0qadntFpAcQt8JzNXW4dijjytZNLrzJuzzzuxA=
github.com/twmb/franz-go/pkg/kadm v1.11.0/go.mod h1:qrhkdH+SWS3ivmbqOgHbpgVHamhaKcjH0UM+uOp0M1A=
github.com/twmb/franz-go/pkg/kmsg v1.7.0 h1:a457IbvezYfA5UkiBvyV3zj0Is3y1i8EJgqjJYoij2E=
github.com/twmb/franz-go/pkg/kmsg v1.7.0/go.mod h1:se9Mjdt0Nwzc9lnjJ0HyDtLyBnaBDAd7pCje47OhSyw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.unistack.org/micro/v3 v3.10.46 h1:rnuEqiFkerwJmKzHmHBXRgxFemZustxWz2hRNLQQ8cU=
go.unistack.org/micro/v3 v3.10.46/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
go.unistack.org/micro/v3 v3.10.59 h1:eneYXJLgyu5MZpSvyI0K17CeXvgOoUCN5dWZaPV5lI4=
go.unistack.org/micro/v3 v3.10.59/go.mod h1:erMgt3Bl7vQQ0e9UpQyR5NlLiZ9pKeEJ9+1tfYFaqUg=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 h1:IR+hp6ypxjH24bkMfEJ0yHR21+gwPWdV+/IBrPQyn3k=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240412170617-26222e5d3d56 h1:zviK8GX4VlMstrK3JkexM5UHjH1VOkRebH9y3jhSBGk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240412170617-26222e5d3d56/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

kgo.go (94 changes)

@@ -14,7 +14,9 @@ import (
"github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/tracer"
mrand "go.unistack.org/micro/v3/util/rand"
)
@@ -60,7 +62,7 @@ type Broker struct {
connected bool
sync.RWMutex
opts broker.Options
subs []*subscriber
subs []*Subscriber
}
func (k *Broker) Address() string {
@@ -71,12 +73,15 @@ func (k *Broker) Name() string {
return k.opts.Name
}
func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, error) {
func (k *Broker) Client() *kgo.Client {
return k.c
}
func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *hookTracer, error) {
var c *kgo.Client
var err error
var span tracer.Span
ctx, span = k.opts.Tracer.Start(ctx, "Connect")
defer span.Finish()
sp, _ := tracer.SpanFromContext(ctx)
clientID := "kgo"
group := ""
@@ -89,28 +94,33 @@ func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, err
}
}
htracer := &hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer}
opts = append(opts,
kgo.WithHooks(&hookMeter{meter: k.opts.Meter}),
kgo.WithHooks(&hookTracer{group: group, clientID: clientID, tracer: k.opts.Tracer}),
kgo.WithHooks(htracer),
)
select {
case <-ctx.Done():
if ctx.Err() != nil {
span.SetStatus(tracer.SpanStatusError, ctx.Err().Error())
if sp != nil {
sp.SetStatus(tracer.SpanStatusError, ctx.Err().Error())
}
}
return nil, ctx.Err()
return nil, nil, ctx.Err()
default:
c, err = kgo.NewClient(opts...)
if err == nil {
err = c.Ping(ctx) // check connectivity to cluster
}
if err != nil {
span.SetStatus(tracer.SpanStatusError, err.Error())
return nil, err
if sp != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
}
return nil, nil, err
}
}
return c, nil
return c, htracer, nil
}
func (k *Broker) Connect(ctx context.Context) error {
@@ -126,7 +136,7 @@ func (k *Broker) Connect(ctx context.Context) error {
nctx = ctx
}
c, err := k.connect(nctx, k.kopts...)
c, _, err := k.connect(nctx, k.kopts...)
if err != nil {
return err
}
@@ -229,13 +239,9 @@ func (k *Broker) Publish(ctx context.Context, topic string, msg *broker.Message,
}
func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
var span tracer.Span
ctx, span = k.opts.Tracer.Start(ctx, "Publish")
defer span.Finish()
k.Lock()
if !k.connected {
c, err := k.connect(ctx, k.kopts...)
c, _, err := k.connect(ctx, k.kopts...)
if err != nil {
k.Unlock()
return err
@@ -265,7 +271,7 @@ func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...br
rec := &kgo.Record{Context: ctx, Key: key}
rec.Topic, _ = msg.Header.Get(metadata.HeaderTopic)
msg.Header.Del(metadata.HeaderTopic)
// k.opts.Meter.Counter(broker.PublishMessageInflight, "endpoint", rec.Topic).Inc()
k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Inc()
if options.BodyOnly || k.opts.Codec.String() == "noop" {
rec.Value = msg.Body
for k, v := range msg.Header {
@@ -281,35 +287,35 @@ func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...br
}
if promise != nil {
// ts := time.Now()
ts := time.Now()
for _, rec := range records {
k.c.Produce(ctx, rec, func(r *kgo.Record, err error) {
// te := time.Since(ts)
// k.opts.Meter.Counter(broker.PublishMessageInflight, "endpoint", rec.Topic).Dec()
// k.opts.Meter.Summary(broker.PublishMessageLatencyMicroseconds, "endpoint", r.Topic).Update(te.Seconds())
// k.opts.Meter.Histogram(broker.PublishMessageDurationSeconds, "endpoint", r.Topic).Update(te.Seconds())
te := time.Since(ts)
k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", rec.Topic, "topic", rec.Topic).Dec()
k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", rec.Topic, "topic", rec.Topic).Update(te.Seconds())
if err != nil {
// k.opts.Meter.Counter(broker.PublishMessageTotal, "endpoint", r.Topic, "status", "failure").Inc()
k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "failure").Inc()
} else {
// k.opts.Meter.Counter(broker.PublishMessageTotal, "endpoint", r.Topic, "status", "success").Inc()
k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", rec.Topic, "topic", rec.Topic, "status", "success").Inc()
}
promise(r, err)
})
}
return nil
}
// ts := time.Now()
ts := time.Now()
results := k.c.ProduceSync(ctx, records...)
// te := time.Since(ts)
te := time.Since(ts)
for _, result := range results {
// k.opts.Meter.Summary(broker.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic).Update(te.Seconds())
// k.opts.Meter.Histogram(broker.PublishMessageDurationSeconds, "endpoint", result.Record.Topic).Update(te.Seconds())
/// k.opts.Meter.Counter(broker.PublishMessageInflight, "endpoint", result.Record.Topic).Dec()
k.opts.Meter.Summary(semconv.PublishMessageLatencyMicroseconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
k.opts.Meter.Histogram(semconv.PublishMessageDurationSeconds, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Update(te.Seconds())
k.opts.Meter.Counter(semconv.PublishMessageInflight, "endpoint", result.Record.Topic, "topic", result.Record.Topic).Dec()
if result.Err != nil {
// k.opts.Meter.Counter(broker.PublishMessageTotal, "endpoint", result.Record.Topic, "status", "failure").Inc()
k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "failure").Inc()
errs = append(errs, result.Err.Error())
} else {
// k.opts.Meter.Counter(broker.PublishMessageTotal, "endpoint", result.Record.Topic, "status", "success").Inc()
k.opts.Meter.Counter(semconv.PublishMessageTotal, "endpoint", result.Record.Topic, "topic", result.Record.Topic, "status", "success").Inc()
}
}
@@ -320,6 +326,22 @@ func (k *Broker) publish(ctx context.Context, msgs []*broker.Message, opts ...br
return nil
}
func (k *Broker) TopicExists(ctx context.Context, topic string) error {
mdreq := kmsg.NewMetadataRequest()
mdreq.Topics = []kmsg.MetadataRequestTopic{
{Topic: &topic},
}
mdrsp, err := mdreq.RequestWith(ctx, k.c)
if err != nil {
return err
} else if mdrsp.Topics[0].ErrorCode != 0 {
return fmt.Errorf("topic %s does not exist or permission error", topic)
}
return nil
}
func (k *Broker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}
@@ -342,7 +364,7 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han
}
}
sub := &subscriber{
sub := &Subscriber{
topic: topic,
opts: options,
handler: handler,
@@ -369,7 +391,7 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han
}
}
c, err := k.connect(ctx, kopts...)
c, htracer, err := k.connect(ctx, kopts...)
if err != nil {
return nil, err
}
@@ -387,6 +409,8 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han
}
sub.c = c
sub.htracer = htracer
go sub.poll(ctx)
k.Lock()
@@ -412,7 +436,7 @@ func NewBroker(opts ...broker.Option) *Broker {
kgo.DialTimeout(3 * time.Second),
kgo.DisableIdempotentWrite(),
kgo.ProducerBatchCompression(kgo.NoCompression()),
kgo.WithLogger(&mlogger{l: options.Logger, ctx: options.Context}),
kgo.WithLogger(&mlogger{l: options.Logger.Clone(logger.WithCallerSkipCount(options.Logger.Options().CallerSkipCount + 2)), ctx: options.Context}),
kgo.SeedBrokers(kaddrs...),
kgo.RetryBackoffFn(DefaultRetryBackoffFn),
kgo.BlockRebalanceOnPoll(),

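Taken together, the kgo.go changes export the raw client (Client()), add a connectivity probe (TopicExists) and switch the internal subscriber list to the now-exported Subscriber type. A hedged usage sketch: NewBroker, Connect, TopicExists and Client appear in the diff above, while broker.Addrs and Disconnect are assumed from the standard micro/v3 broker API:

```go
package main

import (
	"context"
	"log"

	kgobroker "go.unistack.org/micro-broker-kgo/v3"
	"go.unistack.org/micro/v3/broker"
)

func main() {
	ctx := context.Background()

	b := kgobroker.NewBroker(broker.Addrs("127.0.0.1:9092")) // seed brokers (assumed option)
	if err := b.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	defer b.Disconnect(ctx) // assumed standard broker method

	// Fail fast if the topic is missing or the client lacks permissions.
	if err := b.TopicExists(ctx, "orders"); err != nil {
		log.Fatal(err)
	}

	// Advanced callers can now reach the underlying franz-go *kgo.Client.
	_ = b.Client()
}
```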

@@ -14,14 +14,21 @@ type hookMeter struct {
}
var (
_ kgo.HookBrokerConnect = &hookMeter{}
_ kgo.HookBrokerDisconnect = &hookMeter{}
_ kgo.HookBrokerRead = &hookMeter{}
_ kgo.HookBrokerThrottle = &hookMeter{}
_ kgo.HookBrokerWrite = &hookMeter{}
_ kgo.HookFetchBatchRead = &hookMeter{}
_ kgo.HookBrokerConnect = &hookMeter{}
_ kgo.HookBrokerDisconnect = &hookMeter{}
// HookBrokerE2E
_ kgo.HookBrokerRead = &hookMeter{}
_ kgo.HookBrokerThrottle = &hookMeter{}
_ kgo.HookBrokerWrite = &hookMeter{}
_ kgo.HookFetchBatchRead = &hookMeter{}
// HookFetchRecordBuffered
// HookFetchRecordUnbuffered
_ kgo.HookGroupManageError = &hookMeter{}
// HookNewClient
_ kgo.HookProduceBatchWritten = &hookMeter{}
_ kgo.HookGroupManageError = &hookMeter{}
// HookProduceRecordBuffered
// HookProduceRecordPartitioned
// HookProduceRecordUnbuffered
)
const (


@@ -9,8 +9,17 @@ import (
"go.unistack.org/micro/v3/client"
)
// DefaultCommitInterval specifies how often to send offset commits to Kafka
var DefaultCommitInterval = 5 * time.Second
var (
// DefaultCommitInterval specifies how often to send offset commits to Kafka
DefaultCommitInterval = 5 * time.Second
// DefaultStatsInterval specifies how often to check consumer lag
DefaultStatsInterval = 5 * time.Second
// DefaultSubscribeMaxInflight specifies how many messages to keep in flight
DefaultSubscribeMaxInflight = 100
)
type subscribeContextKey struct{}
@@ -82,8 +91,6 @@ func CommitInterval(td time.Duration) broker.Option {
return broker.SetOption(commitIntervalKey{}, td)
}
var DefaultSubscribeMaxInflight = 10
type subscribeMaxInflightKey struct{}
// SubscribeMaxInFlight sets the maximum number of queued messages

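The tunables are now grouped in a single var block. A small configuration sketch; the package-level defaults and the CommitInterval option are taken from the diff above, and the per-subscription SubscribeMaxInFlight option is left out because its exact signature is not shown here:

```go
package main

import (
	"time"

	kgobroker "go.unistack.org/micro-broker-kgo/v3"
)

func main() {
	// Package-level defaults from the options file above.
	kgobroker.DefaultCommitInterval = 10 * time.Second // offset commit cadence
	kgobroker.DefaultStatsInterval = 30 * time.Second  // consumer lag sampling cadence
	kgobroker.DefaultSubscribeMaxInflight = 200        // max in-flight messages per subscriber

	// CommitInterval can also be set per broker instance.
	_ = kgobroker.NewBroker(kgobroker.CommitInterval(10 * time.Second))
}
```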

@@ -2,12 +2,17 @@ package kgo
import (
"context"
"strconv"
"sync"
"time"
"github.com/twmb/franz-go/pkg/kadm"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/tracer"
)
type tp struct {
@@ -19,6 +24,7 @@ type consumer struct {
c *kgo.Client
topic string
partition int32
htracer *hookTracer
opts broker.SubscribeOptions
kopts broker.Options
handler broker.Handler
@@ -27,9 +33,10 @@ type consumer struct {
recs chan kgo.FetchTopicPartition
}
type subscriber struct {
type Subscriber struct {
c *kgo.Client
topic string
htracer *hookTracer
opts broker.SubscribeOptions
kopts broker.Options
handler broker.Handler
@@ -39,15 +46,19 @@ type subscriber struct {
sync.RWMutex
}
func (s *subscriber) Options() broker.SubscribeOptions {
func (s *Subscriber) Client() *kgo.Client {
return s.c
}
func (s *Subscriber) Options() broker.SubscribeOptions {
return s.opts
}
func (s *subscriber) Topic() string {
func (s *Subscriber) Topic() string {
return s.topic
}
func (s *subscriber) Unsubscribe(ctx context.Context) error {
func (s *Subscriber) Unsubscribe(ctx context.Context) error {
if s.closed {
return nil
}
@@ -69,13 +80,48 @@ func (s *subscriber) Unsubscribe(ctx context.Context) error {
return nil
}
func (s *subscriber) poll(ctx context.Context) {
func (s *Subscriber) poll(ctx context.Context) {
maxInflight := DefaultSubscribeMaxInflight
if s.opts.Context != nil {
if n, ok := s.opts.Context.Value(subscribeMaxInflightKey{}).(int); n > 0 && ok {
maxInflight = n
}
}
go func() {
ac := kadm.NewClient(s.c)
ticker := time.NewTicker(DefaultStatsInterval)
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
dgls, err := ac.Lag(ctx, s.opts.Group)
if err != nil || !dgls.Ok() {
continue
}
dgl, ok := dgls[s.opts.Group]
if !ok {
continue
}
lmap, ok := dgl.Lag[s.topic]
if !ok {
continue
}
s.Lock()
for p, l := range lmap {
s.kopts.Meter.Counter(semconv.BrokerGroupLag, "topic", s.topic, "group", s.opts.Group, "partition", strconv.Itoa(int(p)), "lag", strconv.Itoa(int(l.Lag)))
}
s.Unlock()
}
}
}()
for {
select {
case <-ctx.Done():
@@ -102,7 +148,7 @@ func (s *subscriber) poll(ctx context.Context) {
}
}
func (s *subscriber) killConsumers(ctx context.Context, lost map[string][]int32) {
func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32) {
var wg sync.WaitGroup
defer wg.Wait()
@@ -119,12 +165,12 @@ func (s *subscriber) killConsumers(ctx context.Context, lost map[string][]int32)
}
}
func (s *subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
s.kopts.Logger.Debugf(ctx, "[kgo] lost %#+v", lost)
s.killConsumers(ctx, lost)
}
func (s *subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) {
func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) {
s.kopts.Logger.Debugf(ctx, "[kgo] revoked %#+v", revoked)
s.killConsumers(ctx, revoked)
if err := c.CommitMarkedOffsets(ctx); err != nil {
@@ -132,22 +178,24 @@ func (s *subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[str
}
}
func (s *subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) {
func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[string][]int32) {
for topic, partitions := range assigned {
for _, partition := range partitions {
pc := &consumer{
c: c,
topic: topic,
partition: partition,
quit: make(chan struct{}),
done: make(chan struct{}),
recs: make(chan kgo.FetchTopicPartition, 100),
handler: s.handler,
kopts: s.kopts,
opts: s.opts,
htracer: s.htracer,
quit: make(chan struct{}),
done: make(chan struct{}),
recs: make(chan kgo.FetchTopicPartition, 100),
handler: s.handler,
kopts: s.kopts,
opts: s.opts,
}
s.Lock()
s.consumers[tp{topic, partition}] = pc
s.Unlock()
go pc.consume()
}
}
@@ -169,8 +217,9 @@ func (pc *consumer) consume() {
return
case p := <-pc.recs:
for _, record := range p.Records {
// ts := time.Now()
// pc.kopts.Meter.Counter(broker.SubscribeMessageInflight, "endpoint", record.Topic).Inc()
ctx, sp := pc.htracer.WithProcessSpan(record)
ts := time.Now()
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Inc()
p := eventPool.Get().(*event)
p.msg.Header = nil
p.msg.Body = nil
@@ -178,6 +227,7 @@ func (pc *consumer) consume() {
p.err = nil
p.ack = false
p.msg.Header = metadata.New(len(record.Headers))
p.ctx = ctx
for _, hdr := range record.Headers {
p.msg.Header.Set(hdr.Key, string(hdr.Value))
}
@@ -186,13 +236,17 @@ func (pc *consumer) consume() {
} else if pc.opts.BodyOnly {
p.msg.Body = record.Value
} else {
if err := pc.kopts.Codec.Unmarshal(record.Value, p.msg); err != nil {
// pc.kopts.Meter.Counter(broker.SubscribeMessageTotal, "endpoint", record.Topic, "status", "failure").Inc()
sp.AddEvent("codec unmarshal start")
err := pc.kopts.Codec.Unmarshal(record.Value, p.msg)
sp.AddEvent("codec unmarshal stop")
if err != nil {
sp.SetStatus(tracer.SpanStatusError, err.Error())
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
p.err = err
p.msg.Body = record.Value
if eh != nil {
_ = eh(p)
// pc.kopts.Meter.Counter(broker.SubscribeMessageInflight, "endpoint", record.Topic).Dec()
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
if p.ack {
pc.c.MarkCommitRecords(record)
} else {
@@ -201,54 +255,63 @@ func (pc *consumer) consume() {
return
}
eventPool.Put(p)
// te := time.Since(ts)
// pc.kopts.Meter.Summary(broker.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic).Update(te.Seconds())
// pc.kopts.Meter.Histogram(broker.SubscribeMessageDurationSeconds, "endpoint", record.Topic).Update(te.Seconds())
te := time.Since(ts)
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
continue
} else {
if pc.kopts.Logger.V(logger.ErrorLevel) {
pc.kopts.Logger.Errorf(pc.kopts.Context, "[kgo]: failed to unmarshal: %v", err)
}
}
// te := time.Since(ts)
// pc.kopts.Meter.Counter(broker.SubscribeMessageInflight, "endpoint", record.Topic).Dec()
// pc.kopts.Meter.Summary(broker.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic).Update(te.Seconds())
// pc.kopts.Meter.Histogram(broker.SubscribeMessageDurationSeconds, "endpoint", record.Topic).Update(te.Seconds())
te := time.Since(ts)
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
eventPool.Put(p)
pc.kopts.Logger.Fatalf(pc.kopts.Context, "[kgo] Unmarshal err not handled wtf?")
sp.Finish()
return
}
}
sp.AddEvent("handler start")
err := pc.handler(p)
sp.AddEvent("handler stop")
if err == nil {
// pc.kopts.Meter.Counter(broker.SubscribeMessageTotal, "endpoint", record.Topic, "status", "success").Inc()
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "success").Inc()
} else {
// pc.kopts.Meter.Counter(broker.SubscribeMessageTotal, "endpoint", record.Topic, "status", "failure").Inc()
sp.SetStatus(tracer.SpanStatusError, err.Error())
pc.kopts.Meter.Counter(semconv.SubscribeMessageTotal, "endpoint", record.Topic, "topic", record.Topic, "status", "failure").Inc()
}
// pc.kopts.Meter.Counter(broker.SubscribeMessageInflight, "endpoint", record.Topic).Dec()
pc.kopts.Meter.Counter(semconv.SubscribeMessageInflight, "endpoint", record.Topic, "topic", record.Topic).Dec()
if err == nil && pc.opts.AutoAck {
p.ack = true
} else if err != nil {
p.err = err
if eh != nil {
sp.AddEvent("error handler start")
_ = eh(p)
sp.AddEvent("error handler stop")
} else {
if pc.kopts.Logger.V(logger.ErrorLevel) {
pc.kopts.Logger.Errorf(pc.kopts.Context, "[kgo]: subscriber error: %v", err)
}
}
}
// te := time.Since(ts)
// pc.kopts.Meter.Summary(broker.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic).Update(te.Seconds())
// pc.kopts.Meter.Histogram(broker.SubscribeMessageDurationSeconds, "endpoint", record.Topic).Update(te.Seconds())
te := time.Since(ts)
pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
if p.ack {
eventPool.Put(p)
pc.c.MarkCommitRecords(record)
} else {
eventPool.Put(p)
pc.kopts.Logger.Fatalf(pc.kopts.Context, "[kgo] ErrLostMessage wtf?")
sp.SetStatus(tracer.SpanStatusError, "ErrLostMessage")
sp.Finish()
return
}
sp.Finish()
}
}
}

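The new goroutine in poll() samples consumer-group lag every DefaultStatsInterval via kadm and reports it through the configured meter, keyed by the subscription group. A subscription sketch; Subscribe, the handler shape and Unsubscribe come from the diffs above, while broker.SubscribeGroup is assumed to be the usual micro/v3 option that fills opts.Group:

```go
package main

import (
	"context"
	"log"

	kgobroker "go.unistack.org/micro-broker-kgo/v3"
	"go.unistack.org/micro/v3/broker"
)

func main() {
	ctx := context.Background()
	b := kgobroker.NewBroker()
	if err := b.Connect(ctx); err != nil {
		log.Fatal(err)
	}

	// Subscribing with a group enables the lag exporter added in poll():
	// every DefaultStatsInterval it reports per-partition lag for this group.
	sub, err := b.Subscribe(ctx, "orders",
		func(evt broker.Event) error { return evt.Ack() },
		broker.SubscribeGroup("orders-consumer"), // assumed micro/v3 option name
	)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe(ctx)
}
```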
tracer.go (126 changes)

@@ -2,14 +2,12 @@ package kgo
import (
"context"
"net"
"time"
"unicode/utf8"
"github.com/twmb/franz-go/pkg/kgo"
"go.unistack.org/micro/v3/tracer"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/tracer"
)
type hookTracer struct {
@@ -19,40 +17,12 @@ type hookTracer struct {
}
var (
_ kgo.HookBrokerConnect = &hookTracer{}
_ kgo.HookBrokerDisconnect = &hookTracer{}
_ kgo.HookBrokerRead = &hookTracer{}
_ kgo.HookBrokerThrottle = &hookTracer{}
_ kgo.HookBrokerWrite = &hookTracer{}
_ kgo.HookFetchBatchRead = &hookTracer{}
_ kgo.HookProduceBatchWritten = &hookTracer{}
_ kgo.HookGroupManageError = &hookTracer{}
_ kgo.HookProduceRecordBuffered = (*hookTracer)(nil)
_ kgo.HookProduceRecordUnbuffered = (*hookTracer)(nil)
_ kgo.HookFetchRecordBuffered = (*hookTracer)(nil)
_ kgo.HookFetchRecordUnbuffered = (*hookTracer)(nil)
)
func (m *hookTracer) OnGroupManageError(err error) {
}
func (m *hookTracer) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
}
func (m *hookTracer) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) {
}
func (m *hookTracer) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error) {
}
func (m *hookTracer) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, readWait, timeToRead time.Duration, err error) {
}
func (m *hookTracer) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) {
}
func (m *hookTracer) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.ProduceBatchMetrics) {
}
func (m *hookTracer) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, kmetrics kgo.FetchBatchMetrics) {
}
// OnProduceRecordBuffered starts a new span for the "publish" operation on a
// buffered record.
//
@@ -61,7 +31,7 @@ func (m *hookTracer) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ i
// hook.
func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
// Set up span options.
attrs := []attribute.KeyValue{
attrs := []interface{}{
semconv.MessagingSystemKey.String("kafka"),
semconv.MessagingDestinationKindTopic,
semconv.MessagingDestinationName(r.Topic),
@@ -71,20 +41,32 @@ func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
if m.clientID != "" {
attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
}
ifattrs := make([]interface{}, 0, len(attrs))
for _, attr := range attrs {
ifattrs = append(ifattrs, attr)
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(ifattrs...),
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindProducer),
}
// Start the "publish" span.
ctx, _ := m.tracer.Start(r.Context, r.Topic+" publish", opts...)
if r.Context == nil {
r.Context = context.Background()
}
md, ok := metadata.FromOutgoingContext(r.Context)
if !ok {
md = metadata.New(len(r.Headers))
}
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
// Inject the span context into the record.
// t.propagators.Inject(ctx, NewRecordCarrier(r))
// Update the record context.
r.Context = ctx
if !ok {
r.Context, _ = m.tracer.Start(metadata.NewOutgoingContext(r.Context, md), r.Topic+" publish", opts...)
} else {
r.Context, _ = m.tracer.Start(r.Context, r.Topic+" publish", opts...)
}
}
// OnProduceRecordUnbuffered continues and ends the "publish" span for an
@@ -93,17 +75,14 @@ func (m *hookTracer) OnProduceRecordBuffered(r *kgo.Record) {
// It sets attributes with values unset when producing and records any error
// that occurred during the publish operation.
func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) {
span, ok := tracer.SpanFromContext(r.Context)
if !ok {
return
}
defer span.Finish()
span, _ := tracer.SpanFromContext(r.Context)
span.AddLabels(
semconv.MessagingKafkaDestinationPartition(int(r.Partition)),
)
if err != nil {
span.SetStatus(tracer.SpanStatusError, err.Error())
}
span.Finish()
}
// OnFetchRecordBuffered starts a new span for the "receive" operation on a
@@ -115,7 +94,7 @@ func (m *hookTracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) {
// processing.
func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
// Set up the span options.
attrs := []attribute.KeyValue{
attrs := []interface{}{
semconv.MessagingSystemKey.String("kafka"),
semconv.MessagingSourceKindTopic,
semconv.MessagingSourceName(r.Topic),
@@ -129,32 +108,39 @@ func (m *hookTracer) OnFetchRecordBuffered(r *kgo.Record) {
if m.group != "" {
attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
}
ifattrs := make([]interface{}, 0, len(attrs))
for _, attr := range attrs {
ifattrs = append(ifattrs, attr)
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(ifattrs...),
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindConsumer),
}
if r.Context == nil {
r.Context = context.Background()
}
md, ok := metadata.FromIncomingContext(r.Context)
if !ok {
md = metadata.New(len(r.Headers))
}
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
// Extract the span context from the record.
// ctx := t.propagators.Extract(r.Context, NewRecordCarrier(r))
// Start the "receive" span.
newCtx, _ := m.tracer.Start(r.Context, r.Topic+" receive", opts...)
if !ok {
r.Context, _ = m.tracer.Start(metadata.NewIncomingContext(r.Context, md), r.Topic+" receive", opts...)
} else {
r.Context, _ = m.tracer.Start(r.Context, r.Topic+" receive", opts...)
}
// Update the record context.
r.Context = newCtx
}
// OnFetchRecordUnbuffered continues and ends the "receive" span for an
// unbuffered record.
func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) {
if span, ok := tracer.SpanFromContext(r.Context); ok {
defer span.Finish()
}
span, _ := tracer.SpanFromContext(r.Context)
span.Finish()
}
// WithProcessSpan starts a new span for the "process" operation on a consumer
@@ -168,7 +154,7 @@ func (m *hookTracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) {
// iteration of your processing for the record.
func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Span) {
// Set up the span options.
attrs := []attribute.KeyValue{
attrs := []interface{}{
semconv.MessagingSystemKey.String("kafka"),
semconv.MessagingSourceKindTopic,
semconv.MessagingSourceName(r.Topic),
@@ -177,10 +163,6 @@ func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Spa
semconv.MessagingKafkaMessageOffset(int(r.Offset)),
}
attrs = maybeKeyAttr(attrs, r)
ifattrs := make([]interface{}, 0, len(attrs))
for _, attr := range attrs {
ifattrs = append(ifattrs, attr)
}
if m.clientID != "" {
attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(m.clientID))
}
@@ -188,18 +170,26 @@ func (m *hookTracer) WithProcessSpan(r *kgo.Record) (context.Context, tracer.Spa
attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(m.group))
}
opts := []tracer.SpanOption{
tracer.WithSpanLabels(ifattrs...),
tracer.WithSpanLabels(attrs...),
tracer.WithSpanKind(tracer.SpanKindConsumer),
}
if r.Context == nil {
r.Context = context.Background()
}
md, ok := metadata.FromIncomingContext(r.Context)
if !ok {
md = metadata.New(len(r.Headers))
}
for _, h := range r.Headers {
md.Set(h.Key, string(h.Value))
}
// Start a new span using the provided context and options.
return m.tracer.Start(r.Context, r.Topic+" process", opts...)
}
func maybeKeyAttr(attrs []attribute.KeyValue, r *kgo.Record) []attribute.KeyValue {
func maybeKeyAttr(attrs []interface{}, r *kgo.Record) []interface{} {
if r.Key == nil {
return attrs
}
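With the OpenTelemetry propagator replaced by micro metadata, a publisher that already carries a span in its context gets a linked "<topic> publish" child span from OnProduceRecordBuffered. A producer-side sketch; Publish and the metadata/tracer calls match the diffs above, and the message layout follows the standard micro/v3 broker.Message:

```go
package example

import (
	"context"

	kgobroker "go.unistack.org/micro-broker-kgo/v3"
	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/metadata"
	"go.unistack.org/micro/v3/tracer"
)

// publishWithSpan starts an application span before Publish so the tracing
// hook can parent the "<topic> publish" span on it.
func publishWithSpan(ctx context.Context, b *kgobroker.Broker, t tracer.Tracer) error {
	ctx, span := t.Start(ctx, "orders.create")
	defer span.Finish()

	msg := &broker.Message{Header: metadata.New(1), Body: []byte(`{"id":"42"}`)}
	msg.Header.Set("content-type", "application/json")

	return b.Publish(ctx, "orders", msg)
}
```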