export broker state

commit cfecb4afd0
parent ae4ae64694
Date: 2025-02-25 17:30:48 +03:00
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>

3 changed files with 90 additions and 19 deletions

broker.go (new file, 56 additions)

@@ -0,0 +1,56 @@
package kgo

import (
	"net"
	"sync/atomic"
	"time"

	"github.com/twmb/franz-go/pkg/kgo"
)

type hookEvent struct {
	connected *atomic.Uint32
}

var (
	_ kgo.HookBrokerConnect           = &hookEvent{}
	_ kgo.HookBrokerDisconnect        = &hookEvent{}
	_ kgo.HookBrokerRead              = &hookEvent{}
	_ kgo.HookBrokerWrite             = &hookEvent{}
	_ kgo.HookGroupManageError        = &hookEvent{}
	_ kgo.HookProduceRecordUnbuffered = &hookEvent{}
)

func (m *hookEvent) OnGroupManageError(err error) {
	if err != nil {
		m.connected.Store(0)
	}
}

func (m *hookEvent) OnBrokerConnect(_ kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) {
	if err != nil {
		m.connected.Store(0)
	}
}

func (m *hookEvent) OnBrokerDisconnect(_ kgo.BrokerMetadata, _ net.Conn) {
	m.connected.Store(0)
}

func (m *hookEvent) OnBrokerWrite(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
	if err != nil {
		m.connected.Store(0)
	}
}

func (m *hookEvent) OnBrokerRead(_ kgo.BrokerMetadata, _ int16, _ int, _ time.Duration, _ time.Duration, err error) {
	if err != nil {
		m.connected.Store(0)
	}
}

func (m *hookEvent) OnProduceRecordUnbuffered(_ *kgo.Record, err error) {
	if err != nil {
		m.connected.Store(0)
	}
}
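The hook flips a shared atomic flag whenever franz-go reports a connect, disconnect, read, write, group-manage, or unbuffered-produce error. A minimal sketch of how it could be wired and read, assuming it sits in the same package as hookEvent (the type is unexported); the helper name and seed address are placeholders, not part of the commit:

// Illustrative sketch, not part of the commit: register hookEvent on a bare
// franz-go client and read the shared flag afterwards.
func newWatchedClient() (*kgo.Client, *atomic.Uint32, error) {
	connected := &atomic.Uint32{}
	connected.Store(1) // assume connected until a hook observes an error

	cl, err := kgo.NewClient(
		kgo.SeedBrokers("127.0.0.1:9092"), // placeholder seed address
		kgo.WithHooks(&hookEvent{connected: connected}),
	)
	if err != nil {
		return nil, nil, err
	}

	// Later, connected.Load() == 0 means some hook saw a broker error.
	return cl, connected, nil
}

In the commit itself this wiring happens in Broker.connect via kgo.WithHooks(&hookEvent{connected: k.connected}), as the kgo.go diff below shows.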

kgo.go (3 additions)

@@ -113,6 +113,7 @@ func (k *Broker) connect(ctx context.Context, opts ...kgo.Opt) (*kgo.Client, *ho
 	opts = append(opts,
 		kgo.WithHooks(&hookMeter{meter: k.opts.Meter}),
 		kgo.WithHooks(htracer),
+		kgo.WithHooks(&hookEvent{connected: k.connected}),
 	)

 	select {
@@ -390,7 +391,9 @@ func (k *Broker) Subscribe(ctx context.Context, topic string, handler broker.Han
 		kgo.AutoCommitInterval(commitInterval),
 		kgo.OnPartitionsAssigned(sub.assigned),
 		kgo.OnPartitionsRevoked(sub.revoked),
+		kgo.StopProducerOnDataLossDetected(),
 		kgo.OnPartitionsLost(sub.lost),
+		kgo.AutoCommitCallback(sub.autocommit),
 		kgo.AutoCommitMarks(),
 	)
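The two new options work together: kgo.StopProducerOnDataLossDetected() stops the client from producing once data loss is detected, and kgo.AutoCommitCallback(sub.autocommit) lets a failed offset autocommit flip the same connected flag (the callback body appears in the next file). A sketch of the callback shape on a plain franz-go client; the helper name, seed address, group, and topic are placeholders, and the snippet assumes the kmsg import (github.com/twmb/franz-go/pkg/kmsg):

// Sketch with assumed names: franz-go calls the AutoCommitCallback after each
// autocommit attempt, so a commit error can be used to mark the broker down.
func newWatchedConsumer(connected *atomic.Uint32) (*kgo.Client, error) {
	return kgo.NewClient(
		kgo.SeedBrokers("127.0.0.1:9092"),  // placeholder
		kgo.ConsumerGroup("example-group"), // placeholder
		kgo.ConsumeTopics("example-topic"), // placeholder
		kgo.AutoCommitMarks(),
		kgo.StopProducerOnDataLossDetected(),
		kgo.AutoCommitCallback(func(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, _ *kmsg.OffsetCommitResponse, err error) {
			if err != nil {
				connected.Store(0)
			}
		}),
	)
}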


@@ -5,10 +5,12 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/twmb/franz-go/pkg/kadm" "github.com/twmb/franz-go/pkg/kadm"
"github.com/twmb/franz-go/pkg/kgo" "github.com/twmb/franz-go/pkg/kgo"
"github.com/twmb/franz-go/pkg/kmsg"
"go.unistack.org/micro/v3/broker" "go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger" "go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata" "go.unistack.org/micro/v3/metadata"
@@ -22,20 +24,17 @@ type tp struct {
 }

 type consumer struct {
-	topic     string
-	c         *kgo.Client
-	htracer   *hookTracer
-	handler   broker.Handler
-	quit      chan struct{}
-	done      chan struct{}
-	recs      chan kgo.FetchTopicPartition
-	kopts     broker.Options
-	opts      broker.SubscribeOptions
-	partition int32
+	topic     string
+	c         *kgo.Client
+	htracer   *hookTracer
+	quit      chan struct{}
+	done      chan struct{}
+	recs      chan kgo.FetchTopicPartition
+	kopts     broker.Options
+	partition int32
+	opts      broker.SubscribeOptions
+	handler   broker.Handler
+	connected *atomic.Uint32
 }
type Subscriber struct { type Subscriber struct {
@@ -49,6 +48,7 @@ type Subscriber struct {
 	kopts     broker.Options
 	opts      broker.SubscribeOptions
+	connected *atomic.Uint32
 	sync.RWMutex
 	closed bool
 }
@@ -144,8 +144,8 @@ func (s *Subscriber) poll(ctx context.Context) {
 		})
 		fetches.EachPartition(func(p kgo.FetchTopicPartition) {
-			nTp := tp{p.Topic, p.Partition}
-			s.consumers[nTp].recs <- p
+			tps := tp{p.Topic, p.Partition}
+			s.consumers[tps].recs <- p
 		})
 		s.c.AllowRebalance()
 	}
@@ -158,9 +158,9 @@ func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32)
 	for topic, partitions := range lost {
 		for _, partition := range partitions {
-			nTp := tp{topic, partition}
-			pc := s.consumers[nTp]
-			delete(s.consumers, nTp)
+			tps := tp{topic, partition}
+			pc := s.consumers[tps]
+			delete(s.consumers, tps)
 			close(pc.quit)
 			if s.kopts.Logger.V(logger.DebugLevel) {
 				s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] waiting for work to finish topic %s partition %d", topic, partition))
@@ -171,11 +171,18 @@ func (s *Subscriber) killConsumers(ctx context.Context, lost map[string][]int32)
 	}
 }

+func (s *Subscriber) autocommit(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, _ *kmsg.OffsetCommitResponse, err error) {
+	if err != nil {
+		s.connected.Store(0)
+	}
+}
+
 func (s *Subscriber) lost(ctx context.Context, _ *kgo.Client, lost map[string][]int32) {
-	if s.kopts.Logger.V(logger.DebugLevel) {
-		s.kopts.Logger.Debug(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
+	if s.kopts.Logger.V(logger.ErrorLevel) {
+		s.kopts.Logger.Error(ctx, fmt.Sprintf("[kgo] lost %#+v", lost))
 	}
 	s.killConsumers(ctx, lost)
+	// s.connected.Store(0)
 }

 func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[string][]int32) {
@@ -185,6 +192,7 @@ func (s *Subscriber) revoked(ctx context.Context, c *kgo.Client, revoked map[str
 	s.killConsumers(ctx, revoked)
 	if err := c.CommitMarkedOffsets(ctx); err != nil {
 		s.kopts.Logger.Error(ctx, "[kgo] revoked CommitMarkedOffsets error", err)
+		// s.connected.Store(0)
 	}
 }
@@ -202,6 +210,7 @@ func (s *Subscriber) assigned(_ context.Context, c *kgo.Client, assigned map[str
 				handler:   s.handler,
 				kopts:     s.kopts,
 				opts:      s.opts,
+				connected: s.connected,
 			}
 			s.Lock()
 			s.consumers[tp{topic, partition}] = pc
@@ -263,6 +272,7 @@ func (pc *consumer) consume() {
 				pc.c.MarkCommitRecords(record)
 			} else {
 				eventPool.Put(p)
+				pc.connected.Store(0)
 				pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?")
 				return
 			}
@@ -279,6 +289,7 @@ func (pc *consumer) consume() {
 			pc.kopts.Meter.Summary(semconv.SubscribeMessageLatencyMicroseconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
 			pc.kopts.Meter.Histogram(semconv.SubscribeMessageDurationSeconds, "endpoint", record.Topic, "topic", record.Topic).Update(te.Seconds())
 			eventPool.Put(p)
+			pc.connected.Store(0)
 			pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] Unmarshal err not handled wtf?")
 			sp.Finish()
 			return
@@ -316,6 +327,7 @@ func (pc *consumer) consume() {
 				pc.c.MarkCommitRecords(record)
 			} else {
 				eventPool.Put(p)
+				pc.connected.Store(0)
 				pc.kopts.Logger.Fatal(pc.kopts.Context, "[kgo] ErrLostMessage wtf?")
 				sp.SetStatus(tracer.SpanStatusError, "ErrLostMessage")
 				sp.Finish()
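Taken together, the hooks, the autocommit callback, and the per-consumer stores above keep one shared *atomic.Uint32 up to date. How the package ultimately exports that state is not shown in this diff; a hypothetical accessor on Broker (which already holds k.connected, per the kgo.go hunk) could look like:

// Hypothetical accessor, not part of this diff: expose the tracked state.
func (k *Broker) Connected() bool {
	return k.connected.Load() == 1
}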