refactor tests && fix concurrent read from map

Evstigneev Denis
2026-01-14 14:38:12 +03:00
parent c8eeb34efe
commit f8d9e0584f
5 changed files with 109 additions and 142 deletions
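
The test refactor below covers the first half of the commit title; the "concurrent read from map" fix lands in one of the other changed files and is not shown in this excerpt. For reference only, a minimal sketch of the usual remedy for that kind of race in Go, assuming the broker keeps per-topic state in a plain map (the type and field names here are illustrative, not the broker's actual code): guard reads with RLock and writes with Lock, or switch to sync.Map.

// Hypothetical sketch only; the real fix is in a file not shown in this diff.
package main

import "sync"

// topicState is an illustrative stand-in for whatever the broker keeps per topic.
type topicState struct {
	mu   sync.RWMutex
	subs map[string]int // e.g. topic -> subscriber count
}

// inc mutates the map under the exclusive lock.
func (t *topicState) inc(topic string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.subs == nil {
		t.subs = make(map[string]int)
	}
	t.subs[topic]++
}

// count reads under the shared lock, so readers never race with writers.
func (t *topicState) count(topic string) int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.subs[topic]
}

func main() {
	var st topicState
	done := make(chan struct{})
	go func() { // writer goroutine
		for i := 0; i < 1000; i++ {
			st.inc("test")
		}
		close(done)
	}()
	for i := 0; i < 1000; i++ { // concurrent reader, clean under -race
		_ = st.count("test")
	}
	<-done
}

Running the tests with go test -race is what surfaces this kind of read/write race in the first place.
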


@@ -3,16 +3,16 @@ package kgo_test
import (
"context"
"os"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/twmb/franz-go/pkg/kfake"
kg "github.com/twmb/franz-go/pkg/kgo"
kgo "go.unistack.org/micro-broker-kgo/v3"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/logger/slog"
"go.unistack.org/micro/v3/metadata"
)
@@ -20,61 +20,55 @@ var (
msgcnt = int64(10)
group = "38"
prefill = true
-loglevel = logger.DebugLevel
+loglevel = logger.ErrorLevel
+cluster *kfake.Cluster
)
-var bm = &broker.Message{
-Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test"},
-Body: []byte(`"body"`),
-}
+func TestMain(m *testing.M) {
+cluster = kfake.MustCluster(
+kfake.AllowAutoTopicCreation(),
+)
+defer cluster.Close()
+os.Exit(m.Run())
+}
-func TestFail(t *testing.T) {
-if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
-t.Skip()
-}
-logger.DefaultLogger = slog.NewLogger()
-if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
-t.Fatal(err)
-}
-ctx := context.Background()
-var addrs []string
-if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
-addrs = []string{"127.0.0.1:9092"}
-} else {
-addrs = strings.Split(addr, ",")
-}
+func helperCreateBroker() *kgo.Broker {
b := kgo.NewBroker(
-broker.Addrs(addrs...),
+broker.Addrs(cluster.ListenAddrs()...),
kgo.CommitInterval(5*time.Second),
kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024),
kg.AllowAutoTopicCreation(),
),
)
+return b
+}
+func TestFail(t *testing.T) {
+ctx := context.Background()
+err := logger.DefaultLogger.Init(logger.WithLevel(loglevel))
+require.Nil(t, err)
+b := helperCreateBroker()
t.Logf("broker init")
-if err := b.Init(); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, b.Init())
t.Logf("broker connect")
-if err := b.Connect(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, b.Connect(ctx))
defer func() {
t.Logf("broker disconnect")
-if err := b.Disconnect(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, b.Disconnect(ctx))
}()
t.Logf("broker health %v", b.Health())
msgs := make([]*broker.Message, 0, msgcnt)
for i := int64(0); i < msgcnt; i++ {
-msgs = append(msgs, bm)
+msgs = append(msgs, &broker.Message{
+Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test"},
+Body: []byte(`"body"`),
+})
}
for _, msg := range msgs {
@@ -83,7 +77,6 @@ func TestFail(t *testing.T) {
break
}
}
-// t.Skip()
idx := int64(0)
fn := func(msg broker.Event) error {
@@ -97,13 +90,11 @@ func TestFail(t *testing.T) {
broker.SubscribeAutoAck(true),
broker.SubscribeGroup(group),
broker.SubscribeBodyOnly(true))
-if err != nil {
-t.Fatal(err)
-}
+require.Nil(t, err)
defer func() {
-if err := sub.Unsubscribe(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, sub.Unsubscribe(ctx))
}()
for {
@@ -117,92 +108,54 @@ func TestFail(t *testing.T) {
}
func TestConnect(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
-var addrs []string
ctx := context.TODO()
-b := kgo.NewBroker(
-broker.Addrs(addrs...),
-kgo.CommitInterval(5*time.Second),
-kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)),
-)
-if err := b.Init(); err != nil {
-t.Fatal(err)
-}
+b := helperCreateBroker()
-if err := b.Connect(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, b.Init())
+require.Nil(t, b.Connect(ctx))
}
func TestPubSub(t *testing.T) {
if tr := os.Getenv("INTEGRATION_TESTS"); len(tr) > 0 {
t.Skip()
}
-if err := logger.DefaultLogger.Init(logger.WithLevel(loglevel)); err != nil {
-t.Fatal(err)
-}
ctx := context.Background()
+err := logger.DefaultLogger.Init(logger.WithLevel(loglevel))
+require.Nil(t, err)
-var addrs []string
-if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
-addrs = []string{"127.0.0.1:29091", "127.0.0.2:29092", "127.0.0.3:29093"}
-} else {
-addrs = strings.Split(addr, ",")
-}
-b := kgo.NewBroker(
-broker.Addrs(addrs...),
-kgo.CommitInterval(5*time.Second),
-kgo.Options(kg.ClientID("test"), kg.FetchMaxBytes(10*1024*1024)),
-)
-if err := b.Init(); err != nil {
-t.Fatal(err)
-}
-if err := b.Connect(ctx); err != nil {
-t.Fatal(err)
-}
+b := helperCreateBroker()
+require.Nil(t, b.Init())
+require.Nil(t, b.Connect(ctx))
defer func() {
-if err := b.Disconnect(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, b.Disconnect(ctx))
}()
-if prefill {
-msgs := make([]*broker.Message, 0, msgcnt)
-for i := int64(0); i < msgcnt; i++ {
-msgs = append(msgs, bm)
-}
-if err := b.BatchPublish(ctx, msgs); err != nil {
-t.Fatal(err)
+if prefill {
+var msgs []*broker.Message
+for i := int64(0); i < msgcnt; i++ {
+msgs = append(msgs, &broker.Message{
+Header: map[string]string{"hkey": "hval", metadata.HeaderTopic: "test.pubsub"},
+Body: []byte(`"body"`),
+})
+}
// t.Skip()
+require.Nil(t, b.BatchPublish(ctx, msgs))
}
done := make(chan bool, 1)
idx := int64(0)
fn := func(msg broker.Event) error {
atomic.AddInt64(&idx, 1)
// time.Sleep(200 * time.Millisecond)
return msg.Ack()
}
-sub, err := b.Subscribe(ctx, "test", fn,
+sub, err := b.Subscribe(ctx, "test.pubsub", fn,
broker.SubscribeAutoAck(true),
broker.SubscribeGroup(group),
-broker.SubscribeBodyOnly(true))
-if err != nil {
-t.Fatal(err)
-}
+broker.SubscribeBodyOnly(true),
+)
+require.Nil(t, err)
defer func() {
-if err := sub.Unsubscribe(ctx); err != nil {
-t.Fatal(err)
-}
+require.Nil(t, sub.Unsubscribe(ctx))
}()
ticker := time.NewTicker(2 * time.Minute)
@@ -210,14 +163,16 @@ func TestPubSub(t *testing.T) {
pticker := time.NewTicker(1 * time.Second)
defer pticker.Stop()
go func() {
for {
select {
case <-pticker.C:
if prc := atomic.LoadInt64(&idx); prc == msgcnt {
t.Log("everything is read")
close(done)
} else {
t.Logf("processed %v\n", prc)
t.Logf("processed %v of %v\n", prc, msgcnt)
}
case <-ticker.C:
close(done)