Compare commits

...

18 Commits

Author SHA1 Message Date
ea84ac094f Merge branch 'v4' into hasql
Some checks failed
test / test (pull_request) Failing after 17m58s
coverage / build (pull_request) Failing after 18m40s
lint / lint (pull_request) Failing after 1m41s
2025-09-18 14:35:10 +03:00
2886a7fe8a initial hasql support
Some checks failed
test / test (pull_request) Failing after 19m47s
lint / lint (pull_request) Failing after 19m59s
coverage / build (pull_request) Failing after 20m4s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-09-18 14:34:48 +03:00
vtolstov
268b3dbff4 Apply Code Coverage Badge 2025-07-12 21:20:05 +00:00
f9d2c14597 fixup tests
Some checks failed
sync / sync (push) Successful in 1m8s
coverage / build (push) Successful in 2m3s
test / test (push) Failing after 2m55s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-07-13 00:11:08 +03:00
e6bf914dd9 tracer: write log fields only if span exists and recording
Some checks failed
coverage / build (push) Failing after 1m14s
test / test (push) Has been cancelled
sync / sync (push) Successful in 1m37s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-07-13 00:08:30 +03:00
b59f4a16f0 meter: disable auto sorting labels
Some checks failed
coverage / build (push) Failing after 1m39s
test / test (push) Successful in 4m37s
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-06-17 19:02:06 +03:00
3deb572f72 [v4] fix out-of-bounds behavior in seeker buffer and add tests (#219)
Some checks failed
coverage / build (push) Failing after 2m12s
test / test (push) Successful in 4m27s
sync / sync (push) Successful in 7s
* add check negative position to Read() and write tests

* add tests for Write() method

* add tests for Write() method

* add checks of whence and negative position to Seek() and write tests

* add tests for Rewind()

* add tests for Close()

* add tests for Reset()

* add tests for Len()

* add tests for Bytes()

* tests polishing

* tests polishing

* tests polishing

* tests polishing
2025-06-15 17:24:48 +03:00
0e668c0f0f fixup tests
Some checks failed
coverage / build (push) Failing after 2m13s
test / test (push) Failing after 19m18s
sync / sync (push) Successful in 19s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-06-09 17:36:11 +03:00
2bac878845 broker: fix message options
Some checks failed
coverage / build (push) Failing after 1m58s
test / test (push) Has started running
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-06-09 17:23:30 +03:00
9ee31fb5a6 fixup compile
Some checks failed
coverage / build (push) Has been cancelled
test / test (push) Has been cancelled
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-29 12:46:23 +03:00
ed5d30a58e store/noop: fixup Exists
Some checks failed
coverage / build (push) Has been cancelled
test / test (push) Has been cancelled
sync / sync (push) Has been cancelled
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-29 12:43:39 +03:00
vtolstov
b4b67a8b41 Apply Code Coverage Badge 2025-05-25 02:41:23 +00:00
13f90ff716 changed embedded mutex to private field (#217)
Some checks failed
sync / sync (push) Failing after 16m12s
test / test (push) Failing after 17m28s
coverage / build (push) Failing after 17m40s
2025-05-25 01:15:03 +03:00
0f8f12aee0 add tracer enabled status
Some checks failed
coverage / build (push) Successful in 2m52s
test / test (push) Failing after 18m53s
sync / sync (push) Successful in 26s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-19 09:33:01 +03:00
8b406cf963 util/buffer: add Reset() method
Some checks failed
coverage / build (push) Failing after 1m36s
test / test (push) Successful in 3m35s
sync / sync (push) Successful in 7s
closes #402

Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-12 19:18:45 +03:00
029a434a2b broker: pass broker content type if message options do not set it
All checks were successful
coverage / build (push) Successful in 1m44s
test / test (push) Successful in 3m5s
sync / sync (push) Successful in 7s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-09 13:51:35 +03:00
vtolstov
847259bc39 Apply Code Coverage Badge 2025-05-09 09:36:02 +00:00
a1ee8728ad broker: add Content-Type and DefaultContentType
All checks were successful
coverage / build (push) Successful in 1m58s
sync / sync (push) Successful in 1m37s
test / test (push) Successful in 3m47s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-05-09 12:34:46 +03:00
34 changed files with 1362 additions and 239 deletions

View File

@@ -1,5 +1,5 @@
# Micro # Micro
![Coverage](https://img.shields.io/badge/Coverage-33.6%25-yellow) ![Coverage](https://img.shields.io/badge/Coverage-33.8%25-yellow)
[![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview) [![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
[![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v4)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush) [![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v4)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)

View File

@@ -41,7 +41,7 @@ type Broker interface {
// Disconnect disconnect from broker // Disconnect disconnect from broker
Disconnect(ctx context.Context) error Disconnect(ctx context.Context) error
// NewMessage create new broker message to publish. // NewMessage create new broker message to publish.
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error)
// Publish message to broker topic // Publish message to broker topic
Publish(ctx context.Context, topic string, messages ...Message) error Publish(ctx context.Context, topic string, messages ...Message) error
// Subscribe subscribes to topic message via handler // Subscribe subscribes to topic message via handler

View File

@@ -42,9 +42,9 @@ func SetSubscribeOption(k, v interface{}) SubscribeOption {
} }
} }
// SetPublishOption returns a function to setup a context with given value // SetMessageOption returns a function to setup a context with given value
func SetPublishOption(k, v interface{}) PublishOption { func SetMessageOption(k, v interface{}) MessageOption {
return func(o *PublishOptions) { return func(o *MessageOptions) {
if o.Context == nil { if o.Context == nil {
o.Context = context.Background() o.Context = context.Background()
} }

View File

@@ -22,8 +22,8 @@ type Broker struct {
subscribers map[string][]*Subscriber subscribers map[string][]*Subscriber
addr string addr string
opts broker.Options opts broker.Options
sync.RWMutex mu sync.RWMutex
connected bool connected bool
} }
type memoryMessage struct { type memoryMessage struct {
@@ -32,7 +32,7 @@ type memoryMessage struct {
ctx context.Context ctx context.Context
body []byte body []byte
hdr metadata.Metadata hdr metadata.Metadata
opts broker.PublishOptions opts broker.MessageOptions
} }
func (m *memoryMessage) Ack() error { func (m *memoryMessage) Ack() error {
@@ -72,9 +72,9 @@ func (b *Broker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 { if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx] ct = ct[:idx]
} }
b.RLock() b.mu.RLock()
c, ok := b.opts.Codecs[ct] c, ok := b.opts.Codecs[ct]
b.RUnlock() b.mu.RUnlock()
if ok { if ok {
return c, nil return c, nil
} }
@@ -96,8 +96,8 @@ func (b *Broker) Connect(ctx context.Context) error {
default: default:
} }
b.Lock() b.mu.Lock()
defer b.Unlock() defer b.mu.Unlock()
if b.connected { if b.connected {
return nil return nil
@@ -126,8 +126,8 @@ func (b *Broker) Disconnect(ctx context.Context) error {
default: default:
} }
b.Lock() b.mu.Lock()
defer b.Unlock() defer b.mu.Unlock()
if !b.connected { if !b.connected {
return nil return nil
@@ -157,8 +157,11 @@ func (b *Broker) Init(opts ...broker.Option) error {
return nil return nil
} }
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.PublishOption) (broker.Message, error) { func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
options := broker.NewPublishOptions(opts...) options := broker.NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options} m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType) c, err := b.newCodec(m.opts.ContentType)
if err == nil { if err == nil {
@@ -180,12 +183,12 @@ func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker
} }
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error { func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
b.RLock() b.mu.RLock()
if !b.connected { if !b.connected {
b.RUnlock() b.mu.RUnlock()
return broker.ErrNotConnected return broker.ErrNotConnected
} }
b.RUnlock() b.mu.RUnlock()
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -193,9 +196,9 @@ func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.M
default: default:
} }
b.RLock() b.mu.RLock()
subs, ok := b.subscribers[topic] subs, ok := b.subscribers[topic]
b.RUnlock() b.mu.RUnlock()
if !ok { if !ok {
return nil return nil
} }
@@ -252,12 +255,12 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
return nil, err return nil, err
} }
b.RLock() b.mu.RLock()
if !b.connected { if !b.connected {
b.RUnlock() b.mu.RUnlock()
return nil, broker.ErrNotConnected return nil, broker.ErrNotConnected
} }
b.RUnlock() b.mu.RUnlock()
sid, err := id.New() sid, err := id.New()
if err != nil { if err != nil {
@@ -275,13 +278,13 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
ctx: ctx, ctx: ctx,
} }
b.Lock() b.mu.Lock()
b.subscribers[topic] = append(b.subscribers[topic], sub) b.subscribers[topic] = append(b.subscribers[topic], sub)
b.Unlock() b.mu.Unlock()
go func() { go func() {
<-sub.exit <-sub.exit
b.Lock() b.mu.Lock()
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1) newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
for _, sb := range b.subscribers[topic] { for _, sb := range b.subscribers[topic] {
if sb.id == sub.id { if sb.id == sub.id {
@@ -290,7 +293,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
newSubscribers = append(newSubscribers, sb) newSubscribers = append(newSubscribers, sb)
} }
b.subscribers[topic] = newSubscribers b.subscribers[topic] = newSubscribers
b.Unlock() b.mu.Unlock()
}() }()
return sub, nil return sub, nil

View File

@@ -49,7 +49,7 @@ func TestMemoryBroker(t *testing.T) {
"id", fmt.Sprintf("%d", i), "id", fmt.Sprintf("%d", i),
), ),
[]byte(`"hello world"`), []byte(`"hello world"`),
broker.PublishContentType("application/octet-stream"), broker.MessageContentType("application/octet-stream"),
) )
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@@ -14,16 +14,16 @@ type NoopBroker struct {
funcPublish FuncPublish funcPublish FuncPublish
funcSubscribe FuncSubscribe funcSubscribe FuncSubscribe
opts Options opts Options
sync.RWMutex mu sync.RWMutex
} }
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) { func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 { if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx] ct = ct[:idx]
} }
b.RLock() b.mu.RLock()
c, ok := b.opts.Codecs[ct] c, ok := b.opts.Codecs[ct]
b.RUnlock() b.mu.RUnlock()
if ok { if ok {
return c, nil return c, nil
} }
@@ -99,7 +99,7 @@ type noopMessage struct {
ctx context.Context ctx context.Context
body []byte body []byte
hdr metadata.Metadata hdr metadata.Metadata
opts PublishOptions opts MessageOptions
} }
func (m *noopMessage) Ack() error { func (m *noopMessage) Ack() error {
@@ -126,8 +126,11 @@ func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst) return m.c.Unmarshal(m.body, dst)
} }
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) { func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) {
options := NewPublishOptions(opts...) options := NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options} m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType) c, err := b.newCodec(m.opts.ContentType)
if err == nil { if err == nil {

View File

@@ -45,6 +45,9 @@ type Options struct {
// GracefulTimeout contains time to wait to finish in flight requests // GracefulTimeout contains time to wait to finish in flight requests
GracefulTimeout time.Duration GracefulTimeout time.Duration
// ContentType will be used if no content-type set when creating message
ContentType string
} }
// NewOptions create new Options // NewOptions create new Options
@@ -57,14 +60,19 @@ func NewOptions(opts ...Option) Options {
Codecs: make(map[string]codec.Codec), Codecs: make(map[string]codec.Codec),
Tracer: tracer.DefaultTracer, Tracer: tracer.DefaultTracer,
GracefulTimeout: DefaultGracefulTimeout, GracefulTimeout: DefaultGracefulTimeout,
ContentType: DefaultContentType,
} }
for _, o := range opts { for _, o := range opts {
o(&options) o(&options)
} }
return options return options
} }
// DefaultContentType is the default content-type if not specified
var DefaultContentType = ""
// Context sets the context option // Context sets the context option
func Context(ctx context.Context) Option { func Context(ctx context.Context) Option {
return func(o *Options) { return func(o *Options) {
@@ -72,8 +80,15 @@ func Context(ctx context.Context) Option {
} }
} }
// PublishOptions struct // ContentType used by default if not specified
type PublishOptions struct { func ContentType(ct string) Option {
return func(o *Options) {
o.ContentType = ct
}
}
// MessageOptions struct
type MessageOptions struct {
// ContentType for message body // ContentType for message body
ContentType string ContentType string
// BodyOnly flag says the message contains raw body bytes and don't need // BodyOnly flag says the message contains raw body bytes and don't need
@@ -83,9 +98,9 @@ type PublishOptions struct {
Context context.Context Context context.Context
} }
// NewPublishOptions creates PublishOptions struct // NewMessageOptions creates MessageOptions struct
func NewPublishOptions(opts ...PublishOption) PublishOptions { func NewMessageOptions(opts ...MessageOption) MessageOptions {
options := PublishOptions{ options := MessageOptions{
Context: context.Background(), Context: context.Background(),
} }
for _, o := range opts { for _, o := range opts {
@@ -113,19 +128,19 @@ type SubscribeOptions struct {
// Option func // Option func
type Option func(*Options) type Option func(*Options)
// PublishOption func // MessageOption func
type PublishOption func(*PublishOptions) type MessageOption func(*MessageOptions)
// PublishContentType sets message content-type that used to Marshal // MessageContentType sets message content-type that used to Marshal
func PublishContentType(ct string) PublishOption { func MessageContentType(ct string) MessageOption {
return func(o *PublishOptions) { return func(o *MessageOptions) {
o.ContentType = ct o.ContentType = ct
} }
} }
// PublishBodyOnly publish only body of the message // MessageBodyOnly publish only body of the message
func PublishBodyOnly(b bool) PublishOption { func MessageBodyOnly(b bool) MessageOption {
return func(o *PublishOptions) { return func(o *MessageOptions) {
o.BodyOnly = b o.BodyOnly = b
} }
} }
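
The hunk above renames the Publish* option types to Message* and adds a broker-level default content type. A minimal usage sketch under that renamed API follows; it assumes the broker value was constructed elsewhere (for example with broker.ContentType("application/json") as its default), that hdr is an already-built metadata.Metadata, and that the import paths follow the module path.

package main

import (
	"context"

	"go.unistack.org/micro/v4/broker"
	"go.unistack.org/micro/v4/metadata"
)

// publish shows the renamed options: MessageContentType overrides the
// broker-level default for a single message; when omitted, NewMessage falls
// back to Options.ContentType as added in this change set.
func publish(ctx context.Context, b broker.Broker, hdr metadata.Metadata) error {
	msg, err := b.NewMessage(ctx, hdr, []byte(`"hello world"`),
		broker.MessageContentType("application/octet-stream"),
	)
	if err != nil {
		return err
	}
	return b.Publish(ctx, "greetings", msg)
}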

3
go.mod
View File

@@ -1,6 +1,6 @@
module go.unistack.org/micro/v4 module go.unistack.org/micro/v4
go 1.22.0 go 1.24
require ( require (
dario.cat/mergo v1.0.1 dario.cat/mergo v1.0.1
@@ -17,6 +17,7 @@ require (
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v4 v4.1.0 go.unistack.org/micro-proto/v4 v4.1.0
golang.org/x/sync v0.10.0 golang.org/x/sync v0.10.0
golang.yandex/hasql/v2 v2.1.0
google.golang.org/grpc v1.69.4 google.golang.org/grpc v1.69.4
google.golang.org/protobuf v1.36.3 google.golang.org/protobuf v1.36.3
) )

2
go.sum
View File

@@ -56,6 +56,8 @@ golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.yandex/hasql/v2 v2.1.0 h1:7CaFFWeHoK5TvA+QvZzlKHlIN5sqNpqM8NSrXskZD/k=
golang.yandex/hasql/v2 v2.1.0/go.mod h1:3Au1AxuJDCTXmS117BpbI6e+70kGWeyLR1qJAH6HdtA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=

504
hooks/sql/cluster.go Normal file
View File

@@ -0,0 +1,504 @@
package sql
import (
"context"
"database/sql"
"errors"
"fmt"
"math"
"reflect"
"time"
"unsafe"
"golang.yandex/hasql/v2"
)
var errNoAliveNodes = errors.New("no alive nodes")
func newSQLRowError() *sql.Row {
row := &sql.Row{}
t := reflect.TypeOf(row).Elem()
field, _ := t.FieldByName("err")
rowPtr := unsafe.Pointer(row)
errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
errPtr := (*error)(errFieldPtr)
*errPtr = errNoAliveNodes
return row
}
func newSQLRowsError() *sql.Rows {
rows := &sql.Rows{}
t := reflect.TypeOf(rows).Elem()
field, _ := t.FieldByName("lasterr")
rowPtr := unsafe.Pointer(rows)
errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
errPtr := (*error)(errFieldPtr)
*errPtr = errNoAliveNodes
return rows
}
type ClusterQuerier interface {
Querier
WaitForNodes(ctx context.Context, criterion ...hasql.NodeStateCriterion) error
}
type Querier interface {
// Basic connection methods
PingContext(ctx context.Context) error
Close() error
// Query methods with context
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
// Prepared statements with context
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
// Transaction management with context
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
// Connection pool management
SetConnMaxLifetime(d time.Duration)
SetConnMaxIdleTime(d time.Duration)
SetMaxOpenConns(n int)
SetMaxIdleConns(n int)
Stats() sql.DBStats
Conn(ctx context.Context) (*sql.Conn, error)
}
var (
ErrClusterChecker = errors.New("cluster node checker required")
ErrClusterDiscoverer = errors.New("cluster node discoverer required")
ErrClusterPicker = errors.New("cluster node picker required")
)
type Cluster struct {
hasql *hasql.Cluster[Querier]
options ClusterOptions
}
// NewCluster returns Querier that provides cluster of nodes
func NewCluster[T Querier](opts ...ClusterOption) (ClusterQuerier, error) {
options := ClusterOptions{Context: context.Background()}
for _, opt := range opts {
opt(&options)
}
if options.NodeChecker == nil {
return nil, ErrClusterChecker
}
if options.NodeDiscoverer == nil {
return nil, ErrClusterDiscoverer
}
if options.NodePicker == nil {
return nil, ErrClusterPicker
}
options.Options = append(options.Options, hasql.WithNodePicker(options.NodePicker))
if p, ok := options.NodePicker.(*CustomPicker[Querier]); ok {
p.opts.Priority = options.NodePriority
}
c, err := hasql.NewCluster(
options.NodeDiscoverer,
options.NodeChecker,
options.Options...,
)
if err != nil {
return nil, err
}
return &Cluster{hasql: c, options: options}, nil
}
// compile time guard
var _ hasql.NodePicker[Querier] = (*CustomPicker[Querier])(nil)
type nodeStateCriterionKey struct{}
// NodeStateCriterion inject hasql.NodeStateCriterion to context
func NodeStateCriterion(ctx context.Context, c hasql.NodeStateCriterion) context.Context {
return context.WithValue(ctx, nodeStateCriterionKey{}, c)
}
func getNodeStateCriterion(ctx context.Context) hasql.NodeStateCriterion {
if v, ok := ctx.Value(nodeStateCriterionKey{}).(hasql.NodeStateCriterion); ok {
return v
}
return hasql.PreferPrimary
}
// CustomPickerOptions holds options to pick nodes
type CustomPickerOptions struct {
MaxLag int
Priority map[string]int32
RetryOnError bool
}
// CustomPickerOption func apply option to CustomPickerOptions
type CustomPickerOption func(*CustomPickerOptions)
// CustomPickerMaxLag specifies max lag for which node can be used
func CustomPickerMaxLag(n int) CustomPickerOption {
return func(o *CustomPickerOptions) {
o.MaxLag = n
}
}
// NewCustomPicker creates new node picker
func NewCustomPicker[T Querier](opts ...CustomPickerOption) *CustomPicker[Querier] {
options := CustomPickerOptions{}
for _, o := range opts {
o(&options)
}
return &CustomPicker[Querier]{opts: options}
}
// CustomPicker holds node picker options
type CustomPicker[T Querier] struct {
opts CustomPickerOptions
}
// PickNode used to return specific node
func (p *CustomPicker[T]) PickNode(cnodes []hasql.CheckedNode[T]) hasql.CheckedNode[T] {
for _, n := range cnodes {
fmt.Printf("node %s\n", n.Node.String())
}
return cnodes[0]
}
func (p *CustomPicker[T]) getPriority(nodeName string) int32 {
if prio, ok := p.opts.Priority[nodeName]; ok {
return prio
}
return math.MaxInt32 // Default to lowest priority
}
// CompareNodes used to sort nodes
func (p *CustomPicker[T]) CompareNodes(a, b hasql.CheckedNode[T]) int {
fmt.Printf("CompareNodes %s %s\n", a.Node.String(), b.Node.String())
// Get replication lag values
aLag := a.Info.(interface{ ReplicationLag() int }).ReplicationLag()
bLag := b.Info.(interface{ ReplicationLag() int }).ReplicationLag()
// First check that lag lower then MaxLag
if aLag > p.opts.MaxLag && bLag > p.opts.MaxLag {
fmt.Printf("CompareNodes aLag > p.opts.MaxLag && bLag > p.opts.MaxLag\n")
return 0 // both are equal
}
// If one node exceeds MaxLag and the other doesn't, prefer the one that doesn't
if aLag > p.opts.MaxLag {
fmt.Printf("CompareNodes aLag > p.opts.MaxLag\n")
return 1 // b is better
}
if bLag > p.opts.MaxLag {
fmt.Printf("CompareNodes bLag > p.opts.MaxLag\n")
return -1 // a is better
}
// Get node priorities
aPrio := p.getPriority(a.Node.String())
bPrio := p.getPriority(b.Node.String())
// if both priority equals
if aPrio == bPrio {
fmt.Printf("CompareNodes aPrio == bPrio\n")
// First compare by replication lag
if aLag < bLag {
fmt.Printf("CompareNodes aLag < bLag\n")
return -1
}
if aLag > bLag {
fmt.Printf("CompareNodes aLag > bLag\n")
return 1
}
// If replication lag is equal, compare by latency
aLatency := a.Info.(interface{ Latency() time.Duration }).Latency()
bLatency := b.Info.(interface{ Latency() time.Duration }).Latency()
if aLatency < bLatency {
return -1
}
if aLatency > bLatency {
return 1
}
// If lag and latency is equal
return 0
}
// If priorities are different, prefer the node with lower priority value
if aPrio < bPrio {
return -1
}
return 1
}
// ClusterOptions contains cluster specific options
type ClusterOptions struct {
NodeChecker hasql.NodeChecker
NodePicker hasql.NodePicker[Querier]
NodeDiscoverer hasql.NodeDiscoverer[Querier]
Options []hasql.ClusterOpt[Querier]
Context context.Context
RetryOnError bool
NodePriority map[string]int32
}
// ClusterOption apply cluster options to ClusterOptions
type ClusterOption func(*ClusterOptions)
// WithClusterNodeChecker pass hasql.NodeChecker to cluster options
func WithClusterNodeChecker(c hasql.NodeChecker) ClusterOption {
return func(o *ClusterOptions) {
o.NodeChecker = c
}
}
// WithClusterNodePicker pass hasql.NodePicker to cluster options
func WithClusterNodePicker(p hasql.NodePicker[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.NodePicker = p
}
}
// WithClusterNodeDiscoverer pass hasql.NodeDiscoverer to cluster options
func WithClusterNodeDiscoverer(d hasql.NodeDiscoverer[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.NodeDiscoverer = d
}
}
// WithRetryOnError retry on other nodes on error
func WithRetryOnError(b bool) ClusterOption {
return func(o *ClusterOptions) {
o.RetryOnError = b
}
}
// WithClusterContext pass context.Context to cluster options and used for checks
func WithClusterContext(ctx context.Context) ClusterOption {
return func(o *ClusterOptions) {
o.Context = ctx
}
}
// WithClusterOptions pass hasql.ClusterOpt
func WithClusterOptions(opts ...hasql.ClusterOpt[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.Options = append(o.Options, opts...)
}
}
type ClusterNode struct {
Name string
DB Querier
Priority int32
}
// WithClusterNodes create cluster with static NodeDiscoverer
func WithClusterNodes(cns ...ClusterNode) ClusterOption {
return func(o *ClusterOptions) {
nodes := make([]*hasql.Node[Querier], 0, len(cns))
if o.NodePriority == nil {
o.NodePriority = make(map[string]int32, len(cns))
}
for _, cn := range cns {
nodes = append(nodes, hasql.NewNode(cn.Name, cn.DB))
if cn.Priority == 0 {
cn.Priority = math.MaxInt32
}
o.NodePriority[cn.Name] = cn.Priority
}
o.NodeDiscoverer = hasql.NewStaticNodeDiscoverer(nodes...)
}
}
func (c *Cluster) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
var tx *sql.Tx
var err error
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
if tx, err = n.DB().BeginTx(ctx, opts); err != nil && !c.options.RetryOnError {
return true
}
return false
})
if tx == nil && err == nil {
err = errNoAliveNodes
}
return tx, err
}
func (c *Cluster) Close() error {
return c.hasql.Close()
}
func (c *Cluster) Conn(ctx context.Context) (*sql.Conn, error) {
var conn *sql.Conn
var err error
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
if conn, err = n.DB().Conn(ctx); err != nil && !c.options.RetryOnError {
return true
}
return false
})
if conn == nil && err == nil {
err = errNoAliveNodes
}
return conn, err
}
func (c *Cluster) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
var res sql.Result
var err error
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
if res, err = n.DB().ExecContext(ctx, query, args...); err != nil && !c.options.RetryOnError {
return true
}
return false
})
if res == nil && err == nil {
err = errNoAliveNodes
}
return res, err
}
func (c *Cluster) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
var res *sql.Stmt
var err error
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
if res, err = n.DB().PrepareContext(ctx, query); err != nil && !c.options.RetryOnError {
return true
}
return false
})
if res == nil && err == nil {
err = errNoAliveNodes
}
return res, err
}
func (c *Cluster) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
var res *sql.Rows
var err error
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
if res, err = n.DB().QueryContext(ctx, query, args...); err != nil && err != sql.ErrNoRows && !c.options.RetryOnError {
return true
}
return false
})
if res == nil && err == nil {
err = errNoAliveNodes
}
return res, err
}
func (c *Cluster) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
var res *sql.Row
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
res = n.DB().QueryRowContext(ctx, query, args...)
if res.Err() == nil {
return false
} else if res.Err() != nil && !c.options.RetryOnError {
return false
}
return true
})
if res == nil {
res = newSQLRowError()
}
return res
}
func (c *Cluster) PingContext(ctx context.Context) error {
var err error
var ok bool
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
ok = true
if err = n.DB().PingContext(ctx); err != nil && !c.options.RetryOnError {
return true
}
return false
})
if !ok {
err = errNoAliveNodes
}
return err
}
func (c *Cluster) WaitForNodes(ctx context.Context, criterions ...hasql.NodeStateCriterion) error {
for _, criterion := range criterions {
if _, err := c.hasql.WaitForNode(ctx, criterion); err != nil {
return err
}
}
return nil
}
func (c *Cluster) SetConnMaxLifetime(td time.Duration) {
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
n.DB().SetConnMaxLifetime(td)
return false
})
}
func (c *Cluster) SetConnMaxIdleTime(td time.Duration) {
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
n.DB().SetConnMaxIdleTime(td)
return false
})
}
func (c *Cluster) SetMaxOpenConns(nc int) {
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
n.DB().SetMaxOpenConns(nc)
return false
})
}
func (c *Cluster) SetMaxIdleConns(nc int) {
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
n.DB().SetMaxIdleConns(nc)
return false
})
}
func (c *Cluster) Stats() sql.DBStats {
s := sql.DBStats{}
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
st := n.DB().Stats()
s.Idle += st.Idle
s.InUse += st.InUse
s.MaxIdleClosed += st.MaxIdleClosed
s.MaxIdleTimeClosed += st.MaxIdleTimeClosed
s.MaxOpenConnections += st.MaxOpenConnections
s.OpenConnections += st.OpenConnections
s.WaitCount += st.WaitCount
s.WaitDuration += st.WaitDuration
return false
})
return s
}
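
For orientation, here is a hedged wiring sketch for the new cluster type, written as if it sat alongside the file above in the hooks/sql package and mirroring the test that follows. The node names and handles are placeholders; a plain *sql.DB satisfies Querier.

package sql

import (
	"context"
	"database/sql"
	"time"

	"golang.yandex/hasql/v2"
)

// openCluster wires two database handles into the cluster added above and
// waits until a primary and a standby have been discovered.
func openCluster(ctx context.Context, primary, standby *sql.DB) (ClusterQuerier, error) {
	c, err := NewCluster[Querier](
		WithClusterContext(ctx),
		WithClusterNodeChecker(hasql.PostgreSQLChecker),
		WithClusterNodePicker(NewCustomPicker[Querier](CustomPickerMaxLag(100))),
		WithClusterNodes(
			ClusterNode{Name: "primary", DB: primary, Priority: 1},
			ClusterNode{Name: "standby", DB: standby, Priority: 2},
		),
		WithClusterOptions(hasql.WithUpdateInterval[Querier](2*time.Second)),
	)
	if err != nil {
		return nil, err
	}
	if err := c.WaitForNodes(ctx, hasql.Primary, hasql.Standby); err != nil {
		_ = c.Close()
		return nil, err
	}
	return c, nil
}

// readNodeName routes a read to a standby by injecting the criterion into the
// context; without it, getNodeStateCriterion falls back to PreferPrimary.
func readNodeName(ctx context.Context, c ClusterQuerier) (string, error) {
	var name string
	row := c.QueryRowContext(NodeStateCriterion(ctx, hasql.Standby), "SELECT node_name AS name")
	if err := row.Scan(&name); err != nil {
		return "", err
	}
	return name, nil
}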

171
hooks/sql/cluster_test.go Normal file
View File

@@ -0,0 +1,171 @@
package sql
import (
"context"
"fmt"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"golang.yandex/hasql/v2"
)
func TestNewCluster(t *testing.T) {
dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbMaster.Close()
dbMasterMock.MatchExpectationsInOrder(false)
dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(1, 0)).
RowsWillBeClosed().
WithoutArgs()
dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("master-dc1"))
dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbDRMaster.Close()
dbDRMasterMock.MatchExpectationsInOrder(false)
dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 40)).
RowsWillBeClosed().
WithoutArgs()
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster1-dc2"))
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster"))
dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC1.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC2.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
defer cancel()
c, err := NewCluster[Querier](
WithClusterContext(tctx),
WithClusterNodeChecker(hasql.PostgreSQLChecker),
WithClusterNodePicker(NewCustomPicker[Querier](
CustomPickerMaxLag(100),
)),
WithClusterNodes(
ClusterNode{"slave-dc1", dbSlaveDC1, 1},
ClusterNode{"master-dc1", dbMaster, 1},
ClusterNode{"slave-dc2", dbSlaveDC2, 2},
ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
),
WithClusterOptions(
hasql.WithUpdateInterval[Querier](2*time.Second),
hasql.WithUpdateTimeout[Querier](1*time.Second),
),
)
if err != nil {
t.Fatal(err)
}
defer c.Close()
if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
t.Fatal(err)
}
time.Sleep(500 * time.Millisecond)
node1Name := ""
fmt.Printf("check for Standby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.Standby), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node1Name); err != nil {
t.Fatal(err)
} else if "slave-dc1" != node1Name {
t.Fatalf("invalid node name %s != %s", "slave-dc1", node1Name)
}
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
node2Name := ""
fmt.Printf("check for PreferStandby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node2Name); err != nil {
t.Fatal(err)
} else if "slave-dc1" != node2Name {
t.Fatalf("invalid node name %s != %s", "slave-dc1", node2Name)
}
node3Name := ""
fmt.Printf("check for PreferPrimary\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferPrimary), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node3Name); err != nil {
t.Fatal(err)
} else if "master-dc1" != node3Name {
t.Fatalf("invalid node name %s != %s", "master-dc1", node3Name)
}
dbSlaveDC1Mock.ExpectQuery(`.*`).WillReturnRows(sqlmock.NewRows([]string{"role"}).RowError(1, fmt.Errorf("row error")))
time.Sleep(2 * time.Second)
fmt.Printf("check for PreferStandby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() == nil {
t.Fatal("must return error")
}
if dbMasterErr := dbMasterMock.ExpectationsWereMet(); dbMasterErr != nil {
t.Error(dbMasterErr)
}
}

View File

@@ -80,7 +80,7 @@ func TestTime(t *testing.T) {
WithHandlerFunc(slog.NewTextHandler), WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true), logger.WithAddStacktrace(true),
logger.WithTimeFunc(func() time.Time { logger.WithTimeFunc(func() time.Time {
return time.Unix(0, 0) return time.Unix(0, 0).UTC()
}), }),
) )
if err := l.Init(logger.WithFields("key1", "val1")); err != nil { if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
@@ -89,8 +89,7 @@ func TestTime(t *testing.T) {
l.Error(ctx, "msg1", errors.New("err")) l.Error(ctx, "msg1", errors.New("err"))
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T03:00:00.000000000+03:00`)) && if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
!bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes()) t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
} }
} }

View File

@@ -4,8 +4,8 @@ package meter
import ( import (
"io" "io"
"sort" "sort"
"strconv"
"strings" "strings"
"sync"
"time" "time"
) )
@@ -117,6 +117,39 @@ func BuildLabels(labels ...string) []string {
return labels return labels
} }
var spool = newStringsPool(500)
type stringsPool struct {
p *sync.Pool
c int
}
func newStringsPool(size int) *stringsPool {
p := &stringsPool{c: size}
p.p = &sync.Pool{
New: func() interface{} {
return &strings.Builder{}
},
}
return p
}
func (p *stringsPool) Cap() int {
return p.c
}
func (p *stringsPool) Get() *strings.Builder {
return p.p.Get().(*strings.Builder)
}
func (p *stringsPool) Put(b *strings.Builder) {
if b.Cap() > p.c {
return
}
b.Reset()
p.p.Put(b)
}
// BuildName used to combine metric with labels. // BuildName used to combine metric with labels.
// If labels count is odd, drop last element // If labels count is odd, drop last element
func BuildName(name string, labels ...string) string { func BuildName(name string, labels ...string) string {
@@ -125,8 +158,6 @@ func BuildName(name string, labels ...string) string {
} }
if len(labels) > 2 { if len(labels) > 2 {
sort.Sort(byKey(labels))
idx := 0 idx := 0
for { for {
if labels[idx] == labels[idx+2] { if labels[idx] == labels[idx+2] {
@@ -141,7 +172,9 @@ func BuildName(name string, labels ...string) string {
} }
} }
var b strings.Builder b := spool.Get()
defer spool.Put(b)
_, _ = b.WriteString(name) _, _ = b.WriteString(name)
_, _ = b.WriteRune('{') _, _ = b.WriteRune('{')
for idx := 0; idx < len(labels); idx += 2 { for idx := 0; idx < len(labels); idx += 2 {
@@ -149,8 +182,9 @@ func BuildName(name string, labels ...string) string {
_, _ = b.WriteRune(',') _, _ = b.WriteRune(',')
} }
_, _ = b.WriteString(labels[idx]) _, _ = b.WriteString(labels[idx])
_, _ = b.WriteString(`=`) _, _ = b.WriteString(`="`)
_, _ = b.WriteString(strconv.Quote(labels[idx+1])) _, _ = b.WriteString(labels[idx+1])
_, _ = b.WriteRune('"')
} }
_, _ = b.WriteRune('}') _, _ = b.WriteRune('}')
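
Because the sort.Sort call is dropped above (the "meter: disable auto sorting labels" commit), BuildName now preserves the caller's label order and only collapses adjacent duplicate keys. A small illustrative call, assuming the package import path go.unistack.org/micro/v4/meter:

package main

import (
	"fmt"

	"go.unistack.org/micro/v4/meter"
)

func main() {
	// Labels keep the order they are passed in; the adjacent duplicate
	// "broker" key collapses to its last value, matching the updated test
	// expectations below.
	fmt.Println(meter.BuildName("my_metric",
		"broker", "broker1",
		"broker", "broker2",
		"register", "mdns",
	))
	// Expected: my_metric{broker="broker2",register="mdns"}
}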

View File

@@ -50,11 +50,12 @@ func TestBuildName(t *testing.T) {
data := map[string][]string{ data := map[string][]string{
`my_metric{firstlabel="value2",zerolabel="value3"}`: { `my_metric{firstlabel="value2",zerolabel="value3"}`: {
"my_metric", "my_metric",
"zerolabel", "value3", "firstlabel", "value2", "firstlabel", "value2",
"zerolabel", "value3",
}, },
`my_metric{broker="broker2",register="mdns",server="tcp"}`: { `my_metric{broker="broker2",register="mdns",server="tcp"}`: {
"my_metric", "my_metric",
"broker", "broker1", "broker", "broker2", "server", "http", "server", "tcp", "register", "mdns", "broker", "broker1", "broker", "broker2", "register", "mdns", "server", "http", "server", "tcp",
}, },
`my_metric{aaa="aaa"}`: { `my_metric{aaa="aaa"}`: {
"my_metric", "my_metric",

View File

@@ -91,7 +91,7 @@ func (p *bro) Connect(_ context.Context) error { return nil }
func (p *bro) Disconnect(_ context.Context) error { return nil } func (p *bro) Disconnect(_ context.Context) error { return nil }
// NewMessage creates new message // NewMessage creates new message
func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.PublishOption) (broker.Message, error) { func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.MessageOption) (broker.Message, error) {
return nil, nil return nil, nil
} }

View File

@@ -11,8 +11,8 @@ import (
) )
type httpProfile struct { type httpProfile struct {
server *http.Server server *http.Server
sync.Mutex mu sync.Mutex
running bool running bool
} }
@@ -21,8 +21,8 @@ var DefaultAddress = ":6060"
// Start the profiler // Start the profiler
func (h *httpProfile) Start() error { func (h *httpProfile) Start() error {
h.Lock() h.mu.Lock()
defer h.Unlock() defer h.mu.Unlock()
if h.running { if h.running {
return nil return nil
@@ -30,9 +30,9 @@ func (h *httpProfile) Start() error {
go func() { go func() {
if err := h.server.ListenAndServe(); err != nil { if err := h.server.ListenAndServe(); err != nil {
h.Lock() h.mu.Lock()
h.running = false h.running = false
h.Unlock() h.mu.Unlock()
} }
}() }()
@@ -43,8 +43,8 @@ func (h *httpProfile) Start() error {
// Stop the profiler // Stop the profiler
func (h *httpProfile) Stop() error { func (h *httpProfile) Stop() error {
h.Lock() h.mu.Lock()
defer h.Unlock() defer h.mu.Unlock()
if !h.running { if !h.running {
return nil return nil

View File

@@ -17,7 +17,7 @@ type profiler struct {
cpuFile *os.File cpuFile *os.File
memFile *os.File memFile *os.File
opts profile.Options opts profile.Options
sync.Mutex mu sync.Mutex
running bool running bool
} }
@@ -39,8 +39,8 @@ func (p *profiler) writeHeap(f *os.File) {
} }
func (p *profiler) Start() error { func (p *profiler) Start() error {
p.Lock() p.mu.Lock()
defer p.Unlock() defer p.mu.Unlock()
if p.running { if p.running {
return nil return nil
@@ -86,8 +86,8 @@ func (p *profiler) Start() error {
} }
func (p *profiler) Stop() error { func (p *profiler) Stop() error {
p.Lock() p.mu.Lock()
defer p.Unlock() defer p.mu.Unlock()
select { select {
case <-p.exit: case <-p.exit:

View File

@@ -33,7 +33,7 @@ type memory struct {
records map[string]services records map[string]services
watchers map[string]*watcher watchers map[string]*watcher
opts register.Options opts register.Options
sync.RWMutex mu sync.RWMutex
} }
// services is a KV map with service name as the key and a map of records as the value // services is a KV map with service name as the key and a map of records as the value
@@ -57,7 +57,7 @@ func (m *memory) ttlPrune() {
defer prune.Stop() defer prune.Stop()
for range prune.C { for range prune.C {
m.Lock() m.mu.Lock()
for namespace, services := range m.records { for namespace, services := range m.records {
for service, versions := range services { for service, versions := range services {
for version, record := range versions { for version, record := range versions {
@@ -72,24 +72,24 @@ func (m *memory) ttlPrune() {
} }
} }
} }
m.Unlock() m.mu.Unlock()
} }
} }
func (m *memory) sendEvent(r *register.Result) { func (m *memory) sendEvent(r *register.Result) {
m.RLock() m.mu.RLock()
watchers := make([]*watcher, 0, len(m.watchers)) watchers := make([]*watcher, 0, len(m.watchers))
for _, w := range m.watchers { for _, w := range m.watchers {
watchers = append(watchers, w) watchers = append(watchers, w)
} }
m.RUnlock() m.mu.RUnlock()
for _, w := range watchers { for _, w := range watchers {
select { select {
case <-w.exit: case <-w.exit:
m.Lock() m.mu.Lock()
delete(m.watchers, w.id) delete(m.watchers, w.id)
m.Unlock() m.mu.Unlock()
default: default:
select { select {
case w.res <- r: case w.res <- r:
@@ -113,8 +113,8 @@ func (m *memory) Init(opts ...register.Option) error {
} }
// add services // add services
m.Lock() m.mu.Lock()
defer m.Unlock() defer m.mu.Unlock()
return nil return nil
} }
@@ -124,8 +124,8 @@ func (m *memory) Options() register.Options {
} }
func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error { func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error {
m.Lock() m.mu.Lock()
defer m.Unlock() defer m.mu.Unlock()
options := register.NewRegisterOptions(opts...) options := register.NewRegisterOptions(opts...)
@@ -197,8 +197,8 @@ func (m *memory) Register(_ context.Context, s *register.Service, opts ...regist
} }
func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error { func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error {
m.Lock() m.mu.Lock()
defer m.Unlock() defer m.mu.Unlock()
options := register.NewDeregisterOptions(opts...) options := register.NewDeregisterOptions(opts...)
@@ -264,9 +264,9 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
// if it's a wildcard domain, return from all domains // if it's a wildcard domain, return from all domains
if options.Namespace == register.WildcardNamespace { if options.Namespace == register.WildcardNamespace {
m.RLock() m.mu.RLock()
recs := m.records recs := m.records
m.RUnlock() m.mu.RUnlock()
var services []*register.Service var services []*register.Service
@@ -286,8 +286,8 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
return services, nil return services, nil
} }
m.RLock() m.mu.RLock()
defer m.RUnlock() defer m.mu.RUnlock()
// check the domain exists // check the domain exists
services, ok := m.records[options.Namespace] services, ok := m.records[options.Namespace]
@@ -319,9 +319,9 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
// if it's a wildcard domain, list from all domains // if it's a wildcard domain, list from all domains
if options.Namespace == register.WildcardNamespace { if options.Namespace == register.WildcardNamespace {
m.RLock() m.mu.RLock()
recs := m.records recs := m.records
m.RUnlock() m.mu.RUnlock()
var services []*register.Service var services []*register.Service
@@ -336,8 +336,8 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
return services, nil return services, nil
} }
m.RLock() m.mu.RLock()
defer m.RUnlock() defer m.mu.RUnlock()
// ensure the domain exists // ensure the domain exists
services, ok := m.records[options.Namespace] services, ok := m.records[options.Namespace]
@@ -371,9 +371,9 @@ func (m *memory) Watch(ctx context.Context, opts ...register.WatchOption) (regis
wo: wo, wo: wo,
} }
m.Lock() m.mu.Lock()
m.watchers[w.id] = w m.watchers[w.id] = w
m.Unlock() m.mu.Unlock()
return w, nil return w, nil
} }

View File

@@ -51,13 +51,13 @@ func (r *rpcHandler) Options() HandlerOptions {
} }
type noopServer struct { type noopServer struct {
h Handler h Handler
wg *sync.WaitGroup wg *sync.WaitGroup
rsvc *register.Service rsvc *register.Service
handlers map[string]Handler handlers map[string]Handler
exit chan chan error exit chan chan error
opts Options opts Options
sync.RWMutex mu sync.RWMutex
registered bool registered bool
started bool started bool
} }
@@ -125,10 +125,10 @@ func (n *noopServer) String() string {
//nolint:gocyclo //nolint:gocyclo
func (n *noopServer) Register() error { func (n *noopServer) Register() error {
n.RLock() n.mu.RLock()
rsvc := n.rsvc rsvc := n.rsvc
config := n.opts config := n.opts
n.RUnlock() n.mu.RUnlock()
// if service already filled, reuse it and return early // if service already filled, reuse it and return early
if rsvc != nil { if rsvc != nil {
@@ -144,9 +144,9 @@ func (n *noopServer) Register() error {
return err return err
} }
n.RLock() n.mu.RLock()
registered := n.registered registered := n.registered
n.RUnlock() n.mu.RUnlock()
if !registered { if !registered {
if config.Logger.V(logger.InfoLevel) { if config.Logger.V(logger.InfoLevel) {
@@ -164,8 +164,8 @@ func (n *noopServer) Register() error {
return nil return nil
} }
n.Lock() n.mu.Lock()
defer n.Unlock() defer n.mu.Unlock()
n.registered = true n.registered = true
if cacheService { if cacheService {
@@ -178,9 +178,9 @@ func (n *noopServer) Register() error {
func (n *noopServer) Deregister() error { func (n *noopServer) Deregister() error {
var err error var err error
n.RLock() n.mu.RLock()
config := n.opts config := n.opts
n.RUnlock() n.mu.RUnlock()
service, err := NewRegisterService(n) service, err := NewRegisterService(n)
if err != nil { if err != nil {
@@ -195,29 +195,29 @@ func (n *noopServer) Deregister() error {
return err return err
} }
n.Lock() n.mu.Lock()
n.rsvc = nil n.rsvc = nil
if !n.registered { if !n.registered {
n.Unlock() n.mu.Unlock()
return nil return nil
} }
n.registered = false n.registered = false
n.Unlock() n.mu.Unlock()
return nil return nil
} }
//nolint:gocyclo //nolint:gocyclo
func (n *noopServer) Start() error { func (n *noopServer) Start() error {
n.RLock() n.mu.RLock()
if n.started { if n.started {
n.RUnlock() n.mu.RUnlock()
return nil return nil
} }
config := n.Options() config := n.Options()
n.RUnlock() n.mu.RUnlock()
// use 127.0.0.1 to avoid scan of all network interfaces // use 127.0.0.1 to avoid scan of all network interfaces
addr, err := maddr.Extract("127.0.0.1") addr, err := maddr.Extract("127.0.0.1")
@@ -235,11 +235,11 @@ func (n *noopServer) Start() error {
config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address) config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address)
} }
n.Lock() n.mu.Lock()
if len(config.Advertise) == 0 { if len(config.Advertise) == 0 {
config.Advertise = config.Address config.Advertise = config.Address
} }
n.Unlock() n.mu.Unlock()
// use RegisterCheck func before register // use RegisterCheck func before register
// nolint: nestif // nolint: nestif
@@ -273,9 +273,9 @@ func (n *noopServer) Start() error {
select { select {
// register self on interval // register self on interval
case <-t.C: case <-t.C:
n.RLock() n.mu.RLock()
registered := n.registered registered := n.registered
n.RUnlock() n.mu.RUnlock()
rerr := config.RegisterCheck(config.Context) rerr := config.RegisterCheck(config.Context)
// nolint: nestif // nolint: nestif
if rerr != nil && registered { if rerr != nil && registered {
@@ -332,29 +332,29 @@ func (n *noopServer) Start() error {
}() }()
// mark the server as started // mark the server as started
n.Lock() n.mu.Lock()
n.started = true n.started = true
n.Unlock() n.mu.Unlock()
return nil return nil
} }
func (n *noopServer) Stop() error { func (n *noopServer) Stop() error {
n.RLock() n.mu.RLock()
if !n.started { if !n.started {
n.RUnlock() n.mu.RUnlock()
return nil return nil
} }
n.RUnlock() n.mu.RUnlock()
ch := make(chan error) ch := make(chan error)
n.exit <- ch n.exit <- ch
err := <-ch err := <-ch
n.Lock() n.mu.Lock()
n.rsvc = nil n.rsvc = nil
n.started = false n.started = false
n.Unlock() n.mu.Unlock()
return err return err
} }

View File

@@ -96,9 +96,9 @@ func RegisterHandler(s server.Server, h interface{}, opts ...server.HandlerOptio
} }
type service struct { type service struct {
done chan struct{} done chan struct{}
opts Options opts Options
sync.RWMutex mu sync.RWMutex
stopped bool stopped bool
} }
@@ -321,9 +321,9 @@ func (s *service) Health() bool {
func (s *service) Start() error { func (s *service) Start() error {
var err error var err error
s.RLock() s.mu.RLock()
config := s.opts config := s.opts
s.RUnlock() s.mu.RUnlock()
for _, cfg := range s.opts.Configs { for _, cfg := range s.opts.Configs {
if cfg.Options().Struct == nil { if cfg.Options().Struct == nil {
@@ -380,9 +380,9 @@ func (s *service) Start() error {
} }
func (s *service) Stop() error { func (s *service) Stop() error {
s.RLock() s.mu.RLock()
config := s.opts config := s.opts
s.RUnlock() s.mu.RUnlock()
if config.Loggers[0].V(logger.InfoLevel) { if config.Loggers[0].V(logger.InfoLevel) {
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name())) config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name()))
@@ -457,13 +457,13 @@ func (s *service) Run() error {
// notifyShutdown marks the service as stopped and closes the done channel. // notifyShutdown marks the service as stopped and closes the done channel.
// It ensures the channel is closed only once, preventing multiple closures. // It ensures the channel is closed only once, preventing multiple closures.
func (s *service) notifyShutdown() { func (s *service) notifyShutdown() {
s.Lock() s.mu.Lock()
if s.stopped { if s.stopped {
s.Unlock() s.mu.Unlock()
return return
} }
s.stopped = true s.stopped = true
s.Unlock() s.mu.Unlock()
close(s.done) close(s.done)
} }

View File

@@ -139,7 +139,7 @@ func (n *noopStore) fnExists(ctx context.Context, _ string, _ ...ExistsOption) e
return ctx.Err() return ctx.Err()
default: default:
} }
return nil return ErrNotFound
} }
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error { func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {

View File

@@ -2,6 +2,7 @@ package store
import ( import (
"context" "context"
"errors"
"testing" "testing"
) )
@@ -25,7 +26,8 @@ func TestHook(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err := s.Exists(context.TODO(), "test"); err != nil { err := s.Exists(context.TODO(), "test")
if !errors.Is(err, ErrNotFound) {
t.Fatal(err) t.Fatal(err)
} }
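
The noop store's Exists now reports ErrNotFound for a missing key instead of returning nil, as the test above reflects. A hedged caller-side sketch, assuming the store package exposes a Store interface and ErrNotFound as used in the diff:

package main

import (
	"context"
	"errors"

	"go.unistack.org/micro/v4/store"
)

// keyExists converts the new Exists semantics into a boolean: a missing key is
// reported via store.ErrNotFound rather than treated as an operational failure.
func keyExists(ctx context.Context, s store.Store, key string) (bool, error) {
	err := s.Exists(ctx, key)
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, store.ErrNotFound):
		return false, nil
	default:
		return false, err
	}
}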

View File

@@ -9,7 +9,7 @@ type memorySync struct {
locks map[string]*memoryLock locks map[string]*memoryLock
options Options options Options
mtx gosync.RWMutex mu gosync.RWMutex
} }
type memoryLock struct { type memoryLock struct {
@@ -74,7 +74,7 @@ func (m *memorySync) Options() Options {
func (m *memorySync) Lock(id string, opts ...LockOption) error { func (m *memorySync) Lock(id string, opts ...LockOption) error {
// lock our access // lock our access
m.mtx.Lock() m.mu.Lock()
var options LockOptions var options LockOptions
for _, o := range opts { for _, o := range opts {
@@ -90,11 +90,11 @@ func (m *memorySync) Lock(id string, opts ...LockOption) error {
release: make(chan bool), release: make(chan bool),
} }
// unlock // unlock
m.mtx.Unlock() m.mu.Unlock()
return nil return nil
} }
m.mtx.Unlock() m.mu.Unlock()
// set wait time // set wait time
var wait <-chan time.Time var wait <-chan time.Time
@@ -124,12 +124,12 @@ lockLoop:
// wait for the lock to be released // wait for the lock to be released
select { select {
case <-lk.release: case <-lk.release:
m.mtx.Lock() m.mu.Lock()
// someone locked before us // someone locked before us
lk, ok = m.locks[id] lk, ok = m.locks[id]
if ok { if ok {
m.mtx.Unlock() m.mu.Unlock()
continue continue
} }
@@ -141,7 +141,7 @@ lockLoop:
release: make(chan bool), release: make(chan bool),
} }
m.mtx.Unlock() m.mu.Unlock()
break lockLoop break lockLoop
case <-ttl: case <-ttl:
@@ -160,8 +160,8 @@ lockLoop:
} }
func (m *memorySync) Unlock(id string) error { func (m *memorySync) Unlock(id string) error {
m.mtx.Lock() m.mu.Lock()
defer m.mtx.Unlock() defer m.mu.Unlock()
lk, ok := m.locks[id] lk, ok := m.locks[id]
// no lock exists // no lock exists

View File

@@ -46,6 +46,10 @@ func (s memoryStringer) String() string {
return s.s return s.s
} }
func (t *Tracer) Enabled() bool {
return t.opts.Enabled
}
func (t *Tracer) Flush(_ context.Context) error { func (t *Tracer) Flush(_ context.Context) error {
return nil return nil
} }

View File

@@ -20,6 +20,10 @@ func (t *noopTracer) Spans() []Span {
var uuidNil = uuid.Nil.String() var uuidNil = uuid.Nil.String()
func (t *noopTracer) Enabled() bool {
return t.opts.Enabled
}
func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
options := NewSpanOptions(opts...) options := NewSpanOptions(opts...)
span := &noopSpan{ span := &noopSpan{

View File

@@ -142,6 +142,8 @@ type Options struct {
	Name string
	// ContextAttrFuncs contains funcs that provides tracing
	ContextAttrFuncs []ContextAttrFunc
+	// Enabled specify trace status
+	Enabled bool
}

// Option func signature
@@ -181,6 +183,7 @@ func NewOptions(opts ...Option) Options {
		Logger:           logger.DefaultLogger,
		Context:          context.Background(),
		ContextAttrFuncs: DefaultContextAttrFuncs,
+		Enabled:          true,
	}
	for _, o := range opts {
		o(&options)
@@ -194,3 +197,10 @@ func Name(n string) Option {
		o.Name = n
	}
}
+
+// Disabled disable tracer
+func Disabled(b bool) Option {
+	return func(o *Options) {
+		o.Enabled = !b
+	}
+}
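
Note: a quick sketch of how the new option composes with the existing constructor; only NewOptions, Name, Disabled and the Enabled field come from this diff, the rest is illustrative:

// Build tracer options with tracing switched off. NewOptions defaults
// Enabled to true; Disabled(true) flips it to false.
opts := NewOptions(
	Name("example-tracer"),
	Disabled(true),
)

if !opts.Enabled {
	// callers can skip span bookkeeping entirely when tracing is off
}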

View File

@@ -29,10 +29,10 @@ type ContextAttrFunc func(ctx context.Context) []interface{}
func init() {
	logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs,
		func(ctx context.Context) []interface{} {
-			if span, ok := SpanFromContext(ctx); ok {
+			if sp, ok := SpanFromContext(ctx); ok && sp != nil && sp.IsRecording() {
				return []interface{}{
-					TraceIDKey, span.TraceID(),
-					SpanIDKey, span.SpanID(),
+					TraceIDKey, sp.TraceID(),
+					SpanIDKey, sp.SpanID(),
				}
			}
			return nil
@@ -51,6 +51,8 @@ type Tracer interface {
	// Extract(ctx context.Context)
	// Flush flushes spans
	Flush(ctx context.Context) error
+	// Enabled returns tracer status
+	Enabled() bool
}

type Span interface {
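
Note: the init hook above now emits trace/span IDs only when a span is present and recording; the same guard can be reused for any additional attribute func registered on logger.DefaultContextAttrFuncs. A small sketch using only identifiers that appear in this diff:

// extraAttrs mirrors the guarded init() hook: it returns trace metadata
// only for contexts that carry a recording span, so untraced requests
// produce no extra log fields.
func extraAttrs(ctx context.Context) []interface{} {
	sp, ok := SpanFromContext(ctx)
	if !ok || sp == nil || !sp.IsRecording() {
		return nil
	}
	return []interface{}{
		TraceIDKey, sp.TraceID(),
		SpanIDKey, sp.SpanID(),
	}
}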

View File

@@ -1,13 +1,16 @@
package buffer

-import "io"
+import (
+	"fmt"
+	"io"
+)

var _ interface {
	io.ReadCloser
	io.ReadSeeker
} = (*SeekerBuffer)(nil)

-// Buffer is a ReadWriteCloser that supports seeking. It's intended to
+// SeekerBuffer is a ReadWriteCloser that supports seeking. It's intended to
// replicate the functionality of bytes.Buffer that I use in my projects.
//
// Note that the seeking is limited to the read marker; all writes are
@@ -23,6 +26,7 @@ func NewSeekerBuffer(data []byte) *SeekerBuffer {
	}
}

+// Read reads up to len(p) bytes into p from the current read position.
func (b *SeekerBuffer) Read(p []byte) (int, error) {
	if b.pos >= int64(len(b.data)) {
		return 0, io.EOF
@@ -30,29 +34,51 @@ func (b *SeekerBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.data[b.pos:])
	b.pos += int64(n)
	return n, nil
}

+// Write appends the contents of p to the end of the buffer. It does not affect the read position.
func (b *SeekerBuffer) Write(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
	b.data = append(b.data, p...)
	return len(p), nil
}

-// Seek sets the read pointer to pos.
+// Seek sets the offset for the next Read operation.
+// The offset is interpreted according to whence:
+// - io.SeekStart: relative to the beginning of the buffer
+// - io.SeekCurrent: relative to the current position
+// - io.SeekEnd: relative to the end of the buffer
+//
+// Returns an error if the resulting position is negative or if whence is invalid.
func (b *SeekerBuffer) Seek(offset int64, whence int) (int64, error) {
+	var newPos int64
	switch whence {
	case io.SeekStart:
-		b.pos = offset
+		newPos = offset
	case io.SeekEnd:
-		b.pos = int64(len(b.data)) + offset
+		newPos = int64(len(b.data)) + offset
	case io.SeekCurrent:
-		b.pos += offset
+		newPos = b.pos + offset
+	default:
+		return 0, fmt.Errorf("invalid whence: %d", whence)
	}
+	if newPos < 0 {
+		return 0, fmt.Errorf("invalid seek: resulting position %d is negative", newPos)
+	}
+	b.pos = newPos
	return b.pos, nil
}

-// Rewind resets the read pointer to 0.
+// Rewind resets the read position to 0.
func (b *SeekerBuffer) Rewind() error {
	if _, err := b.Seek(0, io.SeekStart); err != nil {
		return err
@@ -67,12 +93,24 @@ func (b *SeekerBuffer) Close() error {
	return nil
}

+// Reset clears all the data out of the buffer and sets the read position to 0.
+func (b *SeekerBuffer) Reset() {
+	b.data = nil
+	b.pos = 0
+}
+
// Len returns the length of data remaining to be read.
func (b *SeekerBuffer) Len() int {
+	if b.pos >= int64(len(b.data)) {
+		return 0
+	}
	return len(b.data[b.pos:])
}

// Bytes returns the underlying bytes from the current position.
func (b *SeekerBuffer) Bytes() []byte {
+	if b.pos >= int64(len(b.data)) {
+		return []byte{}
+	}
	return b.data[b.pos:]
}
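
Note: a short usage sketch of the documented semantics (standalone program; the buffer import path is an assumption, the methods are the ones shown above):

package main

import (
	"fmt"
	"io"

	"go.unistack.org/micro/v4/util/buffer" // import path assumed
)

func main() {
	buf := buffer.NewSeekerBuffer([]byte("hello, "))
	_, _ = buf.Write([]byte("world!")) // writes always append; the read position is untouched

	p := make([]byte, 5)
	n, _ := buf.Read(p)
	fmt.Println(string(p[:n])) // "hello"

	// Seeking is limited to the read marker: step back two bytes from here.
	if _, err := buf.Seek(-2, io.SeekCurrent); err != nil {
		fmt.Println("seek failed:", err)
	}

	_ = buf.Rewind()                 // read position back to 0
	fmt.Println(buf.Len())           // 13: the full "hello, world!" is readable again
	fmt.Println(string(buf.Bytes())) // "hello, world!"
}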

View File

@@ -2,54 +2,384 @@ package buffer
import (
	"fmt"
-	"strings"
+	"io"
	"testing"
+
+	"github.com/stretchr/testify/require"
)

-func noErrorT(t *testing.T, err error) {
-	if nil != err {
-		t.Fatalf("%s", err)
-	}
-}
-
-func boolT(t *testing.T, cond bool, s ...string) {
-	if !cond {
-		what := strings.Join(s, ", ")
-		if len(what) > 0 {
-			what = ": " + what
-		}
-		t.Fatalf("assert.Bool failed%s", what)
-	}
-}
-
-func TestSeeking(t *testing.T) {
-	partA := []byte("hello, ")
-	partB := []byte("world!")
-
-	buf := NewSeekerBuffer(partA)
-	boolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
-
-	b := make([]byte, 32)
-
-	n, err := buf.Read(b)
-	noErrorT(t, err)
-	boolT(t, buf.Len() == 0, fmt.Sprintf("after reading 1: have length %d, want length 0", buf.Len()))
-	boolT(t, n == len(partA), fmt.Sprintf("after reading 2: have length %d, want length %d", n, len(partA)))
-
-	n, err = buf.Write(partB)
-	noErrorT(t, err)
-	boolT(t, n == len(partB), fmt.Sprintf("after writing: have length %d, want length %d", n, len(partB)))
-
-	n, err = buf.Read(b)
-	noErrorT(t, err)
-	boolT(t, buf.Len() == 0, fmt.Sprintf("after rereading 1: have length %d, want length 0", buf.Len()))
-	boolT(t, n == len(partB), fmt.Sprintf("after rereading 2: have length %d, want length %d", n, len(partB)))
-
-	partsLen := len(partA) + len(partB)
-
-	_ = buf.Rewind()
-	boolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
-
-	buf.Close()
-	boolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))
-}
+func TestNewSeekerBuffer(t *testing.T) {
+	input := []byte{'a', 'b', 'c', 'd', 'e'}
+	expected := &SeekerBuffer{data: []byte{'a', 'b', 'c', 'd', 'e'}, pos: 0}
+	require.Equal(t, expected, NewSeekerBuffer(input))
+}
+
+func TestSeekerBuffer_Read(t *testing.T) {
+	tests := []struct {
+		name         string
+		data         []byte
+		initPos      int64
+		readBuf      []byte
+		expectedN    int
+		expectedData []byte
+		expectedErr  error
+		expectedPos  int64
+	}{
+		{
+			name: "read with empty buffer",
+			data: []byte("hello"),
+			initPos: 0,
+			readBuf: []byte{},
+			expectedN: 0,
+			expectedData: []byte{},
+			expectedErr: nil,
+			expectedPos: 0,
+		},
+		{
+			name: "read with nil buffer",
+			data: []byte("hello"),
+			initPos: 0,
+			readBuf: nil,
+			expectedN: 0,
+			expectedData: nil,
+			expectedErr: nil,
+			expectedPos: 0,
+		},
+		{
+			name: "read full buffer",
+			data: []byte("hello"),
+			initPos: 0,
+			readBuf: make([]byte, 5),
+			expectedN: 5,
+			expectedData: []byte("hello"),
+			expectedErr: nil,
+			expectedPos: 5,
+		},
+		{
+			name: "read partial buffer",
+			data: []byte("hello"),
+			initPos: 2,
+			readBuf: make([]byte, 2),
+			expectedN: 2,
+			expectedData: []byte("ll"),
+			expectedErr: nil,
+			expectedPos: 4,
+		},
+		{
+			name: "read after end",
+			data: []byte("hello"),
+			initPos: 5,
+			readBuf: make([]byte, 5),
+			expectedN: 0,
+			expectedData: make([]byte, 5),
+			expectedErr: io.EOF,
+			expectedPos: 5,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sb := NewSeekerBuffer(tt.data)
+			sb.pos = tt.initPos
+			n, err := sb.Read(tt.readBuf)
+			if tt.expectedErr != nil {
+				require.Equal(t, err, tt.expectedErr)
+			} else {
+				require.NoError(t, err)
+			}
+			require.Equal(t, tt.expectedN, n)
+			require.Equal(t, tt.expectedData, tt.readBuf)
+			require.Equal(t, tt.expectedPos, sb.pos)
+		})
+	}
+}
+
+func TestSeekerBuffer_Write(t *testing.T) {
+	tests := []struct {
+		name         string
+		initialData  []byte
+		initialPos   int64
+		writeData    []byte
+		expectedData []byte
+		expectedN    int
+	}{
+		{
+			name: "write empty slice",
+			initialData: []byte("data"),
+			initialPos: 0,
+			writeData: []byte{},
+			expectedData: []byte("data"),
+			expectedN: 0,
+		},
+		{
+			name: "write nil slice",
+			initialData: []byte("data"),
+			initialPos: 0,
+			writeData: nil,
+			expectedData: []byte("data"),
+			expectedN: 0,
+		},
+		{
+			name: "write to empty buffer",
+			initialData: nil,
+			initialPos: 0,
+			writeData: []byte("abc"),
+			expectedData: []byte("abc"),
+			expectedN: 3,
+		},
+		{
+			name: "write to existing buffer",
+			initialData: []byte("hello"),
+			initialPos: 0,
+			writeData: []byte(" world"),
+			expectedData: []byte("hello world"),
+			expectedN: 6,
+		},
+		{
+			name: "write after read",
+			initialData: []byte("abc"),
+			initialPos: 2,
+			writeData: []byte("XYZ"),
+			expectedData: []byte("abcXYZ"),
+			expectedN: 3,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sb := NewSeekerBuffer(tt.initialData)
+			sb.pos = tt.initialPos
+			n, err := sb.Write(tt.writeData)
+			require.NoError(t, err)
+			require.Equal(t, tt.expectedN, n)
+			require.Equal(t, tt.expectedData, sb.data)
+			require.Equal(t, tt.initialPos, sb.pos)
+		})
+	}
+}
+
+func TestSeekerBuffer_Seek(t *testing.T) {
+	tests := []struct {
+		name        string
+		initialData []byte
+		initialPos  int64
+		offset      int64
+		whence      int
+		expectedPos int64
+		expectedErr error
+	}{
+		{
+			name: "seek with invalid whence",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: 1,
+			whence: 12345,
+			expectedPos: 0,
+			expectedErr: fmt.Errorf("invalid whence: %d", 12345),
+		},
+		{
+			name: "seek negative from start",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: -1,
+			whence: io.SeekStart,
+			expectedPos: 0,
+			expectedErr: fmt.Errorf("invalid seek: resulting position %d is negative", -1),
+		},
+		{
+			name: "seek from start to 0",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: 0,
+			whence: io.SeekStart,
+			expectedPos: 0,
+			expectedErr: nil,
+		},
+		{
+			name: "seek from start to 3",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: 3,
+			whence: io.SeekStart,
+			expectedPos: 3,
+			expectedErr: nil,
+		},
+		{
+			name: "seek from end to -1 (last byte)",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: -1,
+			whence: io.SeekEnd,
+			expectedPos: 5,
+			expectedErr: nil,
+		},
+		{
+			name: "seek from current forward",
+			initialData: []byte("abcdef"),
+			initialPos: 2,
+			offset: 2,
+			whence: io.SeekCurrent,
+			expectedPos: 4,
+			expectedErr: nil,
+		},
+		{
+			name: "seek from current backward",
+			initialData: []byte("abcdef"),
+			initialPos: 4,
+			offset: -2,
+			whence: io.SeekCurrent,
+			expectedPos: 2,
+			expectedErr: nil,
+		},
+		{
+			name: "seek to end exactly",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: 0,
+			whence: io.SeekEnd,
+			expectedPos: 6,
+			expectedErr: nil,
+		},
+		{
+			name: "seek to out of range",
+			initialData: []byte("abcdef"),
+			initialPos: 0,
+			offset: 2,
+			whence: io.SeekEnd,
+			expectedPos: 8,
+			expectedErr: nil,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sb := NewSeekerBuffer(tt.initialData)
+			sb.pos = tt.initialPos
+			newPos, err := sb.Seek(tt.offset, tt.whence)
+			if tt.expectedErr != nil {
+				require.Equal(t, tt.expectedErr, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tt.expectedPos, newPos)
+				require.Equal(t, tt.expectedPos, sb.pos)
+			}
+		})
+	}
+}
+
+func TestSeekerBuffer_Rewind(t *testing.T) {
+	buf := NewSeekerBuffer([]byte("hello world"))
+	buf.pos = 4
+
+	require.NoError(t, buf.Rewind())
+	require.Equal(t, []byte("hello world"), buf.data)
+	require.Equal(t, int64(0), buf.pos)
+}
+
+func TestSeekerBuffer_Close(t *testing.T) {
+	buf := NewSeekerBuffer([]byte("hello world"))
+	buf.pos = 2
+
+	require.NoError(t, buf.Close())
+	require.Nil(t, buf.data)
+	require.Equal(t, int64(0), buf.pos)
+}
+
+func TestSeekerBuffer_Reset(t *testing.T) {
+	buf := NewSeekerBuffer([]byte("hello world"))
+	buf.pos = 2
+
+	buf.Reset()
+	require.Nil(t, buf.data)
+	require.Equal(t, int64(0), buf.pos)
+}
+
+func TestSeekerBuffer_Len(t *testing.T) {
+	tests := []struct {
+		name     string
+		data     []byte
+		pos      int64
+		expected int
+	}{
+		{
+			name: "full buffer",
+			data: []byte("abcde"),
+			pos: 0,
+			expected: 5,
+		},
+		{
+			name: "partial read",
+			data: []byte("abcde"),
+			pos: 2,
+			expected: 3,
+		},
+		{
+			name: "fully read",
+			data: []byte("abcde"),
+			pos: 5,
+			expected: 0,
+		},
+		{
+			name: "pos > len",
+			data: []byte("abcde"),
+			pos: 10,
+			expected: 0,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			buf := NewSeekerBuffer(tt.data)
+			buf.pos = tt.pos
+			require.Equal(t, tt.expected, buf.Len())
+		})
+	}
+}
+
+func TestSeekerBuffer_Bytes(t *testing.T) {
+	tests := []struct {
+		name     string
+		data     []byte
+		pos      int64
+		expected []byte
+	}{
+		{
+			name: "start of buffer",
+			data: []byte("abcde"),
+			pos: 0,
+			expected: []byte("abcde"),
+		},
+		{
+			name: "middle of buffer",
+			data: []byte("abcde"),
+			pos: 2,
+			expected: []byte("cde"),
+		},
+		{
+			name: "end of buffer",
+			data: []byte("abcde"),
+			pos: 5,
+			expected: []byte{},
+		},
+		{
+			name: "pos beyond end",
+			data: []byte("abcde"),
+			pos: 10,
+			expected: []byte{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			buf := NewSeekerBuffer(tt.data)
+			buf.pos = tt.pos
+			require.Equal(t, tt.expected, buf.Bytes())
+		})
+	}
+}

View File

@@ -137,7 +137,7 @@ type cache struct {
	opts Options
-	sync.RWMutex
+	mu sync.RWMutex
}

type cacheEntry struct {
@@ -171,7 +171,7 @@ func (c *cache) put(req string, res string) {
		ttl = c.opts.MaxCacheTTL
	}
-	c.Lock()
+	c.mu.Lock()
	if c.entries == nil {
		c.entries = make(map[string]cacheEntry)
	}
@@ -207,7 +207,7 @@ func (c *cache) put(req string, res string) {
	}
	c.opts.Meter.Counter(semconv.CacheItemsTotal, "type", "dns").Inc()
-	c.Unlock()
+	c.mu.Unlock()
}

func (c *cache) get(req string) (res string) {
@@ -219,8 +219,8 @@ func (c *cache) get(req string) (res string) {
		return ""
	}
-	c.RLock()
-	defer c.RUnlock()
+	c.mu.RLock()
+	defer c.mu.RUnlock()
	if c.entries == nil {
		return ""

View File

@@ -20,7 +20,7 @@ type dnsConn struct {
	ibuf bytes.Buffer
	obuf bytes.Buffer
-	sync.Mutex
+	mu sync.Mutex
}

type roundTripper func(ctx context.Context, req string) (res string, err error)
@@ -42,15 +42,15 @@ func (c *dnsConn) Read(b []byte) (n int, err error) {
}

func (c *dnsConn) Write(b []byte) (n int, err error) {
-	c.Lock()
-	defer c.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
	return c.ibuf.Write(b)
}

func (c *dnsConn) Close() error {
-	c.Lock()
+	c.mu.Lock()
	cancel := c.cancel
-	c.Unlock()
+	c.mu.Unlock()
	if cancel != nil {
		cancel()
@@ -78,9 +78,9 @@ func (c *dnsConn) SetDeadline(t time.Time) error {
}

func (c *dnsConn) SetReadDeadline(t time.Time) error {
-	c.Lock()
+	c.mu.Lock()
	c.deadline = t
-	c.Unlock()
+	c.mu.Unlock()
	return nil
}
@@ -90,8 +90,8 @@ func (c *dnsConn) SetWriteDeadline(_ time.Time) error {
}

func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
-	c.Lock()
-	defer c.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
	// drain the output buffer
	if c.obuf.Len() > 0 {
@@ -119,8 +119,8 @@ func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
}

func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
-	c.Lock()
-	defer c.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
	c.obuf.WriteByte(byte(len(str) >> 8))
	c.obuf.WriteByte(byte(len(str)))
	c.obuf.WriteString(str)
@@ -128,8 +128,8 @@ func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
}

func (c *dnsConn) childContext() (context.Context, context.CancelFunc) {
-	c.Lock()
-	defer c.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
	if c.ctx == nil {
		c.ctx, c.cancel = context.WithCancel(context.Background())
	}

View File

@@ -52,7 +52,7 @@ type clientTracer struct {
	tr tracer.Tracer
	activeHooks map[string]context.Context
	root tracer.Span
-	mtx sync.Mutex
+	mu sync.Mutex
}

func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace {
@@ -83,8 +83,8 @@ func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrac
}

func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
-	ct.mtx.Lock()
-	defer ct.mtx.Unlock()
+	ct.mu.Lock()
+	defer ct.mu.Unlock()
	if hookCtx, found := ct.activeHooks[hook]; !found {
		var sp tracer.Span
@@ -104,8 +104,8 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
}

func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) {
-	ct.mtx.Lock()
-	defer ct.mtx.Unlock()
+	ct.mu.Lock()
+	defer ct.mu.Unlock()
	if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif
		if span, ok := tracer.SpanFromContext(ctx); ok {
			if err != nil {
@@ -136,8 +136,8 @@ func (ct *clientTracer) getParentContext(hook string) context.Context {
}

func (ct *clientTracer) span(hook string) (tracer.Span, bool) {
-	ct.mtx.Lock()
-	defer ct.mtx.Unlock()
+	ct.mu.Lock()
+	defer ct.mu.Unlock()
	if ctx, ok := ct.activeHooks[hook]; ok {
		return tracer.SpanFromContext(ctx)
	}
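
Note: NewClientTrace plugs into the standard net/http/httptrace hooks; a minimal sketch of attaching it to an outgoing request (both module import paths below are assumptions about where these packages live):

package example

import (
	"context"
	"net/http"
	"net/http/httptrace"

	"go.unistack.org/micro/v4/tracer"           // import path assumed
	htrace "go.unistack.org/micro/v4/util/http" // location of NewClientTrace assumed
)

// tracedGet issues a GET with per-hook client spans recorded via tr.
func tracedGet(ctx context.Context, tr tracer.Tracer, url string) (*http.Response, error) {
	// Each httptrace callback (DNS, connect, TLS, ...) starts and ends a span.
	ctx = httptrace.WithClientTrace(ctx, htrace.NewClientTrace(ctx, tr))

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}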

View File

@@ -14,7 +14,7 @@ type Buffer struct {
	vals []*Entry
	size int
-	sync.RWMutex
+	mu sync.RWMutex
}

// Entry is ring buffer data entry
@@ -35,8 +35,8 @@ type Stream struct {
// Put adds a new value to ring buffer
func (b *Buffer) Put(v interface{}) {
-	b.Lock()
-	defer b.Unlock()
+	b.mu.Lock()
+	defer b.mu.Unlock()
	// append to values
	entry := &Entry{
@@ -63,8 +63,8 @@ func (b *Buffer) Put(v interface{}) {
// Get returns the last n entries
func (b *Buffer) Get(n int) []*Entry {
-	b.RLock()
-	defer b.RUnlock()
+	b.mu.RLock()
+	defer b.mu.RUnlock()
	// reset any invalid values
	if n > len(b.vals) || n < 0 {
@@ -80,8 +80,8 @@ func (b *Buffer) Get(n int) []*Entry {
// Since returns the entries since a specific time
func (b *Buffer) Since(t time.Time) []*Entry {
-	b.RLock()
-	defer b.RUnlock()
+	b.mu.RLock()
+	defer b.mu.RUnlock()
	// return all the values
	if t.IsZero() {
@@ -109,8 +109,8 @@ func (b *Buffer) Since(t time.Time) []*Entry {
// Stream logs from the buffer
// Close the channel when you want to stop
func (b *Buffer) Stream() (<-chan *Entry, chan bool) {
-	b.Lock()
-	defer b.Unlock()
+	b.mu.Lock()
+	defer b.mu.Unlock()
	entries := make(chan *Entry, 128)
	id := id.MustNew()

View File

@@ -24,7 +24,7 @@ type stream struct {
	err error
	request *request
-	sync.RWMutex
+	mu sync.RWMutex
}

type request struct {
@@ -57,9 +57,9 @@ func (s *stream) Request() server.Request {
func (s *stream) Send(v interface{}) error {
	err := s.Stream.SendMsg(v)
	if err != nil {
-		s.Lock()
+		s.mu.Lock()
		s.err = err
-		s.Unlock()
+		s.mu.Unlock()
	}
	return err
}
@@ -68,17 +68,17 @@ func (s *stream) Send(v interface{}) error {
func (s *stream) Recv(v interface{}) error {
	err := s.Stream.RecvMsg(v)
	if err != nil {
-		s.Lock()
+		s.mu.Lock()
		s.err = err
-		s.Unlock()
+		s.mu.Unlock()
	}
	return err
}

// Error returns error that stream holds
func (s *stream) Error() error {
-	s.RLock()
-	defer s.RUnlock()
+	s.mu.RLock()
+	defer s.mu.RUnlock()
	return s.err
}