Compare commits
	
		
			19 Commits
		
	
	
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| b0cbddcfdd | |||
|  | d0534a7d05 | ||
| ab051405c5 | |||
|  | 268b3dbff4 | ||
| f9d2c14597 | |||
| e6bf914dd9 | |||
| b59f4a16f0 | |||
| 3deb572f72 | |||
| 0e668c0f0f | |||
| 2bac878845 | |||
| 9ee31fb5a6 | |||
| ed5d30a58e | |||
|  | b4b67a8b41 | ||
| 13f90ff716 | |||
| 0f8f12aee0 | |||
| 8b406cf963 | |||
| 029a434a2b | |||
|  | 847259bc39 | ||
| a1ee8728ad | 
| @@ -1,5 +1,5 @@ | ||||
| # Micro | ||||
|  | ||||
|  | ||||
| [](https://opensource.org/licenses/Apache-2.0) | ||||
| [](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview) | ||||
| [](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush) | ||||
|   | ||||
| @@ -41,7 +41,7 @@ type Broker interface { | ||||
| 	// Disconnect disconnect from broker | ||||
| 	Disconnect(ctx context.Context) error | ||||
| 	// NewMessage create new broker message to publish. | ||||
| 	NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) | ||||
| 	NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) | ||||
| 	// Publish message to broker topic | ||||
| 	Publish(ctx context.Context, topic string, messages ...Message) error | ||||
| 	// Subscribe subscribes to topic message via handler | ||||
|   | ||||
| @@ -42,9 +42,9 @@ func SetSubscribeOption(k, v interface{}) SubscribeOption { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // SetPublishOption returns a function to setup a context with given value | ||||
| func SetPublishOption(k, v interface{}) PublishOption { | ||||
| 	return func(o *PublishOptions) { | ||||
| // SetMessageOption returns a function to setup a context with given value | ||||
| func SetMessageOption(k, v interface{}) MessageOption { | ||||
| 	return func(o *MessageOptions) { | ||||
| 		if o.Context == nil { | ||||
| 			o.Context = context.Background() | ||||
| 		} | ||||
|   | ||||
| @@ -22,7 +22,7 @@ type Broker struct { | ||||
| 	subscribers   map[string][]*Subscriber | ||||
| 	addr          string | ||||
| 	opts          broker.Options | ||||
| 	sync.RWMutex | ||||
| 	mu            sync.RWMutex | ||||
| 	connected     bool | ||||
| } | ||||
|  | ||||
| @@ -32,7 +32,7 @@ type memoryMessage struct { | ||||
| 	ctx   context.Context | ||||
| 	body  []byte | ||||
| 	hdr   metadata.Metadata | ||||
| 	opts  broker.PublishOptions | ||||
| 	opts  broker.MessageOptions | ||||
| } | ||||
|  | ||||
| func (m *memoryMessage) Ack() error { | ||||
| @@ -72,9 +72,9 @@ func (b *Broker) newCodec(ct string) (codec.Codec, error) { | ||||
| 	if idx := strings.IndexRune(ct, ';'); idx >= 0 { | ||||
| 		ct = ct[:idx] | ||||
| 	} | ||||
| 	b.RLock() | ||||
| 	b.mu.RLock() | ||||
| 	c, ok := b.opts.Codecs[ct] | ||||
| 	b.RUnlock() | ||||
| 	b.mu.RUnlock() | ||||
| 	if ok { | ||||
| 		return c, nil | ||||
| 	} | ||||
| @@ -96,8 +96,8 @@ func (b *Broker) Connect(ctx context.Context) error { | ||||
| 	default: | ||||
| 	} | ||||
|  | ||||
| 	b.Lock() | ||||
| 	defer b.Unlock() | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
|  | ||||
| 	if b.connected { | ||||
| 		return nil | ||||
| @@ -126,8 +126,8 @@ func (b *Broker) Disconnect(ctx context.Context) error { | ||||
| 	default: | ||||
| 	} | ||||
|  | ||||
| 	b.Lock() | ||||
| 	defer b.Unlock() | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
|  | ||||
| 	if !b.connected { | ||||
| 		return nil | ||||
| @@ -157,8 +157,11 @@ func (b *Broker) Init(opts ...broker.Option) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.PublishOption) (broker.Message, error) { | ||||
| 	options := broker.NewPublishOptions(opts...) | ||||
| func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) { | ||||
| 	options := broker.NewMessageOptions(opts...) | ||||
| 	if options.ContentType == "" { | ||||
| 		options.ContentType = b.opts.ContentType | ||||
| 	} | ||||
| 	m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options} | ||||
| 	c, err := b.newCodec(m.opts.ContentType) | ||||
| 	if err == nil { | ||||
| @@ -180,12 +183,12 @@ func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker | ||||
| } | ||||
|  | ||||
| func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error { | ||||
| 	b.RLock() | ||||
| 	b.mu.RLock() | ||||
| 	if !b.connected { | ||||
| 		b.RUnlock() | ||||
| 		b.mu.RUnlock() | ||||
| 		return broker.ErrNotConnected | ||||
| 	} | ||||
| 	b.RUnlock() | ||||
| 	b.mu.RUnlock() | ||||
|  | ||||
| 	select { | ||||
| 	case <-ctx.Done(): | ||||
| @@ -193,9 +196,9 @@ func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.M | ||||
| 	default: | ||||
| 	} | ||||
|  | ||||
| 	b.RLock() | ||||
| 	b.mu.RLock() | ||||
| 	subs, ok := b.subscribers[topic] | ||||
| 	b.RUnlock() | ||||
| 	b.mu.RUnlock() | ||||
| 	if !ok { | ||||
| 		return nil | ||||
| 	} | ||||
| @@ -252,12 +255,12 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	b.RLock() | ||||
| 	b.mu.RLock() | ||||
| 	if !b.connected { | ||||
| 		b.RUnlock() | ||||
| 		b.mu.RUnlock() | ||||
| 		return nil, broker.ErrNotConnected | ||||
| 	} | ||||
| 	b.RUnlock() | ||||
| 	b.mu.RUnlock() | ||||
|  | ||||
| 	sid, err := id.New() | ||||
| 	if err != nil { | ||||
| @@ -275,13 +278,13 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac | ||||
| 		ctx:     ctx, | ||||
| 	} | ||||
|  | ||||
| 	b.Lock() | ||||
| 	b.mu.Lock() | ||||
| 	b.subscribers[topic] = append(b.subscribers[topic], sub) | ||||
| 	b.Unlock() | ||||
| 	b.mu.Unlock() | ||||
|  | ||||
| 	go func() { | ||||
| 		<-sub.exit | ||||
| 		b.Lock() | ||||
| 		b.mu.Lock() | ||||
| 		newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1) | ||||
| 		for _, sb := range b.subscribers[topic] { | ||||
| 			if sb.id == sub.id { | ||||
| @@ -290,7 +293,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac | ||||
| 			newSubscribers = append(newSubscribers, sb) | ||||
| 		} | ||||
| 		b.subscribers[topic] = newSubscribers | ||||
| 		b.Unlock() | ||||
| 		b.mu.Unlock() | ||||
| 	}() | ||||
|  | ||||
| 	return sub, nil | ||||
|   | ||||
| @@ -49,7 +49,7 @@ func TestMemoryBroker(t *testing.T) { | ||||
| 				"id", fmt.Sprintf("%d", i), | ||||
| 			), | ||||
| 			[]byte(`"hello world"`), | ||||
| 			broker.PublishContentType("application/octet-stream"), | ||||
| 			broker.MessageContentType("application/octet-stream"), | ||||
| 		) | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
|   | ||||
| @@ -14,16 +14,16 @@ type NoopBroker struct { | ||||
| 	funcPublish   FuncPublish | ||||
| 	funcSubscribe FuncSubscribe | ||||
| 	opts          Options | ||||
| 	sync.RWMutex | ||||
| 	mu            sync.RWMutex | ||||
| } | ||||
|  | ||||
| func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) { | ||||
| 	if idx := strings.IndexRune(ct, ';'); idx >= 0 { | ||||
| 		ct = ct[:idx] | ||||
| 	} | ||||
| 	b.RLock() | ||||
| 	b.mu.RLock() | ||||
| 	c, ok := b.opts.Codecs[ct] | ||||
| 	b.RUnlock() | ||||
| 	b.mu.RUnlock() | ||||
| 	if ok { | ||||
| 		return c, nil | ||||
| 	} | ||||
| @@ -99,7 +99,7 @@ type noopMessage struct { | ||||
| 	ctx  context.Context | ||||
| 	body []byte | ||||
| 	hdr  metadata.Metadata | ||||
| 	opts PublishOptions | ||||
| 	opts MessageOptions | ||||
| } | ||||
|  | ||||
| func (m *noopMessage) Ack() error { | ||||
| @@ -126,8 +126,11 @@ func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error { | ||||
| 	return m.c.Unmarshal(m.body, dst) | ||||
| } | ||||
|  | ||||
| func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) { | ||||
| 	options := NewPublishOptions(opts...) | ||||
| func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) { | ||||
| 	options := NewMessageOptions(opts...) | ||||
| 	if options.ContentType == "" { | ||||
| 		options.ContentType = b.opts.ContentType | ||||
| 	} | ||||
| 	m := &noopMessage{ctx: ctx, hdr: hdr, opts: options} | ||||
| 	c, err := b.newCodec(m.opts.ContentType) | ||||
| 	if err == nil { | ||||
|   | ||||
| @@ -45,6 +45,9 @@ type Options struct { | ||||
|  | ||||
| 	// GracefulTimeout contains time to wait to finish in flight requests | ||||
| 	GracefulTimeout time.Duration | ||||
|  | ||||
| 	// ContentType will be used if no content-type set when creating message | ||||
| 	ContentType string | ||||
| } | ||||
|  | ||||
| // NewOptions create new Options | ||||
| @@ -57,14 +60,19 @@ func NewOptions(opts ...Option) Options { | ||||
| 		Codecs:          make(map[string]codec.Codec), | ||||
| 		Tracer:          tracer.DefaultTracer, | ||||
| 		GracefulTimeout: DefaultGracefulTimeout, | ||||
| 		ContentType:     DefaultContentType, | ||||
| 	} | ||||
|  | ||||
| 	for _, o := range opts { | ||||
| 		o(&options) | ||||
| 	} | ||||
|  | ||||
| 	return options | ||||
| } | ||||
|  | ||||
| // DefaultContentType is the default content-type if not specified | ||||
| var DefaultContentType = "" | ||||
|  | ||||
| // Context sets the context option | ||||
| func Context(ctx context.Context) Option { | ||||
| 	return func(o *Options) { | ||||
| @@ -72,8 +80,15 @@ func Context(ctx context.Context) Option { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // PublishOptions struct | ||||
| type PublishOptions struct { | ||||
| // ContentType used by default if not specified | ||||
| func ContentType(ct string) Option { | ||||
| 	return func(o *Options) { | ||||
| 		o.ContentType = ct | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // MessageOptions struct | ||||
| type MessageOptions struct { | ||||
| 	// ContentType for message body | ||||
| 	ContentType string | ||||
| 	// BodyOnly flag says the message contains raw body bytes and don't need | ||||
| @@ -83,9 +98,9 @@ type PublishOptions struct { | ||||
| 	Context context.Context | ||||
| } | ||||
|  | ||||
| // NewPublishOptions creates PublishOptions struct | ||||
| func NewPublishOptions(opts ...PublishOption) PublishOptions { | ||||
| 	options := PublishOptions{ | ||||
| // NewMessageOptions creates MessageOptions struct | ||||
| func NewMessageOptions(opts ...MessageOption) MessageOptions { | ||||
| 	options := MessageOptions{ | ||||
| 		Context: context.Background(), | ||||
| 	} | ||||
| 	for _, o := range opts { | ||||
| @@ -113,19 +128,19 @@ type SubscribeOptions struct { | ||||
| // Option func | ||||
| type Option func(*Options) | ||||
|  | ||||
| // PublishOption func | ||||
| type PublishOption func(*PublishOptions) | ||||
| // MessageOption func | ||||
| type MessageOption func(*MessageOptions) | ||||
|  | ||||
| // PublishContentType sets message content-type that used to Marshal | ||||
| func PublishContentType(ct string) PublishOption { | ||||
| 	return func(o *PublishOptions) { | ||||
| // MessageContentType sets message content-type that used to Marshal | ||||
| func MessageContentType(ct string) MessageOption { | ||||
| 	return func(o *MessageOptions) { | ||||
| 		o.ContentType = ct | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // PublishBodyOnly publish only body of the message | ||||
| func PublishBodyOnly(b bool) PublishOption { | ||||
| 	return func(o *PublishOptions) { | ||||
| // MessageBodyOnly publish only body of the message | ||||
| func MessageBodyOnly(b bool) MessageOption { | ||||
| 	return func(o *MessageOptions) { | ||||
| 		o.BodyOnly = b | ||||
| 	} | ||||
| } | ||||
|   | ||||
							
								
								
									
										235
									
								
								cluster/hasql/cluster.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										235
									
								
								cluster/hasql/cluster.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,235 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"database/sql" | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
|  | ||||
| 	"golang.yandex/hasql/v2" | ||||
| ) | ||||
|  | ||||
| func newSQLRowError() *sql.Row { | ||||
| 	row := &sql.Row{} | ||||
| 	t := reflect.TypeOf(row).Elem() | ||||
| 	field, _ := t.FieldByName("err") | ||||
| 	rowPtr := unsafe.Pointer(row) | ||||
| 	errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset) | ||||
| 	errPtr := (*error)(errFieldPtr) | ||||
| 	*errPtr = ErrorNoAliveNodes | ||||
| 	return row | ||||
| } | ||||
|  | ||||
| type ClusterQuerier interface { | ||||
| 	Querier | ||||
| 	WaitForNodes(ctx context.Context, criterion ...hasql.NodeStateCriterion) error | ||||
| } | ||||
|  | ||||
| type Cluster struct { | ||||
| 	hasql   *hasql.Cluster[Querier] | ||||
| 	options ClusterOptions | ||||
| } | ||||
|  | ||||
| // NewCluster returns [Querier] that provides cluster of nodes | ||||
| func NewCluster[T Querier](opts ...ClusterOption) (ClusterQuerier, error) { | ||||
| 	options := ClusterOptions{Context: context.Background()} | ||||
| 	for _, opt := range opts { | ||||
| 		opt(&options) | ||||
| 	} | ||||
| 	if options.NodeChecker == nil { | ||||
| 		return nil, ErrClusterChecker | ||||
| 	} | ||||
| 	if options.NodeDiscoverer == nil { | ||||
| 		return nil, ErrClusterDiscoverer | ||||
| 	} | ||||
| 	if options.NodePicker == nil { | ||||
| 		return nil, ErrClusterPicker | ||||
| 	} | ||||
|  | ||||
| 	if options.Retries < 1 { | ||||
| 		options.Retries = 1 | ||||
| 	} | ||||
|  | ||||
| 	if options.NodeStateCriterion == 0 { | ||||
| 		options.NodeStateCriterion = hasql.Primary | ||||
| 	} | ||||
|  | ||||
| 	options.Options = append(options.Options, hasql.WithNodePicker(options.NodePicker)) | ||||
| 	if p, ok := options.NodePicker.(*CustomPicker[Querier]); ok { | ||||
| 		p.opts.Priority = options.NodePriority | ||||
| 	} | ||||
|  | ||||
| 	c, err := hasql.NewCluster( | ||||
| 		options.NodeDiscoverer, | ||||
| 		options.NodeChecker, | ||||
| 		options.Options..., | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &Cluster{hasql: c, options: options}, nil | ||||
| } | ||||
|  | ||||
| func (c *Cluster) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) { | ||||
| 	var tx *sql.Tx | ||||
| 	var err error | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if tx, err = n.DB().BeginTx(ctx, opts); err != nil && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if tx == nil && err == nil { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return tx, err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) Close() error { | ||||
| 	return c.hasql.Close() | ||||
| } | ||||
|  | ||||
| func (c *Cluster) Conn(ctx context.Context) (*sql.Conn, error) { | ||||
| 	var conn *sql.Conn | ||||
| 	var err error | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if conn, err = n.DB().Conn(ctx); err != nil && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if conn == nil && err == nil { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return conn, err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { | ||||
| 	var res sql.Result | ||||
| 	var err error | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if res, err = n.DB().ExecContext(ctx, query, args...); err != nil && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if res == nil && err == nil { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return res, err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) { | ||||
| 	var res *sql.Stmt | ||||
| 	var err error | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if res, err = n.DB().PrepareContext(ctx, query); err != nil && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if res == nil && err == nil { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return res, err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { | ||||
| 	var res *sql.Rows | ||||
| 	var err error | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if res, err = n.DB().QueryContext(ctx, query); err != nil && err != sql.ErrNoRows && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if res == nil && err == nil { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return res, err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row { | ||||
| 	var res *sql.Row | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			res = n.DB().QueryRowContext(ctx, query, args...) | ||||
| 			if res.Err() == nil { | ||||
| 				return false | ||||
| 			} else if res.Err() != nil && retries >= c.options.Retries { | ||||
| 				return false | ||||
| 			} | ||||
| 		} | ||||
| 		return true | ||||
| 	}) | ||||
|  | ||||
| 	if res == nil { | ||||
| 		res = newSQLRowError() | ||||
| 	} | ||||
|  | ||||
| 	return res | ||||
| } | ||||
|  | ||||
| func (c *Cluster) PingContext(ctx context.Context) error { | ||||
| 	var err error | ||||
| 	var ok bool | ||||
|  | ||||
| 	retries := 0 | ||||
| 	c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool { | ||||
| 		ok = true | ||||
| 		for ; retries < c.options.Retries; retries++ { | ||||
| 			if err = n.DB().PingContext(ctx); err != nil && retries >= c.options.Retries { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 		return false | ||||
| 	}) | ||||
|  | ||||
| 	if !ok { | ||||
| 		err = ErrorNoAliveNodes | ||||
| 	} | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (c *Cluster) WaitForNodes(ctx context.Context, criterions ...hasql.NodeStateCriterion) error { | ||||
| 	for _, criterion := range criterions { | ||||
| 		if _, err := c.hasql.WaitForNode(ctx, criterion); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										171
									
								
								cluster/hasql/cluster_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										171
									
								
								cluster/hasql/cluster_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,171 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/DATA-DOG/go-sqlmock" | ||||
| 	"golang.yandex/hasql/v2" | ||||
| ) | ||||
|  | ||||
| func TestNewCluster(t *testing.T) { | ||||
| 	dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbMaster.Close() | ||||
| 	dbMasterMock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(1, 0)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("master-dc1")) | ||||
|  | ||||
| 	dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbDRMaster.Close() | ||||
| 	dbDRMasterMock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 40)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("drmaster1-dc2")) | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("drmaster")) | ||||
|  | ||||
| 	dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbSlaveDC1.Close() | ||||
| 	dbSlaveDC1Mock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 50)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("slave-dc1")) | ||||
|  | ||||
| 	dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbSlaveDC2.Close() | ||||
| 	dbSlaveDC1Mock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 50)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("slave-dc1")) | ||||
|  | ||||
| 	tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	c, err := NewCluster[Querier]( | ||||
| 		WithClusterContext(tctx), | ||||
| 		WithClusterNodeChecker(hasql.PostgreSQLChecker), | ||||
| 		WithClusterNodePicker(NewCustomPicker[Querier]( | ||||
| 			CustomPickerMaxLag(100), | ||||
| 		)), | ||||
| 		WithClusterNodes( | ||||
| 			ClusterNode{"slave-dc1", dbSlaveDC1, 1}, | ||||
| 			ClusterNode{"master-dc1", dbMaster, 1}, | ||||
| 			ClusterNode{"slave-dc2", dbSlaveDC2, 2}, | ||||
| 			ClusterNode{"drmaster1-dc2", dbDRMaster, 0}, | ||||
| 		), | ||||
| 		WithClusterOptions( | ||||
| 			hasql.WithUpdateInterval[Querier](2*time.Second), | ||||
| 			hasql.WithUpdateTimeout[Querier](1*time.Second), | ||||
| 		), | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer c.Close() | ||||
|  | ||||
| 	if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	time.Sleep(500 * time.Millisecond) | ||||
|  | ||||
| 	node1Name := "" | ||||
| 	fmt.Printf("check for Standby\n") | ||||
| 	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.Standby), "SELECT node_name as name"); row.Err() != nil { | ||||
| 		t.Fatal(row.Err()) | ||||
| 	} else if err = row.Scan(&node1Name); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} else if "slave-dc1" != node1Name { | ||||
| 		t.Fatalf("invalid node name %s != %s", "slave-dc1", node1Name) | ||||
| 	} | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("slave-dc1")) | ||||
|  | ||||
| 	node2Name := "" | ||||
| 	fmt.Printf("check for PreferStandby\n") | ||||
| 	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() != nil { | ||||
| 		t.Fatal(row.Err()) | ||||
| 	} else if err = row.Scan(&node2Name); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} else if "slave-dc1" != node2Name { | ||||
| 		t.Fatalf("invalid node name %s != %s", "slave-dc1", node2Name) | ||||
| 	} | ||||
|  | ||||
| 	node3Name := "" | ||||
| 	fmt.Printf("check for PreferPrimary\n") | ||||
| 	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferPrimary), "SELECT node_name as name"); row.Err() != nil { | ||||
| 		t.Fatal(row.Err()) | ||||
| 	} else if err = row.Scan(&node3Name); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} else if "master-dc1" != node3Name { | ||||
| 		t.Fatalf("invalid node name %s != %s", "master-dc1", node3Name) | ||||
| 	} | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`.*`).WillReturnRows(sqlmock.NewRows([]string{"role"}).RowError(1, fmt.Errorf("row error"))) | ||||
|  | ||||
| 	time.Sleep(2 * time.Second) | ||||
|  | ||||
| 	fmt.Printf("check for PreferStandby\n") | ||||
| 	if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() == nil { | ||||
| 		t.Fatal("must return error") | ||||
| 	} | ||||
|  | ||||
| 	if dbMasterErr := dbMasterMock.ExpectationsWereMet(); dbMasterErr != nil { | ||||
| 		t.Error(dbMasterErr) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										25
									
								
								cluster/hasql/db.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								cluster/hasql/db.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"database/sql" | ||||
| ) | ||||
|  | ||||
| type Querier interface { | ||||
| 	// Basic connection methods | ||||
| 	PingContext(ctx context.Context) error | ||||
| 	Close() error | ||||
|  | ||||
| 	// Query methods with context | ||||
| 	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) | ||||
| 	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) | ||||
| 	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row | ||||
|  | ||||
| 	// Prepared statements with context | ||||
| 	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) | ||||
|  | ||||
| 	// Transaction management with context | ||||
| 	BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) | ||||
|  | ||||
| 	Conn(ctx context.Context) (*sql.Conn, error) | ||||
| } | ||||
							
								
								
									
										295
									
								
								cluster/hasql/driver.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										295
									
								
								cluster/hasql/driver.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,295 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"database/sql" | ||||
| 	"database/sql/driver" | ||||
| 	"io" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| // OpenDBWithCluster creates a [*sql.DB] that uses the [ClusterQuerier] | ||||
| func OpenDBWithCluster(db ClusterQuerier) (*sql.DB, error) { | ||||
| 	driver := NewClusterDriver(db) | ||||
| 	connector, err := driver.OpenConnector("") | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return sql.OpenDB(connector), nil | ||||
| } | ||||
|  | ||||
| // ClusterDriver implements [driver.Driver] and driver.Connector for an existing [Querier] | ||||
| type ClusterDriver struct { | ||||
| 	db ClusterQuerier | ||||
| } | ||||
|  | ||||
| // NewClusterDriver creates a new [driver.Driver] that uses an existing [ClusterQuerier] | ||||
| func NewClusterDriver(db ClusterQuerier) *ClusterDriver { | ||||
| 	return &ClusterDriver{db: db} | ||||
| } | ||||
|  | ||||
| // Open implements [driver.Driver.Open] | ||||
| func (d *ClusterDriver) Open(name string) (driver.Conn, error) { | ||||
| 	return d.Connect(context.Background()) | ||||
| } | ||||
|  | ||||
| // OpenConnector implements [driver.DriverContext.OpenConnector] | ||||
| func (d *ClusterDriver) OpenConnector(name string) (driver.Connector, error) { | ||||
| 	return d, nil | ||||
| } | ||||
|  | ||||
| // Connect implements [driver.Connector.Connect] | ||||
| func (d *ClusterDriver) Connect(ctx context.Context) (driver.Conn, error) { | ||||
| 	conn, err := d.db.Conn(ctx) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &dbConn{conn: conn}, nil | ||||
| } | ||||
|  | ||||
| // Driver implements [driver.Connector.Driver] | ||||
| func (d *ClusterDriver) Driver() driver.Driver { | ||||
| 	return d | ||||
| } | ||||
|  | ||||
| // dbConn implements driver.Conn with both context and legacy methods | ||||
| type dbConn struct { | ||||
| 	conn *sql.Conn | ||||
| 	mu   sync.Mutex | ||||
| } | ||||
|  | ||||
| // Prepare implements [driver.Conn.Prepare] (legacy method) | ||||
| func (c *dbConn) Prepare(query string) (driver.Stmt, error) { | ||||
| 	return c.PrepareContext(context.Background(), query) | ||||
| } | ||||
|  | ||||
| // PrepareContext implements [driver.ConnPrepareContext.PrepareContext] | ||||
| func (c *dbConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	stmt, err := c.conn.PrepareContext(ctx, query) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &dbStmt{stmt: stmt}, nil | ||||
| } | ||||
|  | ||||
| // Exec implements [driver.Execer.Exec] (legacy method) | ||||
| func (c *dbConn) Exec(query string, args []driver.Value) (driver.Result, error) { | ||||
| 	namedArgs := make([]driver.NamedValue, len(args)) | ||||
| 	for i, value := range args { | ||||
| 		namedArgs[i] = driver.NamedValue{Value: value} | ||||
| 	} | ||||
| 	return c.ExecContext(context.Background(), query, namedArgs) | ||||
| } | ||||
|  | ||||
| // ExecContext implements [driver.ExecerContext.ExecContext] | ||||
| func (c *dbConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	// Convert driver.NamedValue to any | ||||
| 	interfaceArgs := make([]any, len(args)) | ||||
| 	for i, arg := range args { | ||||
| 		interfaceArgs[i] = arg.Value | ||||
| 	} | ||||
|  | ||||
| 	return c.conn.ExecContext(ctx, query, interfaceArgs...) | ||||
| } | ||||
|  | ||||
| // Query implements [driver.Queryer.Query] (legacy method) | ||||
| func (c *dbConn) Query(query string, args []driver.Value) (driver.Rows, error) { | ||||
| 	namedArgs := make([]driver.NamedValue, len(args)) | ||||
| 	for i, value := range args { | ||||
| 		namedArgs[i] = driver.NamedValue{Value: value} | ||||
| 	} | ||||
| 	return c.QueryContext(context.Background(), query, namedArgs) | ||||
| } | ||||
|  | ||||
| // QueryContext implements [driver.QueryerContext.QueryContext] | ||||
| func (c *dbConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	// Convert driver.NamedValue to any | ||||
| 	interfaceArgs := make([]any, len(args)) | ||||
| 	for i, arg := range args { | ||||
| 		interfaceArgs[i] = arg.Value | ||||
| 	} | ||||
|  | ||||
| 	rows, err := c.conn.QueryContext(ctx, query, interfaceArgs...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &dbRows{rows: rows}, nil | ||||
| } | ||||
|  | ||||
| // Begin implements [driver.Conn.Begin] (legacy method) | ||||
| func (c *dbConn) Begin() (driver.Tx, error) { | ||||
| 	return c.BeginTx(context.Background(), driver.TxOptions{}) | ||||
| } | ||||
|  | ||||
| // BeginTx implements [driver.ConnBeginTx.BeginTx] | ||||
| func (c *dbConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	sqlOpts := &sql.TxOptions{ | ||||
| 		Isolation: sql.IsolationLevel(opts.Isolation), | ||||
| 		ReadOnly:  opts.ReadOnly, | ||||
| 	} | ||||
|  | ||||
| 	tx, err := c.conn.BeginTx(ctx, sqlOpts) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &dbTx{tx: tx}, nil | ||||
| } | ||||
|  | ||||
| // Ping implements [driver.Pinger.Ping] | ||||
| func (c *dbConn) Ping(ctx context.Context) error { | ||||
| 	return c.conn.PingContext(ctx) | ||||
| } | ||||
|  | ||||
| // Close implements [driver.Conn.Close] | ||||
| func (c *dbConn) Close() error { | ||||
| 	return c.conn.Close() | ||||
| } | ||||
|  | ||||
| // IsValid implements [driver.Validator.IsValid] | ||||
| func (c *dbConn) IsValid() bool { | ||||
| 	// Ping with a short timeout to check if the connection is still valid | ||||
| 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	return c.conn.PingContext(ctx) == nil | ||||
| } | ||||
|  | ||||
| // dbStmt implements [driver.Stmt] with both context and legacy methods | ||||
| type dbStmt struct { | ||||
| 	stmt *sql.Stmt | ||||
| 	mu   sync.Mutex | ||||
| } | ||||
|  | ||||
| // Close implements [driver.Stmt.Close] | ||||
| func (s *dbStmt) Close() error { | ||||
| 	s.mu.Lock() | ||||
| 	defer s.mu.Unlock() | ||||
| 	return s.stmt.Close() | ||||
| } | ||||
|  | ||||
| // Close implements [driver.Stmt.NumInput] | ||||
| func (s *dbStmt) NumInput() int { | ||||
| 	return -1 // Number of parameters is unknown | ||||
| } | ||||
|  | ||||
| // Exec implements [driver.Stmt.Exec] (legacy method) | ||||
| func (s *dbStmt) Exec(args []driver.Value) (driver.Result, error) { | ||||
| 	namedArgs := make([]driver.NamedValue, len(args)) | ||||
| 	for i, value := range args { | ||||
| 		namedArgs[i] = driver.NamedValue{Value: value} | ||||
| 	} | ||||
| 	return s.ExecContext(context.Background(), namedArgs) | ||||
| } | ||||
|  | ||||
| // ExecContext implements [driver.StmtExecContext.ExecContext] | ||||
| func (s *dbStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { | ||||
| 	s.mu.Lock() | ||||
| 	defer s.mu.Unlock() | ||||
|  | ||||
| 	interfaceArgs := make([]any, len(args)) | ||||
| 	for i, arg := range args { | ||||
| 		interfaceArgs[i] = arg.Value | ||||
| 	} | ||||
| 	return s.stmt.ExecContext(ctx, interfaceArgs...) | ||||
| } | ||||
|  | ||||
| // Query implements [driver.Stmt.Query] (legacy method) | ||||
| func (s *dbStmt) Query(args []driver.Value) (driver.Rows, error) { | ||||
| 	namedArgs := make([]driver.NamedValue, len(args)) | ||||
| 	for i, value := range args { | ||||
| 		namedArgs[i] = driver.NamedValue{Value: value} | ||||
| 	} | ||||
| 	return s.QueryContext(context.Background(), namedArgs) | ||||
| } | ||||
|  | ||||
| // QueryContext implements [driver.StmtQueryContext.QueryContext] | ||||
| func (s *dbStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { | ||||
| 	s.mu.Lock() | ||||
| 	defer s.mu.Unlock() | ||||
|  | ||||
| 	interfaceArgs := make([]any, len(args)) | ||||
| 	for i, arg := range args { | ||||
| 		interfaceArgs[i] = arg.Value | ||||
| 	} | ||||
|  | ||||
| 	rows, err := s.stmt.QueryContext(ctx, interfaceArgs...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &dbRows{rows: rows}, nil | ||||
| } | ||||
|  | ||||
// dbRows adapts *sql.Rows to the driver.Rows interface.
type dbRows struct {
	rows *sql.Rows
}

// Columns implements [driver.Rows.Columns].
func (r *dbRows) Columns() []string {
	cols, err := r.rows.Columns()
	if err != nil {
		// Should not happen once the query has succeeded.
		return []string{}
	}
	return cols
}

// Close implements [driver.Rows.Close].
func (r *dbRows) Close() error {
	return r.rows.Close()
}

// Next implements [driver.Rows.Next]: it advances the cursor, scans the
// current row into dest, and returns io.EOF when no rows remain.
func (r *dbRows) Next(dest []driver.Value) error {
	if !r.rows.Next() {
		if err := r.rows.Err(); err != nil {
			return err
		}
		return io.EOF
	}

	// Scan through pointers into the caller-supplied destination slice.
	targets := make([]any, len(dest))
	for i := range dest {
		targets[i] = &dest[i]
	}
	return r.rows.Scan(targets...)
}
|  | ||||
// dbTx adapts *sql.Tx to driver.Tx. A mutex guards the terminating
// commit/rollback call.
type dbTx struct {
	tx *sql.Tx
	mu sync.Mutex
}

// Commit implements [driver.Tx.Commit].
func (t *dbTx) Commit() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.tx.Commit()
}

// Rollback implements [driver.Tx.Rollback].
func (t *dbTx) Rollback() error {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.tx.Rollback()
}
							
								
								
									
										141
									
								
								cluster/hasql/driver_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										141
									
								
								cluster/hasql/driver_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,141 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/DATA-DOG/go-sqlmock" | ||||
| 	"golang.yandex/hasql/v2" | ||||
| ) | ||||
|  | ||||
| func TestDriver(t *testing.T) { | ||||
| 	dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbMaster.Close() | ||||
| 	dbMasterMock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(1, 0)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("master-dc1")) | ||||
|  | ||||
| 	dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbDRMaster.Close() | ||||
| 	dbDRMasterMock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 40)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("drmaster1-dc2")) | ||||
|  | ||||
| 	dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("drmaster")) | ||||
|  | ||||
| 	dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbSlaveDC1.Close() | ||||
| 	dbSlaveDC1Mock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 50)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("slave-dc1")) | ||||
|  | ||||
| 	dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer dbSlaveDC2.Close() | ||||
| 	dbSlaveDC1Mock.MatchExpectationsInOrder(false) | ||||
|  | ||||
| 	dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows( | ||||
| 		sqlmock.NewRowsWithColumnDefinition( | ||||
| 			sqlmock.NewColumn("role").OfType("int8", 0), | ||||
| 			sqlmock.NewColumn("replication_lag").OfType("int8", 0)). | ||||
| 			AddRow(2, 50)). | ||||
| 		RowsWillBeClosed(). | ||||
| 		WithoutArgs() | ||||
|  | ||||
| 	dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows( | ||||
| 		sqlmock.NewRows([]string{"name"}). | ||||
| 			AddRow("slave-dc1")) | ||||
|  | ||||
| 	tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	c, err := NewCluster[Querier]( | ||||
| 		WithClusterContext(tctx), | ||||
| 		WithClusterNodeChecker(hasql.PostgreSQLChecker), | ||||
| 		WithClusterNodePicker(NewCustomPicker[Querier]( | ||||
| 			CustomPickerMaxLag(100), | ||||
| 		)), | ||||
| 		WithClusterNodes( | ||||
| 			ClusterNode{"slave-dc1", dbSlaveDC1, 1}, | ||||
| 			ClusterNode{"master-dc1", dbMaster, 1}, | ||||
| 			ClusterNode{"slave-dc2", dbSlaveDC2, 2}, | ||||
| 			ClusterNode{"drmaster1-dc2", dbDRMaster, 0}, | ||||
| 		), | ||||
| 		WithClusterOptions( | ||||
| 			hasql.WithUpdateInterval[Querier](2*time.Second), | ||||
| 			hasql.WithUpdateTimeout[Querier](1*time.Second), | ||||
| 		), | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	defer c.Close() | ||||
|  | ||||
| 	if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	db, err := OpenDBWithCluster(c) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	// Use context methods | ||||
| 	row := db.QueryRowContext(NodeStateCriterion(t.Context(), hasql.Primary), "SELECT node_name as name") | ||||
| 	if err = row.Err(); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	nodeName := "" | ||||
| 	if err = row.Scan(&nodeName); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	if nodeName != "master-dc1" { | ||||
| 		t.Fatalf("invalid node_name %s != %s", "master-dc1", nodeName) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										10
									
								
								cluster/hasql/error.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								cluster/hasql/error.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
| package sql | ||||
|  | ||||
| import "errors" | ||||
|  | ||||
var (
	// ErrClusterChecker is returned when no hasql.NodeChecker is configured.
	ErrClusterChecker    = errors.New("cluster node checker required")
	// ErrClusterDiscoverer is returned when no hasql.NodeDiscoverer is configured.
	ErrClusterDiscoverer = errors.New("cluster node discoverer required")
	// ErrClusterPicker is returned when no hasql.NodePicker is configured.
	ErrClusterPicker     = errors.New("cluster node picker required")
	// ErrorNoAliveNodes is returned when the cluster has no alive nodes.
	// NOTE(review): name deviates from the Go ErrXxx convention
	// (ErrNoAliveNodes); renaming would break the exported API.
	ErrorNoAliveNodes    = errors.New("cluster no alive nodes")
)
							
								
								
									
										110
									
								
								cluster/hasql/options.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										110
									
								
								cluster/hasql/options.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,110 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"math" | ||||
|  | ||||
| 	"golang.yandex/hasql/v2" | ||||
| ) | ||||
|  | ||||
// ClusterOptions contains cluster specific options
type ClusterOptions struct {
	// NodeChecker probes a node to determine its health and role.
	NodeChecker        hasql.NodeChecker
	// NodePicker selects which checked node serves a request.
	NodePicker         hasql.NodePicker[Querier]
	// NodeDiscoverer enumerates the cluster's nodes.
	NodeDiscoverer     hasql.NodeDiscoverer[Querier]
	// Options are extra hasql.ClusterOpt values passed to the underlying cluster.
	Options            []hasql.ClusterOpt[Querier]
	// Context is used for background checks (see WithClusterContext).
	Context            context.Context
	// Retries is the retry count on other nodes in case of error.
	Retries            int
	// NodePriority maps node name to priority; filled by WithClusterNodes.
	NodePriority       map[string]int32
	// NodeStateCriterion is the default criterion used when the request
	// context does not carry one (see NodeStateCriterion / getNodeStateCriterion).
	NodeStateCriterion hasql.NodeStateCriterion
}

// ClusterOption apply cluster options to ClusterOptions
type ClusterOption func(*ClusterOptions)
|  | ||||
| // WithClusterNodeChecker pass hasql.NodeChecker to cluster options | ||||
| func WithClusterNodeChecker(c hasql.NodeChecker) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.NodeChecker = c | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithClusterNodePicker pass hasql.NodePicker to cluster options | ||||
| func WithClusterNodePicker(p hasql.NodePicker[Querier]) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.NodePicker = p | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithClusterNodeDiscoverer pass hasql.NodeDiscoverer to cluster options | ||||
| func WithClusterNodeDiscoverer(d hasql.NodeDiscoverer[Querier]) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.NodeDiscoverer = d | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithRetries retry count on other nodes in case of error | ||||
| func WithRetries(n int) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.Retries = n | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithClusterContext pass context.Context to cluster options and used for checks | ||||
| func WithClusterContext(ctx context.Context) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.Context = ctx | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithClusterOptions pass hasql.ClusterOpt | ||||
| func WithClusterOptions(opts ...hasql.ClusterOpt[Querier]) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.Options = append(o.Options, opts...) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithClusterNodeStateCriterion pass default hasql.NodeStateCriterion | ||||
| func WithClusterNodeStateCriterion(c hasql.NodeStateCriterion) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		o.NodeStateCriterion = c | ||||
| 	} | ||||
| } | ||||
|  | ||||
// ClusterNode describes a single database node in the cluster.
type ClusterNode struct {
	// Name is the node name used for discovery and priority lookup.
	Name     string
	// DB is the node's database handle.
	DB       Querier
	// Priority is the node preference (lower wins); 0 means "unset" and
	// is mapped to math.MaxInt32 (lowest priority) by WithClusterNodes.
	Priority int32
}
|  | ||||
| // WithClusterNodes create cluster with static NodeDiscoverer | ||||
| func WithClusterNodes(cns ...ClusterNode) ClusterOption { | ||||
| 	return func(o *ClusterOptions) { | ||||
| 		nodes := make([]*hasql.Node[Querier], 0, len(cns)) | ||||
| 		if o.NodePriority == nil { | ||||
| 			o.NodePriority = make(map[string]int32, len(cns)) | ||||
| 		} | ||||
| 		for _, cn := range cns { | ||||
| 			nodes = append(nodes, hasql.NewNode(cn.Name, cn.DB)) | ||||
| 			if cn.Priority == 0 { | ||||
| 				cn.Priority = math.MaxInt32 | ||||
| 			} | ||||
| 			o.NodePriority[cn.Name] = cn.Priority | ||||
| 		} | ||||
| 		o.NodeDiscoverer = hasql.NewStaticNodeDiscoverer(nodes...) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// nodeStateCriterionKey is the unexported context key used to carry a
// hasql.NodeStateCriterion through a context.Context.
type nodeStateCriterionKey struct{}

// NodeStateCriterion returns a copy of ctx carrying the given
// hasql.NodeStateCriterion, consulted by the cluster when routing a query.
func NodeStateCriterion(ctx context.Context, c hasql.NodeStateCriterion) context.Context {
	return context.WithValue(ctx, nodeStateCriterionKey{}, c)
}
|  | ||||
// getNodeStateCriterion extracts the hasql.NodeStateCriterion injected
// into ctx by NodeStateCriterion, falling back to the cluster-wide
// default when none is present.
func (c *Cluster) getNodeStateCriterion(ctx context.Context) hasql.NodeStateCriterion {
	if v, ok := ctx.Value(nodeStateCriterionKey{}).(hasql.NodeStateCriterion); ok {
		return v
	}
	return c.options.NodeStateCriterion
}
							
								
								
									
										113
									
								
								cluster/hasql/picker.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										113
									
								
								cluster/hasql/picker.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,113 @@ | ||||
| package sql | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.yandex/hasql/v2" | ||||
| ) | ||||
|  | ||||
// compile time guard: *CustomPicker must satisfy hasql.NodePicker
var _ hasql.NodePicker[Querier] = (*CustomPicker[Querier])(nil)

// CustomPickerOptions holds options to pick nodes
type CustomPickerOptions struct {
	// MaxLag is the maximum replication lag a node may have and still be
	// preferred by CompareNodes.
	MaxLag   int
	// Priority maps node name to its configured priority (lower wins).
	// NOTE(review): never populated by any CustomPickerOption in this
	// file — confirm it is filled elsewhere (e.g. from ClusterOptions.NodePriority).
	Priority map[string]int32
	// Retries is a retry count.
	// NOTE(review): not read anywhere in this file — confirm intended use.
	Retries  int
}
|  | ||||
| // CustomPickerOption func apply option to CustomPickerOptions | ||||
| type CustomPickerOption func(*CustomPickerOptions) | ||||
|  | ||||
| // CustomPickerMaxLag specifies max lag for which node can be used | ||||
| func CustomPickerMaxLag(n int) CustomPickerOption { | ||||
| 	return func(o *CustomPickerOptions) { | ||||
| 		o.MaxLag = n | ||||
| 	} | ||||
| } | ||||
|  | ||||
// NewCustomPicker creates new node picker
//
// NOTE(review): the type parameter T is declared but never used — the
// function always returns *CustomPicker[Querier] regardless of the
// instantiation. Changing the return type to *CustomPicker[T] would be
// the obvious fix but alters the signature for non-Querier
// instantiations; confirm intent before changing.
func NewCustomPicker[T Querier](opts ...CustomPickerOption) *CustomPicker[Querier] {
	options := CustomPickerOptions{}
	for _, o := range opts {
		o(&options)
	}
	return &CustomPicker[Querier]{opts: options}
}
|  | ||||
// CustomPicker picks nodes based on replication lag, configured
// priority and latency, using the CustomPickerOptions it was built with.
type CustomPicker[T Querier] struct {
	opts CustomPickerOptions
}
|  | ||||
| // PickNode used to return specific node | ||||
| func (p *CustomPicker[T]) PickNode(cnodes []hasql.CheckedNode[T]) hasql.CheckedNode[T] { | ||||
| 	for _, n := range cnodes { | ||||
| 		fmt.Printf("node %s\n", n.Node.String()) | ||||
| 	} | ||||
| 	return cnodes[0] | ||||
| } | ||||
|  | ||||
| func (p *CustomPicker[T]) getPriority(nodeName string) int32 { | ||||
| 	if prio, ok := p.opts.Priority[nodeName]; ok { | ||||
| 		return prio | ||||
| 	} | ||||
| 	return math.MaxInt32 // Default to lowest priority | ||||
| } | ||||
|  | ||||
// CompareNodes orders two checked nodes for the picker: nodes within
// MaxLag beat nodes beyond it; among usable nodes, lower configured
// priority wins; on equal priority, lower replication lag and then
// lower latency win. Returns -1 when a is better, 1 when b is better,
// 0 when they are equivalent.
//
// NOTE(review): the type assertions below assume the checker's Info
// implements ReplicationLag()/Latency() and panic otherwise — confirm
// against the configured hasql.NodeChecker.
func (p *CustomPicker[T]) CompareNodes(a, b hasql.CheckedNode[T]) int {
	// Get replication lag values
	aLag := a.Info.(interface{ ReplicationLag() int }).ReplicationLag()
	bLag := b.Info.(interface{ ReplicationLag() int }).ReplicationLag()

	// First check that lag lower then MaxLag
	if aLag > p.opts.MaxLag && bLag > p.opts.MaxLag {
		return 0 // both are equal
	}

	// If one node exceeds MaxLag and the other doesn't, prefer the one that doesn't
	if aLag > p.opts.MaxLag {
		return 1 // b is better
	}
	if bLag > p.opts.MaxLag {
		return -1 // a is better
	}

	// Get node priorities
	aPrio := p.getPriority(a.Node.String())
	bPrio := p.getPriority(b.Node.String())

	// if both priority equals
	if aPrio == bPrio {
		// First compare by replication lag
		if aLag < bLag {
			return -1
		}
		if aLag > bLag {
			return 1
		}
		// If replication lag is equal, compare by latency
		aLatency := a.Info.(interface{ Latency() time.Duration }).Latency()
		bLatency := b.Info.(interface{ Latency() time.Duration }).Latency()

		if aLatency < bLatency {
			return -1
		}
		if aLatency > bLatency {
			return 1
		}

		// If lag and latency is equal
		return 0
	}

	// If priorities are different, prefer the node with lower priority value
	if aPrio < bPrio {
		return -1
	}

	return 1
}
							
								
								
									
										3
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										3
									
								
								go.mod
									
									
									
									
									
								
							| @@ -1,6 +1,6 @@ | ||||
| module go.unistack.org/micro/v4 | ||||
|  | ||||
| go 1.22.0 | ||||
| go 1.24 | ||||
|  | ||||
| require ( | ||||
| 	dario.cat/mergo v1.0.1 | ||||
| @@ -17,6 +17,7 @@ require ( | ||||
| 	go.uber.org/automaxprocs v1.6.0 | ||||
| 	go.unistack.org/micro-proto/v4 v4.1.0 | ||||
| 	golang.org/x/sync v0.10.0 | ||||
| 	golang.yandex/hasql/v2 v2.1.0 | ||||
| 	google.golang.org/grpc v1.69.4 | ||||
| 	google.golang.org/protobuf v1.36.3 | ||||
| ) | ||||
|   | ||||
							
								
								
									
										2
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										2
									
								
								go.sum
									
									
									
									
									
								
							| @@ -56,6 +56,8 @@ golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= | ||||
| golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= | ||||
| golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= | ||||
| golang.yandex/hasql/v2 v2.1.0 h1:7CaFFWeHoK5TvA+QvZzlKHlIN5sqNpqM8NSrXskZD/k= | ||||
| golang.yandex/hasql/v2 v2.1.0/go.mod h1:3Au1AxuJDCTXmS117BpbI6e+70kGWeyLR1qJAH6HdtA= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= | ||||
| google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= | ||||
|   | ||||
| @@ -3,6 +3,7 @@ package sql | ||||
| import ( | ||||
| 	"context" | ||||
| 	"database/sql" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| @@ -11,31 +12,84 @@ type Statser interface { | ||||
| } | ||||
|  | ||||
| func NewStatsMeter(ctx context.Context, db Statser, opts ...Option) { | ||||
| 	options := NewOptions(opts...) | ||||
|  | ||||
| 	go func() { | ||||
| 		ticker := time.NewTicker(options.MeterStatsInterval) | ||||
| 		defer ticker.Stop() | ||||
|  | ||||
| 		for { | ||||
| 			select { | ||||
| 			case <-ctx.Done(): | ||||
| 				return | ||||
| 			case <-ticker.C: | ||||
| 	if db == nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	options := NewOptions(opts...) | ||||
|  | ||||
| 	var ( | ||||
| 		statsMu                                                     sync.Mutex | ||||
| 		lastUpdated                                                 time.Time | ||||
| 		maxOpenConnections, openConnections, inUse, idle, waitCount float64 | ||||
| 		maxIdleClosed, maxIdleTimeClosed, maxLifetimeClosed         float64 | ||||
| 		waitDuration                                                float64 | ||||
| 	) | ||||
|  | ||||
| 	updateFn := func() { | ||||
| 		statsMu.Lock() | ||||
| 		defer statsMu.Unlock() | ||||
|  | ||||
| 		if time.Since(lastUpdated) < options.MeterStatsInterval { | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		stats := db.Stats() | ||||
| 				options.Meter.Counter(MaxOpenConnections).Set(uint64(stats.MaxOpenConnections)) | ||||
| 				options.Meter.Counter(OpenConnections).Set(uint64(stats.OpenConnections)) | ||||
| 				options.Meter.Counter(InuseConnections).Set(uint64(stats.InUse)) | ||||
| 				options.Meter.Counter(IdleConnections).Set(uint64(stats.Idle)) | ||||
| 				options.Meter.Counter(WaitConnections).Set(uint64(stats.WaitCount)) | ||||
| 				options.Meter.FloatCounter(BlockedSeconds).Set(stats.WaitDuration.Seconds()) | ||||
| 				options.Meter.Counter(MaxIdleClosed).Set(uint64(stats.MaxIdleClosed)) | ||||
| 				options.Meter.Counter(MaxIdletimeClosed).Set(uint64(stats.MaxIdleTimeClosed)) | ||||
| 				options.Meter.Counter(MaxLifetimeClosed).Set(uint64(stats.MaxLifetimeClosed)) | ||||
| 		maxOpenConnections = float64(stats.MaxOpenConnections) | ||||
| 		openConnections = float64(stats.OpenConnections) | ||||
| 		inUse = float64(stats.InUse) | ||||
| 		idle = float64(stats.Idle) | ||||
| 		waitCount = float64(stats.WaitCount) | ||||
| 		maxIdleClosed = float64(stats.MaxIdleClosed) | ||||
| 		maxIdleTimeClosed = float64(stats.MaxIdleTimeClosed) | ||||
| 		maxLifetimeClosed = float64(stats.MaxLifetimeClosed) | ||||
| 		waitDuration = float64(stats.WaitDuration.Seconds()) | ||||
|  | ||||
| 		lastUpdated = time.Now() | ||||
| 	} | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	options.Meter.Gauge(MaxOpenConnections, func() float64 { | ||||
| 		updateFn() | ||||
| 		return maxOpenConnections | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(OpenConnections, func() float64 { | ||||
| 		updateFn() | ||||
| 		return openConnections | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(InuseConnections, func() float64 { | ||||
| 		updateFn() | ||||
| 		return inUse | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(IdleConnections, func() float64 { | ||||
| 		updateFn() | ||||
| 		return idle | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(WaitConnections, func() float64 { | ||||
| 		updateFn() | ||||
| 		return waitCount | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(BlockedSeconds, func() float64 { | ||||
| 		updateFn() | ||||
| 		return waitDuration | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(MaxIdleClosed, func() float64 { | ||||
| 		updateFn() | ||||
| 		return maxIdleClosed | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(MaxIdletimeClosed, func() float64 { | ||||
| 		updateFn() | ||||
| 		return maxIdleTimeClosed | ||||
| 	}) | ||||
|  | ||||
| 	options.Meter.Gauge(MaxLifetimeClosed, func() float64 { | ||||
| 		updateFn() | ||||
| 		return maxLifetimeClosed | ||||
| 	}) | ||||
| } | ||||
|   | ||||
| @@ -80,7 +80,7 @@ func TestTime(t *testing.T) { | ||||
| 		WithHandlerFunc(slog.NewTextHandler), | ||||
| 		logger.WithAddStacktrace(true), | ||||
| 		logger.WithTimeFunc(func() time.Time { | ||||
| 			return time.Unix(0, 0) | ||||
| 			return time.Unix(0, 0).UTC() | ||||
| 		}), | ||||
| 	) | ||||
| 	if err := l.Init(logger.WithFields("key1", "val1")); err != nil { | ||||
| @@ -89,8 +89,7 @@ func TestTime(t *testing.T) { | ||||
|  | ||||
| 	l.Error(ctx, "msg1", errors.New("err")) | ||||
|  | ||||
| 	if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T03:00:00.000000000+03:00`)) && | ||||
| 		!bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) { | ||||
| 	if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) { | ||||
| 		t.Fatalf("logger error not works, buf contains: %s", buf.Bytes()) | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -4,8 +4,8 @@ package meter | ||||
| import ( | ||||
| 	"io" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| @@ -59,6 +59,8 @@ type Meter interface { | ||||
| 	Options() Options | ||||
| 	// String return meter type | ||||
| 	String() string | ||||
| 	// Unregister metric name and drop all data | ||||
| 	Unregister(name string, labels ...string) bool | ||||
| } | ||||
|  | ||||
| // Counter is a counter | ||||
| @@ -117,6 +119,39 @@ func BuildLabels(labels ...string) []string { | ||||
| 	return labels | ||||
| } | ||||
|  | ||||
// spool is the shared builder pool used when formatting metric names.
var spool = newStringsPool(500)

// stringsPool is a sync.Pool of *strings.Builder with a capacity cap:
// builders that grew beyond c bytes are dropped instead of pooled.
type stringsPool struct {
	p *sync.Pool
	c int
}

// newStringsPool creates a pool retaining builders up to size bytes.
func newStringsPool(size int) *stringsPool {
	return &stringsPool{
		c: size,
		p: &sync.Pool{
			New: func() interface{} { return &strings.Builder{} },
		},
	}
}

// Cap reports the maximum builder capacity the pool retains.
func (p *stringsPool) Cap() int {
	return p.c
}

// Get returns a builder from the pool, allocating one if the pool is empty.
func (p *stringsPool) Get() *strings.Builder {
	return p.p.Get().(*strings.Builder)
}

// Put resets b and returns it to the pool; oversized builders are
// discarded so the pool never accumulates large buffers.
// NOTE(review): strings.Builder.Reset drops the underlying buffer, so
// pooled builders keep no capacity between uses — confirm the pool
// still pays off versus allocating a fresh Builder each time.
func (p *stringsPool) Put(b *strings.Builder) {
	if b.Cap() > p.c {
		return
	}
	b.Reset()
	p.p.Put(b)
}
|  | ||||
| // BuildName used to combine metric with labels. | ||||
| // If labels count is odd, drop last element | ||||
| func BuildName(name string, labels ...string) string { | ||||
| @@ -125,8 +160,6 @@ func BuildName(name string, labels ...string) string { | ||||
| 	} | ||||
|  | ||||
| 	if len(labels) > 2 { | ||||
| 		sort.Sort(byKey(labels)) | ||||
|  | ||||
| 		idx := 0 | ||||
| 		for { | ||||
| 			if labels[idx] == labels[idx+2] { | ||||
| @@ -141,7 +174,9 @@ func BuildName(name string, labels ...string) string { | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var b strings.Builder | ||||
| 	b := spool.Get() | ||||
| 	defer spool.Put(b) | ||||
|  | ||||
| 	_, _ = b.WriteString(name) | ||||
| 	_, _ = b.WriteRune('{') | ||||
| 	for idx := 0; idx < len(labels); idx += 2 { | ||||
| @@ -149,8 +184,9 @@ func BuildName(name string, labels ...string) string { | ||||
| 			_, _ = b.WriteRune(',') | ||||
| 		} | ||||
| 		_, _ = b.WriteString(labels[idx]) | ||||
| 		_, _ = b.WriteString(`=`) | ||||
| 		_, _ = b.WriteString(strconv.Quote(labels[idx+1])) | ||||
| 		_, _ = b.WriteString(`="`) | ||||
| 		_, _ = b.WriteString(labels[idx+1]) | ||||
| 		_, _ = b.WriteRune('"') | ||||
| 	} | ||||
| 	_, _ = b.WriteRune('}') | ||||
|  | ||||
|   | ||||
| @@ -50,11 +50,12 @@ func TestBuildName(t *testing.T) { | ||||
| 	data := map[string][]string{ | ||||
| 		`my_metric{firstlabel="value2",zerolabel="value3"}`: { | ||||
| 			"my_metric", | ||||
| 			"zerolabel", "value3", "firstlabel", "value2", | ||||
| 			"firstlabel", "value2", | ||||
| 			"zerolabel", "value3", | ||||
| 		}, | ||||
| 		`my_metric{broker="broker2",register="mdns",server="tcp"}`: { | ||||
| 			"my_metric", | ||||
| 			"broker", "broker1", "broker", "broker2", "server", "http", "server", "tcp", "register", "mdns", | ||||
| 			"broker", "broker1", "broker", "broker2", "register", "mdns", "server", "http", "server", "tcp", | ||||
| 		}, | ||||
| 		`my_metric{aaa="aaa"}`: { | ||||
| 			"my_metric", | ||||
|   | ||||
| @@ -28,6 +28,10 @@ func (r *noopMeter) Name() string { | ||||
| 	return r.opts.Name | ||||
| } | ||||
|  | ||||
| func (r *noopMeter) Unregister(name string, labels ...string) bool { | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Init initialize options | ||||
| func (r *noopMeter) Init(opts ...Option) error { | ||||
| 	for _, o := range opts { | ||||
|   | ||||
| @@ -91,7 +91,7 @@ func (p *bro) Connect(_ context.Context) error { return nil } | ||||
| func (p *bro) Disconnect(_ context.Context) error { return nil } | ||||
|  | ||||
| // NewMessage creates new message | ||||
| func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.PublishOption) (broker.Message, error) { | ||||
| func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.MessageOption) (broker.Message, error) { | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -12,7 +12,7 @@ import ( | ||||
|  | ||||
| type httpProfile struct { | ||||
| 	server  *http.Server | ||||
| 	sync.Mutex | ||||
| 	mu      sync.Mutex | ||||
| 	running bool | ||||
| } | ||||
|  | ||||
| @@ -21,8 +21,8 @@ var DefaultAddress = ":6060" | ||||
|  | ||||
| // Start the profiler | ||||
| func (h *httpProfile) Start() error { | ||||
| 	h.Lock() | ||||
| 	defer h.Unlock() | ||||
| 	h.mu.Lock() | ||||
| 	defer h.mu.Unlock() | ||||
|  | ||||
| 	if h.running { | ||||
| 		return nil | ||||
| @@ -30,9 +30,9 @@ func (h *httpProfile) Start() error { | ||||
|  | ||||
| 	go func() { | ||||
| 		if err := h.server.ListenAndServe(); err != nil { | ||||
| 			h.Lock() | ||||
| 			h.mu.Lock() | ||||
| 			h.running = false | ||||
| 			h.Unlock() | ||||
| 			h.mu.Unlock() | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| @@ -43,8 +43,8 @@ func (h *httpProfile) Start() error { | ||||
|  | ||||
| // Stop the profiler | ||||
| func (h *httpProfile) Stop() error { | ||||
| 	h.Lock() | ||||
| 	defer h.Unlock() | ||||
| 	h.mu.Lock() | ||||
| 	defer h.mu.Unlock() | ||||
|  | ||||
| 	if !h.running { | ||||
| 		return nil | ||||
|   | ||||
| @@ -17,7 +17,7 @@ type profiler struct { | ||||
| 	cpuFile *os.File | ||||
| 	memFile *os.File | ||||
| 	opts    profile.Options | ||||
| 	sync.Mutex | ||||
| 	mu      sync.Mutex | ||||
| 	running bool | ||||
| } | ||||
|  | ||||
| @@ -39,8 +39,8 @@ func (p *profiler) writeHeap(f *os.File) { | ||||
| } | ||||
|  | ||||
| func (p *profiler) Start() error { | ||||
| 	p.Lock() | ||||
| 	defer p.Unlock() | ||||
| 	p.mu.Lock() | ||||
| 	defer p.mu.Unlock() | ||||
|  | ||||
| 	if p.running { | ||||
| 		return nil | ||||
| @@ -86,8 +86,8 @@ func (p *profiler) Start() error { | ||||
| } | ||||
|  | ||||
| func (p *profiler) Stop() error { | ||||
| 	p.Lock() | ||||
| 	defer p.Unlock() | ||||
| 	p.mu.Lock() | ||||
| 	defer p.mu.Unlock() | ||||
|  | ||||
| 	select { | ||||
| 	case <-p.exit: | ||||
|   | ||||
| @@ -33,7 +33,7 @@ type memory struct { | ||||
| 	records  map[string]services | ||||
| 	watchers map[string]*watcher | ||||
| 	opts     register.Options | ||||
| 	sync.RWMutex | ||||
| 	mu       sync.RWMutex | ||||
| } | ||||
|  | ||||
| // services is a KV map with service name as the key and a map of records as the value | ||||
| @@ -57,7 +57,7 @@ func (m *memory) ttlPrune() { | ||||
| 	defer prune.Stop() | ||||
|  | ||||
| 	for range prune.C { | ||||
| 		m.Lock() | ||||
| 		m.mu.Lock() | ||||
| 		for namespace, services := range m.records { | ||||
| 			for service, versions := range services { | ||||
| 				for version, record := range versions { | ||||
| @@ -72,24 +72,24 @@ func (m *memory) ttlPrune() { | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		m.Unlock() | ||||
| 		m.mu.Unlock() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *memory) sendEvent(r *register.Result) { | ||||
| 	m.RLock() | ||||
| 	m.mu.RLock() | ||||
| 	watchers := make([]*watcher, 0, len(m.watchers)) | ||||
| 	for _, w := range m.watchers { | ||||
| 		watchers = append(watchers, w) | ||||
| 	} | ||||
| 	m.RUnlock() | ||||
| 	m.mu.RUnlock() | ||||
|  | ||||
| 	for _, w := range watchers { | ||||
| 		select { | ||||
| 		case <-w.exit: | ||||
| 			m.Lock() | ||||
| 			m.mu.Lock() | ||||
| 			delete(m.watchers, w.id) | ||||
| 			m.Unlock() | ||||
| 			m.mu.Unlock() | ||||
| 		default: | ||||
| 			select { | ||||
| 			case w.res <- r: | ||||
| @@ -113,8 +113,8 @@ func (m *memory) Init(opts ...register.Option) error { | ||||
| 	} | ||||
|  | ||||
| 	// add services | ||||
| 	m.Lock() | ||||
| 	defer m.Unlock() | ||||
| 	m.mu.Lock() | ||||
| 	defer m.mu.Unlock() | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
| @@ -124,8 +124,8 @@ func (m *memory) Options() register.Options { | ||||
| } | ||||
|  | ||||
| func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error { | ||||
| 	m.Lock() | ||||
| 	defer m.Unlock() | ||||
| 	m.mu.Lock() | ||||
| 	defer m.mu.Unlock() | ||||
|  | ||||
| 	options := register.NewRegisterOptions(opts...) | ||||
|  | ||||
| @@ -197,8 +197,8 @@ func (m *memory) Register(_ context.Context, s *register.Service, opts ...regist | ||||
| } | ||||
|  | ||||
| func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error { | ||||
| 	m.Lock() | ||||
| 	defer m.Unlock() | ||||
| 	m.mu.Lock() | ||||
| 	defer m.mu.Unlock() | ||||
|  | ||||
| 	options := register.NewDeregisterOptions(opts...) | ||||
|  | ||||
| @@ -264,9 +264,9 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe | ||||
|  | ||||
| 	// if it's a wildcard domain, return from all domains | ||||
| 	if options.Namespace == register.WildcardNamespace { | ||||
| 		m.RLock() | ||||
| 		m.mu.RLock() | ||||
| 		recs := m.records | ||||
| 		m.RUnlock() | ||||
| 		m.mu.RUnlock() | ||||
|  | ||||
| 		var services []*register.Service | ||||
|  | ||||
| @@ -286,8 +286,8 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe | ||||
| 		return services, nil | ||||
| 	} | ||||
|  | ||||
| 	m.RLock() | ||||
| 	defer m.RUnlock() | ||||
| 	m.mu.RLock() | ||||
| 	defer m.mu.RUnlock() | ||||
|  | ||||
| 	// check the domain exists | ||||
| 	services, ok := m.records[options.Namespace] | ||||
| @@ -319,9 +319,9 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption) | ||||
|  | ||||
| 	// if it's a wildcard domain, list from all domains | ||||
| 	if options.Namespace == register.WildcardNamespace { | ||||
| 		m.RLock() | ||||
| 		m.mu.RLock() | ||||
| 		recs := m.records | ||||
| 		m.RUnlock() | ||||
| 		m.mu.RUnlock() | ||||
|  | ||||
| 		var services []*register.Service | ||||
|  | ||||
| @@ -336,8 +336,8 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption) | ||||
| 		return services, nil | ||||
| 	} | ||||
|  | ||||
| 	m.RLock() | ||||
| 	defer m.RUnlock() | ||||
| 	m.mu.RLock() | ||||
| 	defer m.mu.RUnlock() | ||||
|  | ||||
| 	// ensure the domain exists | ||||
| 	services, ok := m.records[options.Namespace] | ||||
| @@ -371,9 +371,9 @@ func (m *memory) Watch(ctx context.Context, opts ...register.WatchOption) (regis | ||||
| 		wo:   wo, | ||||
| 	} | ||||
|  | ||||
| 	m.Lock() | ||||
| 	m.mu.Lock() | ||||
| 	m.watchers[w.id] = w | ||||
| 	m.Unlock() | ||||
| 	m.mu.Unlock() | ||||
|  | ||||
| 	return w, nil | ||||
| } | ||||
|   | ||||
| @@ -57,7 +57,7 @@ type noopServer struct { | ||||
| 	handlers   map[string]Handler | ||||
| 	exit       chan chan error | ||||
| 	opts       Options | ||||
| 	sync.RWMutex | ||||
| 	mu         sync.RWMutex | ||||
| 	registered bool | ||||
| 	started    bool | ||||
| } | ||||
| @@ -125,10 +125,10 @@ func (n *noopServer) String() string { | ||||
|  | ||||
| //nolint:gocyclo | ||||
| func (n *noopServer) Register() error { | ||||
| 	n.RLock() | ||||
| 	n.mu.RLock() | ||||
| 	rsvc := n.rsvc | ||||
| 	config := n.opts | ||||
| 	n.RUnlock() | ||||
| 	n.mu.RUnlock() | ||||
|  | ||||
| 	// if service already filled, reuse it and return early | ||||
| 	if rsvc != nil { | ||||
| @@ -144,9 +144,9 @@ func (n *noopServer) Register() error { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	n.RLock() | ||||
| 	n.mu.RLock() | ||||
| 	registered := n.registered | ||||
| 	n.RUnlock() | ||||
| 	n.mu.RUnlock() | ||||
|  | ||||
| 	if !registered { | ||||
| 		if config.Logger.V(logger.InfoLevel) { | ||||
| @@ -164,8 +164,8 @@ func (n *noopServer) Register() error { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	n.Lock() | ||||
| 	defer n.Unlock() | ||||
| 	n.mu.Lock() | ||||
| 	defer n.mu.Unlock() | ||||
|  | ||||
| 	n.registered = true | ||||
| 	if cacheService { | ||||
| @@ -178,9 +178,9 @@ func (n *noopServer) Register() error { | ||||
| func (n *noopServer) Deregister() error { | ||||
| 	var err error | ||||
|  | ||||
| 	n.RLock() | ||||
| 	n.mu.RLock() | ||||
| 	config := n.opts | ||||
| 	n.RUnlock() | ||||
| 	n.mu.RUnlock() | ||||
|  | ||||
| 	service, err := NewRegisterService(n) | ||||
| 	if err != nil { | ||||
| @@ -195,29 +195,29 @@ func (n *noopServer) Deregister() error { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	n.Lock() | ||||
| 	n.mu.Lock() | ||||
| 	n.rsvc = nil | ||||
|  | ||||
| 	if !n.registered { | ||||
| 		n.Unlock() | ||||
| 		n.mu.Unlock() | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	n.registered = false | ||||
|  | ||||
| 	n.Unlock() | ||||
| 	n.mu.Unlock() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| //nolint:gocyclo | ||||
| func (n *noopServer) Start() error { | ||||
| 	n.RLock() | ||||
| 	n.mu.RLock() | ||||
| 	if n.started { | ||||
| 		n.RUnlock() | ||||
| 		n.mu.RUnlock() | ||||
| 		return nil | ||||
| 	} | ||||
| 	config := n.Options() | ||||
| 	n.RUnlock() | ||||
| 	n.mu.RUnlock() | ||||
|  | ||||
| 	// use 127.0.0.1 to avoid scan of all network interfaces | ||||
| 	addr, err := maddr.Extract("127.0.0.1") | ||||
| @@ -235,11 +235,11 @@ func (n *noopServer) Start() error { | ||||
| 		config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address) | ||||
| 	} | ||||
|  | ||||
| 	n.Lock() | ||||
| 	n.mu.Lock() | ||||
| 	if len(config.Advertise) == 0 { | ||||
| 		config.Advertise = config.Address | ||||
| 	} | ||||
| 	n.Unlock() | ||||
| 	n.mu.Unlock() | ||||
|  | ||||
| 	// use RegisterCheck func before register | ||||
| 	// nolint: nestif | ||||
| @@ -273,9 +273,9 @@ func (n *noopServer) Start() error { | ||||
| 			select { | ||||
| 			// register self on interval | ||||
| 			case <-t.C: | ||||
| 				n.RLock() | ||||
| 				n.mu.RLock() | ||||
| 				registered := n.registered | ||||
| 				n.RUnlock() | ||||
| 				n.mu.RUnlock() | ||||
| 				rerr := config.RegisterCheck(config.Context) | ||||
| 				// nolint: nestif | ||||
| 				if rerr != nil && registered { | ||||
| @@ -332,29 +332,29 @@ func (n *noopServer) Start() error { | ||||
| 	}() | ||||
|  | ||||
| 	// mark the server as started | ||||
| 	n.Lock() | ||||
| 	n.mu.Lock() | ||||
| 	n.started = true | ||||
| 	n.Unlock() | ||||
| 	n.mu.Unlock() | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (n *noopServer) Stop() error { | ||||
| 	n.RLock() | ||||
| 	n.mu.RLock() | ||||
| 	if !n.started { | ||||
| 		n.RUnlock() | ||||
| 		n.mu.RUnlock() | ||||
| 		return nil | ||||
| 	} | ||||
| 	n.RUnlock() | ||||
| 	n.mu.RUnlock() | ||||
|  | ||||
| 	ch := make(chan error) | ||||
| 	n.exit <- ch | ||||
|  | ||||
| 	err := <-ch | ||||
| 	n.Lock() | ||||
| 	n.mu.Lock() | ||||
| 	n.rsvc = nil | ||||
| 	n.started = false | ||||
| 	n.Unlock() | ||||
| 	n.mu.Unlock() | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|   | ||||
							
								
								
									
										16
									
								
								service.go
									
									
									
									
									
								
							
							
						
						
									
										16
									
								
								service.go
									
									
									
									
									
								
							| @@ -98,7 +98,7 @@ func RegisterHandler(s server.Server, h interface{}, opts ...server.HandlerOptio | ||||
| type service struct { | ||||
| 	done    chan struct{} | ||||
| 	opts    Options | ||||
| 	sync.RWMutex | ||||
| 	mu      sync.RWMutex | ||||
| 	stopped bool | ||||
| } | ||||
|  | ||||
| @@ -321,9 +321,9 @@ func (s *service) Health() bool { | ||||
| func (s *service) Start() error { | ||||
| 	var err error | ||||
|  | ||||
| 	s.RLock() | ||||
| 	s.mu.RLock() | ||||
| 	config := s.opts | ||||
| 	s.RUnlock() | ||||
| 	s.mu.RUnlock() | ||||
|  | ||||
| 	for _, cfg := range s.opts.Configs { | ||||
| 		if cfg.Options().Struct == nil { | ||||
| @@ -380,9 +380,9 @@ func (s *service) Start() error { | ||||
| } | ||||
|  | ||||
| func (s *service) Stop() error { | ||||
| 	s.RLock() | ||||
| 	s.mu.RLock() | ||||
| 	config := s.opts | ||||
| 	s.RUnlock() | ||||
| 	s.mu.RUnlock() | ||||
|  | ||||
| 	if config.Loggers[0].V(logger.InfoLevel) { | ||||
| 		config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name())) | ||||
| @@ -457,13 +457,13 @@ func (s *service) Run() error { | ||||
| // notifyShutdown marks the service as stopped and closes the done channel. | ||||
| // It ensures the channel is closed only once, preventing multiple closures. | ||||
| func (s *service) notifyShutdown() { | ||||
| 	s.Lock() | ||||
| 	s.mu.Lock() | ||||
| 	if s.stopped { | ||||
| 		s.Unlock() | ||||
| 		s.mu.Unlock() | ||||
| 		return | ||||
| 	} | ||||
| 	s.stopped = true | ||||
| 	s.Unlock() | ||||
| 	s.mu.Unlock() | ||||
|  | ||||
| 	close(s.done) | ||||
| } | ||||
|   | ||||
| @@ -139,7 +139,7 @@ func (n *noopStore) fnExists(ctx context.Context, _ string, _ ...ExistsOption) e | ||||
| 		return ctx.Err() | ||||
| 	default: | ||||
| 	} | ||||
| 	return nil | ||||
| 	return ErrNotFound | ||||
| } | ||||
|  | ||||
| func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error { | ||||
|   | ||||
| @@ -2,6 +2,7 @@ package store | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"testing" | ||||
| ) | ||||
|  | ||||
| @@ -25,7 +26,8 @@ func TestHook(t *testing.T) { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	if err := s.Exists(context.TODO(), "test"); err != nil { | ||||
| 	err := s.Exists(context.TODO(), "test") | ||||
| 	if !errors.Is(err, ErrNotFound) { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
|   | ||||
| @@ -9,7 +9,7 @@ type memorySync struct { | ||||
| 	locks   map[string]*memoryLock | ||||
| 	options Options | ||||
|  | ||||
| 	mtx gosync.RWMutex | ||||
| 	mu gosync.RWMutex | ||||
| } | ||||
|  | ||||
| type memoryLock struct { | ||||
| @@ -74,7 +74,7 @@ func (m *memorySync) Options() Options { | ||||
|  | ||||
| func (m *memorySync) Lock(id string, opts ...LockOption) error { | ||||
| 	// lock our access | ||||
| 	m.mtx.Lock() | ||||
| 	m.mu.Lock() | ||||
|  | ||||
| 	var options LockOptions | ||||
| 	for _, o := range opts { | ||||
| @@ -90,11 +90,11 @@ func (m *memorySync) Lock(id string, opts ...LockOption) error { | ||||
| 			release: make(chan bool), | ||||
| 		} | ||||
| 		// unlock | ||||
| 		m.mtx.Unlock() | ||||
| 		m.mu.Unlock() | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	m.mtx.Unlock() | ||||
| 	m.mu.Unlock() | ||||
|  | ||||
| 	// set wait time | ||||
| 	var wait <-chan time.Time | ||||
| @@ -124,12 +124,12 @@ lockLoop: | ||||
| 		// wait for the lock to be released | ||||
| 		select { | ||||
| 		case <-lk.release: | ||||
| 			m.mtx.Lock() | ||||
| 			m.mu.Lock() | ||||
|  | ||||
| 			// someone locked before us | ||||
| 			lk, ok = m.locks[id] | ||||
| 			if ok { | ||||
| 				m.mtx.Unlock() | ||||
| 				m.mu.Unlock() | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| @@ -141,7 +141,7 @@ lockLoop: | ||||
| 				release: make(chan bool), | ||||
| 			} | ||||
|  | ||||
| 			m.mtx.Unlock() | ||||
| 			m.mu.Unlock() | ||||
|  | ||||
| 			break lockLoop | ||||
| 		case <-ttl: | ||||
| @@ -160,8 +160,8 @@ lockLoop: | ||||
| } | ||||
|  | ||||
| func (m *memorySync) Unlock(id string) error { | ||||
| 	m.mtx.Lock() | ||||
| 	defer m.mtx.Unlock() | ||||
| 	m.mu.Lock() | ||||
| 	defer m.mu.Unlock() | ||||
|  | ||||
| 	lk, ok := m.locks[id] | ||||
| 	// no lock exists | ||||
|   | ||||
| @@ -46,6 +46,10 @@ func (s memoryStringer) String() string { | ||||
| 	return s.s | ||||
| } | ||||
|  | ||||
| func (t *Tracer) Enabled() bool { | ||||
| 	return t.opts.Enabled | ||||
| } | ||||
|  | ||||
| func (t *Tracer) Flush(_ context.Context) error { | ||||
| 	return nil | ||||
| } | ||||
|   | ||||
| @@ -20,6 +20,10 @@ func (t *noopTracer) Spans() []Span { | ||||
|  | ||||
| var uuidNil = uuid.Nil.String() | ||||
|  | ||||
| func (t *noopTracer) Enabled() bool { | ||||
| 	return t.opts.Enabled | ||||
| } | ||||
|  | ||||
| func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { | ||||
| 	options := NewSpanOptions(opts...) | ||||
| 	span := &noopSpan{ | ||||
|   | ||||
| @@ -142,6 +142,8 @@ type Options struct { | ||||
| 	Name string | ||||
| 	// ContextAttrFuncs contains funcs that provides tracing | ||||
| 	ContextAttrFuncs []ContextAttrFunc | ||||
| 	// Enabled specify trace status | ||||
| 	Enabled bool | ||||
| } | ||||
|  | ||||
| // Option func signature | ||||
| @@ -181,6 +183,7 @@ func NewOptions(opts ...Option) Options { | ||||
| 		Logger:           logger.DefaultLogger, | ||||
| 		Context:          context.Background(), | ||||
| 		ContextAttrFuncs: DefaultContextAttrFuncs, | ||||
| 		Enabled:          true, | ||||
| 	} | ||||
| 	for _, o := range opts { | ||||
| 		o(&options) | ||||
| @@ -194,3 +197,10 @@ func Name(n string) Option { | ||||
| 		o.Name = n | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Disabled disable tracer | ||||
| func Disabled(b bool) Option { | ||||
| 	return func(o *Options) { | ||||
| 		o.Enabled = !b | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -29,10 +29,10 @@ type ContextAttrFunc func(ctx context.Context) []interface{} | ||||
| func init() { | ||||
| 	logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, | ||||
| 		func(ctx context.Context) []interface{} { | ||||
| 			if span, ok := SpanFromContext(ctx); ok { | ||||
| 			if sp, ok := SpanFromContext(ctx); ok && sp != nil && sp.IsRecording() { | ||||
| 				return []interface{}{ | ||||
| 					TraceIDKey, span.TraceID(), | ||||
| 					SpanIDKey, span.SpanID(), | ||||
| 					TraceIDKey, sp.TraceID(), | ||||
| 					SpanIDKey, sp.SpanID(), | ||||
| 				} | ||||
| 			} | ||||
| 			return nil | ||||
| @@ -51,6 +51,8 @@ type Tracer interface { | ||||
| 	// Extract(ctx context.Context) | ||||
| 	// Flush flushes spans | ||||
| 	Flush(ctx context.Context) error | ||||
| 	// Enabled returns tracer status | ||||
| 	Enabled() bool | ||||
| } | ||||
|  | ||||
| type Span interface { | ||||
|   | ||||
| @@ -1,13 +1,16 @@ | ||||
| package buffer | ||||
|  | ||||
| import "io" | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| var _ interface { | ||||
| 	io.ReadCloser | ||||
| 	io.ReadSeeker | ||||
| } = (*SeekerBuffer)(nil) | ||||
|  | ||||
| // Buffer is a ReadWriteCloser that supports seeking. It's intended to | ||||
| // SeekerBuffer is a ReadWriteCloser that supports seeking. It's intended to | ||||
| // replicate the functionality of bytes.Buffer that I use in my projects. | ||||
| // | ||||
| // Note that the seeking is limited to the read marker; all writes are | ||||
| @@ -23,6 +26,7 @@ func NewSeekerBuffer(data []byte) *SeekerBuffer { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Read reads up to len(p) bytes into p from the current read position. | ||||
| func (b *SeekerBuffer) Read(p []byte) (int, error) { | ||||
| 	if b.pos >= int64(len(b.data)) { | ||||
| 		return 0, io.EOF | ||||
| @@ -30,29 +34,51 @@ func (b *SeekerBuffer) Read(p []byte) (int, error) { | ||||
|  | ||||
| 	n := copy(p, b.data[b.pos:]) | ||||
| 	b.pos += int64(n) | ||||
|  | ||||
| 	return n, nil | ||||
| } | ||||
|  | ||||
| // Write appends the contents of p to the end of the buffer. It does not affect the read position. | ||||
| func (b *SeekerBuffer) Write(p []byte) (int, error) { | ||||
| 	if len(p) == 0 { | ||||
| 		return 0, nil | ||||
| 	} | ||||
|  | ||||
| 	b.data = append(b.data, p...) | ||||
|  | ||||
| 	return len(p), nil | ||||
| } | ||||
|  | ||||
| // Seek sets the read pointer to pos. | ||||
| // Seek sets the offset for the next Read operation. | ||||
| // The offset is interpreted according to whence: | ||||
| // - io.SeekStart: relative to the beginning of the buffer | ||||
| // - io.SeekCurrent: relative to the current position | ||||
| // - io.SeekEnd: relative to the end of the buffer | ||||
| // | ||||
| // Returns an error if the resulting position is negative or if whence is invalid. | ||||
| func (b *SeekerBuffer) Seek(offset int64, whence int) (int64, error) { | ||||
| 	var newPos int64 | ||||
|  | ||||
| 	switch whence { | ||||
| 	case io.SeekStart: | ||||
| 		b.pos = offset | ||||
| 		newPos = offset | ||||
| 	case io.SeekEnd: | ||||
| 		b.pos = int64(len(b.data)) + offset | ||||
| 		newPos = int64(len(b.data)) + offset | ||||
| 	case io.SeekCurrent: | ||||
| 		b.pos += offset | ||||
| 		newPos = b.pos + offset | ||||
| 	default: | ||||
| 		return 0, fmt.Errorf("invalid whence: %d", whence) | ||||
| 	} | ||||
|  | ||||
| 	if newPos < 0 { | ||||
| 		return 0, fmt.Errorf("invalid seek: resulting position %d is negative", newPos) | ||||
| 	} | ||||
|  | ||||
| 	b.pos = newPos | ||||
| 	return b.pos, nil | ||||
| } | ||||
|  | ||||
| // Rewind resets the read pointer to 0. | ||||
| // Rewind resets the read position to 0. | ||||
| func (b *SeekerBuffer) Rewind() error { | ||||
| 	if _, err := b.Seek(0, io.SeekStart); err != nil { | ||||
| 		return err | ||||
| @@ -67,12 +93,24 @@ func (b *SeekerBuffer) Close() error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Reset clears all the data out of the buffer and sets the read position to 0. | ||||
| func (b *SeekerBuffer) Reset() { | ||||
| 	b.data = nil | ||||
| 	b.pos = 0 | ||||
| } | ||||
|  | ||||
| // Len returns the length of data remaining to be read. | ||||
| func (b *SeekerBuffer) Len() int { | ||||
| 	if b.pos >= int64(len(b.data)) { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	return len(b.data[b.pos:]) | ||||
| } | ||||
|  | ||||
| // Bytes returns the underlying bytes from the current position. | ||||
| func (b *SeekerBuffer) Bytes() []byte { | ||||
| 	if b.pos >= int64(len(b.data)) { | ||||
| 		return []byte{} | ||||
| 	} | ||||
| 	return b.data[b.pos:] | ||||
| } | ||||
|   | ||||
| @@ -2,54 +2,384 @@ package buffer | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"io" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/require" | ||||
| ) | ||||
|  | ||||
| func noErrorT(t *testing.T, err error) { | ||||
| 	if nil != err { | ||||
| 		t.Fatalf("%s", err) | ||||
| func TestNewSeekerBuffer(t *testing.T) { | ||||
| 	input := []byte{'a', 'b', 'c', 'd', 'e'} | ||||
| 	expected := &SeekerBuffer{data: []byte{'a', 'b', 'c', 'd', 'e'}, pos: 0} | ||||
| 	require.Equal(t, expected, NewSeekerBuffer(input)) | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Read(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name         string | ||||
| 		data         []byte | ||||
| 		initPos      int64 | ||||
| 		readBuf      []byte | ||||
| 		expectedN    int | ||||
| 		expectedData []byte | ||||
| 		expectedErr  error | ||||
| 		expectedPos  int64 | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:         "read with empty buffer", | ||||
| 			data:         []byte("hello"), | ||||
| 			initPos:      0, | ||||
| 			readBuf:      []byte{}, | ||||
| 			expectedN:    0, | ||||
| 			expectedData: []byte{}, | ||||
| 			expectedErr:  nil, | ||||
| 			expectedPos:  0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "read with nil buffer", | ||||
| 			data:         []byte("hello"), | ||||
| 			initPos:      0, | ||||
| 			readBuf:      nil, | ||||
| 			expectedN:    0, | ||||
| 			expectedData: nil, | ||||
| 			expectedErr:  nil, | ||||
| 			expectedPos:  0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "read full buffer", | ||||
| 			data:         []byte("hello"), | ||||
| 			initPos:      0, | ||||
| 			readBuf:      make([]byte, 5), | ||||
| 			expectedN:    5, | ||||
| 			expectedData: []byte("hello"), | ||||
| 			expectedErr:  nil, | ||||
| 			expectedPos:  5, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "read partial buffer", | ||||
| 			data:         []byte("hello"), | ||||
| 			initPos:      2, | ||||
| 			readBuf:      make([]byte, 2), | ||||
| 			expectedN:    2, | ||||
| 			expectedData: []byte("ll"), | ||||
| 			expectedErr:  nil, | ||||
| 			expectedPos:  4, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "read after end", | ||||
| 			data:         []byte("hello"), | ||||
| 			initPos:      5, | ||||
| 			readBuf:      make([]byte, 5), | ||||
| 			expectedN:    0, | ||||
| 			expectedData: make([]byte, 5), | ||||
| 			expectedErr:  io.EOF, | ||||
| 			expectedPos:  5, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			sb := NewSeekerBuffer(tt.data) | ||||
| 			sb.pos = tt.initPos | ||||
|  | ||||
| 			n, err := sb.Read(tt.readBuf) | ||||
|  | ||||
| 			if tt.expectedErr != nil { | ||||
| 				require.Equal(t, err, tt.expectedErr) | ||||
| 			} else { | ||||
| 				require.NoError(t, err) | ||||
| 			} | ||||
|  | ||||
| 			require.Equal(t, tt.expectedN, n) | ||||
| 			require.Equal(t, tt.expectedData, tt.readBuf) | ||||
| 			require.Equal(t, tt.expectedPos, sb.pos) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func boolT(t *testing.T, cond bool, s ...string) { | ||||
| 	if !cond { | ||||
| 		what := strings.Join(s, ", ") | ||||
| 		if len(what) > 0 { | ||||
| 			what = ": " + what | ||||
| func TestSeekerBuffer_Write(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name         string | ||||
| 		initialData  []byte | ||||
| 		initialPos   int64 | ||||
| 		writeData    []byte | ||||
| 		expectedData []byte | ||||
| 		expectedN    int | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:         "write empty slice", | ||||
| 			initialData:  []byte("data"), | ||||
| 			initialPos:   0, | ||||
| 			writeData:    []byte{}, | ||||
| 			expectedData: []byte("data"), | ||||
| 			expectedN:    0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "write nil slice", | ||||
| 			initialData:  []byte("data"), | ||||
| 			initialPos:   0, | ||||
| 			writeData:    nil, | ||||
| 			expectedData: []byte("data"), | ||||
| 			expectedN:    0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "write to empty buffer", | ||||
| 			initialData:  nil, | ||||
| 			initialPos:   0, | ||||
| 			writeData:    []byte("abc"), | ||||
| 			expectedData: []byte("abc"), | ||||
| 			expectedN:    3, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "write to existing buffer", | ||||
| 			initialData:  []byte("hello"), | ||||
| 			initialPos:   0, | ||||
| 			writeData:    []byte(" world"), | ||||
| 			expectedData: []byte("hello world"), | ||||
| 			expectedN:    6, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:         "write after read", | ||||
| 			initialData:  []byte("abc"), | ||||
| 			initialPos:   2, | ||||
| 			writeData:    []byte("XYZ"), | ||||
| 			expectedData: []byte("abcXYZ"), | ||||
| 			expectedN:    3, | ||||
| 		}, | ||||
| 	} | ||||
| 		t.Fatalf("assert.Bool failed%s", what) | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			sb := NewSeekerBuffer(tt.initialData) | ||||
| 			sb.pos = tt.initialPos | ||||
|  | ||||
| 			n, err := sb.Write(tt.writeData) | ||||
| 			require.NoError(t, err) | ||||
| 			require.Equal(t, tt.expectedN, n) | ||||
| 			require.Equal(t, tt.expectedData, sb.data) | ||||
| 			require.Equal(t, tt.initialPos, sb.pos) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestSeeking(t *testing.T) { | ||||
| 	partA := []byte("hello, ") | ||||
| 	partB := []byte("world!") | ||||
|  | ||||
| 	buf := NewSeekerBuffer(partA) | ||||
|  | ||||
| 	boolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA))) | ||||
|  | ||||
| 	b := make([]byte, 32) | ||||
|  | ||||
| 	n, err := buf.Read(b) | ||||
| 	noErrorT(t, err) | ||||
| 	boolT(t, buf.Len() == 0, fmt.Sprintf("after reading 1: have length %d, want length 0", buf.Len())) | ||||
| 	boolT(t, n == len(partA), fmt.Sprintf("after reading 2: have length %d, want length %d", n, len(partA))) | ||||
|  | ||||
| 	n, err = buf.Write(partB) | ||||
| 	noErrorT(t, err) | ||||
| 	boolT(t, n == len(partB), fmt.Sprintf("after writing: have length %d, want length %d", n, len(partB))) | ||||
|  | ||||
| 	n, err = buf.Read(b) | ||||
| 	noErrorT(t, err) | ||||
| 	boolT(t, buf.Len() == 0, fmt.Sprintf("after rereading 1: have length %d, want length 0", buf.Len())) | ||||
| 	boolT(t, n == len(partB), fmt.Sprintf("after rereading 2: have length %d, want length %d", n, len(partB))) | ||||
|  | ||||
| 	partsLen := len(partA) + len(partB) | ||||
| 	_ = buf.Rewind() | ||||
| 	boolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen)) | ||||
|  | ||||
| 	buf.Close() | ||||
| 	boolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len())) | ||||
| func TestSeekerBuffer_Seek(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name        string | ||||
| 		initialData []byte | ||||
| 		initialPos  int64 | ||||
| 		offset      int64 | ||||
| 		whence      int | ||||
| 		expectedPos int64 | ||||
| 		expectedErr error | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:        "seek with invalid whence", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      1, | ||||
| 			whence:      12345, | ||||
| 			expectedPos: 0, | ||||
| 			expectedErr: fmt.Errorf("invalid whence: %d", 12345), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek negative from start", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      -1, | ||||
| 			whence:      io.SeekStart, | ||||
| 			expectedPos: 0, | ||||
| 			expectedErr: fmt.Errorf("invalid seek: resulting position %d is negative", -1), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek from start to 0", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      0, | ||||
| 			whence:      io.SeekStart, | ||||
| 			expectedPos: 0, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek from start to 3", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      3, | ||||
| 			whence:      io.SeekStart, | ||||
| 			expectedPos: 3, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek from end to -1 (last byte)", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      -1, | ||||
| 			whence:      io.SeekEnd, | ||||
| 			expectedPos: 5, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek from current forward", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  2, | ||||
| 			offset:      2, | ||||
| 			whence:      io.SeekCurrent, | ||||
| 			expectedPos: 4, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek from current backward", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  4, | ||||
| 			offset:      -2, | ||||
| 			whence:      io.SeekCurrent, | ||||
| 			expectedPos: 2, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek to end exactly", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      0, | ||||
| 			whence:      io.SeekEnd, | ||||
| 			expectedPos: 6, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "seek to out of range", | ||||
| 			initialData: []byte("abcdef"), | ||||
| 			initialPos:  0, | ||||
| 			offset:      2, | ||||
| 			whence:      io.SeekEnd, | ||||
| 			expectedPos: 8, | ||||
| 			expectedErr: nil, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			sb := NewSeekerBuffer(tt.initialData) | ||||
| 			sb.pos = tt.initialPos | ||||
|  | ||||
| 			newPos, err := sb.Seek(tt.offset, tt.whence) | ||||
|  | ||||
| 			if tt.expectedErr != nil { | ||||
| 				require.Equal(t, tt.expectedErr, err) | ||||
| 			} else { | ||||
| 				require.NoError(t, err) | ||||
| 				require.Equal(t, tt.expectedPos, newPos) | ||||
| 				require.Equal(t, tt.expectedPos, sb.pos) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Rewind(t *testing.T) { | ||||
| 	buf := NewSeekerBuffer([]byte("hello world")) | ||||
| 	buf.pos = 4 | ||||
|  | ||||
| 	require.NoError(t, buf.Rewind()) | ||||
| 	require.Equal(t, []byte("hello world"), buf.data) | ||||
| 	require.Equal(t, int64(0), buf.pos) | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Close(t *testing.T) { | ||||
| 	buf := NewSeekerBuffer([]byte("hello world")) | ||||
| 	buf.pos = 2 | ||||
|  | ||||
| 	require.NoError(t, buf.Close()) | ||||
| 	require.Nil(t, buf.data) | ||||
| 	require.Equal(t, int64(0), buf.pos) | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Reset(t *testing.T) { | ||||
| 	buf := NewSeekerBuffer([]byte("hello world")) | ||||
| 	buf.pos = 2 | ||||
|  | ||||
| 	buf.Reset() | ||||
| 	require.Nil(t, buf.data) | ||||
| 	require.Equal(t, int64(0), buf.pos) | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Len(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name     string | ||||
| 		data     []byte | ||||
| 		pos      int64 | ||||
| 		expected int | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:     "full buffer", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      0, | ||||
| 			expected: 5, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "partial read", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      2, | ||||
| 			expected: 3, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "fully read", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      5, | ||||
| 			expected: 0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "pos > len", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      10, | ||||
| 			expected: 0, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			buf := NewSeekerBuffer(tt.data) | ||||
| 			buf.pos = tt.pos | ||||
| 			require.Equal(t, tt.expected, buf.Len()) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestSeekerBuffer_Bytes(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name     string | ||||
| 		data     []byte | ||||
| 		pos      int64 | ||||
| 		expected []byte | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:     "start of buffer", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      0, | ||||
| 			expected: []byte("abcde"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "middle of buffer", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      2, | ||||
| 			expected: []byte("cde"), | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "end of buffer", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      5, | ||||
| 			expected: []byte{}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:     "pos beyond end", | ||||
| 			data:     []byte("abcde"), | ||||
| 			pos:      10, | ||||
| 			expected: []byte{}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			buf := NewSeekerBuffer(tt.data) | ||||
| 			buf.pos = tt.pos | ||||
| 			require.Equal(t, tt.expected, buf.Bytes()) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -137,7 +137,7 @@ type cache struct { | ||||
|  | ||||
| 	opts Options | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	mu sync.RWMutex | ||||
| } | ||||
|  | ||||
| type cacheEntry struct { | ||||
| @@ -171,7 +171,7 @@ func (c *cache) put(req string, res string) { | ||||
| 		ttl = c.opts.MaxCacheTTL | ||||
| 	} | ||||
|  | ||||
| 	c.Lock() | ||||
| 	c.mu.Lock() | ||||
| 	if c.entries == nil { | ||||
| 		c.entries = make(map[string]cacheEntry) | ||||
| 	} | ||||
| @@ -207,7 +207,7 @@ func (c *cache) put(req string, res string) { | ||||
| 	} | ||||
|  | ||||
| 	c.opts.Meter.Counter(semconv.CacheItemsTotal, "type", "dns").Inc() | ||||
| 	c.Unlock() | ||||
| 	c.mu.Unlock() | ||||
| } | ||||
|  | ||||
| func (c *cache) get(req string) (res string) { | ||||
| @@ -219,8 +219,8 @@ func (c *cache) get(req string) (res string) { | ||||
| 		return "" | ||||
| 	} | ||||
|  | ||||
| 	c.RLock() | ||||
| 	defer c.RUnlock() | ||||
| 	c.mu.RLock() | ||||
| 	defer c.mu.RUnlock() | ||||
|  | ||||
| 	if c.entries == nil { | ||||
| 		return "" | ||||
|   | ||||
| @@ -20,7 +20,7 @@ type dnsConn struct { | ||||
| 	ibuf bytes.Buffer | ||||
| 	obuf bytes.Buffer | ||||
|  | ||||
| 	sync.Mutex | ||||
| 	mu sync.Mutex | ||||
| } | ||||
|  | ||||
| type roundTripper func(ctx context.Context, req string) (res string, err error) | ||||
| @@ -42,15 +42,15 @@ func (c *dnsConn) Read(b []byte) (n int, err error) { | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) Write(b []byte) (n int, err error) { | ||||
| 	c.Lock() | ||||
| 	defer c.Unlock() | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
| 	return c.ibuf.Write(b) | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) Close() error { | ||||
| 	c.Lock() | ||||
| 	c.mu.Lock() | ||||
| 	cancel := c.cancel | ||||
| 	c.Unlock() | ||||
| 	c.mu.Unlock() | ||||
|  | ||||
| 	if cancel != nil { | ||||
| 		cancel() | ||||
| @@ -78,9 +78,9 @@ func (c *dnsConn) SetDeadline(t time.Time) error { | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) SetReadDeadline(t time.Time) error { | ||||
| 	c.Lock() | ||||
| 	c.mu.Lock() | ||||
| 	c.deadline = t | ||||
| 	c.Unlock() | ||||
| 	c.mu.Unlock() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @@ -90,8 +90,8 @@ func (c *dnsConn) SetWriteDeadline(_ time.Time) error { | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) drainBuffers(b []byte) (string, int, error) { | ||||
| 	c.Lock() | ||||
| 	defer c.Unlock() | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	// drain the output buffer | ||||
| 	if c.obuf.Len() > 0 { | ||||
| @@ -119,8 +119,8 @@ func (c *dnsConn) drainBuffers(b []byte) (string, int, error) { | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) { | ||||
| 	c.Lock() | ||||
| 	defer c.Unlock() | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
| 	c.obuf.WriteByte(byte(len(str) >> 8)) | ||||
| 	c.obuf.WriteByte(byte(len(str))) | ||||
| 	c.obuf.WriteString(str) | ||||
| @@ -128,8 +128,8 @@ func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) { | ||||
| } | ||||
|  | ||||
| func (c *dnsConn) childContext() (context.Context, context.CancelFunc) { | ||||
| 	c.Lock() | ||||
| 	defer c.Unlock() | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
| 	if c.ctx == nil { | ||||
| 		c.ctx, c.cancel = context.WithCancel(context.Background()) | ||||
| 	} | ||||
|   | ||||
| @@ -52,7 +52,7 @@ type clientTracer struct { | ||||
| 	tr          tracer.Tracer | ||||
| 	activeHooks map[string]context.Context | ||||
| 	root        tracer.Span | ||||
| 	mtx         sync.Mutex | ||||
| 	mu          sync.Mutex | ||||
| } | ||||
|  | ||||
| func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace { | ||||
| @@ -83,8 +83,8 @@ func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrac | ||||
| } | ||||
|  | ||||
| func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) { | ||||
| 	ct.mtx.Lock() | ||||
| 	defer ct.mtx.Unlock() | ||||
| 	ct.mu.Lock() | ||||
| 	defer ct.mu.Unlock() | ||||
|  | ||||
| 	if hookCtx, found := ct.activeHooks[hook]; !found { | ||||
| 		var sp tracer.Span | ||||
| @@ -104,8 +104,8 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) { | ||||
| } | ||||
|  | ||||
| func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) { | ||||
| 	ct.mtx.Lock() | ||||
| 	defer ct.mtx.Unlock() | ||||
| 	ct.mu.Lock() | ||||
| 	defer ct.mu.Unlock() | ||||
| 	if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif | ||||
| 		if span, ok := tracer.SpanFromContext(ctx); ok { | ||||
| 			if err != nil { | ||||
| @@ -136,8 +136,8 @@ func (ct *clientTracer) getParentContext(hook string) context.Context { | ||||
| } | ||||
|  | ||||
| func (ct *clientTracer) span(hook string) (tracer.Span, bool) { | ||||
| 	ct.mtx.Lock() | ||||
| 	defer ct.mtx.Unlock() | ||||
| 	ct.mu.Lock() | ||||
| 	defer ct.mu.Unlock() | ||||
| 	if ctx, ok := ct.activeHooks[hook]; ok { | ||||
| 		return tracer.SpanFromContext(ctx) | ||||
| 	} | ||||
|   | ||||
| @@ -14,7 +14,7 @@ type Buffer struct { | ||||
| 	vals    []*Entry | ||||
| 	size    int | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	mu sync.RWMutex | ||||
| } | ||||
|  | ||||
| // Entry is ring buffer data entry | ||||
| @@ -35,8 +35,8 @@ type Stream struct { | ||||
|  | ||||
| // Put adds a new value to ring buffer | ||||
| func (b *Buffer) Put(v interface{}) { | ||||
| 	b.Lock() | ||||
| 	defer b.Unlock() | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
|  | ||||
| 	// append to values | ||||
| 	entry := &Entry{ | ||||
| @@ -63,8 +63,8 @@ func (b *Buffer) Put(v interface{}) { | ||||
|  | ||||
| // Get returns the last n entries | ||||
| func (b *Buffer) Get(n int) []*Entry { | ||||
| 	b.RLock() | ||||
| 	defer b.RUnlock() | ||||
| 	b.mu.RLock() | ||||
| 	defer b.mu.RUnlock() | ||||
|  | ||||
| 	// reset any invalid values | ||||
| 	if n > len(b.vals) || n < 0 { | ||||
| @@ -80,8 +80,8 @@ func (b *Buffer) Get(n int) []*Entry { | ||||
|  | ||||
| // Since returns the entries since a specific time | ||||
| func (b *Buffer) Since(t time.Time) []*Entry { | ||||
| 	b.RLock() | ||||
| 	defer b.RUnlock() | ||||
| 	b.mu.RLock() | ||||
| 	defer b.mu.RUnlock() | ||||
|  | ||||
| 	// return all the values | ||||
| 	if t.IsZero() { | ||||
| @@ -109,8 +109,8 @@ func (b *Buffer) Since(t time.Time) []*Entry { | ||||
| // Stream logs from the buffer | ||||
| // Close the channel when you want to stop | ||||
| func (b *Buffer) Stream() (<-chan *Entry, chan bool) { | ||||
| 	b.Lock() | ||||
| 	defer b.Unlock() | ||||
| 	b.mu.Lock() | ||||
| 	defer b.mu.Unlock() | ||||
|  | ||||
| 	entries := make(chan *Entry, 128) | ||||
| 	id := id.MustNew() | ||||
|   | ||||
| @@ -24,7 +24,7 @@ type stream struct { | ||||
| 	err     error | ||||
| 	request *request | ||||
|  | ||||
| 	sync.RWMutex | ||||
| 	mu sync.RWMutex | ||||
| } | ||||
|  | ||||
| type request struct { | ||||
| @@ -57,9 +57,9 @@ func (s *stream) Request() server.Request { | ||||
| func (s *stream) Send(v interface{}) error { | ||||
| 	err := s.Stream.SendMsg(v) | ||||
| 	if err != nil { | ||||
| 		s.Lock() | ||||
| 		s.mu.Lock() | ||||
| 		s.err = err | ||||
| 		s.Unlock() | ||||
| 		s.mu.Unlock() | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
| @@ -68,17 +68,17 @@ func (s *stream) Send(v interface{}) error { | ||||
| func (s *stream) Recv(v interface{}) error { | ||||
| 	err := s.Stream.RecvMsg(v) | ||||
| 	if err != nil { | ||||
| 		s.Lock() | ||||
| 		s.mu.Lock() | ||||
| 		s.err = err | ||||
| 		s.Unlock() | ||||
| 		s.mu.Unlock() | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Error returns error that stream holds | ||||
| func (s *stream) Error() error { | ||||
| 	s.RLock() | ||||
| 	defer s.RUnlock() | ||||
| 	s.mu.RLock() | ||||
| 	defer s.mu.RUnlock() | ||||
| 	return s.err | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -6,18 +6,18 @@ import ( | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"go.unistack.org/micro/v4/meter" | ||||
| 	"go.unistack.org/micro/v4/semconv" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	pools   = make([]Statser, 0) | ||||
| 	poolsMu sync.Mutex | ||||
| ) | ||||
| func unregisterMetrics(size int) { | ||||
| 	meter.DefaultMeter.Unregister(semconv.PoolGetTotal, "capacity", strconv.Itoa(size)) | ||||
| 	meter.DefaultMeter.Unregister(semconv.PoolPutTotal, "capacity", strconv.Itoa(size)) | ||||
| 	meter.DefaultMeter.Unregister(semconv.PoolMisTotal, "capacity", strconv.Itoa(size)) | ||||
| 	meter.DefaultMeter.Unregister(semconv.PoolRetTotal, "capacity", strconv.Itoa(size)) | ||||
| } | ||||
|  | ||||
| // Stats struct | ||||
| type Stats struct { | ||||
| 	Get uint64 | ||||
| 	Put uint64 | ||||
| @@ -25,41 +25,13 @@ type Stats struct { | ||||
| 	Ret uint64 | ||||
| } | ||||
|  | ||||
| // Statser provides buffer pool stats | ||||
| type Statser interface { | ||||
| 	Stats() Stats | ||||
| 	Cap() int | ||||
| } | ||||
|  | ||||
| func init() { | ||||
| 	go newStatsMeter() | ||||
| } | ||||
|  | ||||
| func newStatsMeter() { | ||||
| 	ticker := time.NewTicker(meter.DefaultMeterStatsInterval) | ||||
| 	defer ticker.Stop() | ||||
|  | ||||
| 	for range ticker.C { | ||||
| 		poolsMu.Lock() | ||||
| 		for _, st := range pools { | ||||
| 			stats := st.Stats() | ||||
| 			meter.DefaultMeter.Counter(semconv.PoolGetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Get) | ||||
| 			meter.DefaultMeter.Counter(semconv.PoolPutTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Put) | ||||
| 			meter.DefaultMeter.Counter(semconv.PoolMisTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Mis) | ||||
| 			meter.DefaultMeter.Counter(semconv.PoolRetTotal, "capacity", strconv.Itoa(st.Cap())).Set(stats.Ret) | ||||
| 		} | ||||
| 		poolsMu.Unlock() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	_ Statser = (*BytePool)(nil) | ||||
| 	_ Statser = (*BytesPool)(nil) | ||||
| 	_ Statser = (*StringsPool)(nil) | ||||
| ) | ||||
|  | ||||
| type Pool[T any] struct { | ||||
| 	p   *sync.Pool | ||||
| 	get *atomic.Uint64 | ||||
| 	put *atomic.Uint64 | ||||
| 	mis *atomic.Uint64 | ||||
| 	ret *atomic.Uint64 | ||||
| 	c   int | ||||
| } | ||||
|  | ||||
| func (p Pool[T]) Put(t T) { | ||||
| @@ -70,37 +42,82 @@ func (p Pool[T]) Get() T { | ||||
| 	return p.p.Get().(T) | ||||
| } | ||||
|  | ||||
| func NewPool[T any](fn func() T) Pool[T] { | ||||
| 	return Pool[T]{ | ||||
| 		p: &sync.Pool{ | ||||
| func NewPool[T any](fn func() T, size int) Pool[T] { | ||||
| 	p := Pool[T]{ | ||||
| 		c:   size, | ||||
| 		get: &atomic.Uint64{}, | ||||
| 		put: &atomic.Uint64{}, | ||||
| 		mis: &atomic.Uint64{}, | ||||
| 		ret: &atomic.Uint64{}, | ||||
| 	} | ||||
|  | ||||
| 	p.p = &sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			p.mis.Add(1) | ||||
| 			return fn() | ||||
| 		}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 { | ||||
| 		return float64(p.get.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 { | ||||
| 		return float64(p.put.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 { | ||||
| 		return float64(p.mis.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 { | ||||
| 		return float64(p.ret.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| type BytePool struct { | ||||
| 	p   *sync.Pool | ||||
| 	get uint64 | ||||
| 	put uint64 | ||||
| 	mis uint64 | ||||
| 	ret uint64 | ||||
| 	get *atomic.Uint64 | ||||
| 	put *atomic.Uint64 | ||||
| 	mis *atomic.Uint64 | ||||
| 	ret *atomic.Uint64 | ||||
| 	c   int | ||||
| } | ||||
|  | ||||
| func NewBytePool(size int) *BytePool { | ||||
| 	p := &BytePool{c: size} | ||||
| 	p := &BytePool{ | ||||
| 		c:   size, | ||||
| 		get: &atomic.Uint64{}, | ||||
| 		put: &atomic.Uint64{}, | ||||
| 		mis: &atomic.Uint64{}, | ||||
| 		ret: &atomic.Uint64{}, | ||||
| 	} | ||||
| 	p.p = &sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			atomic.AddUint64(&p.mis, 1) | ||||
| 			p.mis.Add(1) | ||||
| 			b := make([]byte, 0, size) | ||||
| 			return &b | ||||
| 		}, | ||||
| 	} | ||||
| 	poolsMu.Lock() | ||||
| 	pools = append(pools, p) | ||||
| 	poolsMu.Unlock() | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 { | ||||
| 		return float64(p.get.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 { | ||||
| 		return float64(p.put.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 { | ||||
| 		return float64(p.mis.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 { | ||||
| 		return float64(p.ret.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| @@ -110,49 +127,73 @@ func (p *BytePool) Cap() int { | ||||
|  | ||||
| func (p *BytePool) Stats() Stats { | ||||
| 	return Stats{ | ||||
| 		Put: atomic.LoadUint64(&p.put), | ||||
| 		Get: atomic.LoadUint64(&p.get), | ||||
| 		Mis: atomic.LoadUint64(&p.mis), | ||||
| 		Ret: atomic.LoadUint64(&p.ret), | ||||
| 		Put: p.put.Load(), | ||||
| 		Get: p.get.Load(), | ||||
| 		Mis: p.mis.Load(), | ||||
| 		Ret: p.ret.Load(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *BytePool) Get() *[]byte { | ||||
| 	atomic.AddUint64(&p.get, 1) | ||||
| 	p.get.Add(1) | ||||
| 	return p.p.Get().(*[]byte) | ||||
| } | ||||
|  | ||||
| func (p *BytePool) Put(b *[]byte) { | ||||
| 	atomic.AddUint64(&p.put, 1) | ||||
| 	p.put.Add(1) | ||||
| 	if cap(*b) > p.c { | ||||
| 		atomic.AddUint64(&p.ret, 1) | ||||
| 		p.ret.Add(1) | ||||
| 		return | ||||
| 	} | ||||
| 	*b = (*b)[:0] | ||||
| 	p.p.Put(b) | ||||
| } | ||||
|  | ||||
| func (p *BytePool) Close() { | ||||
| 	unregisterMetrics(p.c) | ||||
| } | ||||
|  | ||||
| type BytesPool struct { | ||||
| 	p   *sync.Pool | ||||
| 	get uint64 | ||||
| 	put uint64 | ||||
| 	mis uint64 | ||||
| 	ret uint64 | ||||
| 	get *atomic.Uint64 | ||||
| 	put *atomic.Uint64 | ||||
| 	mis *atomic.Uint64 | ||||
| 	ret *atomic.Uint64 | ||||
| 	c   int | ||||
| } | ||||
|  | ||||
| func NewBytesPool(size int) *BytesPool { | ||||
| 	p := &BytesPool{c: size} | ||||
| 	p := &BytesPool{ | ||||
| 		c:   size, | ||||
| 		get: &atomic.Uint64{}, | ||||
| 		put: &atomic.Uint64{}, | ||||
| 		mis: &atomic.Uint64{}, | ||||
| 		ret: &atomic.Uint64{}, | ||||
| 	} | ||||
| 	p.p = &sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			atomic.AddUint64(&p.mis, 1) | ||||
| 			p.mis.Add(1) | ||||
| 			b := bytes.NewBuffer(make([]byte, 0, size)) | ||||
| 			return b | ||||
| 		}, | ||||
| 	} | ||||
| 	poolsMu.Lock() | ||||
| 	pools = append(pools, p) | ||||
| 	poolsMu.Unlock() | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolGetTotal, func() float64 { | ||||
| 		return float64(p.get.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolPutTotal, func() float64 { | ||||
| 		return float64(p.put.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolMisTotal, func() float64 { | ||||
| 		return float64(p.mis.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	meter.DefaultMeter.Gauge(semconv.PoolRetTotal, func() float64 { | ||||
| 		return float64(p.ret.Load()) | ||||
| 	}, "capacity", strconv.Itoa(p.c)) | ||||
|  | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| @@ -162,10 +203,10 @@ func (p *BytesPool) Cap() int { | ||||
|  | ||||
| func (p *BytesPool) Stats() Stats { | ||||
| 	return Stats{ | ||||
| 		Put: atomic.LoadUint64(&p.put), | ||||
| 		Get: atomic.LoadUint64(&p.get), | ||||
| 		Mis: atomic.LoadUint64(&p.mis), | ||||
| 		Ret: atomic.LoadUint64(&p.ret), | ||||
| 		Put: p.put.Load(), | ||||
| 		Get: p.get.Load(), | ||||
| 		Mis: p.mis.Load(), | ||||
| 		Ret: p.ret.Load(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -174,34 +215,43 @@ func (p *BytesPool) Get() *bytes.Buffer { | ||||
| } | ||||
|  | ||||
| func (p *BytesPool) Put(b *bytes.Buffer) { | ||||
| 	p.put.Add(1) | ||||
| 	if (*b).Cap() > p.c { | ||||
| 		atomic.AddUint64(&p.ret, 1) | ||||
| 		p.ret.Add(1) | ||||
| 		return | ||||
| 	} | ||||
| 	b.Reset() | ||||
| 	p.p.Put(b) | ||||
| } | ||||
|  | ||||
| func (p *BytesPool) Close() { | ||||
| 	unregisterMetrics(p.c) | ||||
| } | ||||
|  | ||||
| type StringsPool struct { | ||||
| 	p   *sync.Pool | ||||
| 	get uint64 | ||||
| 	put uint64 | ||||
| 	mis uint64 | ||||
| 	ret uint64 | ||||
| 	get *atomic.Uint64 | ||||
| 	put *atomic.Uint64 | ||||
| 	mis *atomic.Uint64 | ||||
| 	ret *atomic.Uint64 | ||||
| 	c   int | ||||
| } | ||||
|  | ||||
| func NewStringsPool(size int) *StringsPool { | ||||
| 	p := &StringsPool{c: size} | ||||
| 	p := &StringsPool{ | ||||
| 		c:   size, | ||||
| 		get: &atomic.Uint64{}, | ||||
| 		put: &atomic.Uint64{}, | ||||
| 		mis: &atomic.Uint64{}, | ||||
| 		ret: &atomic.Uint64{}, | ||||
| 	} | ||||
| 	p.p = &sync.Pool{ | ||||
| 		New: func() interface{} { | ||||
| 			atomic.AddUint64(&p.mis, 1) | ||||
| 			p.mis.Add(1) | ||||
| 			return &strings.Builder{} | ||||
| 		}, | ||||
| 	} | ||||
| 	poolsMu.Lock() | ||||
| 	pools = append(pools, p) | ||||
| 	poolsMu.Unlock() | ||||
|  | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| @@ -211,24 +261,28 @@ func (p *StringsPool) Cap() int { | ||||
|  | ||||
| func (p *StringsPool) Stats() Stats { | ||||
| 	return Stats{ | ||||
| 		Put: atomic.LoadUint64(&p.put), | ||||
| 		Get: atomic.LoadUint64(&p.get), | ||||
| 		Mis: atomic.LoadUint64(&p.mis), | ||||
| 		Ret: atomic.LoadUint64(&p.ret), | ||||
| 		Put: p.put.Load(), | ||||
| 		Get: p.get.Load(), | ||||
| 		Mis: p.mis.Load(), | ||||
| 		Ret: p.ret.Load(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *StringsPool) Get() *strings.Builder { | ||||
| 	atomic.AddUint64(&p.get, 1) | ||||
| 	p.get.Add(1) | ||||
| 	return p.p.Get().(*strings.Builder) | ||||
| } | ||||
|  | ||||
| func (p *StringsPool) Put(b *strings.Builder) { | ||||
| 	atomic.AddUint64(&p.put, 1) | ||||
| 	p.put.Add(1) | ||||
| 	if b.Cap() > p.c { | ||||
| 		atomic.AddUint64(&p.ret, 1) | ||||
| 		p.ret.Add(1) | ||||
| 		return | ||||
| 	} | ||||
| 	b.Reset() | ||||
| 	p.p.Put(b) | ||||
| } | ||||
|  | ||||
| func (p *StringsPool) Close() { | ||||
| 	unregisterMetrics(p.c) | ||||
| } | ||||
|   | ||||
		Reference in New Issue
	
	Block a user