Compare commits
28 Commits
v4.1.10
...
ea84ac094f
Author | SHA1 | Date | |
---|---|---|---|
ea84ac094f | |||
2886a7fe8a | |||
|
268b3dbff4 | ||
f9d2c14597 | |||
e6bf914dd9 | |||
b59f4a16f0 | |||
3deb572f72 | |||
0e668c0f0f | |||
2bac878845 | |||
9ee31fb5a6 | |||
ed5d30a58e | |||
|
b4b67a8b41 | ||
13f90ff716 | |||
0f8f12aee0 | |||
8b406cf963 | |||
029a434a2b | |||
|
847259bc39 | ||
a1ee8728ad | |||
88a5875cfb | |||
03ee33040c | |||
0144f175f0 | |||
b3539a32ab | |||
|
6a7223ea4a | ||
1a1b67866a | |||
b7c98da6d1 | |||
2c21cce076 | |||
c8946dcdc8 | |||
|
d342ff2626 |
48
.github/workflows/job_sync.yml
vendored
48
.github/workflows/job_sync.yml
vendored
@@ -18,34 +18,76 @@ jobs:
|
|||||||
echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
|
echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
|
||||||
echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
|
echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
|
||||||
|
|
||||||
|
- name: check master
|
||||||
|
id: check_master
|
||||||
|
run: |
|
||||||
|
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
|
||||||
|
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
|
||||||
|
echo "src_hash=$src_hash"
|
||||||
|
echo "dst_hash=$dst_hash"
|
||||||
|
if [ "$src_hash" != "$dst_hash" ]; then
|
||||||
|
echo "sync_needed=true" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "sync_needed=false" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: sync master
|
- name: sync master
|
||||||
|
if: steps.check_master.outputs.sync_needed == 'true'
|
||||||
run: |
|
run: |
|
||||||
git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
||||||
cd repo
|
cd repo
|
||||||
git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
|
git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
|
||||||
git merge upstream/master
|
git pull --rebase upstream master
|
||||||
git push upstream master --progress
|
git push upstream master --progress
|
||||||
git push origin master --progress
|
git push origin master --progress
|
||||||
cd ../
|
cd ../
|
||||||
rm -rf repo
|
rm -rf repo
|
||||||
|
|
||||||
|
- name: check v3
|
||||||
|
id: check_v3
|
||||||
|
run: |
|
||||||
|
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
|
||||||
|
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
|
||||||
|
echo "src_hash=$src_hash"
|
||||||
|
echo "dst_hash=$dst_hash"
|
||||||
|
if [ "$src_hash" != "$dst_hash" ]; then
|
||||||
|
echo "sync_needed=true" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "sync_needed=false" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: sync v3
|
- name: sync v3
|
||||||
|
if: steps.check_v3.outputs.sync_needed == 'true'
|
||||||
run: |
|
run: |
|
||||||
git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
||||||
cd repo
|
cd repo
|
||||||
git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
|
git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
|
||||||
git merge upstream/v3
|
git pull --rebase upstream v3
|
||||||
git push upstream v3 --progress
|
git push upstream v3 --progress
|
||||||
git push origin v3 --progress
|
git push origin v3 --progress
|
||||||
cd ../
|
cd ../
|
||||||
rm -rf repo
|
rm -rf repo
|
||||||
|
|
||||||
|
- name: check v4
|
||||||
|
id: check_v4
|
||||||
|
run: |
|
||||||
|
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
|
||||||
|
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
|
||||||
|
echo "src_hash=$src_hash"
|
||||||
|
echo "dst_hash=$dst_hash"
|
||||||
|
if [ "$src_hash" != "$dst_hash" ]; then
|
||||||
|
echo "sync_needed=true" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "sync_needed=false" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: sync v4
|
- name: sync v4
|
||||||
|
if: steps.check_v4.outputs.sync_needed == 'true'
|
||||||
run: |
|
run: |
|
||||||
git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
|
||||||
cd repo
|
cd repo
|
||||||
git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
|
git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
|
||||||
git merge upstream/v4
|
git pull --rebase upstream v4
|
||||||
git push upstream v4 --progress
|
git push upstream v4 --progress
|
||||||
git push origin v4 --progress
|
git push origin v4 --progress
|
||||||
cd ../
|
cd ../
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
# Micro
|
# Micro
|
||||||

|

|
||||||
[](https://opensource.org/licenses/Apache-2.0)
|
[](https://opensource.org/licenses/Apache-2.0)
|
||||||
[](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
|
[](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
|
||||||
[](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)
|
[](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)
|
||||||
|
@@ -41,7 +41,7 @@ type Broker interface {
|
|||||||
// Disconnect disconnect from broker
|
// Disconnect disconnect from broker
|
||||||
Disconnect(ctx context.Context) error
|
Disconnect(ctx context.Context) error
|
||||||
// NewMessage create new broker message to publish.
|
// NewMessage create new broker message to publish.
|
||||||
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error)
|
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error)
|
||||||
// Publish message to broker topic
|
// Publish message to broker topic
|
||||||
Publish(ctx context.Context, topic string, messages ...Message) error
|
Publish(ctx context.Context, topic string, messages ...Message) error
|
||||||
// Subscribe subscribes to topic message via handler
|
// Subscribe subscribes to topic message via handler
|
||||||
|
@@ -42,9 +42,9 @@ func SetSubscribeOption(k, v interface{}) SubscribeOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPublishOption returns a function to setup a context with given value
|
// SetMessageOption returns a function to setup a context with given value
|
||||||
func SetPublishOption(k, v interface{}) PublishOption {
|
func SetMessageOption(k, v interface{}) MessageOption {
|
||||||
return func(o *PublishOptions) {
|
return func(o *MessageOptions) {
|
||||||
if o.Context == nil {
|
if o.Context == nil {
|
||||||
o.Context = context.Background()
|
o.Context = context.Background()
|
||||||
}
|
}
|
||||||
|
@@ -22,8 +22,8 @@ type Broker struct {
|
|||||||
subscribers map[string][]*Subscriber
|
subscribers map[string][]*Subscriber
|
||||||
addr string
|
addr string
|
||||||
opts broker.Options
|
opts broker.Options
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
connected bool
|
connected bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type memoryMessage struct {
|
type memoryMessage struct {
|
||||||
@@ -32,7 +32,7 @@ type memoryMessage struct {
|
|||||||
ctx context.Context
|
ctx context.Context
|
||||||
body []byte
|
body []byte
|
||||||
hdr metadata.Metadata
|
hdr metadata.Metadata
|
||||||
opts broker.PublishOptions
|
opts broker.MessageOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *memoryMessage) Ack() error {
|
func (m *memoryMessage) Ack() error {
|
||||||
@@ -72,9 +72,9 @@ func (b *Broker) newCodec(ct string) (codec.Codec, error) {
|
|||||||
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
|
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
|
||||||
ct = ct[:idx]
|
ct = ct[:idx]
|
||||||
}
|
}
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
c, ok := b.opts.Codecs[ct]
|
c, ok := b.opts.Codecs[ct]
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
if ok {
|
if ok {
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
@@ -96,8 +96,8 @@ func (b *Broker) Connect(ctx context.Context) error {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
defer b.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if b.connected {
|
if b.connected {
|
||||||
return nil
|
return nil
|
||||||
@@ -126,8 +126,8 @@ func (b *Broker) Disconnect(ctx context.Context) error {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
defer b.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if !b.connected {
|
if !b.connected {
|
||||||
return nil
|
return nil
|
||||||
@@ -157,8 +157,11 @@ func (b *Broker) Init(opts ...broker.Option) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.PublishOption) (broker.Message, error) {
|
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
|
||||||
options := broker.NewPublishOptions(opts...)
|
options := broker.NewMessageOptions(opts...)
|
||||||
|
if options.ContentType == "" {
|
||||||
|
options.ContentType = b.opts.ContentType
|
||||||
|
}
|
||||||
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
|
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
|
||||||
c, err := b.newCodec(m.opts.ContentType)
|
c, err := b.newCodec(m.opts.ContentType)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -180,12 +183,12 @@ func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
|
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
if !b.connected {
|
if !b.connected {
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
return broker.ErrNotConnected
|
return broker.ErrNotConnected
|
||||||
}
|
}
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
@@ -193,9 +196,9 @@ func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.M
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
subs, ok := b.subscribers[topic]
|
subs, ok := b.subscribers[topic]
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -252,12 +255,12 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
if !b.connected {
|
if !b.connected {
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
return nil, broker.ErrNotConnected
|
return nil, broker.ErrNotConnected
|
||||||
}
|
}
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
|
|
||||||
sid, err := id.New()
|
sid, err := id.New()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -275,13 +278,13 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
|
|||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
b.subscribers[topic] = append(b.subscribers[topic], sub)
|
b.subscribers[topic] = append(b.subscribers[topic], sub)
|
||||||
b.Unlock()
|
b.mu.Unlock()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
<-sub.exit
|
<-sub.exit
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
|
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
|
||||||
for _, sb := range b.subscribers[topic] {
|
for _, sb := range b.subscribers[topic] {
|
||||||
if sb.id == sub.id {
|
if sb.id == sub.id {
|
||||||
@@ -290,7 +293,7 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
|
|||||||
newSubscribers = append(newSubscribers, sb)
|
newSubscribers = append(newSubscribers, sb)
|
||||||
}
|
}
|
||||||
b.subscribers[topic] = newSubscribers
|
b.subscribers[topic] = newSubscribers
|
||||||
b.Unlock()
|
b.mu.Unlock()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
return sub, nil
|
return sub, nil
|
||||||
|
@@ -49,7 +49,7 @@ func TestMemoryBroker(t *testing.T) {
|
|||||||
"id", fmt.Sprintf("%d", i),
|
"id", fmt.Sprintf("%d", i),
|
||||||
),
|
),
|
||||||
[]byte(`"hello world"`),
|
[]byte(`"hello world"`),
|
||||||
broker.PublishContentType("application/octet-stream"),
|
broker.MessageContentType("application/octet-stream"),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@@ -14,16 +14,16 @@ type NoopBroker struct {
|
|||||||
funcPublish FuncPublish
|
funcPublish FuncPublish
|
||||||
funcSubscribe FuncSubscribe
|
funcSubscribe FuncSubscribe
|
||||||
opts Options
|
opts Options
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
|
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
|
||||||
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
|
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
|
||||||
ct = ct[:idx]
|
ct = ct[:idx]
|
||||||
}
|
}
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
c, ok := b.opts.Codecs[ct]
|
c, ok := b.opts.Codecs[ct]
|
||||||
b.RUnlock()
|
b.mu.RUnlock()
|
||||||
if ok {
|
if ok {
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
@@ -99,7 +99,7 @@ type noopMessage struct {
|
|||||||
ctx context.Context
|
ctx context.Context
|
||||||
body []byte
|
body []byte
|
||||||
hdr metadata.Metadata
|
hdr metadata.Metadata
|
||||||
opts PublishOptions
|
opts MessageOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *noopMessage) Ack() error {
|
func (m *noopMessage) Ack() error {
|
||||||
@@ -126,8 +126,11 @@ func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
|
|||||||
return m.c.Unmarshal(m.body, dst)
|
return m.c.Unmarshal(m.body, dst)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...PublishOption) (Message, error) {
|
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) {
|
||||||
options := NewPublishOptions(opts...)
|
options := NewMessageOptions(opts...)
|
||||||
|
if options.ContentType == "" {
|
||||||
|
options.ContentType = b.opts.ContentType
|
||||||
|
}
|
||||||
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
|
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
|
||||||
c, err := b.newCodec(m.opts.ContentType)
|
c, err := b.newCodec(m.opts.ContentType)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@@ -45,6 +45,9 @@ type Options struct {
|
|||||||
|
|
||||||
// GracefulTimeout contains time to wait to finish in flight requests
|
// GracefulTimeout contains time to wait to finish in flight requests
|
||||||
GracefulTimeout time.Duration
|
GracefulTimeout time.Duration
|
||||||
|
|
||||||
|
// ContentType will be used if no content-type set when creating message
|
||||||
|
ContentType string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewOptions create new Options
|
// NewOptions create new Options
|
||||||
@@ -57,14 +60,19 @@ func NewOptions(opts ...Option) Options {
|
|||||||
Codecs: make(map[string]codec.Codec),
|
Codecs: make(map[string]codec.Codec),
|
||||||
Tracer: tracer.DefaultTracer,
|
Tracer: tracer.DefaultTracer,
|
||||||
GracefulTimeout: DefaultGracefulTimeout,
|
GracefulTimeout: DefaultGracefulTimeout,
|
||||||
|
ContentType: DefaultContentType,
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
o(&options)
|
o(&options)
|
||||||
}
|
}
|
||||||
|
|
||||||
return options
|
return options
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DefaultContentType is the default content-type if not specified
|
||||||
|
var DefaultContentType = ""
|
||||||
|
|
||||||
// Context sets the context option
|
// Context sets the context option
|
||||||
func Context(ctx context.Context) Option {
|
func Context(ctx context.Context) Option {
|
||||||
return func(o *Options) {
|
return func(o *Options) {
|
||||||
@@ -72,8 +80,15 @@ func Context(ctx context.Context) Option {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublishOptions struct
|
// ContentType used by default if not specified
|
||||||
type PublishOptions struct {
|
func ContentType(ct string) Option {
|
||||||
|
return func(o *Options) {
|
||||||
|
o.ContentType = ct
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageOptions struct
|
||||||
|
type MessageOptions struct {
|
||||||
// ContentType for message body
|
// ContentType for message body
|
||||||
ContentType string
|
ContentType string
|
||||||
// BodyOnly flag says the message contains raw body bytes and don't need
|
// BodyOnly flag says the message contains raw body bytes and don't need
|
||||||
@@ -83,9 +98,9 @@ type PublishOptions struct {
|
|||||||
Context context.Context
|
Context context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPublishOptions creates PublishOptions struct
|
// NewMessageOptions creates MessageOptions struct
|
||||||
func NewPublishOptions(opts ...PublishOption) PublishOptions {
|
func NewMessageOptions(opts ...MessageOption) MessageOptions {
|
||||||
options := PublishOptions{
|
options := MessageOptions{
|
||||||
Context: context.Background(),
|
Context: context.Background(),
|
||||||
}
|
}
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
@@ -113,19 +128,19 @@ type SubscribeOptions struct {
|
|||||||
// Option func
|
// Option func
|
||||||
type Option func(*Options)
|
type Option func(*Options)
|
||||||
|
|
||||||
// PublishOption func
|
// MessageOption func
|
||||||
type PublishOption func(*PublishOptions)
|
type MessageOption func(*MessageOptions)
|
||||||
|
|
||||||
// PublishContentType sets message content-type that used to Marshal
|
// MessageContentType sets message content-type that used to Marshal
|
||||||
func PublishContentType(ct string) PublishOption {
|
func MessageContentType(ct string) MessageOption {
|
||||||
return func(o *PublishOptions) {
|
return func(o *MessageOptions) {
|
||||||
o.ContentType = ct
|
o.ContentType = ct
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublishBodyOnly publish only body of the message
|
// MessageBodyOnly publish only body of the message
|
||||||
func PublishBodyOnly(b bool) PublishOption {
|
func MessageBodyOnly(b bool) MessageOption {
|
||||||
return func(o *PublishOptions) {
|
return func(o *MessageOptions) {
|
||||||
o.BodyOnly = b
|
o.BodyOnly = b
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -3,8 +3,6 @@ package codec
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -68,10 +66,10 @@ func (m *RawMessage) MarshalYAML() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML sets *m to a copy of data.
|
// UnmarshalYAML sets *m to a copy of data.
|
||||||
func (m *RawMessage) UnmarshalYAML(n *yaml.Node) error {
|
func (m *RawMessage) UnmarshalYAML(data []byte) error {
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return errors.New("RawMessage UnmarshalYAML on nil pointer")
|
return errors.New("RawMessage UnmarshalYAML on nil pointer")
|
||||||
}
|
}
|
||||||
*m = append((*m)[0:0], []byte(n.Value)...)
|
*m = append((*m)[0:0], data...)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -1,7 +1,5 @@
|
|||||||
package codec
|
package codec
|
||||||
|
|
||||||
import "gopkg.in/yaml.v3"
|
|
||||||
|
|
||||||
// Frame gives us the ability to define raw data to send over the pipes
|
// Frame gives us the ability to define raw data to send over the pipes
|
||||||
type Frame struct {
|
type Frame struct {
|
||||||
Data []byte
|
Data []byte
|
||||||
@@ -28,8 +26,8 @@ func (m *Frame) MarshalYAML() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML set frame data
|
// UnmarshalYAML set frame data
|
||||||
func (m *Frame) UnmarshalYAML(n *yaml.Node) error {
|
func (m *Frame) UnmarshalYAML(data []byte) error {
|
||||||
m.Data = []byte(n.Value)
|
m.Data = append((m.Data)[0:0], data...)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
7
go.mod
7
go.mod
@@ -1,12 +1,12 @@
|
|||||||
module go.unistack.org/micro/v4
|
module go.unistack.org/micro/v4
|
||||||
|
|
||||||
go 1.22.0
|
go 1.24
|
||||||
|
|
||||||
require (
|
require (
|
||||||
dario.cat/mergo v1.0.1
|
dario.cat/mergo v1.0.1
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||||
github.com/KimMachineGun/automemlimit v0.7.0
|
github.com/KimMachineGun/automemlimit v0.7.0
|
||||||
github.com/ash3in/uuidv8 v1.2.0
|
github.com/goccy/go-yaml v1.17.1
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/matoous/go-nanoid v1.5.1
|
github.com/matoous/go-nanoid v1.5.1
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
@@ -17,9 +17,9 @@ require (
|
|||||||
go.uber.org/automaxprocs v1.6.0
|
go.uber.org/automaxprocs v1.6.0
|
||||||
go.unistack.org/micro-proto/v4 v4.1.0
|
go.unistack.org/micro-proto/v4 v4.1.0
|
||||||
golang.org/x/sync v0.10.0
|
golang.org/x/sync v0.10.0
|
||||||
|
golang.yandex/hasql/v2 v2.1.0
|
||||||
google.golang.org/grpc v1.69.4
|
google.golang.org/grpc v1.69.4
|
||||||
google.golang.org/protobuf v1.36.3
|
google.golang.org/protobuf v1.36.3
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -31,4 +31,5 @@ require (
|
|||||||
golang.org/x/sys v0.29.0 // indirect
|
golang.org/x/sys v0.29.0 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
6
go.sum
6
go.sum
@@ -4,12 +4,12 @@ github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7Oputl
|
|||||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||||
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
|
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
|
||||||
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||||
github.com/ash3in/uuidv8 v1.2.0 h1:2oogGdtCPwaVtyvPPGin4TfZLtOGE5F+W++E880G6SI=
|
|
||||||
github.com/ash3in/uuidv8 v1.2.0/go.mod h1:BnU0wJBxnzdEKmVg4xckBkD+VZuecTFTUP3M0dWgyY4=
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
|
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
|
||||||
|
github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
@@ -56,6 +56,8 @@ golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
|||||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||||
|
golang.yandex/hasql/v2 v2.1.0 h1:7CaFFWeHoK5TvA+QvZzlKHlIN5sqNpqM8NSrXskZD/k=
|
||||||
|
golang.yandex/hasql/v2 v2.1.0/go.mod h1:3Au1AxuJDCTXmS117BpbI6e+70kGWeyLR1qJAH6HdtA=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
|
||||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||||
|
504
hooks/sql/cluster.go
Normal file
504
hooks/sql/cluster.go
Normal file
@@ -0,0 +1,504 @@
|
|||||||
|
package sql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.yandex/hasql/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errNoAliveNodes = errors.New("no alive nodes")
|
||||||
|
|
||||||
|
func newSQLRowError() *sql.Row {
|
||||||
|
row := &sql.Row{}
|
||||||
|
t := reflect.TypeOf(row).Elem()
|
||||||
|
field, _ := t.FieldByName("err")
|
||||||
|
rowPtr := unsafe.Pointer(row)
|
||||||
|
errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
|
||||||
|
errPtr := (*error)(errFieldPtr)
|
||||||
|
*errPtr = errNoAliveNodes
|
||||||
|
return row
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSQLRowsError() *sql.Rows {
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
t := reflect.TypeOf(rows).Elem()
|
||||||
|
field, _ := t.FieldByName("lasterr")
|
||||||
|
rowPtr := unsafe.Pointer(rows)
|
||||||
|
errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
|
||||||
|
errPtr := (*error)(errFieldPtr)
|
||||||
|
*errPtr = errNoAliveNodes
|
||||||
|
return rows
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClusterQuerier interface {
|
||||||
|
Querier
|
||||||
|
WaitForNodes(ctx context.Context, criterion ...hasql.NodeStateCriterion) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type Querier interface {
|
||||||
|
// Basic connection methods
|
||||||
|
PingContext(ctx context.Context) error
|
||||||
|
Close() error
|
||||||
|
|
||||||
|
// Query methods with context
|
||||||
|
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
|
||||||
|
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
|
||||||
|
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
|
||||||
|
|
||||||
|
// Prepared statements with context
|
||||||
|
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
||||||
|
|
||||||
|
// Transaction management with context
|
||||||
|
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
|
||||||
|
|
||||||
|
// Connection pool management
|
||||||
|
SetConnMaxLifetime(d time.Duration)
|
||||||
|
SetConnMaxIdleTime(d time.Duration)
|
||||||
|
SetMaxOpenConns(n int)
|
||||||
|
SetMaxIdleConns(n int)
|
||||||
|
Stats() sql.DBStats
|
||||||
|
|
||||||
|
Conn(ctx context.Context) (*sql.Conn, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrClusterChecker = errors.New("cluster node checker required")
|
||||||
|
ErrClusterDiscoverer = errors.New("cluster node discoverer required")
|
||||||
|
ErrClusterPicker = errors.New("cluster node picker required")
|
||||||
|
)
|
||||||
|
|
||||||
|
type Cluster struct {
|
||||||
|
hasql *hasql.Cluster[Querier]
|
||||||
|
options ClusterOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCluster returns Querier that provides cluster of nodes
|
||||||
|
func NewCluster[T Querier](opts ...ClusterOption) (ClusterQuerier, error) {
|
||||||
|
options := ClusterOptions{Context: context.Background()}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&options)
|
||||||
|
}
|
||||||
|
if options.NodeChecker == nil {
|
||||||
|
return nil, ErrClusterChecker
|
||||||
|
}
|
||||||
|
if options.NodeDiscoverer == nil {
|
||||||
|
return nil, ErrClusterDiscoverer
|
||||||
|
}
|
||||||
|
if options.NodePicker == nil {
|
||||||
|
return nil, ErrClusterPicker
|
||||||
|
}
|
||||||
|
|
||||||
|
options.Options = append(options.Options, hasql.WithNodePicker(options.NodePicker))
|
||||||
|
if p, ok := options.NodePicker.(*CustomPicker[Querier]); ok {
|
||||||
|
p.opts.Priority = options.NodePriority
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := hasql.NewCluster(
|
||||||
|
options.NodeDiscoverer,
|
||||||
|
options.NodeChecker,
|
||||||
|
options.Options...,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Cluster{hasql: c, options: options}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// compile time guard
|
||||||
|
var _ hasql.NodePicker[Querier] = (*CustomPicker[Querier])(nil)
|
||||||
|
|
||||||
|
type nodeStateCriterionKey struct{}
|
||||||
|
|
||||||
|
// NodeStateCriterion inject hasql.NodeStateCriterion to context
|
||||||
|
func NodeStateCriterion(ctx context.Context, c hasql.NodeStateCriterion) context.Context {
|
||||||
|
return context.WithValue(ctx, nodeStateCriterionKey{}, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNodeStateCriterion(ctx context.Context) hasql.NodeStateCriterion {
|
||||||
|
if v, ok := ctx.Value(nodeStateCriterionKey{}).(hasql.NodeStateCriterion); ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return hasql.PreferPrimary
|
||||||
|
}
|
||||||
|
|
||||||
|
// CustomPickerOptions holds options to pick nodes
type CustomPickerOptions struct {
	// MaxLag is the maximum tolerated replication lag; nodes above it are
	// considered worse than any node within the limit (see CompareNodes).
	MaxLag int
	// Priority maps node name to a priority value; lower value wins.
	// Unlisted nodes get math.MaxInt32 (see getPriority).
	Priority map[string]int32
	// RetryOnError — presumably mirrors ClusterOptions.RetryOnError;
	// not referenced in the visible picker code, TODO confirm it is used.
	RetryOnError bool
}
|
||||||
|
|
||||||
|
// CustomPickerOption func apply option to CustomPickerOptions
type CustomPickerOption func(*CustomPickerOptions)
|
||||||
|
|
||||||
|
// CustomPickerMaxLag specifies max lag for which node can be used
|
||||||
|
func CustomPickerMaxLag(n int) CustomPickerOption {
|
||||||
|
return func(o *CustomPickerOptions) {
|
||||||
|
o.MaxLag = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCustomPicker creates new node picker
|
||||||
|
func NewCustomPicker[T Querier](opts ...CustomPickerOption) *CustomPicker[Querier] {
|
||||||
|
options := CustomPickerOptions{}
|
||||||
|
for _, o := range opts {
|
||||||
|
o(&options)
|
||||||
|
}
|
||||||
|
return &CustomPicker[Querier]{opts: options}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CustomPicker holds node picker options
type CustomPicker[T Querier] struct {
	// opts configures max-lag filtering and per-node priorities;
	// Priority is injected by NewCluster from ClusterOptions.NodePriority.
	opts CustomPickerOptions
}
|
||||||
|
|
||||||
|
// PickNode used to return specific node
|
||||||
|
func (p *CustomPicker[T]) PickNode(cnodes []hasql.CheckedNode[T]) hasql.CheckedNode[T] {
|
||||||
|
for _, n := range cnodes {
|
||||||
|
fmt.Printf("node %s\n", n.Node.String())
|
||||||
|
}
|
||||||
|
return cnodes[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *CustomPicker[T]) getPriority(nodeName string) int32 {
|
||||||
|
if prio, ok := p.opts.Priority[nodeName]; ok {
|
||||||
|
return prio
|
||||||
|
}
|
||||||
|
return math.MaxInt32 // Default to lowest priority
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompareNodes used to sort nodes
|
||||||
|
func (p *CustomPicker[T]) CompareNodes(a, b hasql.CheckedNode[T]) int {
|
||||||
|
fmt.Printf("CompareNodes %s %s\n", a.Node.String(), b.Node.String())
|
||||||
|
// Get replication lag values
|
||||||
|
aLag := a.Info.(interface{ ReplicationLag() int }).ReplicationLag()
|
||||||
|
bLag := b.Info.(interface{ ReplicationLag() int }).ReplicationLag()
|
||||||
|
|
||||||
|
// First check that lag lower then MaxLag
|
||||||
|
if aLag > p.opts.MaxLag && bLag > p.opts.MaxLag {
|
||||||
|
fmt.Printf("CompareNodes aLag > p.opts.MaxLag && bLag > p.opts.MaxLag\n")
|
||||||
|
return 0 // both are equal
|
||||||
|
}
|
||||||
|
|
||||||
|
// If one node exceeds MaxLag and the other doesn't, prefer the one that doesn't
|
||||||
|
if aLag > p.opts.MaxLag {
|
||||||
|
fmt.Printf("CompareNodes aLag > p.opts.MaxLag\n")
|
||||||
|
return 1 // b is better
|
||||||
|
}
|
||||||
|
if bLag > p.opts.MaxLag {
|
||||||
|
fmt.Printf("CompareNodes bLag > p.opts.MaxLag\n")
|
||||||
|
return -1 // a is better
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get node priorities
|
||||||
|
aPrio := p.getPriority(a.Node.String())
|
||||||
|
bPrio := p.getPriority(b.Node.String())
|
||||||
|
|
||||||
|
// if both priority equals
|
||||||
|
if aPrio == bPrio {
|
||||||
|
fmt.Printf("CompareNodes aPrio == bPrio\n")
|
||||||
|
// First compare by replication lag
|
||||||
|
if aLag < bLag {
|
||||||
|
fmt.Printf("CompareNodes aLag < bLag\n")
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if aLag > bLag {
|
||||||
|
fmt.Printf("CompareNodes aLag > bLag\n")
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
// If replication lag is equal, compare by latency
|
||||||
|
aLatency := a.Info.(interface{ Latency() time.Duration }).Latency()
|
||||||
|
bLatency := b.Info.(interface{ Latency() time.Duration }).Latency()
|
||||||
|
|
||||||
|
if aLatency < bLatency {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if aLatency > bLatency {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// If lag and latency is equal
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// If priorities are different, prefer the node with lower priority value
|
||||||
|
if aPrio < bPrio {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterOptions contains cluster specific options
type ClusterOptions struct {
	// NodeChecker probes a node's health (required by NewCluster).
	NodeChecker hasql.NodeChecker
	// NodePicker selects a node among the healthy ones (required).
	NodePicker hasql.NodePicker[Querier]
	// NodeDiscoverer enumerates cluster nodes (required).
	NodeDiscoverer hasql.NodeDiscoverer[Querier]
	// Options are extra hasql cluster options passed through to hasql.NewCluster.
	Options []hasql.ClusterOpt[Querier]
	// Context is used for cluster checks; defaults to context.Background().
	Context context.Context
	// RetryOnError makes cluster methods try the next node after a failure.
	RetryOnError bool
	// NodePriority maps node name to priority (lower wins); consumed by CustomPicker.
	NodePriority map[string]int32
}
|
||||||
|
|
||||||
|
// ClusterOption apply cluster options to ClusterOptions
type ClusterOption func(*ClusterOptions)
|
||||||
|
|
||||||
|
// WithClusterNodeChecker pass hasql.NodeChecker to cluster options
|
||||||
|
func WithClusterNodeChecker(c hasql.NodeChecker) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.NodeChecker = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClusterNodePicker pass hasql.NodePicker to cluster options
|
||||||
|
func WithClusterNodePicker(p hasql.NodePicker[Querier]) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.NodePicker = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClusterNodeDiscoverer pass hasql.NodeDiscoverer to cluster options
|
||||||
|
func WithClusterNodeDiscoverer(d hasql.NodeDiscoverer[Querier]) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.NodeDiscoverer = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetryOnError retry on other nodes on error
|
||||||
|
func WithRetryOnError(b bool) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.RetryOnError = b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClusterContext pass context.Context to cluster options and used for checks
|
||||||
|
func WithClusterContext(ctx context.Context) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.Context = ctx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClusterOptions pass hasql.ClusterOpt
|
||||||
|
func WithClusterOptions(opts ...hasql.ClusterOpt[Querier]) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
o.Options = append(o.Options, opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClusterNode describes one statically-configured cluster member,
// consumed by WithClusterNodes.
type ClusterNode struct {
	// Name identifies the node; also keys the NodePriority map.
	Name string
	// DB is the node's connection.
	DB Querier
	// Priority: lower wins; 0 is treated as "unset" and mapped to math.MaxInt32.
	Priority int32
}
|
||||||
|
|
||||||
|
// WithClusterNodes create cluster with static NodeDiscoverer
|
||||||
|
func WithClusterNodes(cns ...ClusterNode) ClusterOption {
|
||||||
|
return func(o *ClusterOptions) {
|
||||||
|
nodes := make([]*hasql.Node[Querier], 0, len(cns))
|
||||||
|
if o.NodePriority == nil {
|
||||||
|
o.NodePriority = make(map[string]int32, len(cns))
|
||||||
|
}
|
||||||
|
for _, cn := range cns {
|
||||||
|
nodes = append(nodes, hasql.NewNode(cn.Name, cn.DB))
|
||||||
|
if cn.Priority == 0 {
|
||||||
|
cn.Priority = math.MaxInt32
|
||||||
|
}
|
||||||
|
o.NodePriority[cn.Name] = cn.Priority
|
||||||
|
}
|
||||||
|
o.NodeDiscoverer = hasql.NewStaticNodeDiscoverer(nodes...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
|
||||||
|
var tx *sql.Tx
|
||||||
|
var err error
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
if tx, err = n.DB().BeginTx(ctx, opts); err != nil && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if tx == nil && err == nil {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the underlying hasql cluster (stopping its checks)
// and returns its error, if any.
func (c *Cluster) Close() error {
	return c.hasql.Close()
}
|
||||||
|
|
||||||
|
func (c *Cluster) Conn(ctx context.Context) (*sql.Conn, error) {
|
||||||
|
var conn *sql.Conn
|
||||||
|
var err error
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
if conn, err = n.DB().Conn(ctx); err != nil && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if conn == nil && err == nil {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return conn, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
|
||||||
|
var res sql.Result
|
||||||
|
var err error
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
if res, err = n.DB().ExecContext(ctx, query, args...); err != nil && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if res == nil && err == nil {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
|
||||||
|
var res *sql.Stmt
|
||||||
|
var err error
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
if res, err = n.DB().PrepareContext(ctx, query); err != nil && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if res == nil && err == nil {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
|
||||||
|
var res *sql.Rows
|
||||||
|
var err error
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
if res, err = n.DB().QueryContext(ctx, query); err != nil && err != sql.ErrNoRows && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if res == nil && err == nil {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
|
||||||
|
var res *sql.Row
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
res = n.DB().QueryRowContext(ctx, query, args...)
|
||||||
|
if res.Err() == nil {
|
||||||
|
return false
|
||||||
|
} else if res.Err() != nil && !c.options.RetryOnError {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
if res == nil {
|
||||||
|
res = newSQLRowError()
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) PingContext(ctx context.Context) error {
|
||||||
|
var err error
|
||||||
|
var ok bool
|
||||||
|
|
||||||
|
c.hasql.NodesIter(getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
ok = true
|
||||||
|
if err = n.DB().PingContext(ctx); err != nil && !c.options.RetryOnError {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
err = errNoAliveNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) WaitForNodes(ctx context.Context, criterions ...hasql.NodeStateCriterion) error {
|
||||||
|
for _, criterion := range criterions {
|
||||||
|
if _, err := c.hasql.WaitForNode(ctx, criterion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) SetConnMaxLifetime(td time.Duration) {
|
||||||
|
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
n.DB().SetConnMaxIdleTime(td)
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) SetConnMaxIdleTime(td time.Duration) {
|
||||||
|
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
n.DB().SetConnMaxIdleTime(td)
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) SetMaxOpenConns(nc int) {
|
||||||
|
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
n.DB().SetMaxOpenConns(nc)
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) SetMaxIdleConns(nc int) {
|
||||||
|
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
n.DB().SetMaxIdleConns(nc)
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cluster) Stats() sql.DBStats {
|
||||||
|
s := sql.DBStats{}
|
||||||
|
c.hasql.NodesIter(hasql.NodeStateCriterion(hasql.Alive))(func(n *hasql.Node[Querier]) bool {
|
||||||
|
st := n.DB().Stats()
|
||||||
|
s.Idle += st.Idle
|
||||||
|
s.InUse += st.InUse
|
||||||
|
s.MaxIdleClosed += st.MaxIdleClosed
|
||||||
|
s.MaxIdleTimeClosed += st.MaxIdleTimeClosed
|
||||||
|
s.MaxOpenConnections += st.MaxOpenConnections
|
||||||
|
s.OpenConnections += st.OpenConnections
|
||||||
|
s.WaitCount += st.WaitCount
|
||||||
|
s.WaitDuration += st.WaitDuration
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
return s
|
||||||
|
}
|
171
hooks/sql/cluster_test.go
Normal file
171
hooks/sql/cluster_test.go
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
package sql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DATA-DOG/go-sqlmock"
|
||||||
|
"golang.yandex/hasql/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewCluster(t *testing.T) {
|
||||||
|
dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer dbMaster.Close()
|
||||||
|
dbMasterMock.MatchExpectationsInOrder(false)
|
||||||
|
|
||||||
|
dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
|
||||||
|
sqlmock.NewRowsWithColumnDefinition(
|
||||||
|
sqlmock.NewColumn("role").OfType("int8", 0),
|
||||||
|
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
|
||||||
|
AddRow(1, 0)).
|
||||||
|
RowsWillBeClosed().
|
||||||
|
WithoutArgs()
|
||||||
|
|
||||||
|
dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("master-dc1"))
|
||||||
|
|
||||||
|
dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer dbDRMaster.Close()
|
||||||
|
dbDRMasterMock.MatchExpectationsInOrder(false)
|
||||||
|
|
||||||
|
dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
|
||||||
|
sqlmock.NewRowsWithColumnDefinition(
|
||||||
|
sqlmock.NewColumn("role").OfType("int8", 0),
|
||||||
|
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
|
||||||
|
AddRow(2, 40)).
|
||||||
|
RowsWillBeClosed().
|
||||||
|
WithoutArgs()
|
||||||
|
|
||||||
|
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("drmaster1-dc2"))
|
||||||
|
|
||||||
|
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("drmaster"))
|
||||||
|
|
||||||
|
dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer dbSlaveDC1.Close()
|
||||||
|
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
|
||||||
|
|
||||||
|
dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
|
||||||
|
sqlmock.NewRowsWithColumnDefinition(
|
||||||
|
sqlmock.NewColumn("role").OfType("int8", 0),
|
||||||
|
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
|
||||||
|
AddRow(2, 50)).
|
||||||
|
RowsWillBeClosed().
|
||||||
|
WithoutArgs()
|
||||||
|
|
||||||
|
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("slave-dc1"))
|
||||||
|
|
||||||
|
dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer dbSlaveDC2.Close()
|
||||||
|
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
|
||||||
|
|
||||||
|
dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
|
||||||
|
sqlmock.NewRowsWithColumnDefinition(
|
||||||
|
sqlmock.NewColumn("role").OfType("int8", 0),
|
||||||
|
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
|
||||||
|
AddRow(2, 50)).
|
||||||
|
RowsWillBeClosed().
|
||||||
|
WithoutArgs()
|
||||||
|
|
||||||
|
dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("slave-dc1"))
|
||||||
|
|
||||||
|
tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
c, err := NewCluster[Querier](
|
||||||
|
WithClusterContext(tctx),
|
||||||
|
WithClusterNodeChecker(hasql.PostgreSQLChecker),
|
||||||
|
WithClusterNodePicker(NewCustomPicker[Querier](
|
||||||
|
CustomPickerMaxLag(100),
|
||||||
|
)),
|
||||||
|
WithClusterNodes(
|
||||||
|
ClusterNode{"slave-dc1", dbSlaveDC1, 1},
|
||||||
|
ClusterNode{"master-dc1", dbMaster, 1},
|
||||||
|
ClusterNode{"slave-dc2", dbSlaveDC2, 2},
|
||||||
|
ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
|
||||||
|
),
|
||||||
|
WithClusterOptions(
|
||||||
|
hasql.WithUpdateInterval[Querier](2*time.Second),
|
||||||
|
hasql.WithUpdateTimeout[Querier](1*time.Second),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer c.Close()
|
||||||
|
|
||||||
|
if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
|
||||||
|
node1Name := ""
|
||||||
|
fmt.Printf("check for Standby\n")
|
||||||
|
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.Standby), "SELECT node_name as name"); row.Err() != nil {
|
||||||
|
t.Fatal(row.Err())
|
||||||
|
} else if err = row.Scan(&node1Name); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if "slave-dc1" != node1Name {
|
||||||
|
t.Fatalf("invalid node name %s != %s", "slave-dc1", node1Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
|
||||||
|
sqlmock.NewRows([]string{"name"}).
|
||||||
|
AddRow("slave-dc1"))
|
||||||
|
|
||||||
|
node2Name := ""
|
||||||
|
fmt.Printf("check for PreferStandby\n")
|
||||||
|
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() != nil {
|
||||||
|
t.Fatal(row.Err())
|
||||||
|
} else if err = row.Scan(&node2Name); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if "slave-dc1" != node2Name {
|
||||||
|
t.Fatalf("invalid node name %s != %s", "slave-dc1", node2Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
node3Name := ""
|
||||||
|
fmt.Printf("check for PreferPrimary\n")
|
||||||
|
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferPrimary), "SELECT node_name as name"); row.Err() != nil {
|
||||||
|
t.Fatal(row.Err())
|
||||||
|
} else if err = row.Scan(&node3Name); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if "master-dc1" != node3Name {
|
||||||
|
t.Fatalf("invalid node name %s != %s", "master-dc1", node3Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
dbSlaveDC1Mock.ExpectQuery(`.*`).WillReturnRows(sqlmock.NewRows([]string{"role"}).RowError(1, fmt.Errorf("row error")))
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
fmt.Printf("check for PreferStandby\n")
|
||||||
|
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() == nil {
|
||||||
|
t.Fatal("must return error")
|
||||||
|
}
|
||||||
|
|
||||||
|
if dbMasterErr := dbMasterMock.ExpectationsWereMet(); dbMasterErr != nil {
|
||||||
|
t.Error(dbMasterErr)
|
||||||
|
}
|
||||||
|
}
|
@@ -4,18 +4,20 @@ package logger
|
|||||||
type Level int8
|
type Level int8
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// TraceLevel level usually used to find bugs, very verbose
|
// TraceLevel usually used to find bugs, very verbose
|
||||||
TraceLevel Level = iota - 2
|
TraceLevel Level = iota - 2
|
||||||
// DebugLevel level used only when enabled debugging
|
// DebugLevel used only when enabled debugging
|
||||||
DebugLevel
|
DebugLevel
|
||||||
// InfoLevel level used for general info about what's going on inside the application
|
// InfoLevel used for general info about what's going on inside the application
|
||||||
InfoLevel
|
InfoLevel
|
||||||
// WarnLevel level used for non-critical entries
|
// WarnLevel used for non-critical entries
|
||||||
WarnLevel
|
WarnLevel
|
||||||
// ErrorLevel level used for errors that should definitely be noted
|
// ErrorLevel used for errors that should definitely be noted
|
||||||
ErrorLevel
|
ErrorLevel
|
||||||
// FatalLevel level used for critical errors and then calls `os.Exit(1)`
|
// FatalLevel used for critical errors and then calls `os.Exit(1)`
|
||||||
FatalLevel
|
FatalLevel
|
||||||
|
// NoneLevel used to disable logging
|
||||||
|
NoneLevel
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns logger level string representation
|
// String returns logger level string representation
|
||||||
@@ -33,6 +35,8 @@ func (l Level) String() string {
|
|||||||
return "error"
|
return "error"
|
||||||
case FatalLevel:
|
case FatalLevel:
|
||||||
return "fatal"
|
return "fatal"
|
||||||
|
case NoneLevel:
|
||||||
|
return "none"
|
||||||
}
|
}
|
||||||
return "info"
|
return "info"
|
||||||
}
|
}
|
||||||
@@ -58,6 +62,8 @@ func ParseLevel(lvl string) Level {
|
|||||||
return ErrorLevel
|
return ErrorLevel
|
||||||
case FatalLevel.String():
|
case FatalLevel.String():
|
||||||
return FatalLevel
|
return FatalLevel
|
||||||
|
case NoneLevel.String():
|
||||||
|
return NoneLevel
|
||||||
}
|
}
|
||||||
return InfoLevel
|
return InfoLevel
|
||||||
}
|
}
|
||||||
|
@@ -34,6 +34,7 @@ var (
|
|||||||
warnValue = slog.StringValue("warn")
|
warnValue = slog.StringValue("warn")
|
||||||
errorValue = slog.StringValue("error")
|
errorValue = slog.StringValue("error")
|
||||||
fatalValue = slog.StringValue("fatal")
|
fatalValue = slog.StringValue("fatal")
|
||||||
|
noneValue = slog.StringValue("none")
|
||||||
)
|
)
|
||||||
|
|
||||||
type wrapper struct {
|
type wrapper struct {
|
||||||
@@ -85,6 +86,8 @@ func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
|
|||||||
a.Value = errorValue
|
a.Value = errorValue
|
||||||
case lvl >= logger.FatalLevel:
|
case lvl >= logger.FatalLevel:
|
||||||
a.Value = fatalValue
|
a.Value = fatalValue
|
||||||
|
case lvl >= logger.NoneLevel:
|
||||||
|
a.Value = noneValue
|
||||||
default:
|
default:
|
||||||
a.Value = infoValue
|
a.Value = infoValue
|
||||||
}
|
}
|
||||||
@@ -316,6 +319,8 @@ func loggerToSlogLevel(level logger.Level) slog.Level {
|
|||||||
return slog.LevelDebug - 1
|
return slog.LevelDebug - 1
|
||||||
case logger.FatalLevel:
|
case logger.FatalLevel:
|
||||||
return slog.LevelError + 1
|
return slog.LevelError + 1
|
||||||
|
case logger.NoneLevel:
|
||||||
|
return slog.LevelError + 2
|
||||||
default:
|
default:
|
||||||
return slog.LevelInfo
|
return slog.LevelInfo
|
||||||
}
|
}
|
||||||
@@ -333,6 +338,8 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
|
|||||||
return logger.TraceLevel
|
return logger.TraceLevel
|
||||||
case slog.LevelError + 1:
|
case slog.LevelError + 1:
|
||||||
return logger.FatalLevel
|
return logger.FatalLevel
|
||||||
|
case slog.LevelError + 2:
|
||||||
|
return logger.NoneLevel
|
||||||
default:
|
default:
|
||||||
return logger.InfoLevel
|
return logger.InfoLevel
|
||||||
}
|
}
|
||||||
|
@@ -36,6 +36,24 @@ func TestStacktrace(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNoneLevel(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
l := NewLogger(logger.WithLevel(logger.NoneLevel), logger.WithOutput(buf),
|
||||||
|
WithHandlerFunc(slog.NewTextHandler),
|
||||||
|
logger.WithAddStacktrace(true),
|
||||||
|
)
|
||||||
|
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Error(ctx, "msg1", errors.New("err"))
|
||||||
|
|
||||||
|
if buf.Len() != 0 {
|
||||||
|
t.Fatalf("logger none level not works, buf contains: %s", buf.Bytes())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestDelayedBuffer(t *testing.T) {
|
func TestDelayedBuffer(t *testing.T) {
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
buf := bytes.NewBuffer(nil)
|
buf := bytes.NewBuffer(nil)
|
||||||
@@ -62,7 +80,7 @@ func TestTime(t *testing.T) {
|
|||||||
WithHandlerFunc(slog.NewTextHandler),
|
WithHandlerFunc(slog.NewTextHandler),
|
||||||
logger.WithAddStacktrace(true),
|
logger.WithAddStacktrace(true),
|
||||||
logger.WithTimeFunc(func() time.Time {
|
logger.WithTimeFunc(func() time.Time {
|
||||||
return time.Unix(0, 0)
|
return time.Unix(0, 0).UTC()
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
|
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
|
||||||
@@ -71,8 +89,7 @@ func TestTime(t *testing.T) {
|
|||||||
|
|
||||||
l.Error(ctx, "msg1", errors.New("err"))
|
l.Error(ctx, "msg1", errors.New("err"))
|
||||||
|
|
||||||
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T03:00:00.000000000+03:00`)) &&
|
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
|
||||||
!bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
|
|
||||||
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
294
metadata/context.go
Normal file
294
metadata/context.go
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
package metadata
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// In the metadata package, context and metadata are treated as immutable.
|
||||||
|
// Deep copies of metadata are made to keep things safe and correct.
|
||||||
|
// If a user takes a map and changes it across threads, it's their responsibility.
|
||||||
|
//
|
||||||
|
// 1. Incoming Context
|
||||||
|
//
|
||||||
|
// This context is provided by an external system and populated by the server or broker of the micro framework.
|
||||||
|
// It should not be modified. The idea is to extract all necessary data from it,
|
||||||
|
// validate the data, and transfer it into the current context.
|
||||||
|
// After that, only the current context should be used throughout the code.
|
||||||
|
//
|
||||||
|
// 2. Current Context
|
||||||
|
//
|
||||||
|
// This is the context used during the execution flow.
|
||||||
|
// You can add any needed metadata to it and pass it through your code.
|
||||||
|
//
|
||||||
|
// 3. Outgoing Context
|
||||||
|
//
|
||||||
|
// This context is for sending data to external systems.
|
||||||
|
// You can add what you need before sending it out.
|
||||||
|
// But it’s usually better to build and prepare this context right before making the external call,
|
||||||
|
// instead of changing it in many places.
|
||||||
|
//
|
||||||
|
// Execution Flow:
|
||||||
|
//
|
||||||
|
// [External System]
|
||||||
|
// ↓
|
||||||
|
// [Incoming Context]
|
||||||
|
// ↓
|
||||||
|
// [Extract & Validate Metadata from Incoming Context]
|
||||||
|
// ↓
|
||||||
|
// [Prepare Current Context]
|
||||||
|
// ↓
|
||||||
|
// [Enrich Current Context]
|
||||||
|
// ↓
|
||||||
|
// [Business Logic]
|
||||||
|
// ↓
|
||||||
|
// [Prepare Outgoing Context]
|
||||||
|
// ↓
|
||||||
|
// [External System Call]
|
||||||
|
|
||||||
|
type (
	// metadataCurrentKey keys the current-flow metadata in a context.
	metadataCurrentKey struct{}
	// metadataIncomingKey keys metadata received from an external system.
	metadataIncomingKey struct{}
	// metadataOutgoingKey keys metadata destined for an external call.
	metadataOutgoingKey struct{}

	// rawMetadata is the value stored in the context: the base Metadata plus
	// key/value pairs appended later via AppendContext/AppendOutgoingContext.
	// Keeping additions separate avoids copying the base map on every append.
	rawMetadata struct {
		md    Metadata
		added [][]string
	}
)
|
||||||
|
|
||||||
|
// NewContext creates a new context with the provided Metadata attached.
|
||||||
|
// The Metadata must not be modified after calling this function.
|
||||||
|
func NewContext(ctx context.Context, md Metadata) context.Context {
|
||||||
|
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewIncomingContext creates a new context with the provided incoming Metadata attached.
|
||||||
|
// The Metadata must not be modified after calling this function.
|
||||||
|
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
|
||||||
|
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewOutgoingContext creates a new context with the provided outgoing Metadata attached.
|
||||||
|
// The Metadata must not be modified after calling this function.
|
||||||
|
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
|
||||||
|
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendContext returns a new context with the provided key-value pairs (kv)
|
||||||
|
// merged with any existing metadata in the context. For a description of kv,
|
||||||
|
// please refer to the Pairs documentation.
|
||||||
|
func AppendContext(ctx context.Context, kv ...string) context.Context {
|
||||||
|
if len(kv)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
|
||||||
|
}
|
||||||
|
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
||||||
|
added := make([][]string, len(md.added)+1)
|
||||||
|
copy(added, md.added)
|
||||||
|
kvCopy := make([]string, 0, len(kv))
|
||||||
|
for i := 0; i < len(kv); i += 2 {
|
||||||
|
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
||||||
|
}
|
||||||
|
added[len(added)-1] = kvCopy
|
||||||
|
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendOutgoingContext returns a new context with the provided key-value pairs (kv)
|
||||||
|
// merged with any existing metadata in the context. For a description of kv,
|
||||||
|
// please refer to the Pairs documentation.
|
||||||
|
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
|
||||||
|
if len(kv)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
|
||||||
|
}
|
||||||
|
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
||||||
|
added := make([][]string, len(md.added)+1)
|
||||||
|
copy(added, md.added)
|
||||||
|
kvCopy := make([]string, 0, len(kv))
|
||||||
|
for i := 0; i < len(kv); i += 2 {
|
||||||
|
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
||||||
|
}
|
||||||
|
added[len(added)-1] = kvCopy
|
||||||
|
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContext retrieves a deep copy of the metadata from the context and returns it
|
||||||
|
// with a boolean indicating if it was found.
|
||||||
|
func FromContext(ctx context.Context) (Metadata, bool) {
|
||||||
|
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
metadataSize := len(raw.md)
|
||||||
|
for i := range raw.added {
|
||||||
|
metadataSize += len(raw.added[i]) / 2
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make(Metadata, metadataSize)
|
||||||
|
for k, v := range raw.md {
|
||||||
|
out[k] = copyOf(v)
|
||||||
|
}
|
||||||
|
for _, added := range raw.added {
|
||||||
|
if len(added)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(added); i += 2 {
|
||||||
|
out[added[i]] = append(out[added[i]], added[i+1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustContext retrieves a deep copy of the metadata from the context and panics
|
||||||
|
// if the metadata is not found.
|
||||||
|
func MustContext(ctx context.Context) Metadata {
|
||||||
|
md, ok := FromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
panic("missing metadata")
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromIncomingContext retrieves a deep copy of the metadata from the context and returns it
|
||||||
|
// with a boolean indicating if it was found.
|
||||||
|
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
|
||||||
|
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
metadataSize := len(raw.md)
|
||||||
|
for i := range raw.added {
|
||||||
|
metadataSize += len(raw.added[i]) / 2
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make(Metadata, metadataSize)
|
||||||
|
for k, v := range raw.md {
|
||||||
|
out[k] = copyOf(v)
|
||||||
|
}
|
||||||
|
for _, added := range raw.added {
|
||||||
|
if len(added)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(added); i += 2 {
|
||||||
|
out[added[i]] = append(out[added[i]], added[i+1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustIncomingContext retrieves a deep copy of the metadata from the context and panics
|
||||||
|
// if the metadata is not found.
|
||||||
|
func MustIncomingContext(ctx context.Context) Metadata {
|
||||||
|
md, ok := FromIncomingContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
panic("missing metadata")
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromOutgoingContext retrieves a deep copy of the metadata from the context and returns it
|
||||||
|
// with a boolean indicating if it was found.
|
||||||
|
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
|
||||||
|
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
metadataSize := len(raw.md)
|
||||||
|
for i := range raw.added {
|
||||||
|
metadataSize += len(raw.added[i]) / 2
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make(Metadata, metadataSize)
|
||||||
|
for k, v := range raw.md {
|
||||||
|
out[k] = copyOf(v)
|
||||||
|
}
|
||||||
|
for _, added := range raw.added {
|
||||||
|
if len(added)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(added); i += 2 {
|
||||||
|
out[added[i]] = append(out[added[i]], added[i+1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustOutgoingContext retrieves a deep copy of the metadata from the context and panics
|
||||||
|
// if the metadata is not found.
|
||||||
|
func MustOutgoingContext(ctx context.Context) Metadata {
|
||||||
|
md, ok := FromOutgoingContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
panic("missing metadata")
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueFromCurrentContext retrieves a deep copy of the metadata for the given key
|
||||||
|
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
|
||||||
|
func ValueFromCurrentContext(ctx context.Context, key string) []string {
|
||||||
|
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := md.md[key]; ok {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
for k, v := range md.md {
|
||||||
|
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
|
||||||
|
// that the Metadata attached to the context is created using our helper
|
||||||
|
// functions.
|
||||||
|
if strings.EqualFold(k, key) {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueFromIncomingContext retrieves a deep copy of the metadata for the given key
|
||||||
|
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
|
||||||
|
func ValueFromIncomingContext(ctx context.Context, key string) []string {
|
||||||
|
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := raw.md[key]; ok {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
for k, v := range raw.md {
|
||||||
|
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
|
||||||
|
// that the Metadata attached to the context is created using our helper
|
||||||
|
// functions.
|
||||||
|
if strings.EqualFold(k, key) {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueFromOutgoingContext retrieves a deep copy of the metadata for the given key
|
||||||
|
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
|
||||||
|
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
|
||||||
|
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := md.md[key]; ok {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
for k, v := range md.md {
|
||||||
|
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
|
||||||
|
// that the Metadata attached to the context is created using our helper
|
||||||
|
// functions.
|
||||||
|
if strings.EqualFold(k, key) {
|
||||||
|
return copyOf(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@@ -2,18 +2,18 @@
|
|||||||
package metadata
|
package metadata
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// HeaderTopic is the header name that contains topic name
|
// HeaderTopic is the header name that contains topic name.
|
||||||
HeaderTopic = "Micro-Topic"
|
HeaderTopic = "Micro-Topic"
|
||||||
// HeaderContentType specifies content type of message
|
// HeaderContentType specifies content type of message.
|
||||||
HeaderContentType = "Content-Type"
|
HeaderContentType = "Content-Type"
|
||||||
// HeaderEndpoint specifies endpoint in service
|
// HeaderEndpoint specifies endpoint in service.
|
||||||
HeaderEndpoint = "Micro-Endpoint"
|
HeaderEndpoint = "Micro-Endpoint"
|
||||||
// HeaderService specifies service
|
// HeaderService specifies service.
|
||||||
HeaderService = "Micro-Service"
|
HeaderService = "Micro-Service"
|
||||||
// HeaderTimeout specifies timeout of operation
|
// HeaderTimeout specifies timeout of operation.
|
||||||
HeaderTimeout = "Micro-Timeout"
|
HeaderTimeout = "Micro-Timeout"
|
||||||
// HeaderAuthorization specifies Authorization header
|
// HeaderAuthorization specifies Authorization header.
|
||||||
HeaderAuthorization = "Authorization"
|
HeaderAuthorization = "Authorization"
|
||||||
// HeaderXRequestID specifies request id
|
// HeaderXRequestID specifies request id.
|
||||||
HeaderXRequestID = "X-Request-Id"
|
HeaderXRequestID = "X-Request-Id"
|
||||||
)
|
)
|
||||||
|
7
metadata/helpers.go
Normal file
7
metadata/helpers.go
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
package metadata
|
||||||
|
|
||||||
|
func copyOf(v []string) []string {
|
||||||
|
vals := make([]string, len(v))
|
||||||
|
copy(vals, v)
|
||||||
|
return vals
|
||||||
|
}
|
37
metadata/iterator.go
Normal file
37
metadata/iterator.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package metadata
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
type Iterator struct {
|
||||||
|
md Metadata
|
||||||
|
keys []string
|
||||||
|
cur int
|
||||||
|
cnt int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next advances the iterator to the next element.
|
||||||
|
func (iter *Iterator) Next(k *string, v *[]string) bool {
|
||||||
|
if iter.cur+1 > iter.cnt {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if k != nil && v != nil {
|
||||||
|
*k = iter.keys[iter.cur]
|
||||||
|
vv := iter.md[*k]
|
||||||
|
*v = make([]string, len(vv))
|
||||||
|
copy(*v, vv)
|
||||||
|
iter.cur++
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterator returns an iterator for iterating over metadata in sorted order.
|
||||||
|
func (md Metadata) Iterator() *Iterator {
|
||||||
|
iter := &Iterator{md: md, cnt: len(md)}
|
||||||
|
iter.keys = make([]string, 0, iter.cnt)
|
||||||
|
for k := range md {
|
||||||
|
iter.keys = append(iter.keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(iter.keys)
|
||||||
|
return iter
|
||||||
|
}
|
@@ -1,21 +1,18 @@
|
|||||||
package metadata
|
package metadata
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// defaultMetadataSize used when need to init new Metadata
|
// defaultMetadataSize is used when initializing new Metadata.
|
||||||
var defaultMetadataSize = 2
|
var defaultMetadataSize = 2
|
||||||
|
|
||||||
// Metadata is a mapping from metadata keys to values. Users should use the following
|
// Metadata maps keys to values. Use the New, NewWithMetadata and Pairs functions to create it.
|
||||||
// two convenience functions New and Pairs to generate Metadata.
|
|
||||||
type Metadata map[string][]string
|
type Metadata map[string][]string
|
||||||
|
|
||||||
// New creates an zero Metadata.
|
// New creates a zero-value Metadata with the specified size.
|
||||||
func New(l int) Metadata {
|
func New(l int) Metadata {
|
||||||
if l == 0 {
|
if l == 0 {
|
||||||
l = defaultMetadataSize
|
l = defaultMetadataSize
|
||||||
@@ -24,7 +21,7 @@ func New(l int) Metadata {
|
|||||||
return md
|
return md
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewWithMetadata creates an Metadata from a given key-value map.
|
// NewWithMetadata creates a Metadata from the provided key-value map.
|
||||||
func NewWithMetadata(m map[string]string) Metadata {
|
func NewWithMetadata(m map[string]string) Metadata {
|
||||||
md := make(Metadata, len(m))
|
md := make(Metadata, len(m))
|
||||||
for key, val := range m {
|
for key, val := range m {
|
||||||
@@ -33,8 +30,7 @@ func NewWithMetadata(m map[string]string) Metadata {
|
|||||||
return md
|
return md
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pairs returns an Metadata formed by the mapping of key, value ...
|
// Pairs returns a Metadata formed from the key-value mapping. It panics if the length of kv is odd.
|
||||||
// Pairs panics if len(kv) is odd.
|
|
||||||
func Pairs(kv ...string) Metadata {
|
func Pairs(kv ...string) Metadata {
|
||||||
if len(kv)%2 == 1 {
|
if len(kv)%2 == 1 {
|
||||||
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
||||||
@@ -46,12 +42,19 @@ func Pairs(kv ...string) Metadata {
|
|||||||
return md
|
return md
|
||||||
}
|
}
|
||||||
|
|
||||||
// Len returns the number of items in Metadata.
|
// Join combines multiple Metadatas into a single Metadata.
|
||||||
func (md Metadata) Len() int {
|
// The order of values for each key is determined by the order in which the Metadatas are provided to Join.
|
||||||
return len(md)
|
func Join(mds ...Metadata) Metadata {
|
||||||
|
out := Metadata{}
|
||||||
|
for _, md := range mds {
|
||||||
|
for k, v := range md {
|
||||||
|
out[k] = append(out[k], v...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy returns a copy of Metadata.
|
// Copy returns a deep copy of Metadata.
|
||||||
func Copy(src Metadata) Metadata {
|
func Copy(src Metadata) Metadata {
|
||||||
out := make(Metadata, len(src))
|
out := make(Metadata, len(src))
|
||||||
for k, v := range src {
|
for k, v := range src {
|
||||||
@@ -60,7 +63,7 @@ func Copy(src Metadata) Metadata {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy returns a copy of Metadata.
|
// Copy returns a deep copy of Metadata.
|
||||||
func (md Metadata) Copy() Metadata {
|
func (md Metadata) Copy() Metadata {
|
||||||
out := make(Metadata, len(md))
|
out := make(Metadata, len(md))
|
||||||
for k, v := range md {
|
for k, v := range md {
|
||||||
@@ -69,7 +72,19 @@ func (md Metadata) Copy() Metadata {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsMap returns a copy of Metadata with map[string]string.
|
// CopyTo performs a deep copy of Metadata to the out.
|
||||||
|
func (md Metadata) CopyTo(out Metadata) {
|
||||||
|
for k, v := range md {
|
||||||
|
out[k] = copyOf(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of items in Metadata.
|
||||||
|
func (md Metadata) Len() int {
|
||||||
|
return len(md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AsMap returns a deep copy of Metadata as a map[string]string
|
||||||
func (md Metadata) AsMap() map[string]string {
|
func (md Metadata) AsMap() map[string]string {
|
||||||
out := make(map[string]string, len(md))
|
out := make(map[string]string, len(md))
|
||||||
for k, v := range md {
|
for k, v := range md {
|
||||||
@@ -78,8 +93,7 @@ func (md Metadata) AsMap() map[string]string {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsHTTP1 returns a copy of Metadata
|
// AsHTTP1 returns a deep copy of Metadata with keys converted to canonical MIME header key format.
|
||||||
// with CanonicalMIMEHeaderKey.
|
|
||||||
func (md Metadata) AsHTTP1() map[string][]string {
|
func (md Metadata) AsHTTP1() map[string][]string {
|
||||||
out := make(map[string][]string, len(md))
|
out := make(map[string][]string, len(md))
|
||||||
for k, v := range md {
|
for k, v := range md {
|
||||||
@@ -88,8 +102,7 @@ func (md Metadata) AsHTTP1() map[string][]string {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// AsHTTP1 returns a copy of Metadata
|
// AsHTTP2 returns a deep copy of Metadata with keys converted to lowercase.
|
||||||
// with strings.ToLower.
|
|
||||||
func (md Metadata) AsHTTP2() map[string][]string {
|
func (md Metadata) AsHTTP2() map[string][]string {
|
||||||
out := make(map[string][]string, len(md))
|
out := make(map[string][]string, len(md))
|
||||||
for k, v := range md {
|
for k, v := range md {
|
||||||
@@ -98,14 +111,10 @@ func (md Metadata) AsHTTP2() map[string][]string {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// CopyTo copies Metadata to out.
|
// Get retrieves the values for a given key, checking the key in three formats:
|
||||||
func (md Metadata) CopyTo(out Metadata) {
|
// - exact case,
|
||||||
for k, v := range md {
|
// - lower case,
|
||||||
out[k] = copyOf(v)
|
// - canonical MIME header key format.
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get obtains the values for a given key.
|
|
||||||
func (md Metadata) Get(k string) []string {
|
func (md Metadata) Get(k string) []string {
|
||||||
v, ok := md[k]
|
v, ok := md[k]
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -117,13 +126,12 @@ func (md Metadata) Get(k string) []string {
|
|||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetJoined obtains the values for a given key
|
// GetJoined retrieves the values for a given key and joins them into a single string, separated by commas.
|
||||||
// with joined values with "," symbol
|
|
||||||
func (md Metadata) GetJoined(k string) string {
|
func (md Metadata) GetJoined(k string) string {
|
||||||
return strings.Join(md.Get(k), ",")
|
return strings.Join(md.Get(k), ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set sets the value of a given key with a slice of values.
|
// Set assigns the values to the given key.
|
||||||
func (md Metadata) Set(key string, vals ...string) {
|
func (md Metadata) Set(key string, vals ...string) {
|
||||||
if len(vals) == 0 {
|
if len(vals) == 0 {
|
||||||
return
|
return
|
||||||
@@ -131,8 +139,7 @@ func (md Metadata) Set(key string, vals ...string) {
|
|||||||
md[key] = vals
|
md[key] = vals
|
||||||
}
|
}
|
||||||
|
|
||||||
// Append adds the values to key k, not overwriting what was already stored at
|
// Append adds values to the existing values for the given key.
|
||||||
// that key.
|
|
||||||
func (md Metadata) Append(key string, vals ...string) {
|
func (md Metadata) Append(key string, vals ...string) {
|
||||||
if len(vals) == 0 {
|
if len(vals) == 0 {
|
||||||
return
|
return
|
||||||
@@ -140,7 +147,10 @@ func (md Metadata) Append(key string, vals ...string) {
|
|||||||
md[key] = append(md[key], vals...)
|
md[key] = append(md[key], vals...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Del removes the values for a given keys k.
|
// Del removes the values for the given keys k. It checks and removes the keys in the following formats:
|
||||||
|
// - exact case,
|
||||||
|
// - lower case,
|
||||||
|
// - canonical MIME header key format.
|
||||||
func (md Metadata) Del(k ...string) {
|
func (md Metadata) Del(k ...string) {
|
||||||
for i := range k {
|
for i := range k {
|
||||||
delete(md, k[i])
|
delete(md, k[i])
|
||||||
@@ -148,303 +158,3 @@ func (md Metadata) Del(k ...string) {
|
|||||||
delete(md, textproto.CanonicalMIMEHeaderKey(k[i]))
|
delete(md, textproto.CanonicalMIMEHeaderKey(k[i]))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Join joins any number of Metadatas into a single Metadata.
|
|
||||||
//
|
|
||||||
// The order of values for each key is determined by the order in which the Metadatas
|
|
||||||
// containing those values are presented to Join.
|
|
||||||
func Join(mds ...Metadata) Metadata {
|
|
||||||
out := Metadata{}
|
|
||||||
for _, Metadata := range mds {
|
|
||||||
for k, v := range Metadata {
|
|
||||||
out[k] = append(out[k], v...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
metadataIncomingKey struct{}
|
|
||||||
metadataOutgoingKey struct{}
|
|
||||||
metadataCurrentKey struct{}
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewContext creates a new context with Metadata attached. Metadata must
|
|
||||||
// not be modified after calling this function.
|
|
||||||
func NewContext(ctx context.Context, md Metadata) context.Context {
|
|
||||||
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIncomingContext creates a new context with incoming Metadata attached. Metadata must
|
|
||||||
// not be modified after calling this function.
|
|
||||||
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
|
|
||||||
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOutgoingContext creates a new context with outgoing Metadata attached. If used
|
|
||||||
// in conjunction with AppendOutgoingContext, NewOutgoingContext will
|
|
||||||
// overwrite any previously-appended metadata. Metadata must not be modified after
|
|
||||||
// calling this function.
|
|
||||||
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
|
|
||||||
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendContext returns a new context with the provided kv merged
|
|
||||||
// with any existing metadata in the context. Please refer to the documentation
|
|
||||||
// of Pairs for a description of kv.
|
|
||||||
func AppendContext(ctx context.Context, kv ...string) context.Context {
|
|
||||||
if len(kv)%2 == 1 {
|
|
||||||
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
|
|
||||||
}
|
|
||||||
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
|
||||||
added := make([][]string, len(md.added)+1)
|
|
||||||
copy(added, md.added)
|
|
||||||
kvCopy := make([]string, 0, len(kv))
|
|
||||||
for i := 0; i < len(kv); i += 2 {
|
|
||||||
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
|
||||||
}
|
|
||||||
added[len(added)-1] = kvCopy
|
|
||||||
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendOutgoingContext returns a new context with the provided kv merged
|
|
||||||
// with any existing metadata in the context. Please refer to the documentation
|
|
||||||
// of Pairs for a description of kv.
|
|
||||||
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
|
|
||||||
if len(kv)%2 == 1 {
|
|
||||||
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
|
|
||||||
}
|
|
||||||
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
|
||||||
added := make([][]string, len(md.added)+1)
|
|
||||||
copy(added, md.added)
|
|
||||||
kvCopy := make([]string, 0, len(kv))
|
|
||||||
for i := 0; i < len(kv); i += 2 {
|
|
||||||
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
|
||||||
}
|
|
||||||
added[len(added)-1] = kvCopy
|
|
||||||
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromContext returns the metadata in ctx if it exists.
|
|
||||||
func FromContext(ctx context.Context) (Metadata, bool) {
|
|
||||||
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
metadataSize := len(raw.md)
|
|
||||||
for i := range raw.added {
|
|
||||||
metadataSize += len(raw.added[i]) / 2
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(Metadata, metadataSize)
|
|
||||||
for k, v := range raw.md {
|
|
||||||
out[k] = copyOf(v)
|
|
||||||
}
|
|
||||||
for _, added := range raw.added {
|
|
||||||
if len(added)%2 == 1 {
|
|
||||||
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(added); i += 2 {
|
|
||||||
out[added[i]] = append(out[added[i]], added[i+1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustContext returns the metadata in ctx.
|
|
||||||
func MustContext(ctx context.Context) Metadata {
|
|
||||||
md, ok := FromContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
panic("missing metadata")
|
|
||||||
}
|
|
||||||
return md
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromIncomingContext returns the incoming metadata in ctx if it exists.
|
|
||||||
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
|
|
||||||
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
metadataSize := len(raw.md)
|
|
||||||
for i := range raw.added {
|
|
||||||
metadataSize += len(raw.added[i]) / 2
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(Metadata, metadataSize)
|
|
||||||
for k, v := range raw.md {
|
|
||||||
out[k] = copyOf(v)
|
|
||||||
}
|
|
||||||
for _, added := range raw.added {
|
|
||||||
if len(added)%2 == 1 {
|
|
||||||
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(added); i += 2 {
|
|
||||||
out[added[i]] = append(out[added[i]], added[i+1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustIncomingContext returns the incoming metadata in ctx.
|
|
||||||
func MustIncomingContext(ctx context.Context) Metadata {
|
|
||||||
md, ok := FromIncomingContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
panic("missing metadata")
|
|
||||||
}
|
|
||||||
return md
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
|
||||||
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
|
|
||||||
// manner.
|
|
||||||
func ValueFromIncomingContext(ctx context.Context, key string) []string {
|
|
||||||
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := raw.md[key]; ok {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
for k, v := range raw.md {
|
|
||||||
// Case insensitive comparison: Metadata is a map, and there's no guarantee
|
|
||||||
// that the Metadata attached to the context is created using our helper
|
|
||||||
// functions.
|
|
||||||
if strings.EqualFold(k, key) {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValueFromCurrentContext returns the metadata value corresponding to the metadata
|
|
||||||
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
|
|
||||||
// manner.
|
|
||||||
func ValueFromCurrentContext(ctx context.Context, key string) []string {
|
|
||||||
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := md.md[key]; ok {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
for k, v := range md.md {
|
|
||||||
// Case insensitive comparison: Metadata is a map, and there's no guarantee
|
|
||||||
// that the Metadata attached to the context is created using our helper
|
|
||||||
// functions.
|
|
||||||
if strings.EqualFold(k, key) {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustOutgoingContext returns the outgoing metadata in ctx.
|
|
||||||
func MustOutgoingContext(ctx context.Context) Metadata {
|
|
||||||
md, ok := FromOutgoingContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
panic("missing metadata")
|
|
||||||
}
|
|
||||||
return md
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValueFromOutgoingContext returns the metadata value corresponding to the metadata
|
|
||||||
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
|
|
||||||
// manner.
|
|
||||||
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
|
|
||||||
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := md.md[key]; ok {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
for k, v := range md.md {
|
|
||||||
// Case insensitive comparison: Metadata is a map, and there's no guarantee
|
|
||||||
// that the Metadata attached to the context is created using our helper
|
|
||||||
// functions.
|
|
||||||
if strings.EqualFold(k, key) {
|
|
||||||
return copyOf(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyOf(v []string) []string {
|
|
||||||
vals := make([]string, len(v))
|
|
||||||
copy(vals, v)
|
|
||||||
return vals
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
|
|
||||||
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
|
|
||||||
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
metadataSize := len(raw.md)
|
|
||||||
for i := range raw.added {
|
|
||||||
metadataSize += len(raw.added[i]) / 2
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(Metadata, metadataSize)
|
|
||||||
for k, v := range raw.md {
|
|
||||||
out[k] = copyOf(v)
|
|
||||||
}
|
|
||||||
for _, added := range raw.added {
|
|
||||||
if len(added)%2 == 1 {
|
|
||||||
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(added); i += 2 {
|
|
||||||
out[added[i]] = append(out[added[i]], added[i+1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
type rawMetadata struct {
|
|
||||||
md Metadata
|
|
||||||
added [][]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterator used to iterate over metadata with order
|
|
||||||
type Iterator struct {
|
|
||||||
md Metadata
|
|
||||||
keys []string
|
|
||||||
cur int
|
|
||||||
cnt int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next advance iterator to next element
|
|
||||||
func (iter *Iterator) Next(k *string, v *[]string) bool {
|
|
||||||
if iter.cur+1 > iter.cnt {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if k != nil && v != nil {
|
|
||||||
*k = iter.keys[iter.cur]
|
|
||||||
vv := iter.md[*k]
|
|
||||||
*v = make([]string, len(vv))
|
|
||||||
copy(*v, vv)
|
|
||||||
iter.cur++
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterator returns the itarator for metadata in sorted order
|
|
||||||
func (md Metadata) Iterator() *Iterator {
|
|
||||||
iter := &Iterator{md: md, cnt: len(md)}
|
|
||||||
iter.keys = make([]string, 0, iter.cnt)
|
|
||||||
for k := range md {
|
|
||||||
iter.keys = append(iter.keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(iter.keys)
|
|
||||||
return iter
|
|
||||||
}
|
|
||||||
|
@@ -4,8 +4,8 @@ package meter
|
|||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -117,6 +117,39 @@ func BuildLabels(labels ...string) []string {
|
|||||||
return labels
|
return labels
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var spool = newStringsPool(500)
|
||||||
|
|
||||||
|
type stringsPool struct {
|
||||||
|
p *sync.Pool
|
||||||
|
c int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStringsPool(size int) *stringsPool {
|
||||||
|
p := &stringsPool{c: size}
|
||||||
|
p.p = &sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return &strings.Builder{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *stringsPool) Cap() int {
|
||||||
|
return p.c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *stringsPool) Get() *strings.Builder {
|
||||||
|
return p.p.Get().(*strings.Builder)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *stringsPool) Put(b *strings.Builder) {
|
||||||
|
if b.Cap() > p.c {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.Reset()
|
||||||
|
p.p.Put(b)
|
||||||
|
}
|
||||||
|
|
||||||
// BuildName used to combine metric with labels.
|
// BuildName used to combine metric with labels.
|
||||||
// If labels count is odd, drop last element
|
// If labels count is odd, drop last element
|
||||||
func BuildName(name string, labels ...string) string {
|
func BuildName(name string, labels ...string) string {
|
||||||
@@ -125,8 +158,6 @@ func BuildName(name string, labels ...string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(labels) > 2 {
|
if len(labels) > 2 {
|
||||||
sort.Sort(byKey(labels))
|
|
||||||
|
|
||||||
idx := 0
|
idx := 0
|
||||||
for {
|
for {
|
||||||
if labels[idx] == labels[idx+2] {
|
if labels[idx] == labels[idx+2] {
|
||||||
@@ -141,7 +172,9 @@ func BuildName(name string, labels ...string) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var b strings.Builder
|
b := spool.Get()
|
||||||
|
defer spool.Put(b)
|
||||||
|
|
||||||
_, _ = b.WriteString(name)
|
_, _ = b.WriteString(name)
|
||||||
_, _ = b.WriteRune('{')
|
_, _ = b.WriteRune('{')
|
||||||
for idx := 0; idx < len(labels); idx += 2 {
|
for idx := 0; idx < len(labels); idx += 2 {
|
||||||
@@ -149,8 +182,9 @@ func BuildName(name string, labels ...string) string {
|
|||||||
_, _ = b.WriteRune(',')
|
_, _ = b.WriteRune(',')
|
||||||
}
|
}
|
||||||
_, _ = b.WriteString(labels[idx])
|
_, _ = b.WriteString(labels[idx])
|
||||||
_, _ = b.WriteString(`=`)
|
_, _ = b.WriteString(`="`)
|
||||||
_, _ = b.WriteString(strconv.Quote(labels[idx+1]))
|
_, _ = b.WriteString(labels[idx+1])
|
||||||
|
_, _ = b.WriteRune('"')
|
||||||
}
|
}
|
||||||
_, _ = b.WriteRune('}')
|
_, _ = b.WriteRune('}')
|
||||||
|
|
||||||
|
@@ -50,11 +50,12 @@ func TestBuildName(t *testing.T) {
|
|||||||
data := map[string][]string{
|
data := map[string][]string{
|
||||||
`my_metric{firstlabel="value2",zerolabel="value3"}`: {
|
`my_metric{firstlabel="value2",zerolabel="value3"}`: {
|
||||||
"my_metric",
|
"my_metric",
|
||||||
"zerolabel", "value3", "firstlabel", "value2",
|
"firstlabel", "value2",
|
||||||
|
"zerolabel", "value3",
|
||||||
},
|
},
|
||||||
`my_metric{broker="broker2",register="mdns",server="tcp"}`: {
|
`my_metric{broker="broker2",register="mdns",server="tcp"}`: {
|
||||||
"my_metric",
|
"my_metric",
|
||||||
"broker", "broker1", "broker", "broker2", "server", "http", "server", "tcp", "register", "mdns",
|
"broker", "broker1", "broker", "broker2", "register", "mdns", "server", "http", "server", "tcp",
|
||||||
},
|
},
|
||||||
`my_metric{aaa="aaa"}`: {
|
`my_metric{aaa="aaa"}`: {
|
||||||
"my_metric",
|
"my_metric",
|
||||||
|
@@ -91,7 +91,7 @@ func (p *bro) Connect(_ context.Context) error { return nil }
|
|||||||
func (p *bro) Disconnect(_ context.Context) error { return nil }
|
func (p *bro) Disconnect(_ context.Context) error { return nil }
|
||||||
|
|
||||||
// NewMessage creates new message
|
// NewMessage creates new message
|
||||||
func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.PublishOption) (broker.Message, error) {
|
func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.MessageOption) (broker.Message, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -11,8 +11,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type httpProfile struct {
|
type httpProfile struct {
|
||||||
server *http.Server
|
server *http.Server
|
||||||
sync.Mutex
|
mu sync.Mutex
|
||||||
running bool
|
running bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -21,8 +21,8 @@ var DefaultAddress = ":6060"
|
|||||||
|
|
||||||
// Start the profiler
|
// Start the profiler
|
||||||
func (h *httpProfile) Start() error {
|
func (h *httpProfile) Start() error {
|
||||||
h.Lock()
|
h.mu.Lock()
|
||||||
defer h.Unlock()
|
defer h.mu.Unlock()
|
||||||
|
|
||||||
if h.running {
|
if h.running {
|
||||||
return nil
|
return nil
|
||||||
@@ -30,9 +30,9 @@ func (h *httpProfile) Start() error {
|
|||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := h.server.ListenAndServe(); err != nil {
|
if err := h.server.ListenAndServe(); err != nil {
|
||||||
h.Lock()
|
h.mu.Lock()
|
||||||
h.running = false
|
h.running = false
|
||||||
h.Unlock()
|
h.mu.Unlock()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -43,8 +43,8 @@ func (h *httpProfile) Start() error {
|
|||||||
|
|
||||||
// Stop the profiler
|
// Stop the profiler
|
||||||
func (h *httpProfile) Stop() error {
|
func (h *httpProfile) Stop() error {
|
||||||
h.Lock()
|
h.mu.Lock()
|
||||||
defer h.Unlock()
|
defer h.mu.Unlock()
|
||||||
|
|
||||||
if !h.running {
|
if !h.running {
|
||||||
return nil
|
return nil
|
||||||
|
@@ -17,7 +17,7 @@ type profiler struct {
|
|||||||
cpuFile *os.File
|
cpuFile *os.File
|
||||||
memFile *os.File
|
memFile *os.File
|
||||||
opts profile.Options
|
opts profile.Options
|
||||||
sync.Mutex
|
mu sync.Mutex
|
||||||
running bool
|
running bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -39,8 +39,8 @@ func (p *profiler) writeHeap(f *os.File) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *profiler) Start() error {
|
func (p *profiler) Start() error {
|
||||||
p.Lock()
|
p.mu.Lock()
|
||||||
defer p.Unlock()
|
defer p.mu.Unlock()
|
||||||
|
|
||||||
if p.running {
|
if p.running {
|
||||||
return nil
|
return nil
|
||||||
@@ -86,8 +86,8 @@ func (p *profiler) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *profiler) Stop() error {
|
func (p *profiler) Stop() error {
|
||||||
p.Lock()
|
p.mu.Lock()
|
||||||
defer p.Unlock()
|
defer p.mu.Unlock()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-p.exit:
|
case <-p.exit:
|
||||||
|
@@ -33,7 +33,7 @@ type memory struct {
|
|||||||
records map[string]services
|
records map[string]services
|
||||||
watchers map[string]*watcher
|
watchers map[string]*watcher
|
||||||
opts register.Options
|
opts register.Options
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// services is a KV map with service name as the key and a map of records as the value
|
// services is a KV map with service name as the key and a map of records as the value
|
||||||
@@ -57,7 +57,7 @@ func (m *memory) ttlPrune() {
|
|||||||
defer prune.Stop()
|
defer prune.Stop()
|
||||||
|
|
||||||
for range prune.C {
|
for range prune.C {
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
for namespace, services := range m.records {
|
for namespace, services := range m.records {
|
||||||
for service, versions := range services {
|
for service, versions := range services {
|
||||||
for version, record := range versions {
|
for version, record := range versions {
|
||||||
@@ -72,24 +72,24 @@ func (m *memory) ttlPrune() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m.Unlock()
|
m.mu.Unlock()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *memory) sendEvent(r *register.Result) {
|
func (m *memory) sendEvent(r *register.Result) {
|
||||||
m.RLock()
|
m.mu.RLock()
|
||||||
watchers := make([]*watcher, 0, len(m.watchers))
|
watchers := make([]*watcher, 0, len(m.watchers))
|
||||||
for _, w := range m.watchers {
|
for _, w := range m.watchers {
|
||||||
watchers = append(watchers, w)
|
watchers = append(watchers, w)
|
||||||
}
|
}
|
||||||
m.RUnlock()
|
m.mu.RUnlock()
|
||||||
|
|
||||||
for _, w := range watchers {
|
for _, w := range watchers {
|
||||||
select {
|
select {
|
||||||
case <-w.exit:
|
case <-w.exit:
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
delete(m.watchers, w.id)
|
delete(m.watchers, w.id)
|
||||||
m.Unlock()
|
m.mu.Unlock()
|
||||||
default:
|
default:
|
||||||
select {
|
select {
|
||||||
case w.res <- r:
|
case w.res <- r:
|
||||||
@@ -113,8 +113,8 @@ func (m *memory) Init(opts ...register.Option) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// add services
|
// add services
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
defer m.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -124,8 +124,8 @@ func (m *memory) Options() register.Options {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error {
|
func (m *memory) Register(_ context.Context, s *register.Service, opts ...register.RegisterOption) error {
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
defer m.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
options := register.NewRegisterOptions(opts...)
|
options := register.NewRegisterOptions(opts...)
|
||||||
|
|
||||||
@@ -197,8 +197,8 @@ func (m *memory) Register(_ context.Context, s *register.Service, opts ...regist
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error {
|
func (m *memory) Deregister(ctx context.Context, s *register.Service, opts ...register.DeregisterOption) error {
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
defer m.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
options := register.NewDeregisterOptions(opts...)
|
options := register.NewDeregisterOptions(opts...)
|
||||||
|
|
||||||
@@ -264,9 +264,9 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
|
|||||||
|
|
||||||
// if it's a wildcard domain, return from all domains
|
// if it's a wildcard domain, return from all domains
|
||||||
if options.Namespace == register.WildcardNamespace {
|
if options.Namespace == register.WildcardNamespace {
|
||||||
m.RLock()
|
m.mu.RLock()
|
||||||
recs := m.records
|
recs := m.records
|
||||||
m.RUnlock()
|
m.mu.RUnlock()
|
||||||
|
|
||||||
var services []*register.Service
|
var services []*register.Service
|
||||||
|
|
||||||
@@ -286,8 +286,8 @@ func (m *memory) LookupService(ctx context.Context, name string, opts ...registe
|
|||||||
return services, nil
|
return services, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.RLock()
|
m.mu.RLock()
|
||||||
defer m.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
|
|
||||||
// check the domain exists
|
// check the domain exists
|
||||||
services, ok := m.records[options.Namespace]
|
services, ok := m.records[options.Namespace]
|
||||||
@@ -319,9 +319,9 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
|
|||||||
|
|
||||||
// if it's a wildcard domain, list from all domains
|
// if it's a wildcard domain, list from all domains
|
||||||
if options.Namespace == register.WildcardNamespace {
|
if options.Namespace == register.WildcardNamespace {
|
||||||
m.RLock()
|
m.mu.RLock()
|
||||||
recs := m.records
|
recs := m.records
|
||||||
m.RUnlock()
|
m.mu.RUnlock()
|
||||||
|
|
||||||
var services []*register.Service
|
var services []*register.Service
|
||||||
|
|
||||||
@@ -336,8 +336,8 @@ func (m *memory) ListServices(ctx context.Context, opts ...register.ListOption)
|
|||||||
return services, nil
|
return services, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.RLock()
|
m.mu.RLock()
|
||||||
defer m.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
|
|
||||||
// ensure the domain exists
|
// ensure the domain exists
|
||||||
services, ok := m.records[options.Namespace]
|
services, ok := m.records[options.Namespace]
|
||||||
@@ -371,9 +371,9 @@ func (m *memory) Watch(ctx context.Context, opts ...register.WatchOption) (regis
|
|||||||
wo: wo,
|
wo: wo,
|
||||||
}
|
}
|
||||||
|
|
||||||
m.Lock()
|
m.mu.Lock()
|
||||||
m.watchers[w.id] = w
|
m.watchers[w.id] = w
|
||||||
m.Unlock()
|
m.mu.Unlock()
|
||||||
|
|
||||||
return w, nil
|
return w, nil
|
||||||
}
|
}
|
||||||
|
@@ -69,7 +69,8 @@ type Service struct {
|
|||||||
type Node struct {
|
type Node struct {
|
||||||
Metadata metadata.Metadata `json:"metadata,omitempty"`
|
Metadata metadata.Metadata `json:"metadata,omitempty"`
|
||||||
ID string `json:"id,omitempty"`
|
ID string `json:"id,omitempty"`
|
||||||
Address string `json:"address,omitempty"`
|
// Address also prefixed with scheme like grpc://xx.xx.xx.xx:1234
|
||||||
|
Address string `json:"address,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Option func signature
|
// Option func signature
|
||||||
|
@@ -51,13 +51,13 @@ func (r *rpcHandler) Options() HandlerOptions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type noopServer struct {
|
type noopServer struct {
|
||||||
h Handler
|
h Handler
|
||||||
wg *sync.WaitGroup
|
wg *sync.WaitGroup
|
||||||
rsvc *register.Service
|
rsvc *register.Service
|
||||||
handlers map[string]Handler
|
handlers map[string]Handler
|
||||||
exit chan chan error
|
exit chan chan error
|
||||||
opts Options
|
opts Options
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
registered bool
|
registered bool
|
||||||
started bool
|
started bool
|
||||||
}
|
}
|
||||||
@@ -125,10 +125,10 @@ func (n *noopServer) String() string {
|
|||||||
|
|
||||||
//nolint:gocyclo
|
//nolint:gocyclo
|
||||||
func (n *noopServer) Register() error {
|
func (n *noopServer) Register() error {
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
rsvc := n.rsvc
|
rsvc := n.rsvc
|
||||||
config := n.opts
|
config := n.opts
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
|
|
||||||
// if service already filled, reuse it and return early
|
// if service already filled, reuse it and return early
|
||||||
if rsvc != nil {
|
if rsvc != nil {
|
||||||
@@ -144,9 +144,9 @@ func (n *noopServer) Register() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
registered := n.registered
|
registered := n.registered
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
|
|
||||||
if !registered {
|
if !registered {
|
||||||
if config.Logger.V(logger.InfoLevel) {
|
if config.Logger.V(logger.InfoLevel) {
|
||||||
@@ -164,8 +164,8 @@ func (n *noopServer) Register() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Lock()
|
n.mu.Lock()
|
||||||
defer n.Unlock()
|
defer n.mu.Unlock()
|
||||||
|
|
||||||
n.registered = true
|
n.registered = true
|
||||||
if cacheService {
|
if cacheService {
|
||||||
@@ -178,9 +178,9 @@ func (n *noopServer) Register() error {
|
|||||||
func (n *noopServer) Deregister() error {
|
func (n *noopServer) Deregister() error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
config := n.opts
|
config := n.opts
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
|
|
||||||
service, err := NewRegisterService(n)
|
service, err := NewRegisterService(n)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -195,29 +195,29 @@ func (n *noopServer) Deregister() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Lock()
|
n.mu.Lock()
|
||||||
n.rsvc = nil
|
n.rsvc = nil
|
||||||
|
|
||||||
if !n.registered {
|
if !n.registered {
|
||||||
n.Unlock()
|
n.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
n.registered = false
|
n.registered = false
|
||||||
|
|
||||||
n.Unlock()
|
n.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//nolint:gocyclo
|
//nolint:gocyclo
|
||||||
func (n *noopServer) Start() error {
|
func (n *noopServer) Start() error {
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
if n.started {
|
if n.started {
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
config := n.Options()
|
config := n.Options()
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
|
|
||||||
// use 127.0.0.1 to avoid scan of all network interfaces
|
// use 127.0.0.1 to avoid scan of all network interfaces
|
||||||
addr, err := maddr.Extract("127.0.0.1")
|
addr, err := maddr.Extract("127.0.0.1")
|
||||||
@@ -235,11 +235,11 @@ func (n *noopServer) Start() error {
|
|||||||
config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address)
|
config.Logger.Info(n.opts.Context, "server [noop] Listening on "+config.Address)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.Lock()
|
n.mu.Lock()
|
||||||
if len(config.Advertise) == 0 {
|
if len(config.Advertise) == 0 {
|
||||||
config.Advertise = config.Address
|
config.Advertise = config.Address
|
||||||
}
|
}
|
||||||
n.Unlock()
|
n.mu.Unlock()
|
||||||
|
|
||||||
// use RegisterCheck func before register
|
// use RegisterCheck func before register
|
||||||
// nolint: nestif
|
// nolint: nestif
|
||||||
@@ -273,9 +273,9 @@ func (n *noopServer) Start() error {
|
|||||||
select {
|
select {
|
||||||
// register self on interval
|
// register self on interval
|
||||||
case <-t.C:
|
case <-t.C:
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
registered := n.registered
|
registered := n.registered
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
rerr := config.RegisterCheck(config.Context)
|
rerr := config.RegisterCheck(config.Context)
|
||||||
// nolint: nestif
|
// nolint: nestif
|
||||||
if rerr != nil && registered {
|
if rerr != nil && registered {
|
||||||
@@ -332,29 +332,29 @@ func (n *noopServer) Start() error {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// mark the server as started
|
// mark the server as started
|
||||||
n.Lock()
|
n.mu.Lock()
|
||||||
n.started = true
|
n.started = true
|
||||||
n.Unlock()
|
n.mu.Unlock()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *noopServer) Stop() error {
|
func (n *noopServer) Stop() error {
|
||||||
n.RLock()
|
n.mu.RLock()
|
||||||
if !n.started {
|
if !n.started {
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
n.RUnlock()
|
n.mu.RUnlock()
|
||||||
|
|
||||||
ch := make(chan error)
|
ch := make(chan error)
|
||||||
n.exit <- ch
|
n.exit <- ch
|
||||||
|
|
||||||
err := <-ch
|
err := <-ch
|
||||||
n.Lock()
|
n.mu.Lock()
|
||||||
n.rsvc = nil
|
n.rsvc = nil
|
||||||
n.started = false
|
n.started = false
|
||||||
n.Unlock()
|
n.mu.Unlock()
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
20
service.go
20
service.go
@@ -96,9 +96,9 @@ func RegisterHandler(s server.Server, h interface{}, opts ...server.HandlerOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
type service struct {
|
type service struct {
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
opts Options
|
opts Options
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
stopped bool
|
stopped bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -321,9 +321,9 @@ func (s *service) Health() bool {
|
|||||||
func (s *service) Start() error {
|
func (s *service) Start() error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
s.RLock()
|
s.mu.RLock()
|
||||||
config := s.opts
|
config := s.opts
|
||||||
s.RUnlock()
|
s.mu.RUnlock()
|
||||||
|
|
||||||
for _, cfg := range s.opts.Configs {
|
for _, cfg := range s.opts.Configs {
|
||||||
if cfg.Options().Struct == nil {
|
if cfg.Options().Struct == nil {
|
||||||
@@ -380,9 +380,9 @@ func (s *service) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *service) Stop() error {
|
func (s *service) Stop() error {
|
||||||
s.RLock()
|
s.mu.RLock()
|
||||||
config := s.opts
|
config := s.opts
|
||||||
s.RUnlock()
|
s.mu.RUnlock()
|
||||||
|
|
||||||
if config.Loggers[0].V(logger.InfoLevel) {
|
if config.Loggers[0].V(logger.InfoLevel) {
|
||||||
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name()))
|
config.Loggers[0].Info(s.opts.Context, fmt.Sprintf("stoppping [service] %s", s.Name()))
|
||||||
@@ -457,13 +457,13 @@ func (s *service) Run() error {
|
|||||||
// notifyShutdown marks the service as stopped and closes the done channel.
|
// notifyShutdown marks the service as stopped and closes the done channel.
|
||||||
// It ensures the channel is closed only once, preventing multiple closures.
|
// It ensures the channel is closed only once, preventing multiple closures.
|
||||||
func (s *service) notifyShutdown() {
|
func (s *service) notifyShutdown() {
|
||||||
s.Lock()
|
s.mu.Lock()
|
||||||
if s.stopped {
|
if s.stopped {
|
||||||
s.Unlock()
|
s.mu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.stopped = true
|
s.stopped = true
|
||||||
s.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
||||||
close(s.done)
|
close(s.done)
|
||||||
}
|
}
|
||||||
|
@@ -139,7 +139,7 @@ func (n *noopStore) fnExists(ctx context.Context, _ string, _ ...ExistsOption) e
|
|||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
return nil
|
return ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {
|
func (n *noopStore) Write(ctx context.Context, key string, val interface{}, opts ...WriteOption) error {
|
||||||
|
@@ -2,6 +2,7 @@ package store
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -25,7 +26,8 @@ func TestHook(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Exists(context.TODO(), "test"); err != nil {
|
err := s.Exists(context.TODO(), "test")
|
||||||
|
if !errors.Is(err, ErrNotFound) {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -9,7 +9,7 @@ type memorySync struct {
|
|||||||
locks map[string]*memoryLock
|
locks map[string]*memoryLock
|
||||||
options Options
|
options Options
|
||||||
|
|
||||||
mtx gosync.RWMutex
|
mu gosync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
type memoryLock struct {
|
type memoryLock struct {
|
||||||
@@ -74,7 +74,7 @@ func (m *memorySync) Options() Options {
|
|||||||
|
|
||||||
func (m *memorySync) Lock(id string, opts ...LockOption) error {
|
func (m *memorySync) Lock(id string, opts ...LockOption) error {
|
||||||
// lock our access
|
// lock our access
|
||||||
m.mtx.Lock()
|
m.mu.Lock()
|
||||||
|
|
||||||
var options LockOptions
|
var options LockOptions
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
@@ -90,11 +90,11 @@ func (m *memorySync) Lock(id string, opts ...LockOption) error {
|
|||||||
release: make(chan bool),
|
release: make(chan bool),
|
||||||
}
|
}
|
||||||
// unlock
|
// unlock
|
||||||
m.mtx.Unlock()
|
m.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mtx.Unlock()
|
m.mu.Unlock()
|
||||||
|
|
||||||
// set wait time
|
// set wait time
|
||||||
var wait <-chan time.Time
|
var wait <-chan time.Time
|
||||||
@@ -124,12 +124,12 @@ lockLoop:
|
|||||||
// wait for the lock to be released
|
// wait for the lock to be released
|
||||||
select {
|
select {
|
||||||
case <-lk.release:
|
case <-lk.release:
|
||||||
m.mtx.Lock()
|
m.mu.Lock()
|
||||||
|
|
||||||
// someone locked before us
|
// someone locked before us
|
||||||
lk, ok = m.locks[id]
|
lk, ok = m.locks[id]
|
||||||
if ok {
|
if ok {
|
||||||
m.mtx.Unlock()
|
m.mu.Unlock()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -141,7 +141,7 @@ lockLoop:
|
|||||||
release: make(chan bool),
|
release: make(chan bool),
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mtx.Unlock()
|
m.mu.Unlock()
|
||||||
|
|
||||||
break lockLoop
|
break lockLoop
|
||||||
case <-ttl:
|
case <-ttl:
|
||||||
@@ -160,8 +160,8 @@ lockLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *memorySync) Unlock(id string) error {
|
func (m *memorySync) Unlock(id string) error {
|
||||||
m.mtx.Lock()
|
m.mu.Lock()
|
||||||
defer m.mtx.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
lk, ok := m.locks[id]
|
lk, ok := m.locks[id]
|
||||||
// no lock exists
|
// no lock exists
|
||||||
|
@@ -46,6 +46,10 @@ func (s memoryStringer) String() string {
|
|||||||
return s.s
|
return s.s
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Tracer) Enabled() bool {
|
||||||
|
return t.opts.Enabled
|
||||||
|
}
|
||||||
|
|
||||||
func (t *Tracer) Flush(_ context.Context) error {
|
func (t *Tracer) Flush(_ context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -4,7 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"go.unistack.org/micro/v4/util/id"
|
"github.com/google/uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ Tracer = (*noopTracer)(nil)
|
var _ Tracer = (*noopTracer)(nil)
|
||||||
@@ -18,6 +18,12 @@ func (t *noopTracer) Spans() []Span {
|
|||||||
return t.spans
|
return t.spans
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var uuidNil = uuid.Nil.String()
|
||||||
|
|
||||||
|
func (t *noopTracer) Enabled() bool {
|
||||||
|
return t.opts.Enabled
|
||||||
|
}
|
||||||
|
|
||||||
func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
|
func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
|
||||||
options := NewSpanOptions(opts...)
|
options := NewSpanOptions(opts...)
|
||||||
span := &noopSpan{
|
span := &noopSpan{
|
||||||
@@ -28,8 +34,8 @@ func (t *noopTracer) Start(ctx context.Context, name string, opts ...SpanOption)
|
|||||||
labels: options.Labels,
|
labels: options.Labels,
|
||||||
kind: options.Kind,
|
kind: options.Kind,
|
||||||
}
|
}
|
||||||
span.spanID.s, _ = id.New()
|
span.spanID.s = uuidNil
|
||||||
span.traceID.s, _ = id.New()
|
span.traceID.s = uuidNil
|
||||||
if span.ctx == nil {
|
if span.ctx == nil {
|
||||||
span.ctx = context.Background()
|
span.ctx = context.Background()
|
||||||
}
|
}
|
||||||
|
@@ -142,6 +142,8 @@ type Options struct {
|
|||||||
Name string
|
Name string
|
||||||
// ContextAttrFuncs contains funcs that provides tracing
|
// ContextAttrFuncs contains funcs that provides tracing
|
||||||
ContextAttrFuncs []ContextAttrFunc
|
ContextAttrFuncs []ContextAttrFunc
|
||||||
|
// Enabled specify trace status
|
||||||
|
Enabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Option func signature
|
// Option func signature
|
||||||
@@ -181,6 +183,7 @@ func NewOptions(opts ...Option) Options {
|
|||||||
Logger: logger.DefaultLogger,
|
Logger: logger.DefaultLogger,
|
||||||
Context: context.Background(),
|
Context: context.Background(),
|
||||||
ContextAttrFuncs: DefaultContextAttrFuncs,
|
ContextAttrFuncs: DefaultContextAttrFuncs,
|
||||||
|
Enabled: true,
|
||||||
}
|
}
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
o(&options)
|
o(&options)
|
||||||
@@ -194,3 +197,10 @@ func Name(n string) Option {
|
|||||||
o.Name = n
|
o.Name = n
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Disabled disable tracer
|
||||||
|
func Disabled(b bool) Option {
|
||||||
|
return func(o *Options) {
|
||||||
|
o.Enabled = !b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@@ -29,10 +29,10 @@ type ContextAttrFunc func(ctx context.Context) []interface{}
|
|||||||
func init() {
|
func init() {
|
||||||
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs,
|
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs,
|
||||||
func(ctx context.Context) []interface{} {
|
func(ctx context.Context) []interface{} {
|
||||||
if span, ok := SpanFromContext(ctx); ok {
|
if sp, ok := SpanFromContext(ctx); ok && sp != nil && sp.IsRecording() {
|
||||||
return []interface{}{
|
return []interface{}{
|
||||||
TraceIDKey, span.TraceID(),
|
TraceIDKey, sp.TraceID(),
|
||||||
SpanIDKey, span.SpanID(),
|
SpanIDKey, sp.SpanID(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -51,6 +51,8 @@ type Tracer interface {
|
|||||||
// Extract(ctx context.Context)
|
// Extract(ctx context.Context)
|
||||||
// Flush flushes spans
|
// Flush flushes spans
|
||||||
Flush(ctx context.Context) error
|
Flush(ctx context.Context) error
|
||||||
|
// Enabled returns tracer status
|
||||||
|
Enabled() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type Span interface {
|
type Span interface {
|
||||||
|
@@ -1,13 +1,16 @@
|
|||||||
package buffer
|
package buffer
|
||||||
|
|
||||||
import "io"
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
var _ interface {
|
var _ interface {
|
||||||
io.ReadCloser
|
io.ReadCloser
|
||||||
io.ReadSeeker
|
io.ReadSeeker
|
||||||
} = (*SeekerBuffer)(nil)
|
} = (*SeekerBuffer)(nil)
|
||||||
|
|
||||||
// Buffer is a ReadWriteCloser that supports seeking. It's intended to
|
// SeekerBuffer is a ReadWriteCloser that supports seeking. It's intended to
|
||||||
// replicate the functionality of bytes.Buffer that I use in my projects.
|
// replicate the functionality of bytes.Buffer that I use in my projects.
|
||||||
//
|
//
|
||||||
// Note that the seeking is limited to the read marker; all writes are
|
// Note that the seeking is limited to the read marker; all writes are
|
||||||
@@ -23,6 +26,7 @@ func NewSeekerBuffer(data []byte) *SeekerBuffer {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Read reads up to len(p) bytes into p from the current read position.
|
||||||
func (b *SeekerBuffer) Read(p []byte) (int, error) {
|
func (b *SeekerBuffer) Read(p []byte) (int, error) {
|
||||||
if b.pos >= int64(len(b.data)) {
|
if b.pos >= int64(len(b.data)) {
|
||||||
return 0, io.EOF
|
return 0, io.EOF
|
||||||
@@ -30,29 +34,51 @@ func (b *SeekerBuffer) Read(p []byte) (int, error) {
|
|||||||
|
|
||||||
n := copy(p, b.data[b.pos:])
|
n := copy(p, b.data[b.pos:])
|
||||||
b.pos += int64(n)
|
b.pos += int64(n)
|
||||||
|
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write appends the contents of p to the end of the buffer. It does not affect the read position.
|
||||||
func (b *SeekerBuffer) Write(p []byte) (int, error) {
|
func (b *SeekerBuffer) Write(p []byte) (int, error) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
b.data = append(b.data, p...)
|
b.data = append(b.data, p...)
|
||||||
|
|
||||||
return len(p), nil
|
return len(p), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Seek sets the read pointer to pos.
|
// Seek sets the offset for the next Read operation.
|
||||||
|
// The offset is interpreted according to whence:
|
||||||
|
// - io.SeekStart: relative to the beginning of the buffer
|
||||||
|
// - io.SeekCurrent: relative to the current position
|
||||||
|
// - io.SeekEnd: relative to the end of the buffer
|
||||||
|
//
|
||||||
|
// Returns an error if the resulting position is negative or if whence is invalid.
|
||||||
func (b *SeekerBuffer) Seek(offset int64, whence int) (int64, error) {
|
func (b *SeekerBuffer) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
var newPos int64
|
||||||
|
|
||||||
switch whence {
|
switch whence {
|
||||||
case io.SeekStart:
|
case io.SeekStart:
|
||||||
b.pos = offset
|
newPos = offset
|
||||||
case io.SeekEnd:
|
case io.SeekEnd:
|
||||||
b.pos = int64(len(b.data)) + offset
|
newPos = int64(len(b.data)) + offset
|
||||||
case io.SeekCurrent:
|
case io.SeekCurrent:
|
||||||
b.pos += offset
|
newPos = b.pos + offset
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("invalid whence: %d", whence)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if newPos < 0 {
|
||||||
|
return 0, fmt.Errorf("invalid seek: resulting position %d is negative", newPos)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.pos = newPos
|
||||||
return b.pos, nil
|
return b.pos, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rewind resets the read pointer to 0.
|
// Rewind resets the read position to 0.
|
||||||
func (b *SeekerBuffer) Rewind() error {
|
func (b *SeekerBuffer) Rewind() error {
|
||||||
if _, err := b.Seek(0, io.SeekStart); err != nil {
|
if _, err := b.Seek(0, io.SeekStart); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -67,12 +93,24 @@ func (b *SeekerBuffer) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reset clears all the data out of the buffer and sets the read position to 0.
|
||||||
|
func (b *SeekerBuffer) Reset() {
|
||||||
|
b.data = nil
|
||||||
|
b.pos = 0
|
||||||
|
}
|
||||||
|
|
||||||
// Len returns the length of data remaining to be read.
|
// Len returns the length of data remaining to be read.
|
||||||
func (b *SeekerBuffer) Len() int {
|
func (b *SeekerBuffer) Len() int {
|
||||||
|
if b.pos >= int64(len(b.data)) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
return len(b.data[b.pos:])
|
return len(b.data[b.pos:])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes returns the underlying bytes from the current position.
|
// Bytes returns the underlying bytes from the current position.
|
||||||
func (b *SeekerBuffer) Bytes() []byte {
|
func (b *SeekerBuffer) Bytes() []byte {
|
||||||
|
if b.pos >= int64(len(b.data)) {
|
||||||
|
return []byte{}
|
||||||
|
}
|
||||||
return b.data[b.pos:]
|
return b.data[b.pos:]
|
||||||
}
|
}
|
||||||
|
@@ -2,54 +2,384 @@ package buffer
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"io"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func noErrorT(t *testing.T, err error) {
|
func TestNewSeekerBuffer(t *testing.T) {
|
||||||
if nil != err {
|
input := []byte{'a', 'b', 'c', 'd', 'e'}
|
||||||
t.Fatalf("%s", err)
|
expected := &SeekerBuffer{data: []byte{'a', 'b', 'c', 'd', 'e'}, pos: 0}
|
||||||
|
require.Equal(t, expected, NewSeekerBuffer(input))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSeekerBuffer_Read(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []byte
|
||||||
|
initPos int64
|
||||||
|
readBuf []byte
|
||||||
|
expectedN int
|
||||||
|
expectedData []byte
|
||||||
|
expectedErr error
|
||||||
|
expectedPos int64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "read with empty buffer",
|
||||||
|
data: []byte("hello"),
|
||||||
|
initPos: 0,
|
||||||
|
readBuf: []byte{},
|
||||||
|
expectedN: 0,
|
||||||
|
expectedData: []byte{},
|
||||||
|
expectedErr: nil,
|
||||||
|
expectedPos: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "read with nil buffer",
|
||||||
|
data: []byte("hello"),
|
||||||
|
initPos: 0,
|
||||||
|
readBuf: nil,
|
||||||
|
expectedN: 0,
|
||||||
|
expectedData: nil,
|
||||||
|
expectedErr: nil,
|
||||||
|
expectedPos: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "read full buffer",
|
||||||
|
data: []byte("hello"),
|
||||||
|
initPos: 0,
|
||||||
|
readBuf: make([]byte, 5),
|
||||||
|
expectedN: 5,
|
||||||
|
expectedData: []byte("hello"),
|
||||||
|
expectedErr: nil,
|
||||||
|
expectedPos: 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "read partial buffer",
|
||||||
|
data: []byte("hello"),
|
||||||
|
initPos: 2,
|
||||||
|
readBuf: make([]byte, 2),
|
||||||
|
expectedN: 2,
|
||||||
|
expectedData: []byte("ll"),
|
||||||
|
expectedErr: nil,
|
||||||
|
expectedPos: 4,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "read after end",
|
||||||
|
data: []byte("hello"),
|
||||||
|
initPos: 5,
|
||||||
|
readBuf: make([]byte, 5),
|
||||||
|
expectedN: 0,
|
||||||
|
expectedData: make([]byte, 5),
|
||||||
|
expectedErr: io.EOF,
|
||||||
|
expectedPos: 5,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
sb := NewSeekerBuffer(tt.data)
|
||||||
|
sb.pos = tt.initPos
|
||||||
|
|
||||||
|
n, err := sb.Read(tt.readBuf)
|
||||||
|
|
||||||
|
if tt.expectedErr != nil {
|
||||||
|
require.Equal(t, err, tt.expectedErr)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Equal(t, tt.expectedN, n)
|
||||||
|
require.Equal(t, tt.expectedData, tt.readBuf)
|
||||||
|
require.Equal(t, tt.expectedPos, sb.pos)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func boolT(t *testing.T, cond bool, s ...string) {
|
func TestSeekerBuffer_Write(t *testing.T) {
|
||||||
if !cond {
|
tests := []struct {
|
||||||
what := strings.Join(s, ", ")
|
name string
|
||||||
if len(what) > 0 {
|
initialData []byte
|
||||||
what = ": " + what
|
initialPos int64
|
||||||
}
|
writeData []byte
|
||||||
t.Fatalf("assert.Bool failed%s", what)
|
expectedData []byte
|
||||||
|
expectedN int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "write empty slice",
|
||||||
|
initialData: []byte("data"),
|
||||||
|
initialPos: 0,
|
||||||
|
writeData: []byte{},
|
||||||
|
expectedData: []byte("data"),
|
||||||
|
expectedN: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "write nil slice",
|
||||||
|
initialData: []byte("data"),
|
||||||
|
initialPos: 0,
|
||||||
|
writeData: nil,
|
||||||
|
expectedData: []byte("data"),
|
||||||
|
expectedN: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "write to empty buffer",
|
||||||
|
initialData: nil,
|
||||||
|
initialPos: 0,
|
||||||
|
writeData: []byte("abc"),
|
||||||
|
expectedData: []byte("abc"),
|
||||||
|
expectedN: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "write to existing buffer",
|
||||||
|
initialData: []byte("hello"),
|
||||||
|
initialPos: 0,
|
||||||
|
writeData: []byte(" world"),
|
||||||
|
expectedData: []byte("hello world"),
|
||||||
|
expectedN: 6,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "write after read",
|
||||||
|
initialData: []byte("abc"),
|
||||||
|
initialPos: 2,
|
||||||
|
writeData: []byte("XYZ"),
|
||||||
|
expectedData: []byte("abcXYZ"),
|
||||||
|
expectedN: 3,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
sb := NewSeekerBuffer(tt.initialData)
|
||||||
|
sb.pos = tt.initialPos
|
||||||
|
|
||||||
|
n, err := sb.Write(tt.writeData)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, tt.expectedN, n)
|
||||||
|
require.Equal(t, tt.expectedData, sb.data)
|
||||||
|
require.Equal(t, tt.initialPos, sb.pos)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSeeking(t *testing.T) {
|
func TestSeekerBuffer_Seek(t *testing.T) {
|
||||||
partA := []byte("hello, ")
|
tests := []struct {
|
||||||
partB := []byte("world!")
|
name string
|
||||||
|
initialData []byte
|
||||||
|
initialPos int64
|
||||||
|
offset int64
|
||||||
|
whence int
|
||||||
|
expectedPos int64
|
||||||
|
expectedErr error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "seek with invalid whence",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: 1,
|
||||||
|
whence: 12345,
|
||||||
|
expectedPos: 0,
|
||||||
|
expectedErr: fmt.Errorf("invalid whence: %d", 12345),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek negative from start",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: -1,
|
||||||
|
whence: io.SeekStart,
|
||||||
|
expectedPos: 0,
|
||||||
|
expectedErr: fmt.Errorf("invalid seek: resulting position %d is negative", -1),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek from start to 0",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: 0,
|
||||||
|
whence: io.SeekStart,
|
||||||
|
expectedPos: 0,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek from start to 3",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: 3,
|
||||||
|
whence: io.SeekStart,
|
||||||
|
expectedPos: 3,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek from end to -1 (last byte)",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: -1,
|
||||||
|
whence: io.SeekEnd,
|
||||||
|
expectedPos: 5,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek from current forward",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 2,
|
||||||
|
offset: 2,
|
||||||
|
whence: io.SeekCurrent,
|
||||||
|
expectedPos: 4,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek from current backward",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 4,
|
||||||
|
offset: -2,
|
||||||
|
whence: io.SeekCurrent,
|
||||||
|
expectedPos: 2,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek to end exactly",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: 0,
|
||||||
|
whence: io.SeekEnd,
|
||||||
|
expectedPos: 6,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "seek to out of range",
|
||||||
|
initialData: []byte("abcdef"),
|
||||||
|
initialPos: 0,
|
||||||
|
offset: 2,
|
||||||
|
whence: io.SeekEnd,
|
||||||
|
expectedPos: 8,
|
||||||
|
expectedErr: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
buf := NewSeekerBuffer(partA)
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
sb := NewSeekerBuffer(tt.initialData)
|
||||||
|
sb.pos = tt.initialPos
|
||||||
|
|
||||||
boolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
|
newPos, err := sb.Seek(tt.offset, tt.whence)
|
||||||
|
|
||||||
b := make([]byte, 32)
|
if tt.expectedErr != nil {
|
||||||
|
require.Equal(t, tt.expectedErr, err)
|
||||||
n, err := buf.Read(b)
|
} else {
|
||||||
noErrorT(t, err)
|
require.NoError(t, err)
|
||||||
boolT(t, buf.Len() == 0, fmt.Sprintf("after reading 1: have length %d, want length 0", buf.Len()))
|
require.Equal(t, tt.expectedPos, newPos)
|
||||||
boolT(t, n == len(partA), fmt.Sprintf("after reading 2: have length %d, want length %d", n, len(partA)))
|
require.Equal(t, tt.expectedPos, sb.pos)
|
||||||
|
}
|
||||||
n, err = buf.Write(partB)
|
})
|
||||||
noErrorT(t, err)
|
}
|
||||||
boolT(t, n == len(partB), fmt.Sprintf("after writing: have length %d, want length %d", n, len(partB)))
|
}
|
||||||
|
|
||||||
n, err = buf.Read(b)
|
func TestSeekerBuffer_Rewind(t *testing.T) {
|
||||||
noErrorT(t, err)
|
buf := NewSeekerBuffer([]byte("hello world"))
|
||||||
boolT(t, buf.Len() == 0, fmt.Sprintf("after rereading 1: have length %d, want length 0", buf.Len()))
|
buf.pos = 4
|
||||||
boolT(t, n == len(partB), fmt.Sprintf("after rereading 2: have length %d, want length %d", n, len(partB)))
|
|
||||||
|
require.NoError(t, buf.Rewind())
|
||||||
partsLen := len(partA) + len(partB)
|
require.Equal(t, []byte("hello world"), buf.data)
|
||||||
_ = buf.Rewind()
|
require.Equal(t, int64(0), buf.pos)
|
||||||
boolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
|
}
|
||||||
|
|
||||||
buf.Close()
|
func TestSeekerBuffer_Close(t *testing.T) {
|
||||||
boolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))
|
buf := NewSeekerBuffer([]byte("hello world"))
|
||||||
|
buf.pos = 2
|
||||||
|
|
||||||
|
require.NoError(t, buf.Close())
|
||||||
|
require.Nil(t, buf.data)
|
||||||
|
require.Equal(t, int64(0), buf.pos)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSeekerBuffer_Reset(t *testing.T) {
|
||||||
|
buf := NewSeekerBuffer([]byte("hello world"))
|
||||||
|
buf.pos = 2
|
||||||
|
|
||||||
|
buf.Reset()
|
||||||
|
require.Nil(t, buf.data)
|
||||||
|
require.Equal(t, int64(0), buf.pos)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSeekerBuffer_Len(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []byte
|
||||||
|
pos int64
|
||||||
|
expected int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "full buffer",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 0,
|
||||||
|
expected: 5,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "partial read",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 2,
|
||||||
|
expected: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "fully read",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 5,
|
||||||
|
expected: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pos > len",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 10,
|
||||||
|
expected: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
buf := NewSeekerBuffer(tt.data)
|
||||||
|
buf.pos = tt.pos
|
||||||
|
require.Equal(t, tt.expected, buf.Len())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSeekerBuffer_Bytes(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []byte
|
||||||
|
pos int64
|
||||||
|
expected []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "start of buffer",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 0,
|
||||||
|
expected: []byte("abcde"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "middle of buffer",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 2,
|
||||||
|
expected: []byte("cde"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "end of buffer",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 5,
|
||||||
|
expected: []byte{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pos beyond end",
|
||||||
|
data: []byte("abcde"),
|
||||||
|
pos: 10,
|
||||||
|
expected: []byte{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
buf := NewSeekerBuffer(tt.data)
|
||||||
|
buf.pos = tt.pos
|
||||||
|
require.Equal(t, tt.expected, buf.Bytes())
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@@ -137,7 +137,7 @@ type cache struct {
|
|||||||
|
|
||||||
opts Options
|
opts Options
|
||||||
|
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
type cacheEntry struct {
|
type cacheEntry struct {
|
||||||
@@ -171,7 +171,7 @@ func (c *cache) put(req string, res string) {
|
|||||||
ttl = c.opts.MaxCacheTTL
|
ttl = c.opts.MaxCacheTTL
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
if c.entries == nil {
|
if c.entries == nil {
|
||||||
c.entries = make(map[string]cacheEntry)
|
c.entries = make(map[string]cacheEntry)
|
||||||
}
|
}
|
||||||
@@ -207,7 +207,7 @@ func (c *cache) put(req string, res string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
c.opts.Meter.Counter(semconv.CacheItemsTotal, "type", "dns").Inc()
|
c.opts.Meter.Counter(semconv.CacheItemsTotal, "type", "dns").Inc()
|
||||||
c.Unlock()
|
c.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) get(req string) (res string) {
|
func (c *cache) get(req string) (res string) {
|
||||||
@@ -219,8 +219,8 @@ func (c *cache) get(req string) (res string) {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
c.RLock()
|
c.mu.RLock()
|
||||||
defer c.RUnlock()
|
defer c.mu.RUnlock()
|
||||||
|
|
||||||
if c.entries == nil {
|
if c.entries == nil {
|
||||||
return ""
|
return ""
|
||||||
|
@@ -20,7 +20,7 @@ type dnsConn struct {
|
|||||||
ibuf bytes.Buffer
|
ibuf bytes.Buffer
|
||||||
obuf bytes.Buffer
|
obuf bytes.Buffer
|
||||||
|
|
||||||
sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
type roundTripper func(ctx context.Context, req string) (res string, err error)
|
type roundTripper func(ctx context.Context, req string) (res string, err error)
|
||||||
@@ -42,15 +42,15 @@ func (c *dnsConn) Read(b []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) Write(b []byte) (n int, err error) {
|
func (c *dnsConn) Write(b []byte) (n int, err error) {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
defer c.Unlock()
|
defer c.mu.Unlock()
|
||||||
return c.ibuf.Write(b)
|
return c.ibuf.Write(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) Close() error {
|
func (c *dnsConn) Close() error {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
cancel := c.cancel
|
cancel := c.cancel
|
||||||
c.Unlock()
|
c.mu.Unlock()
|
||||||
|
|
||||||
if cancel != nil {
|
if cancel != nil {
|
||||||
cancel()
|
cancel()
|
||||||
@@ -78,9 +78,9 @@ func (c *dnsConn) SetDeadline(t time.Time) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) SetReadDeadline(t time.Time) error {
|
func (c *dnsConn) SetReadDeadline(t time.Time) error {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
c.deadline = t
|
c.deadline = t
|
||||||
c.Unlock()
|
c.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -90,8 +90,8 @@ func (c *dnsConn) SetWriteDeadline(_ time.Time) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
|
func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
defer c.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
// drain the output buffer
|
// drain the output buffer
|
||||||
if c.obuf.Len() > 0 {
|
if c.obuf.Len() > 0 {
|
||||||
@@ -119,8 +119,8 @@ func (c *dnsConn) drainBuffers(b []byte) (string, int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
|
func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
defer c.Unlock()
|
defer c.mu.Unlock()
|
||||||
c.obuf.WriteByte(byte(len(str) >> 8))
|
c.obuf.WriteByte(byte(len(str) >> 8))
|
||||||
c.obuf.WriteByte(byte(len(str)))
|
c.obuf.WriteByte(byte(len(str)))
|
||||||
c.obuf.WriteString(str)
|
c.obuf.WriteString(str)
|
||||||
@@ -128,8 +128,8 @@ func (c *dnsConn) fillBuffer(b []byte, str string) (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *dnsConn) childContext() (context.Context, context.CancelFunc) {
|
func (c *dnsConn) childContext() (context.Context, context.CancelFunc) {
|
||||||
c.Lock()
|
c.mu.Lock()
|
||||||
defer c.Unlock()
|
defer c.mu.Unlock()
|
||||||
if c.ctx == nil {
|
if c.ctx == nil {
|
||||||
c.ctx, c.cancel = context.WithCancel(context.Background())
|
c.ctx, c.cancel = context.WithCancel(context.Background())
|
||||||
}
|
}
|
||||||
|
@@ -52,7 +52,7 @@ type clientTracer struct {
|
|||||||
tr tracer.Tracer
|
tr tracer.Tracer
|
||||||
activeHooks map[string]context.Context
|
activeHooks map[string]context.Context
|
||||||
root tracer.Span
|
root tracer.Span
|
||||||
mtx sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace {
|
func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrace {
|
||||||
@@ -83,8 +83,8 @@ func NewClientTrace(ctx context.Context, tr tracer.Tracer) *httptrace.ClientTrac
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
|
func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
|
||||||
ct.mtx.Lock()
|
ct.mu.Lock()
|
||||||
defer ct.mtx.Unlock()
|
defer ct.mu.Unlock()
|
||||||
|
|
||||||
if hookCtx, found := ct.activeHooks[hook]; !found {
|
if hookCtx, found := ct.activeHooks[hook]; !found {
|
||||||
var sp tracer.Span
|
var sp tracer.Span
|
||||||
@@ -104,8 +104,8 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) {
|
func (ct *clientTracer) end(hook string, err error, attrs ...interface{}) {
|
||||||
ct.mtx.Lock()
|
ct.mu.Lock()
|
||||||
defer ct.mtx.Unlock()
|
defer ct.mu.Unlock()
|
||||||
if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif
|
if ctx, ok := ct.activeHooks[hook]; ok { // nolint:nestif
|
||||||
if span, ok := tracer.SpanFromContext(ctx); ok {
|
if span, ok := tracer.SpanFromContext(ctx); ok {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -136,8 +136,8 @@ func (ct *clientTracer) getParentContext(hook string) context.Context {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ct *clientTracer) span(hook string) (tracer.Span, bool) {
|
func (ct *clientTracer) span(hook string) (tracer.Span, bool) {
|
||||||
ct.mtx.Lock()
|
ct.mu.Lock()
|
||||||
defer ct.mtx.Unlock()
|
defer ct.mu.Unlock()
|
||||||
if ctx, ok := ct.activeHooks[hook]; ok {
|
if ctx, ok := ct.activeHooks[hook]; ok {
|
||||||
return tracer.SpanFromContext(ctx)
|
return tracer.SpanFromContext(ctx)
|
||||||
}
|
}
|
||||||
|
@@ -2,12 +2,8 @@ package id
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
uuidv8 "github.com/ash3in/uuidv8"
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
nanoid "github.com/matoous/go-nanoid"
|
nanoid "github.com/matoous/go-nanoid"
|
||||||
)
|
)
|
||||||
@@ -25,6 +21,7 @@ type Type int
|
|||||||
const (
|
const (
|
||||||
TypeUnspecified Type = iota
|
TypeUnspecified Type = iota
|
||||||
TypeNanoid
|
TypeNanoid
|
||||||
|
TypeUUIDv7
|
||||||
TypeUUIDv8
|
TypeUUIDv8
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -58,14 +55,14 @@ func (g *Generator) New() (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return nanoid.Generate(g.opts.NanoidAlphabet, g.opts.NanoidSize)
|
return nanoid.Generate(g.opts.NanoidAlphabet, g.opts.NanoidSize)
|
||||||
case TypeUUIDv8:
|
case TypeUUIDv7:
|
||||||
timestamp := uint64(time.Now().UnixNano())
|
uid, err := uuid.NewV7()
|
||||||
clockSeq := make([]byte, 2)
|
if err != nil {
|
||||||
if _, err := rand.Read(clockSeq); err != nil {
|
return "", err
|
||||||
return "", fmt.Errorf("failed to generate random clock sequence: %w", err)
|
|
||||||
}
|
}
|
||||||
clockSeqValue := binary.BigEndian.Uint16(clockSeq) & 0x0FFF // Mask to 12 bits
|
return uid.String(), nil
|
||||||
return uuidv8.NewWithParams(timestamp, clockSeqValue, g.opts.UUIDNode[:], uuidv8.TimestampBits48)
|
case TypeUUIDv8:
|
||||||
|
return "", errors.New("unsupported uuid version v8")
|
||||||
}
|
}
|
||||||
return "", errors.New("invalid option, Type unspecified")
|
return "", errors.New("invalid option, Type unspecified")
|
||||||
}
|
}
|
||||||
@@ -82,16 +79,15 @@ func New(opts ...Option) (string, error) {
|
|||||||
if options.NanoidSize <= 0 {
|
if options.NanoidSize <= 0 {
|
||||||
return "", errors.New("invalid option, NanoidSize must be positive integer")
|
return "", errors.New("invalid option, NanoidSize must be positive integer")
|
||||||
}
|
}
|
||||||
|
|
||||||
return nanoid.Generate(options.NanoidAlphabet, options.NanoidSize)
|
return nanoid.Generate(options.NanoidAlphabet, options.NanoidSize)
|
||||||
case TypeUUIDv8:
|
case TypeUUIDv7:
|
||||||
timestamp := uint64(time.Now().UnixNano())
|
uid, err := uuid.NewV7()
|
||||||
clockSeq := make([]byte, 2)
|
if err != nil {
|
||||||
if _, err := rand.Read(clockSeq); err != nil {
|
return "", err
|
||||||
return "", fmt.Errorf("failed to generate random clock sequence: %w", err)
|
|
||||||
}
|
}
|
||||||
clockSeqValue := binary.BigEndian.Uint16(clockSeq) & 0x0FFF // Mask to 12 bits
|
return uid.String(), nil
|
||||||
return uuidv8.NewWithParams(timestamp, clockSeqValue, options.UUIDNode[:], uuidv8.TimestampBits48)
|
case TypeUUIDv8:
|
||||||
|
return "", errors.New("unsupported uuid version v8")
|
||||||
}
|
}
|
||||||
|
|
||||||
return "", errors.New("invalid option, Type unspecified")
|
return "", errors.New("invalid option, Type unspecified")
|
||||||
@@ -145,7 +141,7 @@ func WithUUIDNode(node [6]byte) Option {
|
|||||||
// NewOptions returns new Options struct filled by opts
|
// NewOptions returns new Options struct filled by opts
|
||||||
func NewOptions(opts ...Option) Options {
|
func NewOptions(opts ...Option) Options {
|
||||||
options := Options{
|
options := Options{
|
||||||
Type: TypeUUIDv8,
|
Type: TypeUUIDv7,
|
||||||
NanoidAlphabet: DefaultNanoidAlphabet,
|
NanoidAlphabet: DefaultNanoidAlphabet,
|
||||||
NanoidSize: DefaultNanoidSize,
|
NanoidSize: DefaultNanoidSize,
|
||||||
UUIDNode: generatedNode,
|
UUIDNode: generatedNode,
|
||||||
|
@@ -14,7 +14,7 @@ type Buffer struct {
|
|||||||
vals []*Entry
|
vals []*Entry
|
||||||
size int
|
size int
|
||||||
|
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// Entry is ring buffer data entry
|
// Entry is ring buffer data entry
|
||||||
@@ -35,8 +35,8 @@ type Stream struct {
|
|||||||
|
|
||||||
// Put adds a new value to ring buffer
|
// Put adds a new value to ring buffer
|
||||||
func (b *Buffer) Put(v interface{}) {
|
func (b *Buffer) Put(v interface{}) {
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
defer b.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
// append to values
|
// append to values
|
||||||
entry := &Entry{
|
entry := &Entry{
|
||||||
@@ -63,8 +63,8 @@ func (b *Buffer) Put(v interface{}) {
|
|||||||
|
|
||||||
// Get returns the last n entries
|
// Get returns the last n entries
|
||||||
func (b *Buffer) Get(n int) []*Entry {
|
func (b *Buffer) Get(n int) []*Entry {
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
defer b.RUnlock()
|
defer b.mu.RUnlock()
|
||||||
|
|
||||||
// reset any invalid values
|
// reset any invalid values
|
||||||
if n > len(b.vals) || n < 0 {
|
if n > len(b.vals) || n < 0 {
|
||||||
@@ -80,8 +80,8 @@ func (b *Buffer) Get(n int) []*Entry {
|
|||||||
|
|
||||||
// Since returns the entries since a specific time
|
// Since returns the entries since a specific time
|
||||||
func (b *Buffer) Since(t time.Time) []*Entry {
|
func (b *Buffer) Since(t time.Time) []*Entry {
|
||||||
b.RLock()
|
b.mu.RLock()
|
||||||
defer b.RUnlock()
|
defer b.mu.RUnlock()
|
||||||
|
|
||||||
// return all the values
|
// return all the values
|
||||||
if t.IsZero() {
|
if t.IsZero() {
|
||||||
@@ -109,8 +109,8 @@ func (b *Buffer) Since(t time.Time) []*Entry {
|
|||||||
// Stream logs from the buffer
|
// Stream logs from the buffer
|
||||||
// Close the channel when you want to stop
|
// Close the channel when you want to stop
|
||||||
func (b *Buffer) Stream() (<-chan *Entry, chan bool) {
|
func (b *Buffer) Stream() (<-chan *Entry, chan bool) {
|
||||||
b.Lock()
|
b.mu.Lock()
|
||||||
defer b.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
entries := make(chan *Entry, 128)
|
entries := make(chan *Entry, 128)
|
||||||
id := id.MustNew()
|
id := id.MustNew()
|
||||||
|
@@ -24,7 +24,7 @@ type stream struct {
|
|||||||
err error
|
err error
|
||||||
request *request
|
request *request
|
||||||
|
|
||||||
sync.RWMutex
|
mu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
type request struct {
|
type request struct {
|
||||||
@@ -57,9 +57,9 @@ func (s *stream) Request() server.Request {
|
|||||||
func (s *stream) Send(v interface{}) error {
|
func (s *stream) Send(v interface{}) error {
|
||||||
err := s.Stream.SendMsg(v)
|
err := s.Stream.SendMsg(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.Lock()
|
s.mu.Lock()
|
||||||
s.err = err
|
s.err = err
|
||||||
s.Unlock()
|
s.mu.Unlock()
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -68,17 +68,17 @@ func (s *stream) Send(v interface{}) error {
|
|||||||
func (s *stream) Recv(v interface{}) error {
|
func (s *stream) Recv(v interface{}) error {
|
||||||
err := s.Stream.RecvMsg(v)
|
err := s.Stream.RecvMsg(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.Lock()
|
s.mu.Lock()
|
||||||
s.err = err
|
s.err = err
|
||||||
s.Unlock()
|
s.mu.Unlock()
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns error that stream holds
|
// Error returns error that stream holds
|
||||||
func (s *stream) Error() error {
|
func (s *stream) Error() error {
|
||||||
s.RLock()
|
s.mu.RLock()
|
||||||
defer s.RUnlock()
|
defer s.mu.RUnlock()
|
||||||
return s.err
|
return s.err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -6,7 +6,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"gopkg.in/yaml.v3"
|
"github.com/goccy/go-yaml"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Duration int64
|
type Duration int64
|
||||||
@@ -58,9 +58,9 @@ func (d Duration) MarshalYAML() (interface{}, error) {
|
|||||||
return time.Duration(d).String(), nil
|
return time.Duration(d).String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Duration) UnmarshalYAML(n *yaml.Node) error {
|
func (d *Duration) UnmarshalYAML(data []byte) error {
|
||||||
var v interface{}
|
var v interface{}
|
||||||
if err := yaml.Unmarshal([]byte(n.Value), &v); err != nil {
|
if err := yaml.Unmarshal(data, &v); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
switch value := v.(type) {
|
switch value := v.(type) {
|
||||||
|
@@ -6,7 +6,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"gopkg.in/yaml.v3"
|
"github.com/goccy/go-yaml"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMarshalYAML(t *testing.T) {
|
func TestMarshalYAML(t *testing.T) {
|
||||||
|
Reference in New Issue
Block a user