Compare commits

4 Commits

413c6cc2f0 logger: fixup WithAddFields
All checks were successful: coverage / build (push) successful in 1m58s; test / test (push) successful in 5m39s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-02-21 18:12:27 +03:00

vtolstov f56bd70136 Apply Code Coverage Badge
2025-02-06 13:22:17 +00:00

b51b4107a8 logger/slog: fixup stacktrace
All checks were successful: coverage / build (push) successful in 1m27s; test / test (push) successful in 2m54s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-02-06 16:21:29 +03:00

2067c9de6b util/buffer: add new seeker implementation
Some checks failed: coverage / build (push) failing after 1m35s; test / test (push) successful in 3m11s
Signed-off-by: Vasiliy Tolstov <v.tolstov@unistack.org>
2025-02-06 15:19:09 +03:00

176 changed files with 4863 additions and 27192 deletions


@@ -3,16 +3,14 @@ name: coverage
on:
push:
branches: [ main, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
pull_request:
branches: [ main, v3, v4 ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: checkout code
@@ -24,7 +22,7 @@ jobs:
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
go-version: 'stable'
- name: test coverage
run: |
@@ -41,8 +39,8 @@ jobs:
name: autocommit
with:
commit_message: Apply Code Coverage Badge
skip_fetch: false
skip_checkout: false
skip_fetch: true
skip_checkout: true
file_pattern: ./README.md
- name: push
@@ -50,4 +48,4 @@ jobs:
uses: ad-m/github-push-action@master
with:
github_token: ${{ github.token }}
branch: ${{ github.ref }}
branch: ${{ github.ref }}


@@ -3,10 +3,10 @@ name: lint
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
branches:
- master
- v3
- v4
jobs:
lint:
@@ -20,10 +20,10 @@ jobs:
uses: actions/setup-go@v5
with:
cache-dependency-path: "**/*.sum"
go-version: 'stable'
go-version: 'stable'
- name: setup deps
run: go get -v ./...
- name: run lint
uses: golangci/golangci-lint-action@v6
uses: https://github.com/golangci/golangci-lint-action@v6
with:
version: 'latest'


@@ -3,12 +3,15 @@ name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
branches:
- master
- v3
- v4
push:
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
branches:
- master
- v3
- v4
jobs:
test:


@@ -3,12 +3,15 @@ name: test
on:
pull_request:
types: [opened, reopened, synchronize]
branches: [ master, v3, v4 ]
branches:
- master
- v3
- v4
push:
branches: [ master, v3, v4 ]
paths-ignore:
- '.github/**'
- '.gitea/**'
branches:
- master
- v3
- v4
jobs:
test:
@@ -32,19 +35,19 @@ jobs:
go-version: 'stable'
- name: setup go work
env:
GOWORK: ${{ github.workspace }}/go.work
GOWORK: /workspace/${{ github.repository_owner }}/go.work
run: |
go work init
go work use .
go work use micro-tests
- name: setup deps
env:
GOWORK: ${{ github.workspace }}/go.work
GOWORK: /workspace/${{ github.repository_owner }}/go.work
run: go get -v ./...
- name: run tests
env:
INTEGRATION_TESTS: yes
GOWORK: ${{ github.workspace }}/go.work
GOWORK: /workspace/${{ github.repository_owner }}/go.work
run: |
cd micro-tests
go test -mod readonly -v ./... || true


@@ -1,94 +0,0 @@
name: sync
on:
schedule:
- cron: '*/5 * * * *'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
sync:
if: github.server_url != 'https://github.com'
runs-on: ubuntu-latest
steps:
- name: init
run: |
git config --global user.email "vtolstov <vtolstov@users.noreply.github.com>"
git config --global user.name "github-actions[bot]"
echo "machine git.unistack.org login vtolstov password ${{ secrets.TOKEN_GITEA }}" >> /root/.netrc
echo "machine github.com login vtolstov password ${{ secrets.TOKEN_GITHUB }}" >> /root/.netrc
- name: check master
id: check_master
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/master | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync master
if: steps.check_master.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch master --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track master upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream master
git push upstream master --progress
git push origin master --progress
cd ../
rm -rf repo
- name: check v3
id: check_v3
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v3 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v3
if: steps.check_v3.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v3 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v3 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v3
git push upstream v3 --progress
git push origin v3 --progress
cd ../
rm -rf repo
- name: check v4
id: check_v4
run: |
src_hash=$(git ls-remote https://github.com/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
dst_hash=$(git ls-remote ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} refs/heads/v4 | cut -f1)
echo "src_hash=$src_hash"
echo "dst_hash=$dst_hash"
if [ "$src_hash" != "$dst_hash" ]; then
echo "sync_needed=true" >> $GITHUB_OUTPUT
else
echo "sync_needed=false" >> $GITHUB_OUTPUT
fi
- name: sync v4
if: steps.check_v4.outputs.sync_needed == 'true'
run: |
git clone --filter=blob:none --filter=tree:0 --branch v4 --single-branch ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY} repo
cd repo
git remote add --no-tags --fetch --track v4 upstream https://github.com/${GITHUB_REPOSITORY}
git pull --rebase upstream v4
git push upstream v4 --progress
git push origin v4 --progress
cd ../
rm -rf repo


@@ -1,5 +1,5 @@
run:
concurrency: 8
timeout: 5m
deadline: 5m
issues-exit-code: 1
tests: true


@@ -1,9 +1,9 @@
# Micro
![Coverage](https://img.shields.io/badge/Coverage-34.1%25-yellow)
![Coverage](https://img.shields.io/badge/Coverage-44.7%25-yellow)
[![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v4?tab=overview)
[![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v4)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av4+event%3Apush)
[![Lint](https://goreportcard.com/badge/go.unistack.org/micro/v4)](https://goreportcard.com/report/go.unistack.org/micro/v4)
[![Doc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/go.unistack.org/micro/v3?tab=overview)
[![Status](https://git.unistack.org/unistack-org/micro/actions/workflows/job_tests.yml/badge.svg?branch=v3)](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av3+event%3Apush)
[![Lint](https://goreportcard.com/badge/go.unistack.org/micro/v3)](https://goreportcard.com/report/go.unistack.org/micro/v3)
Micro is a standard library for microservices.

SECURITY.md (new file, +15 lines)

@@ -0,0 +1,15 @@
# Security Policy
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 3.7.x | :white_check_mark: |
| < 3.7.0 | :x: |
## Reporting a Vulnerability
If you find any issue, please create a GitHub issue in this repo


@@ -6,8 +6,8 @@ import (
"errors"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/metadata"
)
// DefaultBroker default memory broker
@@ -18,10 +18,6 @@ var (
ErrNotConnected = errors.New("broker not connected")
// ErrDisconnected returns when broker disconnected
ErrDisconnected = errors.New("broker disconnected")
// ErrInvalidMessage returns when invalid Message passed
ErrInvalidMessage = errors.New("invalid message")
// ErrInvalidHandler returns when subscriber passed to Subscribe
ErrInvalidHandler = errors.New("invalid handler, ony func(Message) error and func([]Message) error supported")
// DefaultGracefulTimeout
DefaultGracefulTimeout = 5 * time.Second
)
@@ -40,12 +36,14 @@ type Broker interface {
Connect(ctx context.Context) error
// Disconnect disconnect from broker
Disconnect(ctx context.Context) error
// NewMessage create new broker message to publish.
NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error)
// Publish message to broker topic
Publish(ctx context.Context, topic string, messages ...Message) error
Publish(ctx context.Context, topic string, msg *Message, opts ...PublishOption) error
// Subscribe subscribes to topic message via handler
Subscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error)
Subscribe(ctx context.Context, topic string, h Handler, opts ...SubscribeOption) (Subscriber, error)
// BatchPublish messages to broker with multiple topics
BatchPublish(ctx context.Context, msgs []*Message, opts ...PublishOption) error
// BatchSubscribe subscribes to topic messages via handler
BatchSubscribe(ctx context.Context, topic string, h BatchHandler, opts ...SubscribeOption) (Subscriber, error)
// String type of broker
String() string
// Live returns broker liveness
@@ -57,27 +55,72 @@ type Broker interface {
}
type (
FuncPublish func(ctx context.Context, topic string, messages ...Message) error
HookPublish func(next FuncPublish) FuncPublish
FuncSubscribe func(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error)
HookSubscribe func(next FuncSubscribe) FuncSubscribe
FuncPublish func(ctx context.Context, topic string, msg *Message, opts ...PublishOption) error
HookPublish func(next FuncPublish) FuncPublish
FuncBatchPublish func(ctx context.Context, msgs []*Message, opts ...PublishOption) error
HookBatchPublish func(next FuncBatchPublish) FuncBatchPublish
FuncSubscribe func(ctx context.Context, topic string, h Handler, opts ...SubscribeOption) (Subscriber, error)
HookSubscribe func(next FuncSubscribe) FuncSubscribe
FuncBatchSubscribe func(ctx context.Context, topic string, h BatchHandler, opts ...SubscribeOption) (Subscriber, error)
HookBatchSubscribe func(next FuncBatchSubscribe) FuncBatchSubscribe
)
// Message is given to a subscription handler for processing
type Message interface {
// Context for the message.
// Handler is used to process messages via a subscription of a topic.
type Handler func(Event) error
// Events contains multiple events
type Events []Event
// Ack try to ack all events and return
func (evs Events) Ack() error {
var err error
for _, ev := range evs {
if err = ev.Ack(); err != nil {
return err
}
}
return nil
}
// SetError sets error on event
func (evs Events) SetError(err error) {
for _, ev := range evs {
ev.SetError(err)
}
}
// BatchHandler is used to process messages in batches via a subscription of a topic.
type BatchHandler func(Events) error
// Event is given to a subscription handler for processing
type Event interface {
// Context return context.Context for event
Context() context.Context
// Topic returns message destination topic.
// Topic returns event topic
Topic() string
// Header returns message headers.
Header() metadata.Metadata
// Body returns broker message []byte slice
Body() []byte
// Unmarshal try to decode message body to dst.
// This is helper method that uses codec.Unmarshal.
Unmarshal(dst interface{}, opts ...codec.Option) error
// Ack acknowledge message if supported.
// Message returns broker message
Message() *Message
// Ack acknowledge message
Ack() error
// Error returns message error (like decoding errors or some other)
Error() error
// SetError set event processing error
SetError(err error)
}
// Message is used to transfer data
type Message struct {
// Header contains message metadata
Header metadata.Metadata
// Body contains message body
Body codec.RawMessage
}
// NewMessage create broker message with topic filled
func NewMessage(topic string) *Message {
m := &Message{Header: metadata.New(2)}
m.Header.Set(metadata.HeaderTopic, topic)
return m
}
// Subscriber is a convenience return type for the Subscribe method
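For orientation, a minimal sketch of publishing and subscribing against the reshaped v3 surface above; the topic name and payload are illustrative, and only identifiers visible in this diff are used:

package example

import (
    "context"

    "go.unistack.org/micro/v3/broker"
)

// run wires a single-message Handler and publishes one Message through any
// broker.Broker implementation; error handling is trimmed for brevity.
func run(ctx context.Context, b broker.Broker) error {
    h := func(evt broker.Event) error {
        _ = evt.Message().Body // raw payload bytes carried by *broker.Message
        return evt.Ack()
    }

    sub, err := b.Subscribe(ctx, "example.topic", h)
    if err != nil {
        return err
    }
    defer sub.Unsubscribe(ctx)

    // NewMessage pre-fills metadata.HeaderTopic; Publish still takes the topic explicitly.
    msg := broker.NewMessage("example.topic")
    msg.Body = []byte(`{"hello":"world"}`)
    return b.Publish(ctx, "example.topic", msg)
}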


@@ -42,16 +42,6 @@ func SetSubscribeOption(k, v interface{}) SubscribeOption {
}
}
// SetMessageOption returns a function to setup a context with given value
func SetMessageOption(k, v interface{}) MessageOption {
return func(o *MessageOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
@@ -61,3 +51,13 @@ func SetOption(k, v interface{}) Option {
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetPublishOption returns a function to setup a context with given value
func SetPublishOption(k, v interface{}) PublishOption {
return func(o *PublishOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}


@@ -49,6 +49,17 @@ func TestSetSubscribeOption(t *testing.T) {
}
}
func TestSetPublishOption(t *testing.T) {
type key struct{}
o := SetPublishOption(key{}, "test")
opts := &PublishOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetPublishOption not works")
}
}
func TestSetOption(t *testing.T) {
type key struct{}
o := SetOption(key{}, "test")


@@ -2,104 +2,66 @@ package broker
import (
"context"
"strings"
"sync"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
maddr "go.unistack.org/micro/v4/util/addr"
"go.unistack.org/micro/v4/util/id"
mnet "go.unistack.org/micro/v4/util/net"
"go.unistack.org/micro/v4/util/rand"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/options"
maddr "go.unistack.org/micro/v3/util/addr"
"go.unistack.org/micro/v3/util/id"
mnet "go.unistack.org/micro/v3/util/net"
"go.unistack.org/micro/v3/util/rand"
)
type Broker struct {
funcPublish broker.FuncPublish
funcSubscribe broker.FuncSubscribe
subscribers map[string][]*Subscriber
addr string
opts broker.Options
mu sync.RWMutex
connected bool
type memoryBroker struct {
funcPublish broker.FuncPublish
funcBatchPublish broker.FuncBatchPublish
funcSubscribe broker.FuncSubscribe
funcBatchSubscribe broker.FuncBatchSubscribe
subscribers map[string][]*memorySubscriber
addr string
opts broker.Options
sync.RWMutex
connected bool
}
type memoryMessage struct {
c codec.Codec
topic string
ctx context.Context
body []byte
hdr metadata.Metadata
opts broker.MessageOptions
}
func (m *memoryMessage) Ack() error {
return nil
}
func (m *memoryMessage) Body() []byte {
return m.body
}
func (m *memoryMessage) Header() metadata.Metadata {
return m.hdr
}
func (m *memoryMessage) Context() context.Context {
return m.ctx
}
func (m *memoryMessage) Topic() string {
return ""
}
func (m *memoryMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
type Subscriber struct {
ctx context.Context
exit chan bool
handler interface{}
id string
type memoryEvent struct {
err error
message interface{}
topic string
opts broker.SubscribeOptions
opts broker.Options
}
func (b *Broker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.mu.RLock()
c, ok := b.opts.Codecs[ct]
b.mu.RUnlock()
if ok {
return c, nil
}
return nil, codec.ErrUnknownContentType
type memorySubscriber struct {
ctx context.Context
exit chan bool
handler broker.Handler
batchhandler broker.BatchHandler
id string
topic string
opts broker.SubscribeOptions
}
func (b *Broker) Options() broker.Options {
return b.opts
func (m *memoryBroker) Options() broker.Options {
return m.opts
}
func (b *Broker) Address() string {
return b.addr
func (m *memoryBroker) Address() string {
return m.addr
}
func (b *Broker) Connect(ctx context.Context) error {
func (m *memoryBroker) Connect(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
b.mu.Lock()
defer b.mu.Unlock()
m.Lock()
defer m.Unlock()
if b.connected {
if m.connected {
return nil
}
@@ -113,129 +75,154 @@ func (b *Broker) Connect(ctx context.Context) error {
// set addr with port
addr = mnet.HostPort(addr, 10000+i)
b.addr = addr
b.connected = true
m.addr = addr
m.connected = true
return nil
}
func (b *Broker) Disconnect(ctx context.Context) error {
func (m *memoryBroker) Disconnect(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
b.mu.Lock()
defer b.mu.Unlock()
m.Lock()
defer m.Unlock()
if !b.connected {
if !m.connected {
return nil
}
b.connected = false
m.connected = false
return nil
}
func (b *Broker) Init(opts ...broker.Option) error {
func (m *memoryBroker) Init(opts ...broker.Option) error {
for _, o := range opts {
o(&b.opts)
o(&m.opts)
}
b.funcPublish = b.fnPublish
b.funcSubscribe = b.fnSubscribe
m.funcPublish = m.fnPublish
m.funcBatchPublish = m.fnBatchPublish
m.funcSubscribe = m.fnSubscribe
m.funcBatchSubscribe = m.fnBatchSubscribe
b.opts.Hooks.EachPrev(func(hook options.Hook) {
m.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case broker.HookPublish:
b.funcPublish = h(b.funcPublish)
m.funcPublish = h(m.funcPublish)
case broker.HookBatchPublish:
m.funcBatchPublish = h(m.funcBatchPublish)
case broker.HookSubscribe:
b.funcSubscribe = h(b.funcSubscribe)
m.funcSubscribe = h(m.funcSubscribe)
case broker.HookBatchSubscribe:
m.funcBatchSubscribe = h(m.funcBatchSubscribe)
}
})
return nil
}
func (b *Broker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...broker.MessageOption) (broker.Message, error) {
options := broker.NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &memoryMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
m.body, err = c.Marshal(body)
}
if err != nil {
return nil, err
}
return m, nil
func (m *memoryBroker) Publish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
return m.funcPublish(ctx, topic, msg, opts...)
}
func (b *Broker) Publish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.funcPublish(ctx, topic, messages...)
func (m *memoryBroker) fnPublish(ctx context.Context, topic string, msg *broker.Message, opts ...broker.PublishOption) error {
msg.Header.Set(metadata.HeaderTopic, topic)
return m.publish(ctx, []*broker.Message{msg}, opts...)
}
func (b *Broker) fnPublish(ctx context.Context, topic string, messages ...broker.Message) error {
return b.publish(ctx, topic, messages...)
func (m *memoryBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
return m.funcBatchPublish(ctx, msgs, opts...)
}
func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.Message) error {
b.mu.RLock()
if !b.connected {
b.mu.RUnlock()
func (m *memoryBroker) fnBatchPublish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
return m.publish(ctx, msgs, opts...)
}
func (m *memoryBroker) publish(ctx context.Context, msgs []*broker.Message, opts ...broker.PublishOption) error {
m.RLock()
if !m.connected {
m.RUnlock()
return broker.ErrNotConnected
}
b.mu.RUnlock()
m.RUnlock()
var err error
select {
case <-ctx.Done():
return ctx.Err()
default:
}
options := broker.NewPublishOptions(opts...)
b.mu.RLock()
subs, ok := b.subscribers[topic]
b.mu.RUnlock()
if !ok {
return nil
}
msgTopicMap := make(map[string]broker.Events)
for _, v := range msgs {
p := &memoryEvent{opts: m.opts}
var err error
for _, sub := range subs {
switch s := sub.handler.(type) {
default:
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", broker.ErrInvalidHandler)
}
case func(broker.Message) error:
for _, message := range messages {
msg, ok := message.(*memoryMessage)
if !ok {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", broker.ErrInvalidMessage)
}
}
msg.topic = topic
if err = s(msg); err == nil && sub.opts.AutoAck {
err = msg.Ack()
}
if m.opts.Codec == nil || options.BodyOnly {
p.topic, _ = v.Header.Get(metadata.HeaderTopic)
p.message = v.Body
} else {
p.topic, _ = v.Header.Get(metadata.HeaderTopic)
p.message, err = m.opts.Codec.Marshal(v)
if err != nil {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", err)
}
return err
}
}
case func([]broker.Message) error:
if err = s(messages); err == nil && sub.opts.AutoAck {
for _, message := range messages {
err = message.Ack()
if err != nil {
if b.opts.Logger.V(logger.ErrorLevel) {
b.opts.Logger.Error(ctx, "broker handler error", err)
msgTopicMap[p.topic] = append(msgTopicMap[p.topic], p)
}
beh := m.opts.BatchErrorHandler
eh := m.opts.ErrorHandler
for t, ms := range msgTopicMap {
m.RLock()
subs, ok := m.subscribers[t]
m.RUnlock()
if !ok {
continue
}
for _, sub := range subs {
if sub.opts.BatchErrorHandler != nil {
beh = sub.opts.BatchErrorHandler
}
if sub.opts.ErrorHandler != nil {
eh = sub.opts.ErrorHandler
}
switch {
// batch processing
case sub.batchhandler != nil:
if err = sub.batchhandler(ms); err != nil {
ms.SetError(err)
if beh != nil {
_ = beh(ms)
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, err.Error())
}
} else if sub.opts.AutoAck {
if err = ms.Ack(); err != nil {
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
// single processing
case sub.handler != nil:
for _, p := range ms {
if err = sub.handler(p); err != nil {
p.SetError(err)
if eh != nil {
_ = eh(p)
} else if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, "broker handler error", err)
}
} else if sub.opts.AutoAck {
if err = p.Ack(); err != nil {
m.opts.Logger.Error(m.opts.Context, "broker ack error", err)
}
}
}
}
@@ -246,21 +233,17 @@ func (b *Broker) publish(ctx context.Context, topic string, messages ...broker.M
return nil
}
func (b *Broker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return b.funcSubscribe(ctx, topic, handler, opts...)
func (m *memoryBroker) BatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return m.funcBatchSubscribe(ctx, topic, handler, opts...)
}
func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
if err := broker.IsValidHandler(handler); err != nil {
return nil, err
}
b.mu.RLock()
if !b.connected {
b.mu.RUnlock()
func (m *memoryBroker) fnBatchSubscribe(ctx context.Context, topic string, handler broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
m.RLock()
if !m.connected {
m.RUnlock()
return nil, broker.ErrNotConnected
}
b.mu.RUnlock()
m.RUnlock()
sid, err := id.New()
if err != nil {
@@ -269,7 +252,56 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
options := broker.NewSubscribeOptions(opts...)
sub := &Subscriber{
sub := &memorySubscriber{
exit: make(chan bool, 1),
id: sid,
topic: topic,
batchhandler: handler,
opts: options,
ctx: ctx,
}
m.Lock()
m.subscribers[topic] = append(m.subscribers[topic], sub)
m.Unlock()
go func() {
<-sub.exit
m.Lock()
newSubscribers := make([]*memorySubscriber, 0, len(m.subscribers)-1)
for _, sb := range m.subscribers[topic] {
if sb.id == sub.id {
continue
}
newSubscribers = append(newSubscribers, sb)
}
m.subscribers[topic] = newSubscribers
m.Unlock()
}()
return sub, nil
}
func (m *memoryBroker) Subscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
return m.funcSubscribe(ctx, topic, handler, opts...)
}
func (m *memoryBroker) fnSubscribe(ctx context.Context, topic string, handler broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
m.RLock()
if !m.connected {
m.RUnlock()
return nil, broker.ErrNotConnected
}
m.RUnlock()
sid, err := id.New()
if err != nil {
return nil, err
}
options := broker.NewSubscribeOptions(opts...)
sub := &memorySubscriber{
exit: make(chan bool, 1),
id: sid,
topic: topic,
@@ -278,64 +310,102 @@ func (b *Broker) fnSubscribe(ctx context.Context, topic string, handler interfac
ctx: ctx,
}
b.mu.Lock()
b.subscribers[topic] = append(b.subscribers[topic], sub)
b.mu.Unlock()
m.Lock()
m.subscribers[topic] = append(m.subscribers[topic], sub)
m.Unlock()
go func() {
<-sub.exit
b.mu.Lock()
newSubscribers := make([]*Subscriber, 0, len(b.subscribers)-1)
for _, sb := range b.subscribers[topic] {
m.Lock()
newSubscribers := make([]*memorySubscriber, 0, len(m.subscribers)-1)
for _, sb := range m.subscribers[topic] {
if sb.id == sub.id {
continue
}
newSubscribers = append(newSubscribers, sb)
}
b.subscribers[topic] = newSubscribers
b.mu.Unlock()
m.subscribers[topic] = newSubscribers
m.Unlock()
}()
return sub, nil
}
func (b *Broker) String() string {
func (m *memoryBroker) String() string {
return "memory"
}
func (b *Broker) Name() string {
return b.opts.Name
func (m *memoryBroker) Name() string {
return m.opts.Name
}
func (b *Broker) Live() bool {
func (m *memoryBroker) Live() bool {
return true
}
func (b *Broker) Ready() bool {
func (m *memoryBroker) Ready() bool {
return true
}
func (b *Broker) Health() bool {
func (m *memoryBroker) Health() bool {
return true
}
func (m *Subscriber) Options() broker.SubscribeOptions {
return m.opts
}
func (m *Subscriber) Topic() string {
func (m *memoryEvent) Topic() string {
return m.topic
}
func (m *Subscriber) Unsubscribe(ctx context.Context) error {
func (m *memoryEvent) Message() *broker.Message {
switch v := m.message.(type) {
case *broker.Message:
return v
case []byte:
msg := &broker.Message{}
if err := m.opts.Codec.Unmarshal(v, msg); err != nil {
if m.opts.Logger.V(logger.ErrorLevel) {
m.opts.Logger.Error(m.opts.Context, "[memory]: failed to unmarshal: %v", err)
}
return nil
}
return msg
}
return nil
}
func (m *memoryEvent) Ack() error {
return nil
}
func (m *memoryEvent) Error() error {
return m.err
}
func (m *memoryEvent) SetError(err error) {
m.err = err
}
func (m *memoryEvent) Context() context.Context {
return m.opts.Context
}
func (m *memorySubscriber) Options() broker.SubscribeOptions {
return m.opts
}
func (m *memorySubscriber) Topic() string {
return m.topic
}
func (m *memorySubscriber) Unsubscribe(ctx context.Context) error {
m.exit <- true
return nil
}
// NewBroker return new memory broker
func NewBroker(opts ...broker.Option) broker.Broker {
return &Broker{
return &memoryBroker{
opts: broker.NewOptions(opts...),
subscribers: make(map[string][]*Subscriber),
subscribers: make(map[string][]*memorySubscriber),
}
}
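A hedged sketch of how the per-subscriber error handler is reached in the publish() loop above: the handler error is attached to the event via SetError and the event is handed to the handler configured with broker.SubscribeErrorHandler (defined later in this changeset). The import path of this in-memory package is assumed to be broker/memory; topic and payload are illustrative.

package example

import (
    "context"
    "errors"

    "go.unistack.org/micro/v3/broker"
    memory "go.unistack.org/micro/v3/broker/memory" // assumed import path for the package above
)

func errorHandlerSketch(ctx context.Context) error {
    b := memory.NewBroker()
    if err := b.Init(); err != nil {
        return err
    }
    if err := b.Connect(ctx); err != nil {
        return err
    }
    defer b.Disconnect(ctx)

    failing := func(evt broker.Event) error {
        return errors.New("boom") // publish() will SetError this on the event
    }
    onError := func(evt broker.Event) error {
        _ = evt.Error() // carries the handler error set in publish()
        return evt.Ack()
    }

    sub, err := b.Subscribe(ctx, "jobs", failing, broker.SubscribeErrorHandler(onError))
    if err != nil {
        return err
    }
    defer sub.Unsubscribe(ctx)

    msg := broker.NewMessage("jobs")
    msg.Body = []byte(`{}`)
    return b.Publish(ctx, "jobs", msg)
}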


@@ -5,23 +5,12 @@ import (
"fmt"
"testing"
"go.uber.org/atomic"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/metadata"
)
type hldr struct {
c atomic.Int64
}
func (h *hldr) Handler(m broker.Message) error {
h.c.Add(1)
return nil
}
func TestMemoryBroker(t *testing.T) {
b := NewBroker(broker.Codec("application/octet-stream", codec.NewCodec()))
func TestMemoryBatchBroker(t *testing.T) {
b := NewBroker()
ctx := context.Background()
if err := b.Init(); err != nil {
@@ -33,31 +22,32 @@ func TestMemoryBroker(t *testing.T) {
}
topic := "test"
count := int64(10)
count := 10
h := &hldr{}
fn := func(evts broker.Events) error {
return evts.Ack()
}
sub, err := b.Subscribe(ctx, topic, h.Handler)
sub, err := b.BatchSubscribe(ctx, topic, fn)
if err != nil {
t.Fatalf("Unexpected error subscribing %v", err)
}
for i := int64(0); i < count; i++ {
message, err := b.NewMessage(ctx,
metadata.Pairs(
"foo", "bar",
"id", fmt.Sprintf("%d", i),
),
[]byte(`"hello world"`),
broker.MessageContentType("application/octet-stream"),
)
if err != nil {
t.Fatal(err)
msgs := make([]*broker.Message, 0, count)
for i := 0; i < count; i++ {
message := &broker.Message{
Header: map[string]string{
metadata.HeaderTopic: topic,
"foo": "bar",
"id": fmt.Sprintf("%d", i),
},
Body: []byte(`"hello world"`),
}
msgs = append(msgs, message)
}
if err := b.Publish(ctx, topic, message); err != nil {
t.Fatalf("Unexpected error publishing %d err: %v", i, err)
}
if err := b.BatchPublish(ctx, msgs); err != nil {
t.Fatalf("Unexpected error publishing %v", err)
}
if err := sub.Unsubscribe(ctx); err != nil {
@@ -67,8 +57,58 @@ func TestMemoryBroker(t *testing.T) {
if err := b.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
}
if h.c.Load() != count {
t.Fatal("invalid messages count received")
func TestMemoryBroker(t *testing.T) {
b := NewBroker()
ctx := context.Background()
if err := b.Init(); err != nil {
t.Fatalf("Unexpected init error %v", err)
}
if err := b.Connect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
topic := "test"
count := 10
fn := func(_ broker.Event) error {
return nil
}
sub, err := b.Subscribe(ctx, topic, fn)
if err != nil {
t.Fatalf("Unexpected error subscribing %v", err)
}
msgs := make([]*broker.Message, 0, count)
for i := 0; i < count; i++ {
message := &broker.Message{
Header: map[string]string{
metadata.HeaderTopic: topic,
"foo": "bar",
"id": fmt.Sprintf("%d", i),
},
Body: []byte(`"hello world"`),
}
msgs = append(msgs, message)
if err := b.Publish(ctx, topic, message); err != nil {
t.Fatalf("Unexpected error publishing %d err: %v", i, err)
}
}
if err := b.BatchPublish(ctx, msgs); err != nil {
t.Fatalf("Unexpected error publishing %v", err)
}
if err := sub.Unsubscribe(ctx); err != nil {
t.Fatalf("Unexpected error unsubscribing from %s: %v", topic, err)
}
if err := b.Disconnect(ctx); err != nil {
t.Fatalf("Unexpected connect error %v", err)
}
}


@@ -3,37 +3,24 @@ package broker
import (
"context"
"strings"
"sync"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v3/options"
)
type NoopBroker struct {
funcPublish FuncPublish
funcSubscribe FuncSubscribe
opts Options
mu sync.RWMutex
}
func (b *NoopBroker) newCodec(ct string) (codec.Codec, error) {
if idx := strings.IndexRune(ct, ';'); idx >= 0 {
ct = ct[:idx]
}
b.mu.RLock()
c, ok := b.opts.Codecs[ct]
b.mu.RUnlock()
if ok {
return c, nil
}
return nil, codec.ErrUnknownContentType
funcPublish FuncPublish
funcBatchPublish FuncBatchPublish
funcSubscribe FuncSubscribe
funcBatchSubscribe FuncBatchSubscribe
opts Options
}
func NewBroker(opts ...Option) *NoopBroker {
b := &NoopBroker{opts: NewOptions(opts...)}
b.funcPublish = b.fnPublish
b.funcBatchPublish = b.fnBatchPublish
b.funcSubscribe = b.fnSubscribe
b.funcBatchSubscribe = b.fnBatchSubscribe
return b
}
@@ -68,14 +55,20 @@ func (b *NoopBroker) Init(opts ...Option) error {
}
b.funcPublish = b.fnPublish
b.funcBatchPublish = b.fnBatchPublish
b.funcSubscribe = b.fnSubscribe
b.funcBatchSubscribe = b.fnBatchSubscribe
b.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
case HookPublish:
b.funcPublish = h(b.funcPublish)
case HookBatchPublish:
b.funcBatchPublish = h(b.funcBatchPublish)
case HookSubscribe:
b.funcSubscribe = h(b.funcSubscribe)
case HookBatchSubscribe:
b.funcBatchSubscribe = h(b.funcBatchSubscribe)
}
})
@@ -94,75 +87,43 @@ func (b *NoopBroker) Address() string {
return strings.Join(b.opts.Addrs, ",")
}
type noopMessage struct {
c codec.Codec
ctx context.Context
body []byte
hdr metadata.Metadata
opts MessageOptions
}
func (m *noopMessage) Ack() error {
func (b *NoopBroker) fnBatchPublish(_ context.Context, _ []*Message, _ ...PublishOption) error {
return nil
}
func (m *noopMessage) Body() []byte {
return m.body
func (b *NoopBroker) BatchPublish(ctx context.Context, msgs []*Message, opts ...PublishOption) error {
return b.funcBatchPublish(ctx, msgs, opts...)
}
func (m *noopMessage) Header() metadata.Metadata {
return m.hdr
}
func (m *noopMessage) Context() context.Context {
return m.ctx
}
func (m *noopMessage) Topic() string {
return ""
}
func (m *noopMessage) Unmarshal(dst interface{}, opts ...codec.Option) error {
return m.c.Unmarshal(m.body, dst)
}
func (b *NoopBroker) NewMessage(ctx context.Context, hdr metadata.Metadata, body interface{}, opts ...MessageOption) (Message, error) {
options := NewMessageOptions(opts...)
if options.ContentType == "" {
options.ContentType = b.opts.ContentType
}
m := &noopMessage{ctx: ctx, hdr: hdr, opts: options}
c, err := b.newCodec(m.opts.ContentType)
if err == nil {
m.body, err = c.Marshal(body)
}
if err != nil {
return nil, err
}
return m, nil
}
func (b *NoopBroker) fnPublish(_ context.Context, _ string, _ ...Message) error {
func (b *NoopBroker) fnPublish(_ context.Context, _ string, _ *Message, _ ...PublishOption) error {
return nil
}
func (b *NoopBroker) Publish(ctx context.Context, topic string, msg ...Message) error {
return b.funcPublish(ctx, topic, msg...)
func (b *NoopBroker) Publish(ctx context.Context, topic string, msg *Message, opts ...PublishOption) error {
return b.funcPublish(ctx, topic, msg, opts...)
}
type NoopSubscriber struct {
ctx context.Context
topic string
handler interface{}
opts SubscribeOptions
ctx context.Context
topic string
handler Handler
batchHandler BatchHandler
opts SubscribeOptions
}
func (b *NoopBroker) fnSubscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error) {
func (b *NoopBroker) fnBatchSubscribe(ctx context.Context, topic string, handler BatchHandler, opts ...SubscribeOption) (Subscriber, error) {
return &NoopSubscriber{ctx: ctx, topic: topic, opts: NewSubscribeOptions(opts...), batchHandler: handler}, nil
}
func (b *NoopBroker) BatchSubscribe(ctx context.Context, topic string, handler BatchHandler, opts ...SubscribeOption) (Subscriber, error) {
return b.funcBatchSubscribe(ctx, topic, handler, opts...)
}
func (b *NoopBroker) fnSubscribe(ctx context.Context, topic string, handler Handler, opts ...SubscribeOption) (Subscriber, error) {
return &NoopSubscriber{ctx: ctx, topic: topic, opts: NewSubscribeOptions(opts...), handler: handler}, nil
}
func (b *NoopBroker) Subscribe(ctx context.Context, topic string, handler interface{}, opts ...SubscribeOption) (Subscriber, error) {
func (b *NoopBroker) Subscribe(ctx context.Context, topic string, handler Handler, opts ...SubscribeOption) (Subscriber, error) {
return b.funcSubscribe(ctx, topic, handler, opts...)
}


@@ -10,9 +10,9 @@ type testHook struct {
}
func (t *testHook) Publish1(fn FuncPublish) FuncPublish {
return func(ctx context.Context, topic string, messages ...Message) error {
return func(ctx context.Context, topic string, msg *Message, opts ...PublishOption) error {
t.f = true
return fn(ctx, topic, messages...)
return fn(ctx, topic, msg, opts...)
}
}


@@ -5,13 +5,13 @@ import (
"crypto/tls"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/register"
"go.unistack.org/micro/v4/sync"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/options"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/sync"
"go.unistack.org/micro/v3/tracer"
)
// Options struct
@@ -23,8 +23,8 @@ type Options struct {
Tracer tracer.Tracer
// Register can be used for clustering
Register register.Register
// Codecs holds the codecs for marshal/unmarshal based on content-type
Codecs map[string]codec.Codec
// Codec holds the codec for marshal/unmarshal
Codec codec.Codec
// Logger used for logging
Logger logger.Logger
// Meter used for metrics
@@ -37,6 +37,11 @@ type Options struct {
// TLSConfig holds tls.TLSConfig options
TLSConfig *tls.Config
// ErrorHandler used when broker can't unmarshal incoming message
ErrorHandler Handler
// BatchErrorHandler used when broker can't unmarshal incoming messages
BatchErrorHandler BatchHandler
// Addrs holds the broker address
Addrs []string
// Hooks can be run before broker Publish/BatchPublish and
@@ -45,9 +50,6 @@ type Options struct {
// GracefulTimeout contains time to wait to finish in flight requests
GracefulTimeout time.Duration
// ContentType will be used if no content-type set when creating message
ContentType string
}
// NewOptions create new Options
@@ -57,22 +59,16 @@ func NewOptions(opts ...Option) Options {
Logger: logger.DefaultLogger,
Context: context.Background(),
Meter: meter.DefaultMeter,
Codecs: make(map[string]codec.Codec),
Codec: codec.DefaultCodec,
Tracer: tracer.DefaultTracer,
GracefulTimeout: DefaultGracefulTimeout,
ContentType: DefaultContentType,
}
for _, o := range opts {
o(&options)
}
return options
}
// DefaultContentType is the default content-type if not specified
var DefaultContentType = ""
// Context sets the context option
func Context(ctx context.Context) Option {
return func(o *Options) {
@@ -80,27 +76,17 @@ func Context(ctx context.Context) Option {
}
}
// ContentType used by default if not specified
func ContentType(ct string) Option {
return func(o *Options) {
o.ContentType = ct
}
}
// MessageOptions struct
type MessageOptions struct {
// ContentType for message body
ContentType string
// BodyOnly flag says the message contains raw body bytes and don't need
// codec Marshal method
BodyOnly bool
// Context holds custom options
// PublishOptions struct
type PublishOptions struct {
// Context holds external options
Context context.Context
// BodyOnly flag says the message contains raw body bytes
BodyOnly bool
}
// NewMessageOptions creates MessageOptions struct
func NewMessageOptions(opts ...MessageOption) MessageOptions {
options := MessageOptions{
// NewPublishOptions creates PublishOptions struct
func NewPublishOptions(opts ...PublishOption) PublishOptions {
options := PublishOptions{
Context: context.Background(),
}
for _, o := range opts {
@@ -113,6 +99,10 @@ func NewMessageOptions(opts ...MessageOption) MessageOptions {
type SubscribeOptions struct {
// Context holds external options
Context context.Context
// ErrorHandler used when broker can't unmarshal incoming message
ErrorHandler Handler
// BatchErrorHandler used when broker can't unmarshal incoming messages
BatchErrorHandler BatchHandler
// Group holds consumer group
Group string
// AutoAck flag specifies auto ack of incoming message when no error happens
@@ -128,20 +118,20 @@ type SubscribeOptions struct {
// Option func
type Option func(*Options)
// MessageOption func
type MessageOption func(*MessageOptions)
// PublishOption func
type PublishOption func(*PublishOptions)
// MessageContentType sets message content-type that used to Marshal
func MessageContentType(ct string) MessageOption {
return func(o *MessageOptions) {
o.ContentType = ct
// PublishBodyOnly publish only body of the message
func PublishBodyOnly(b bool) PublishOption {
return func(o *PublishOptions) {
o.BodyOnly = b
}
}
// MessageBodyOnly publish only body of the message
func MessageBodyOnly(b bool) MessageOption {
return func(o *MessageOptions) {
o.BodyOnly = b
// PublishContext sets the context
func PublishContext(ctx context.Context) PublishOption {
return func(o *PublishOptions) {
o.Context = ctx
}
}
@@ -152,10 +142,51 @@ func Addrs(addrs ...string) Option {
}
}
// Codec sets the codec used for encoding/decoding messages
func Codec(ct string, c codec.Codec) Option {
// Codec sets the codec used for encoding/decoding used where
// a broker does not support headers
func Codec(c codec.Codec) Option {
return func(o *Options) {
o.Codecs[ct] = c
o.Codec = c
}
}
// ErrorHandler will catch all broker errors that can't be handled
// in the normal way, for example Codec errors
func ErrorHandler(h Handler) Option {
return func(o *Options) {
o.ErrorHandler = h
}
}
// BatchErrorHandler will catch all broker errors that can't be handled
// in the normal way, for example Codec errors
func BatchErrorHandler(h BatchHandler) Option {
return func(o *Options) {
o.BatchErrorHandler = h
}
}
// SubscribeErrorHandler will catch all broker errors that can't be handled
// in the normal way, for example Codec errors
func SubscribeErrorHandler(h Handler) SubscribeOption {
return func(o *SubscribeOptions) {
o.ErrorHandler = h
}
}
// SubscribeBatchErrorHandler will catch all broker errors that can't be handled
// in the normal way, for example Codec errors
func SubscribeBatchErrorHandler(h BatchHandler) SubscribeOption {
return func(o *SubscribeOptions) {
o.BatchErrorHandler = h
}
}
// Queue sets the subscribers queue
// Deprecated
func Queue(name string) SubscribeOption {
return func(o *SubscribeOptions) {
o.Group = name
}
}
@@ -222,6 +253,14 @@ func SubscribeContext(ctx context.Context) SubscribeOption {
}
}
// DisableAutoAck disables auto ack
// Deprecated
func DisableAutoAck() SubscribeOption {
return func(o *SubscribeOptions) {
o.AutoAck = false
}
}
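// Illustrative sketch (not part of this change): combining the publish-time
// options defined in this file. The topic, context key and value below are
// hypothetical; an already connected Broker b is assumed.
type examplePublishKey struct{}

func publishWithOptions(ctx context.Context, b Broker, msg *Message) error {
    return b.Publish(ctx, "events", msg,
        // publish the body bytes as-is; the memory broker checks BodyOnly
        // before running the message through its configured Codec
        PublishBodyOnly(true),
        // stash an implementation-specific value in PublishOptions.Context
        SetPublishOption(examplePublishKey{}, "example-value"),
    )
}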
// SubscribeAutoAck controls auto acking of messages
// after they have been handled.
func SubscribeAutoAck(b bool) SubscribeOption {


@@ -1,14 +0,0 @@
package broker
// IsValidHandler func signature
func IsValidHandler(sub interface{}) error {
switch sub.(type) {
default:
return ErrInvalidHandler
case func(Message) error:
break
case func([]Message) error:
break
}
return nil
}


@@ -5,7 +5,7 @@ import (
"math"
"time"
"go.unistack.org/micro/v4/util/backoff"
"go.unistack.org/micro/v3/util/backoff"
)
// BackoffFunc is the backoff call func


@@ -5,8 +5,8 @@ import (
"context"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/metadata"
)
var (
@@ -29,24 +29,40 @@ var (
)
// Client is the interface used to make requests to services.
// It supports Request/Response via Transport and Publishing via the Broker.
// It also supports bidirectional streaming of requests.
type Client interface {
Name() string
Init(opts ...Option) error
Options() Options
NewMessage(topic string, msg interface{}, opts ...MessageOption) Message
NewRequest(service string, endpoint string, req interface{}, opts ...RequestOption) Request
Call(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error
Stream(ctx context.Context, req Request, opts ...CallOption) (Stream, error)
Publish(ctx context.Context, msg Message, opts ...PublishOption) error
BatchPublish(ctx context.Context, msg []Message, opts ...PublishOption) error
String() string
}
type (
FuncCall func(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error
HookCall func(next FuncCall) FuncCall
FuncStream func(ctx context.Context, req Request, opts ...CallOption) (Stream, error)
HookStream func(next FuncStream) FuncStream
FuncCall func(ctx context.Context, req Request, rsp interface{}, opts ...CallOption) error
HookCall func(next FuncCall) FuncCall
FuncStream func(ctx context.Context, req Request, opts ...CallOption) (Stream, error)
HookStream func(next FuncStream) FuncStream
FuncPublish func(ctx context.Context, msg Message, opts ...PublishOption) error
HookPublish func(next FuncPublish) FuncPublish
FuncBatchPublish func(ctx context.Context, msg []Message, opts ...PublishOption) error
HookBatchPublish func(next FuncBatchPublish) FuncBatchPublish
)
// Message is the interface for publishing asynchronously
type Message interface {
Topic() string
Payload() interface{}
ContentType() string
Metadata() metadata.Metadata
}
// Request is the interface for a synchronous request used by Call or Stream
type Request interface {
// The service to call
@@ -105,5 +121,11 @@ type Option func(*Options)
// CallOption used by Call or Stream
type CallOption func(*CallOptions)
// PublishOption used by Publish
type PublishOption func(*PublishOptions)
// MessageOption used by NewMessage
type MessageOption func(*MessageOptions)
// RequestOption used by NewRequest
type RequestOption func(*RequestOptions)
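A hedged sketch of the restored client-side messaging surface above: NewMessage wraps a payload plus MessageOptions, and Publish hands it to the configured broker. Wrapping raw bytes in *codec.Frame skips the codec lookup (see the noop client further down); topic and payload here are illustrative.

package example

import (
    "context"

    "go.unistack.org/micro/v3/client"
    "go.unistack.org/micro/v3/codec"
)

func notify(ctx context.Context, c client.Client) error {
    // raw bytes wrapped in codec.Frame are published without payload marshalling
    msg := c.NewMessage("order.created",
        &codec.Frame{Data: []byte(`{"id":1}`)},
        client.MessageContentType("application/octet-stream"),
    )
    return c.Publish(ctx, msg)
}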


@@ -32,6 +32,16 @@ func NewContext(ctx context.Context, c Client) context.Context {
return context.WithValue(ctx, clientKey{}, c)
}
// SetPublishOption returns a function to setup a context with given value
func SetPublishOption(k, v interface{}) PublishOption {
return func(o *PublishOptions) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
// SetCallOption returns a function to setup a context with given value
func SetCallOption(k, v interface{}) CallOption {
return func(o *CallOptions) {


@@ -39,6 +39,17 @@ func TestNewNilContext(t *testing.T) {
}
}
func TestSetPublishOption(t *testing.T) {
type key struct{}
o := SetPublishOption(key{}, "test")
opts := &PublishOptions{}
o(opts)
if v, ok := opts.Context.Value(key{}).(string); !ok || v == "" {
t.Fatal("SetPublishOption not works")
}
}
func TestSetCallOption(t *testing.T) {
type key struct{}
o := SetCallOption(key{}, "test")


@@ -4,8 +4,8 @@ import (
"context"
"sort"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/router"
"go.unistack.org/micro/v3/errors"
"go.unistack.org/micro/v3/router"
)
// LookupFunc is used to lookup routes for a service


@@ -3,16 +3,18 @@ package client
import (
"context"
"fmt"
"os"
"strconv"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/selector"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/errors"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/options"
"go.unistack.org/micro/v3/selector"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/tracer"
)
// DefaultCodecs will be used to encode/decode data
@@ -21,9 +23,17 @@ var DefaultCodecs = map[string]codec.Codec{
}
type noopClient struct {
funcCall FuncCall
funcStream FuncStream
opts Options
funcPublish FuncPublish
funcBatchPublish FuncBatchPublish
funcCall FuncCall
funcStream FuncStream
opts Options
}
type noopMessage struct {
topic string
payload interface{}
opts MessageOptions
}
type noopRequest struct {
@@ -42,6 +52,8 @@ func NewClient(opts ...Option) Client {
n.funcCall = n.fnCall
n.funcStream = n.fnStream
n.funcPublish = n.fnPublish
n.funcBatchPublish = n.fnBatchPublish
return n
}
@@ -146,6 +158,32 @@ func (n *noopStream) CloseSend() error {
return n.err
}
func (n *noopMessage) Topic() string {
return n.topic
}
func (n *noopMessage) Payload() interface{} {
return n.payload
}
func (n *noopMessage) ContentType() string {
return n.opts.ContentType
}
func (n *noopMessage) Metadata() metadata.Metadata {
return n.opts.Metadata
}
func (n *noopClient) newCodec(contentType string) (codec.Codec, error) {
if cf, ok := n.opts.Codecs[contentType]; ok {
return cf, nil
}
if cf, ok := DefaultCodecs[contentType]; ok {
return cf, nil
}
return nil, codec.ErrUnknownContentType
}
func (n *noopClient) Init(opts ...Option) error {
for _, o := range opts {
o(&n.opts)
@@ -153,6 +191,8 @@ func (n *noopClient) Init(opts ...Option) error {
n.funcCall = n.fnCall
n.funcStream = n.fnStream
n.funcPublish = n.fnPublish
n.funcBatchPublish = n.fnBatchPublish
n.opts.Hooks.EachPrev(func(hook options.Hook) {
switch h := hook.(type) {
@@ -160,6 +200,10 @@ func (n *noopClient) Init(opts ...Option) error {
n.funcCall = h(n.funcCall)
case HookStream:
n.funcStream = h(n.funcStream)
case HookPublish:
n.funcPublish = h(n.funcPublish)
case HookBatchPublish:
n.funcBatchPublish = h(n.funcBatchPublish)
}
})
@@ -332,6 +376,11 @@ func (n *noopClient) NewRequest(service, endpoint string, _ interface{}, _ ...Re
return &noopRequest{service: service, endpoint: endpoint}
}
func (n *noopClient) NewMessage(topic string, msg interface{}, opts ...MessageOption) Message {
options := NewMessageOptions(append([]MessageOption{MessageContentType(n.opts.ContentType)}, opts...)...)
return &noopMessage{topic: topic, payload: msg, opts: options}
}
func (n *noopClient) Stream(ctx context.Context, req Request, opts ...CallOption) (Stream, error) {
ts := time.Now()
n.opts.Meter.Counter(semconv.ClientRequestInflight, "endpoint", req.Endpoint()).Inc()
@@ -500,3 +549,90 @@ func (n *noopClient) fnStream(ctx context.Context, req Request, opts ...CallOpti
func (n *noopClient) stream(ctx context.Context, _ string, _ Request, _ CallOptions) (Stream, error) {
return &noopStream{ctx: ctx}, nil
}
func (n *noopClient) BatchPublish(ctx context.Context, ps []Message, opts ...PublishOption) error {
return n.funcBatchPublish(ctx, ps, opts...)
}
func (n *noopClient) fnBatchPublish(ctx context.Context, ps []Message, opts ...PublishOption) error {
return n.publish(ctx, ps, opts...)
}
func (n *noopClient) Publish(ctx context.Context, p Message, opts ...PublishOption) error {
return n.funcPublish(ctx, p, opts...)
}
func (n *noopClient) fnPublish(ctx context.Context, p Message, opts ...PublishOption) error {
return n.publish(ctx, []Message{p}, opts...)
}
func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishOption) error {
options := NewPublishOptions(opts...)
msgs := make([]*broker.Message, 0, len(ps))
// get proxy
exchange := ""
if v, ok := os.LookupEnv("MICRO_PROXY"); ok {
exchange = v
}
// get the exchange
if len(options.Exchange) > 0 {
exchange = options.Exchange
}
omd, ok := metadata.FromOutgoingContext(ctx)
if !ok {
omd = metadata.New(0)
}
for _, p := range ps {
md := metadata.Copy(omd)
topic := p.Topic()
if len(exchange) > 0 {
topic = exchange
}
md[metadata.HeaderTopic] = topic
iter := p.Metadata().Iterator()
var k, v string
for iter.Next(&k, &v) {
md.Set(k, v)
}
md[metadata.HeaderContentType] = p.ContentType()
var body []byte
// passed in raw data
if d, ok := p.Payload().(*codec.Frame); ok {
body = d.Data
} else {
// use codec for payload
cf, err := n.newCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", "%s", err)
}
// set the body
b, err := cf.Marshal(p.Payload())
if err != nil {
return errors.InternalServerError("go.micro.client", "%s", err)
}
body = b
}
msgs = append(msgs, &broker.Message{Header: md, Body: body})
}
if len(msgs) == 1 {
return n.opts.Broker.Publish(ctx, msgs[0].Header[metadata.HeaderTopic], msgs[0],
broker.PublishContext(options.Context),
broker.PublishBodyOnly(options.BodyOnly),
)
}
return n.opts.Broker.BatchPublish(ctx, msgs,
broker.PublishContext(options.Context),
broker.PublishBodyOnly(options.BodyOnly),
)
}

client/noop_test.go (new file, +35 lines)

@@ -0,0 +1,35 @@
package client
import (
"context"
"testing"
)
type testHook struct {
f bool
}
func (t *testHook) Publish(fn FuncPublish) FuncPublish {
return func(ctx context.Context, msg Message, opts ...PublishOption) error {
t.f = true
return fn(ctx, msg, opts...)
}
}
func TestNoopHook(t *testing.T) {
h := &testHook{}
c := NewClient(Hooks(HookPublish(h.Publish)))
if err := c.Init(); err != nil {
t.Fatal(err)
}
if err := c.Publish(context.TODO(), c.NewMessage("", nil, MessageContentType("application/octet-stream"))); err != nil {
t.Fatal(err)
}
if !h.f {
t.Fatal("hook not works")
}
}


@@ -6,17 +6,17 @@ import (
"net"
"time"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/register"
"go.unistack.org/micro/v4/router"
"go.unistack.org/micro/v4/selector"
"go.unistack.org/micro/v4/selector/random"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/options"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/router"
"go.unistack.org/micro/v3/selector"
"go.unistack.org/micro/v3/selector/random"
"go.unistack.org/micro/v3/tracer"
)
// Options holds client options
@@ -137,6 +137,43 @@ func Context(ctx context.Context) Option {
}
}
// NewPublishOptions create new PublishOptions struct from option
func NewPublishOptions(opts ...PublishOption) PublishOptions {
options := PublishOptions{}
for _, o := range opts {
o(&options)
}
return options
}
// PublishOptions holds publish options
type PublishOptions struct {
// Context used for external options
Context context.Context
// Exchange topic exchange name
Exchange string
// BodyOnly will publish only message body
BodyOnly bool
}
// NewMessageOptions creates message options struct
func NewMessageOptions(opts ...MessageOption) MessageOptions {
options := MessageOptions{Metadata: metadata.New(1)}
for _, o := range opts {
o(&options)
}
return options
}
// MessageOptions holds client message options
type MessageOptions struct {
// Metadata additional metadata
Metadata metadata.Metadata
// ContentType specify content-type of message
// deprecated
ContentType string
}
// NewRequestOptions creates new RequestOptions struct
func NewRequestOptions(opts ...RequestOption) RequestOptions {
options := RequestOptions{}
@@ -337,6 +374,43 @@ func DialTimeout(d time.Duration) Option {
}
}
// WithExchange sets the exchange to route a message through
// Deprecated
func WithExchange(e string) PublishOption {
return func(o *PublishOptions) {
o.Exchange = e
}
}
// PublishExchange sets the exchange to route a message through
func PublishExchange(e string) PublishOption {
return func(o *PublishOptions) {
o.Exchange = e
}
}
// WithBodyOnly publish only message body
// Deprecated
func WithBodyOnly(b bool) PublishOption {
return func(o *PublishOptions) {
o.BodyOnly = b
}
}
// PublishBodyOnly publish only message body
func PublishBodyOnly(b bool) PublishOption {
return func(o *PublishOptions) {
o.BodyOnly = b
}
}
// PublishContext sets the context in publish options
func PublishContext(ctx context.Context) PublishOption {
return func(o *PublishOptions) {
o.Context = ctx
}
}
// WithContextDialer pass ContextDialer to client call
func WithContextDialer(fn func(context.Context, string) (net.Conn, error)) CallOption {
return func(o *CallOptions) {
@@ -448,6 +522,30 @@ func WithSelectOptions(sops ...selector.SelectOption) CallOption {
}
}
// WithMessageContentType sets the message content type
// Deprecated
func WithMessageContentType(ct string) MessageOption {
return func(o *MessageOptions) {
o.Metadata.Set(metadata.HeaderContentType, ct)
o.ContentType = ct
}
}
// MessageContentType sets the message content type
func MessageContentType(ct string) MessageOption {
return func(o *MessageOptions) {
o.Metadata.Set(metadata.HeaderContentType, ct)
o.ContentType = ct
}
}
// MessageMetadata sets the message metadata
func MessageMetadata(k, v string) MessageOption {
return func(o *MessageOptions) {
o.Metadata.Set(k, v)
}
}
// StreamingRequest specifies that request is streaming
func StreamingRequest(b bool) RequestOption {
return func(o *RequestOptions) {


@@ -3,7 +3,7 @@ package client
import (
"context"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v3/errors"
)
// RetryFunc that returning either false or a non-nil error will result in the call not being retried


@@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v3/errors"
)
func TestRetryAlways(t *testing.T) {


@@ -1,7 +1,7 @@
package client
import (
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v3/codec"
)
type testRequest struct {


@@ -3,7 +3,7 @@ package cluster
import (
"context"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/metadata"
)
// Message sent to member in cluster


@@ -1,235 +0,0 @@
package sql
import (
"context"
"database/sql"
"reflect"
"unsafe"
"golang.yandex/hasql/v2"
)
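// newSQLRowError returns a *sql.Row that carries ErrorNoAliveNodes. database/sql
// exports neither a constructor for sql.Row nor a way to set its error, so the
// unexported err field is filled in via reflect and unsafe pointer arithmetic.
// This is inherently fragile: it depends on the field keeping the name "err"
// in future Go releases.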
func newSQLRowError() *sql.Row {
row := &sql.Row{}
t := reflect.TypeOf(row).Elem()
field, _ := t.FieldByName("err")
rowPtr := unsafe.Pointer(row)
errFieldPtr := unsafe.Pointer(uintptr(rowPtr) + field.Offset)
errPtr := (*error)(errFieldPtr)
*errPtr = ErrorNoAliveNodes
return row
}
type ClusterQuerier interface {
Querier
WaitForNodes(ctx context.Context, criterion ...hasql.NodeStateCriterion) error
}
type Cluster struct {
hasql *hasql.Cluster[Querier]
options ClusterOptions
}
// NewCluster returns a [ClusterQuerier] that provides a cluster of nodes
func NewCluster[T Querier](opts ...ClusterOption) (ClusterQuerier, error) {
options := ClusterOptions{Context: context.Background()}
for _, opt := range opts {
opt(&options)
}
if options.NodeChecker == nil {
return nil, ErrClusterChecker
}
if options.NodeDiscoverer == nil {
return nil, ErrClusterDiscoverer
}
if options.NodePicker == nil {
return nil, ErrClusterPicker
}
if options.Retries < 1 {
options.Retries = 1
}
if options.NodeStateCriterion == 0 {
options.NodeStateCriterion = hasql.Primary
}
options.Options = append(options.Options, hasql.WithNodePicker(options.NodePicker))
if p, ok := options.NodePicker.(*CustomPicker[Querier]); ok {
p.opts.Priority = options.NodePriority
}
c, err := hasql.NewCluster(
options.NodeDiscoverer,
options.NodeChecker,
options.Options...,
)
if err != nil {
return nil, err
}
return &Cluster{hasql: c, options: options}, nil
}
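// A minimal usage sketch (the node names and the *sql.DB handles masterDB and
// replicaDB are placeholders supplied by the caller):
//
//	c, err := NewCluster[Querier](
//		WithClusterNodeChecker(hasql.PostgreSQLChecker),
//		WithClusterNodePicker(NewCustomPicker[Querier](CustomPickerMaxLag(100))),
//		WithClusterNodes(
//			ClusterNode{Name: "master", DB: masterDB, Priority: 1},
//			ClusterNode{Name: "replica", DB: replicaDB, Priority: 2},
//		),
//	)
//	if err != nil {
//		// handle error
//	}
//	defer c.Close()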
func (c *Cluster) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
var tx *sql.Tx
var err error
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
if tx, err = n.DB().BeginTx(ctx, opts); err != nil && retries >= c.options.Retries {
return true
}
}
return false
})
if tx == nil && err == nil {
err = ErrorNoAliveNodes
}
return tx, err
}
func (c *Cluster) Close() error {
return c.hasql.Close()
}
func (c *Cluster) Conn(ctx context.Context) (*sql.Conn, error) {
var conn *sql.Conn
var err error
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
if conn, err = n.DB().Conn(ctx); err != nil && retries >= c.options.Retries {
return true
}
}
return false
})
if conn == nil && err == nil {
err = ErrorNoAliveNodes
}
return conn, err
}
func (c *Cluster) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
var res sql.Result
var err error
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
if res, err = n.DB().ExecContext(ctx, query, args...); err != nil && retries >= c.options.Retries {
return true
}
}
return false
})
if res == nil && err == nil {
err = ErrorNoAliveNodes
}
return res, err
}
func (c *Cluster) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
var res *sql.Stmt
var err error
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
if res, err = n.DB().PrepareContext(ctx, query); err != nil && retries >= c.options.Retries {
return true
}
}
return false
})
if res == nil && err == nil {
err = ErrorNoAliveNodes
}
return res, err
}
func (c *Cluster) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
var res *sql.Rows
var err error
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
if res, err = n.DB().QueryContext(ctx, query, args...); err != nil && err != sql.ErrNoRows && retries >= c.options.Retries {
return true
}
}
return false
})
if res == nil && err == nil {
err = ErrorNoAliveNodes
}
return res, err
}
func (c *Cluster) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
var res *sql.Row
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
for ; retries < c.options.Retries; retries++ {
res = n.DB().QueryRowContext(ctx, query, args...)
if res.Err() == nil {
return false
} else if res.Err() != nil && retries >= c.options.Retries {
return false
}
}
return true
})
if res == nil {
res = newSQLRowError()
}
return res
}
func (c *Cluster) PingContext(ctx context.Context) error {
var err error
var ok bool
retries := 0
c.hasql.NodesIter(c.getNodeStateCriterion(ctx))(func(n *hasql.Node[Querier]) bool {
ok = true
for ; retries < c.options.Retries; retries++ {
if err = n.DB().PingContext(ctx); err != nil && retries >= c.options.Retries {
return true
}
}
return false
})
if !ok {
err = ErrorNoAliveNodes
}
return err
}
func (c *Cluster) WaitForNodes(ctx context.Context, criterions ...hasql.NodeStateCriterion) error {
for _, criterion := range criterions {
if _, err := c.hasql.WaitForNode(ctx, criterion); err != nil {
return err
}
}
return nil
}

View File

@@ -1,171 +0,0 @@
package sql
import (
"context"
"fmt"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"golang.yandex/hasql/v2"
)
func TestNewCluster(t *testing.T) {
dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbMaster.Close()
dbMasterMock.MatchExpectationsInOrder(false)
dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(1, 0)).
RowsWillBeClosed().
WithoutArgs()
dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("master-dc1"))
dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbDRMaster.Close()
dbDRMasterMock.MatchExpectationsInOrder(false)
dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 40)).
RowsWillBeClosed().
WithoutArgs()
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster1-dc2"))
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster"))
dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC1.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC2.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
defer cancel()
c, err := NewCluster[Querier](
WithClusterContext(tctx),
WithClusterNodeChecker(hasql.PostgreSQLChecker),
WithClusterNodePicker(NewCustomPicker[Querier](
CustomPickerMaxLag(100),
)),
WithClusterNodes(
ClusterNode{"slave-dc1", dbSlaveDC1, 1},
ClusterNode{"master-dc1", dbMaster, 1},
ClusterNode{"slave-dc2", dbSlaveDC2, 2},
ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
),
WithClusterOptions(
hasql.WithUpdateInterval[Querier](2*time.Second),
hasql.WithUpdateTimeout[Querier](1*time.Second),
),
)
if err != nil {
t.Fatal(err)
}
defer c.Close()
if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
t.Fatal(err)
}
time.Sleep(500 * time.Millisecond)
node1Name := ""
fmt.Printf("check for Standby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.Standby), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node1Name); err != nil {
t.Fatal(err)
} else if "slave-dc1" != node1Name {
t.Fatalf("invalid node name %s != %s", "slave-dc1", node1Name)
}
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
node2Name := ""
fmt.Printf("check for PreferStandby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node2Name); err != nil {
t.Fatal(err)
} else if "slave-dc1" != node2Name {
t.Fatalf("invalid node name %s != %s", "slave-dc1", node2Name)
}
node3Name := ""
fmt.Printf("check for PreferPrimary\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferPrimary), "SELECT node_name as name"); row.Err() != nil {
t.Fatal(row.Err())
} else if err = row.Scan(&node3Name); err != nil {
t.Fatal(err)
} else if "master-dc1" != node3Name {
t.Fatalf("invalid node name %s != %s", "master-dc1", node3Name)
}
dbSlaveDC1Mock.ExpectQuery(`.*`).WillReturnRows(sqlmock.NewRows([]string{"role"}).RowError(1, fmt.Errorf("row error")))
time.Sleep(2 * time.Second)
fmt.Printf("check for PreferStandby\n")
if row := c.QueryRowContext(NodeStateCriterion(tctx, hasql.PreferStandby), "SELECT node_name as name"); row.Err() == nil {
t.Fatal("must return error")
}
if dbMasterErr := dbMasterMock.ExpectationsWereMet(); dbMasterErr != nil {
t.Error(dbMasterErr)
}
}

View File

@@ -1,25 +0,0 @@
package sql
import (
"context"
"database/sql"
)
type Querier interface {
// Basic connection methods
PingContext(ctx context.Context) error
Close() error
// Query methods with context
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
// Prepared statements with context
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
// Transaction management with context
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
Conn(ctx context.Context) (*sql.Conn, error)
}
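// Note: *sql.DB satisfies Querier, as does the Cluster type in this package,
// so single-node handles and the cluster wrapper are interchangeable wherever
// a Querier is expected.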

View File

@@ -1,295 +0,0 @@
package sql
import (
"context"
"database/sql"
"database/sql/driver"
"io"
"sync"
"time"
)
// OpenDBWithCluster creates a [*sql.DB] that uses the [ClusterQuerier]
func OpenDBWithCluster(db ClusterQuerier) (*sql.DB, error) {
driver := NewClusterDriver(db)
connector, err := driver.OpenConnector("")
if err != nil {
return nil, err
}
return sql.OpenDB(connector), nil
}
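// Usage sketch (c is a ClusterQuerier built with NewCluster; the query text is
// only illustrative):
//
//	db, err := OpenDBWithCluster(c)
//	if err != nil {
//		// handle error
//	}
//	row := db.QueryRowContext(NodeStateCriterion(ctx, hasql.Primary), "SELECT 1")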
// ClusterDriver implements [driver.Driver] and [driver.Connector] for an existing [ClusterQuerier]
type ClusterDriver struct {
db ClusterQuerier
}
// NewClusterDriver creates a new [driver.Driver] that uses an existing [ClusterQuerier]
func NewClusterDriver(db ClusterQuerier) *ClusterDriver {
return &ClusterDriver{db: db}
}
// Open implements [driver.Driver.Open]
func (d *ClusterDriver) Open(name string) (driver.Conn, error) {
return d.Connect(context.Background())
}
// OpenConnector implements [driver.DriverContext.OpenConnector]
func (d *ClusterDriver) OpenConnector(name string) (driver.Connector, error) {
return d, nil
}
// Connect implements [driver.Connector.Connect]
func (d *ClusterDriver) Connect(ctx context.Context) (driver.Conn, error) {
conn, err := d.db.Conn(ctx)
if err != nil {
return nil, err
}
return &dbConn{conn: conn}, nil
}
// Driver implements [driver.Connector.Driver]
func (d *ClusterDriver) Driver() driver.Driver {
return d
}
// dbConn implements driver.Conn with both context and legacy methods
type dbConn struct {
conn *sql.Conn
mu sync.Mutex
}
// Prepare implements [driver.Conn.Prepare] (legacy method)
func (c *dbConn) Prepare(query string) (driver.Stmt, error) {
return c.PrepareContext(context.Background(), query)
}
// PrepareContext implements [driver.ConnPrepareContext.PrepareContext]
func (c *dbConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
c.mu.Lock()
defer c.mu.Unlock()
stmt, err := c.conn.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
return &dbStmt{stmt: stmt}, nil
}
// Exec implements [driver.Execer.Exec] (legacy method)
func (c *dbConn) Exec(query string, args []driver.Value) (driver.Result, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, value := range args {
namedArgs[i] = driver.NamedValue{Value: value}
}
return c.ExecContext(context.Background(), query, namedArgs)
}
// ExecContext implements [driver.ExecerContext.ExecContext]
func (c *dbConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
c.mu.Lock()
defer c.mu.Unlock()
// Convert driver.NamedValue to any
interfaceArgs := make([]any, len(args))
for i, arg := range args {
interfaceArgs[i] = arg.Value
}
return c.conn.ExecContext(ctx, query, interfaceArgs...)
}
// Query implements [driver.Queryer.Query] (legacy method)
func (c *dbConn) Query(query string, args []driver.Value) (driver.Rows, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, value := range args {
namedArgs[i] = driver.NamedValue{Value: value}
}
return c.QueryContext(context.Background(), query, namedArgs)
}
// QueryContext implements [driver.QueryerContext.QueryContext]
func (c *dbConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
c.mu.Lock()
defer c.mu.Unlock()
// Convert driver.NamedValue to any
interfaceArgs := make([]any, len(args))
for i, arg := range args {
interfaceArgs[i] = arg.Value
}
rows, err := c.conn.QueryContext(ctx, query, interfaceArgs...)
if err != nil {
return nil, err
}
return &dbRows{rows: rows}, nil
}
// Begin implements [driver.Conn.Begin] (legacy method)
func (c *dbConn) Begin() (driver.Tx, error) {
return c.BeginTx(context.Background(), driver.TxOptions{})
}
// BeginTx implements [driver.ConnBeginTx.BeginTx]
func (c *dbConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
c.mu.Lock()
defer c.mu.Unlock()
sqlOpts := &sql.TxOptions{
Isolation: sql.IsolationLevel(opts.Isolation),
ReadOnly: opts.ReadOnly,
}
tx, err := c.conn.BeginTx(ctx, sqlOpts)
if err != nil {
return nil, err
}
return &dbTx{tx: tx}, nil
}
// Ping implements [driver.Pinger.Ping]
func (c *dbConn) Ping(ctx context.Context) error {
return c.conn.PingContext(ctx)
}
// Close implements [driver.Conn.Close]
func (c *dbConn) Close() error {
return c.conn.Close()
}
// IsValid implements [driver.Validator.IsValid]
func (c *dbConn) IsValid() bool {
// Ping with a short timeout to check if the connection is still valid
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
return c.conn.PingContext(ctx) == nil
}
// dbStmt implements [driver.Stmt] with both context and legacy methods
type dbStmt struct {
stmt *sql.Stmt
mu sync.Mutex
}
// Close implements [driver.Stmt.Close]
func (s *dbStmt) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
return s.stmt.Close()
}
// NumInput implements [driver.Stmt.NumInput]
func (s *dbStmt) NumInput() int {
return -1 // Number of parameters is unknown
}
// Exec implements [driver.Stmt.Exec] (legacy method)
func (s *dbStmt) Exec(args []driver.Value) (driver.Result, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, value := range args {
namedArgs[i] = driver.NamedValue{Value: value}
}
return s.ExecContext(context.Background(), namedArgs)
}
// ExecContext implements [driver.StmtExecContext.ExecContext]
func (s *dbStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
s.mu.Lock()
defer s.mu.Unlock()
interfaceArgs := make([]any, len(args))
for i, arg := range args {
interfaceArgs[i] = arg.Value
}
return s.stmt.ExecContext(ctx, interfaceArgs...)
}
// Query implements [driver.Stmt.Query] (legacy method)
func (s *dbStmt) Query(args []driver.Value) (driver.Rows, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, value := range args {
namedArgs[i] = driver.NamedValue{Value: value}
}
return s.QueryContext(context.Background(), namedArgs)
}
// QueryContext implements [driver.StmtQueryContext.QueryContext]
func (s *dbStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
s.mu.Lock()
defer s.mu.Unlock()
interfaceArgs := make([]any, len(args))
for i, arg := range args {
interfaceArgs[i] = arg.Value
}
rows, err := s.stmt.QueryContext(ctx, interfaceArgs...)
if err != nil {
return nil, err
}
return &dbRows{rows: rows}, nil
}
// dbRows implements [driver.Rows]
type dbRows struct {
rows *sql.Rows
}
// Columns implements [driver.Rows.Columns]
func (r *dbRows) Columns() []string {
cols, err := r.rows.Columns()
if err != nil {
// This shouldn't happen if the query was successful
return []string{}
}
return cols
}
// Close implements [driver.Rows.Close]
func (r *dbRows) Close() error {
return r.rows.Close()
}
// Next implements [driver.Rows.Next]
func (r *dbRows) Next(dest []driver.Value) error {
if !r.rows.Next() {
if err := r.rows.Err(); err != nil {
return err
}
return io.EOF
}
// Create a slice of interfaces to scan into
scanArgs := make([]any, len(dest))
for i := range scanArgs {
scanArgs[i] = &dest[i]
}
return r.rows.Scan(scanArgs...)
}
// dbTx implements [driver.Tx]
type dbTx struct {
tx *sql.Tx
mu sync.Mutex
}
// Commit implements [driver.Tx.Commit]
func (t *dbTx) Commit() error {
t.mu.Lock()
defer t.mu.Unlock()
return t.tx.Commit()
}
// Rollback implements [driver.Tx.Rollback]
func (t *dbTx) Rollback() error {
t.mu.Lock()
defer t.mu.Unlock()
return t.tx.Rollback()
}

View File

@@ -1,141 +0,0 @@
package sql
import (
"context"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"golang.yandex/hasql/v2"
)
func TestDriver(t *testing.T) {
dbMaster, dbMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbMaster.Close()
dbMasterMock.MatchExpectationsInOrder(false)
dbMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(1, 0)).
RowsWillBeClosed().
WithoutArgs()
dbMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("master-dc1"))
dbDRMaster, dbDRMasterMock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbDRMaster.Close()
dbDRMasterMock.MatchExpectationsInOrder(false)
dbDRMasterMock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 40)).
RowsWillBeClosed().
WithoutArgs()
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster1-dc2"))
dbDRMasterMock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("drmaster"))
dbSlaveDC1, dbSlaveDC1Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC1.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC1Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC1Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
dbSlaveDC2, dbSlaveDC2Mock, err := sqlmock.New(sqlmock.MonitorPingsOption(true))
if err != nil {
t.Fatal(err)
}
defer dbSlaveDC2.Close()
dbSlaveDC1Mock.MatchExpectationsInOrder(false)
dbSlaveDC2Mock.ExpectQuery(`.*pg_is_in_recovery.*`).WillReturnRows(
sqlmock.NewRowsWithColumnDefinition(
sqlmock.NewColumn("role").OfType("int8", 0),
sqlmock.NewColumn("replication_lag").OfType("int8", 0)).
AddRow(2, 50)).
RowsWillBeClosed().
WithoutArgs()
dbSlaveDC2Mock.ExpectQuery(`SELECT node_name as name`).WillReturnRows(
sqlmock.NewRows([]string{"name"}).
AddRow("slave-dc1"))
tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
defer cancel()
c, err := NewCluster[Querier](
WithClusterContext(tctx),
WithClusterNodeChecker(hasql.PostgreSQLChecker),
WithClusterNodePicker(NewCustomPicker[Querier](
CustomPickerMaxLag(100),
)),
WithClusterNodes(
ClusterNode{"slave-dc1", dbSlaveDC1, 1},
ClusterNode{"master-dc1", dbMaster, 1},
ClusterNode{"slave-dc2", dbSlaveDC2, 2},
ClusterNode{"drmaster1-dc2", dbDRMaster, 0},
),
WithClusterOptions(
hasql.WithUpdateInterval[Querier](2*time.Second),
hasql.WithUpdateTimeout[Querier](1*time.Second),
),
)
if err != nil {
t.Fatal(err)
}
defer c.Close()
if err = c.WaitForNodes(tctx, hasql.Primary, hasql.Standby); err != nil {
t.Fatal(err)
}
db, err := OpenDBWithCluster(c)
if err != nil {
t.Fatal(err)
}
// Use context methods
row := db.QueryRowContext(NodeStateCriterion(t.Context(), hasql.Primary), "SELECT node_name as name")
if err = row.Err(); err != nil {
t.Fatal(err)
}
nodeName := ""
if err = row.Scan(&nodeName); err != nil {
t.Fatal(err)
}
if nodeName != "master-dc1" {
t.Fatalf("invalid node_name %s != %s", "master-dc1", nodeName)
}
}

View File

@@ -1,10 +0,0 @@
package sql
import "errors"
var (
ErrClusterChecker = errors.New("cluster node checker required")
ErrClusterDiscoverer = errors.New("cluster node discoverer required")
ErrClusterPicker = errors.New("cluster node picker required")
ErrorNoAliveNodes = errors.New("cluster no alive nodes")
)

View File

@@ -1,110 +0,0 @@
package sql
import (
"context"
"math"
"golang.yandex/hasql/v2"
)
// ClusterOptions contains cluster specific options
type ClusterOptions struct {
NodeChecker hasql.NodeChecker
NodePicker hasql.NodePicker[Querier]
NodeDiscoverer hasql.NodeDiscoverer[Querier]
Options []hasql.ClusterOpt[Querier]
Context context.Context
Retries int
NodePriority map[string]int32
NodeStateCriterion hasql.NodeStateCriterion
}
// ClusterOption apply cluster options to ClusterOptions
type ClusterOption func(*ClusterOptions)
// WithClusterNodeChecker pass hasql.NodeChecker to cluster options
func WithClusterNodeChecker(c hasql.NodeChecker) ClusterOption {
return func(o *ClusterOptions) {
o.NodeChecker = c
}
}
// WithClusterNodePicker pass hasql.NodePicker to cluster options
func WithClusterNodePicker(p hasql.NodePicker[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.NodePicker = p
}
}
// WithClusterNodeDiscoverer pass hasql.NodeDiscoverer to cluster options
func WithClusterNodeDiscoverer(d hasql.NodeDiscoverer[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.NodeDiscoverer = d
}
}
// WithRetries sets the retry count used on other nodes in case of error
func WithRetries(n int) ClusterOption {
return func(o *ClusterOptions) {
o.Retries = n
}
}
// WithClusterContext pass context.Context to cluster options; it is used for checks
func WithClusterContext(ctx context.Context) ClusterOption {
return func(o *ClusterOptions) {
o.Context = ctx
}
}
// WithClusterOptions pass hasql.ClusterOpt
func WithClusterOptions(opts ...hasql.ClusterOpt[Querier]) ClusterOption {
return func(o *ClusterOptions) {
o.Options = append(o.Options, opts...)
}
}
// WithClusterNodeStateCriterion pass default hasql.NodeStateCriterion
func WithClusterNodeStateCriterion(c hasql.NodeStateCriterion) ClusterOption {
return func(o *ClusterOptions) {
o.NodeStateCriterion = c
}
}
type ClusterNode struct {
Name string
DB Querier
Priority int32
}
// WithClusterNodes configures the cluster with a static NodeDiscoverer built from the given nodes
func WithClusterNodes(cns ...ClusterNode) ClusterOption {
return func(o *ClusterOptions) {
nodes := make([]*hasql.Node[Querier], 0, len(cns))
if o.NodePriority == nil {
o.NodePriority = make(map[string]int32, len(cns))
}
for _, cn := range cns {
nodes = append(nodes, hasql.NewNode(cn.Name, cn.DB))
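// A zero Priority means "not set"; it is treated as the lowest possible
// priority (math.MaxInt32) so explicitly prioritized nodes are preferred.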
if cn.Priority == 0 {
cn.Priority = math.MaxInt32
}
o.NodePriority[cn.Name] = cn.Priority
}
o.NodeDiscoverer = hasql.NewStaticNodeDiscoverer(nodes...)
}
}
type nodeStateCriterionKey struct{}
// NodeStateCriterion injects hasql.NodeStateCriterion into the context
func NodeStateCriterion(ctx context.Context, c hasql.NodeStateCriterion) context.Context {
return context.WithValue(ctx, nodeStateCriterionKey{}, c)
}
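// Usage sketch: route a single query to a standby node while leaving the
// cluster-wide default criterion untouched (the query text is illustrative):
//
//	row := c.QueryRowContext(NodeStateCriterion(ctx, hasql.Standby), "SELECT 1")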
func (c *Cluster) getNodeStateCriterion(ctx context.Context) hasql.NodeStateCriterion {
if v, ok := ctx.Value(nodeStateCriterionKey{}).(hasql.NodeStateCriterion); ok {
return v
}
return c.options.NodeStateCriterion
}

View File

@@ -1,113 +0,0 @@
package sql
import (
"fmt"
"math"
"time"
"golang.yandex/hasql/v2"
)
// compile time guard
var _ hasql.NodePicker[Querier] = (*CustomPicker[Querier])(nil)
// CustomPickerOptions holds options to pick nodes
type CustomPickerOptions struct {
MaxLag int
Priority map[string]int32
Retries int
}
// CustomPickerOption func apply option to CustomPickerOptions
type CustomPickerOption func(*CustomPickerOptions)
// CustomPickerMaxLag specifies the max replication lag for which a node can be used
func CustomPickerMaxLag(n int) CustomPickerOption {
return func(o *CustomPickerOptions) {
o.MaxLag = n
}
}
// NewCustomPicker creates new node picker
func NewCustomPicker[T Querier](opts ...CustomPickerOption) *CustomPicker[Querier] {
options := CustomPickerOptions{}
for _, o := range opts {
o(&options)
}
return &CustomPicker[Querier]{opts: options}
}
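// Usage sketch: prefer nodes whose replication lag does not exceed 100 and,
// among those, fall back to per-node priority and latency as implemented in
// CompareNodes below:
//
//	picker := NewCustomPicker[Querier](CustomPickerMaxLag(100))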
// CustomPicker holds node picker options
type CustomPicker[T Querier] struct {
opts CustomPickerOptions
}
// PickNode is used to return a specific node
func (p *CustomPicker[T]) PickNode(cnodes []hasql.CheckedNode[T]) hasql.CheckedNode[T] {
for _, n := range cnodes {
fmt.Printf("node %s\n", n.Node.String())
}
return cnodes[0]
}
func (p *CustomPicker[T]) getPriority(nodeName string) int32 {
if prio, ok := p.opts.Priority[nodeName]; ok {
return prio
}
return math.MaxInt32 // Default to lowest priority
}
// CompareNodes is used to sort nodes
func (p *CustomPicker[T]) CompareNodes(a, b hasql.CheckedNode[T]) int {
// Get replication lag values
aLag := a.Info.(interface{ ReplicationLag() int }).ReplicationLag()
bLag := b.Info.(interface{ ReplicationLag() int }).ReplicationLag()
// First check that the lag is lower than MaxLag
if aLag > p.opts.MaxLag && bLag > p.opts.MaxLag {
return 0 // both are equal
}
// If one node exceeds MaxLag and the other doesn't, prefer the one that doesn't
if aLag > p.opts.MaxLag {
return 1 // b is better
}
if bLag > p.opts.MaxLag {
return -1 // a is better
}
// Get node priorities
aPrio := p.getPriority(a.Node.String())
bPrio := p.getPriority(b.Node.String())
// if both priorities are equal
if aPrio == bPrio {
// First compare by replication lag
if aLag < bLag {
return -1
}
if aLag > bLag {
return 1
}
// If replication lag is equal, compare by latency
aLatency := a.Info.(interface{ Latency() time.Duration }).Latency()
bLatency := b.Info.(interface{ Latency() time.Duration }).Latency()
if aLatency < bLatency {
return -1
}
if aLatency > bLatency {
return 1
}
// If lag and latency are equal
return 0
}
// If priorities are different, prefer the node with lower priority value
if aPrio < bPrio {
return -1
}
return 1
}

View File

@@ -3,6 +3,8 @@ package codec
import (
"errors"
"gopkg.in/yaml.v3"
)
var (
@@ -66,10 +68,10 @@ func (m *RawMessage) MarshalYAML() ([]byte, error) {
}
// UnmarshalYAML sets *m to a copy of data.
func (m *RawMessage) UnmarshalYAML(data []byte) error {
func (m *RawMessage) UnmarshalYAML(n *yaml.Node) error {
if m == nil {
return errors.New("RawMessage UnmarshalYAML on nil pointer")
}
*m = append((*m)[0:0], data...)
*m = append((*m)[0:0], []byte(n.Value)...)
return nil
}

View File

@@ -1,5 +1,7 @@
package codec
import "gopkg.in/yaml.v3"
// Frame gives us the ability to define raw data to send over the pipes
type Frame struct {
Data []byte
@@ -26,8 +28,8 @@ func (m *Frame) MarshalYAML() ([]byte, error) {
}
// UnmarshalYAML set frame data
func (m *Frame) UnmarshalYAML(data []byte) error {
m.Data = append((m.Data)[0:0], data...)
func (m *Frame) UnmarshalYAML(n *yaml.Node) error {
m.Data = []byte(n.Value)
return nil
}

View File

@@ -17,7 +17,7 @@ syntax = "proto3";
package micro.codec;
option cc_enable_arenas = true;
option go_package = "go.unistack.org/micro/v4/codec;codec";
option go_package = "go.unistack.org/micro/v3/codec;codec";
option java_multiple_files = true;
option java_outer_classname = "MicroCodec";
option java_package = "micro.codec";

View File

@@ -3,7 +3,7 @@ package codec
import (
"encoding/json"
codecpb "go.unistack.org/micro-proto/v4/codec"
codecpb "go.unistack.org/micro-proto/v3/codec"
)
type noopCodec struct {

View File

@@ -3,9 +3,9 @@ package codec
import (
"context"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/tracer"
)
// Option func

View File

@@ -9,10 +9,10 @@ import (
"dario.cat/mergo"
"github.com/google/uuid"
"go.unistack.org/micro/v4/options"
mid "go.unistack.org/micro/v4/util/id"
rutil "go.unistack.org/micro/v4/util/reflect"
mtime "go.unistack.org/micro/v4/util/time"
"go.unistack.org/micro/v3/options"
mid "go.unistack.org/micro/v3/util/id"
rutil "go.unistack.org/micro/v3/util/reflect"
mtime "go.unistack.org/micro/v3/util/time"
)
type defaultConfig struct {
@@ -210,7 +210,7 @@ func fillValue(value reflect.Value, val string) error {
return err
}
value.Set(reflect.ValueOf(v))
case value.Type().String() == "time.Duration" && value.Type().PkgPath() == "go.unistack.org/micro/v4/util/time":
case value.Type().String() == "time.Duration" && value.Type().PkgPath() == "go.unistack.org/micro/v3/util/time":
v, err := mtime.ParseDuration(val)
if err != nil {
return err

View File

@@ -7,8 +7,8 @@ import (
"testing"
"time"
"go.unistack.org/micro/v4/config"
mtime "go.unistack.org/micro/v4/util/time"
"go.unistack.org/micro/v3/config"
mtime "go.unistack.org/micro/v3/util/time"
)
type cfg struct {

View File

@@ -4,11 +4,11 @@ import (
"context"
"time"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/options"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/options"
"go.unistack.org/micro/v3/tracer"
)
// Options hold the config options

View File

@@ -17,7 +17,7 @@ syntax = "proto3";
package micro.errors;
option cc_enable_arenas = true;
option go_package = "go.unistack.org/micro/v4/errors;errors";
option go_package = "go.unistack.org/micro/v3/errors;errors";
option java_multiple_files = true;
option java_outer_classname = "MicroErrors";
option java_package = "micro.errors";

27
event.go Normal file
View File

@@ -0,0 +1,27 @@
package micro
import (
"context"
"go.unistack.org/micro/v3/client"
)
// Event is used to publish messages to a topic
type Event interface {
// Publish publishes a message to the event topic
Publish(ctx context.Context, msg interface{}, opts ...client.PublishOption) error
}
type event struct {
c client.Client
topic string
}
// NewEvent creates a new event publisher
func NewEvent(topic string, c client.Client) Event {
return &event{c, topic}
}
func (e *event) Publish(ctx context.Context, msg interface{}, opts ...client.PublishOption) error {
return e.c.Publish(ctx, e.c.NewMessage(e.topic, msg), opts...)
}
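// Usage sketch (assumes an initialized client.Client named c and a message
// value msg; the topic name is illustrative):
//
//	ev := NewEvent("events.orders", c)
//	if err := ev.Publish(ctx, msg); err != nil {
//		// handle error
//	}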

View File

@@ -15,6 +15,15 @@ func FromContext(ctx context.Context) (Flow, bool) {
return c, ok
}
// MustContext returns Flow from context
func MustContext(ctx context.Context) Flow {
f, ok := FromContext(ctx)
if !ok {
panic("missing flow")
}
return f
}
// NewContext stores Flow to context
func NewContext(ctx context.Context, f Flow) context.Context {
if ctx == nil {

View File

@@ -1,5 +1,3 @@
//go:build ignore
package flow
import (

View File

@@ -1,19 +1,17 @@
//go:build ignore
package flow
import (
"context"
"fmt"
"path/filepath"
"sync"
"github.com/heimdalr/dag"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v4/util/id"
"github.com/silas/dag"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v3/util/id"
)
type microFlow struct {
@@ -22,7 +20,7 @@ type microFlow struct {
type microWorkflow struct {
opts Options
g *dag.DAG
g *dag.AcyclicGraph
steps map[string]Step
id string
status Status
@@ -34,20 +32,20 @@ func (w *microWorkflow) ID() string {
return w.id
}
func (w *microWorkflow) Steps() ([][]Step, error) {
return w.getSteps("", false)
}
func (w *microWorkflow) Status() Status {
return w.status
}
func (w *microWorkflow) AppendSteps(steps ...Step) error {
var err error
w.Lock()
defer w.Unlock()
for _, s := range steps {
w.steps[s.String()] = s
if _, err = w.g.AddVertex(s); err != nil {
return err
}
w.g.Add(s)
}
for _, dst := range steps {
@@ -56,13 +54,18 @@ func (w *microWorkflow) AppendSteps(steps ...Step) error {
if !ok {
return ErrStepNotExists
}
if err = w.g.AddEdge(src.String(), dst.String()); err != nil {
return err
}
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return err
}
w.g.TransitiveReduction()
w.Unlock()
return nil
}
@@ -71,11 +74,10 @@ func (w *microWorkflow) RemoveSteps(steps ...Step) error {
// TODO: handle case when some step requires or required by removed step
w.Lock()
defer w.Unlock()
for _, s := range steps {
delete(w.steps, s.String())
w.g.DeleteVertex(s.String())
w.g.Remove(s)
}
for _, dst := range steps {
@@ -84,34 +86,91 @@ func (w *microWorkflow) RemoveSteps(steps ...Step) error {
if !ok {
return ErrStepNotExists
}
w.g.AddEdge(src.String(), dst.String())
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return err
}
w.g.TransitiveReduction()
w.Unlock()
return nil
}
func (w *microWorkflow) getSteps(start string, reverse bool) ([][]Step, error) {
var steps [][]Step
var root dag.Vertex
var err error
fn := func(n dag.Vertex, idx int) error {
if idx == 0 {
steps = make([][]Step, 1)
steps[0] = make([]Step, 0, 1)
} else if idx >= len(steps) {
tsteps := make([][]Step, idx+1)
copy(tsteps, steps)
steps = tsteps
steps[idx] = make([]Step, 0, 1)
}
steps[idx] = append(steps[idx], n.(Step))
return nil
}
if start != "" {
var ok bool
w.RLock()
root, ok = w.steps[start]
w.RUnlock()
if !ok {
return nil, ErrStepNotExists
}
} else {
root, err = w.g.Root()
if err != nil {
return nil, err
}
}
if reverse {
err = w.g.SortedReverseDepthFirstWalk([]dag.Vertex{root}, fn)
} else {
err = w.g.SortedDepthFirstWalk([]dag.Vertex{root}, fn)
}
if err != nil {
return nil, err
}
return steps, nil
}
func (w *microWorkflow) Abort(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusAborted.String())})
}
func (w *microWorkflow) Suspend(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusSuspend.String())})
}
func (w *microWorkflow) Resume(ctx context.Context, id string) error {
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", id))
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+id)
return workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusRunning.String())})
}
func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...ExecuteOption) (string, error) {
w.Lock()
if !w.init {
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
w.Unlock()
return "", err
}
w.g.TransitiveReduction()
w.init = true
}
w.Unlock()
@@ -121,11 +180,26 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
return "", err
}
// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))
stepStore := store.NewNamespaceStore(w.opts.Store, "steps"+w.opts.Store.Options().Separator+eid)
workflowStore := store.NewNamespaceStore(w.opts.Store, "workflows"+w.opts.Store.Options().Separator+eid)
options := NewExecuteOptions(opts...)
steps, err := w.getSteps(options.Start, options.Reverse)
if err != nil {
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
return "", err
}
var wg sync.WaitGroup
cherr := make(chan error, 1)
chstatus := make(chan Status, 1)
nctx, cancel := context.WithCancel(ctx)
defer cancel()
nopts := make([]ExecuteOption, 0, len(opts)+5)
nopts = append(nopts,
@@ -135,274 +209,143 @@ func (w *microWorkflow) Execute(ctx context.Context, req *Message, opts ...Execu
ExecuteMeter(w.opts.Meter),
)
nopts = append(nopts, opts...)
done := make(chan struct{})
if werr := workflowStore.Write(ctx, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Error(ctx, "store error: %v", werr)
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
return eid, werr
}
var startID string
if options.Start == "" {
mp := w.g.GetRoots()
if len(mp) != 1 {
return eid, ErrStepNotExists
}
for k := range mp {
startID = k
}
} else {
for k, v := range w.g.GetVertices() {
if v == options.Start {
startID = k
for idx := range steps {
for nidx := range steps[idx] {
cstep := steps[idx][nidx]
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusPending.String())}); werr != nil {
return eid, werr
}
}
}
if startID == "" {
return eid, ErrStepNotExists
}
if options.Async {
go w.handleWorkflow(startID, nopts...)
return eid, nil
}
return eid, w.handleWorkflow(startID, nopts...)
}
func (w *microWorkflow) handleWorkflow(startID string, opts ...ExecuteOption) error {
w.RLock()
defer w.RUnlock()
// stepStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("steps", eid))
// workflowStore := store.NewNamespaceStore(w.opts.Store, filepath.Join("workflows", eid))
// Get IDs of all descendant vertices.
flowIDs, errDes := w.g.GetDescendants(startID)
if errDes != nil {
return errDes
}
// inputChannels provides for input channels for each of the descendant vertices (+ the start-vertex).
inputChannels := make(map[string]chan FlowResult, len(flowIDs)+1)
// Iterate vertex IDs and create an input channel for each of them and a single
// output channel for leaves. Note, this "pre-flight" is needed to ensure we
// really have an input channel regardless of how we traverse the tree and spawn
// workers.
leafCount := 0
for id := range flowIDs {
// Get all parents of this vertex.
parents, errPar := w.g.GetParents(id)
if errPar != nil {
return errPar
}
// Create a buffered input channel that has capacity for all parent results.
inputChannels[id] = make(chan FlowResult, len(parents))
if ok, err := w.g.IsLeaf(id); ok && err == nil {
leafCount += 1
}
}
// outputChannel caries the results of leaf vertices.
outputChannel := make(chan FlowResult, leafCount)
// To also process the start vertex and to have its results being passed to its
// children, add it to the vertex IDs. Also add an input channel for the start
// vertex and feed the inputs to this channel.
flowIDs[startID] = struct{}{}
inputChannels[startID] = make(chan FlowResult, len(inputs))
for _, i := range inputs {
inputChannels[startID] <- i
}
wg := sync.WaitGroup{}
// Iterate all vertex IDs (now incl. start vertex) and handle each worker (incl.
// inputs and outputs) in a separate goroutine.
for id := range flowIDs {
// Get all children of this vertex that later need to be notified. Note, we
// collect all children before the goroutine to be able to release the read
// lock as early as possible.
children, errChildren := w.g.GetChildren(id)
if errChildren != nil {
return errChildren
}
// Remember to wait for this goroutine.
wg.Add(1)
go func(id string) {
// Get this vertex's input channel.
// Note, only concurrent read here, which is fine.
c := inputChannels[id]
// Await all parent inputs and stuff them into a slice.
parentCount := cap(c)
parentResults := make([]FlowResult, parentCount)
for i := 0; i < parentCount; i++ {
parentResults[i] = <-c
}
// Execute the worker.
errWorker := callback(w.g, id, parentResults)
if errWorker != nil {
return errWorker
}
// Send this worker's FlowResult onto all children's input channels or, if it is
// a leaf (i.e. no children), send the result onto the output channel.
if len(children) > 0 {
for child := range children {
inputChannels[child] <- flowResult
go func() {
for idx := range steps {
for nidx := range steps[idx] {
wStatus := &codec.Frame{}
if werr := workflowStore.Read(w.opts.Context, "status", wStatus); werr != nil {
cherr <- werr
return
}
} else {
outputChannel <- flowResult
}
// "Sign off".
wg.Done()
}(id)
}
// Wait for all go routines to finish.
wg.Wait()
// Await all leaf vertex results and stuff them into a slice.
resultCount := cap(outputChannel)
results := make([]FlowResult, resultCount)
for i := 0; i < resultCount; i++ {
results[i] = <-outputChannel
}
/*
go func() {
for idx := range steps {
for nidx := range steps[idx] {
wStatus := &codec.Frame{}
if werr := workflowStore.Read(w.opts.Context, "status", wStatus); werr != nil {
cherr <- werr
return
}
if status := StringStatus[string(wStatus.Data)]; status != StatusRunning {
chstatus <- status
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Tracef(nctx, "will be executed %v", steps[idx][nidx])
}
cstep := steps[idx][nidx]
// nolint: nestif
if len(cstep.Requires()) == 0 {
wg.Add(1)
go func(step Step) {
defer wg.Done()
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "req"), req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := step.Execute(nctx, req, nopts...)
if serr != nil {
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "rsp"), rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(step.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
cherr <- werr
return
}
}(cstep)
wg.Wait()
} else {
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "req"), req); werr != nil {
if status := StringStatus[string(wStatus.Data)]; status != StatusRunning {
chstatus <- status
return
}
if w.opts.Logger.V(logger.TraceLevel) {
w.opts.Logger.Trace(nctx, fmt.Sprintf("will be executed %v", steps[idx][nidx]))
}
cstep := steps[idx][nidx]
// nolint: nestif
if len(cstep.Requires()) == 0 {
wg.Add(1)
go func(step Step) {
defer wg.Done()
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"req", req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := cstep.Execute(nctx, req, nopts...)
rsp, serr := step.Execute(nctx, req, nopts...)
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
step.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "rsp"), rsp); werr != nil {
w.opts.Logger.Errorf(ctx, "store write error: %v", werr)
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, filepath.Join(cstep.ID(), "status"), &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
if werr := stepStore.Write(ctx, step.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
}(cstep)
wg.Wait()
} else {
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"req", req); werr != nil {
cherr <- werr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusRunning.String())}); werr != nil {
cherr <- werr
return
}
rsp, serr := cstep.Execute(nctx, req, nopts...)
if serr != nil {
cstep.SetStatus(StatusFailure)
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", serr); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Error(ctx, "store write error", werr)
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil && w.opts.Logger.V(logger.ErrorLevel) {
w.opts.Logger.Error(ctx, "store write error", werr)
}
cherr <- serr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"rsp", rsp); werr != nil {
w.opts.Logger.Error(ctx, "store write error", werr)
cherr <- werr
return
}
if werr := stepStore.Write(ctx, cstep.ID()+w.opts.Store.Options().Separator+"status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
cherr <- werr
return
}
}
}
close(done)
}()
if options.Async {
return eid, nil
}
close(done)
}()
logger.Tracef(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
case cerr := <-cherr:
err = cerr
case <-done:
close(cherr)
case <-chstatus:
close(chstatus)
return eid, nil
}
if options.Async {
return eid, nil
}
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Errorf(w.opts.Context, "store error: %v", werr)
}
logger.DefaultLogger.Trace(ctx, "wait for finish or error")
select {
case <-nctx.Done():
err = nctx.Err()
case cerr := <-cherr:
err = cerr
case <-done:
close(cherr)
case <-chstatus:
close(chstatus)
return eid, nil
}
switch {
case nctx.Err() != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusAborted.String())}); werr != nil {
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
*/
return err
case err == nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusSuccess.String())}); werr != nil {
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
case err != nil:
if werr := workflowStore.Write(w.opts.Context, "status", &codec.Frame{Data: []byte(StatusFailure.String())}); werr != nil {
w.opts.Logger.Error(w.opts.Context, "store write error", werr)
}
}
return eid, err
}
// NewFlow create new flow
@@ -442,11 +385,11 @@ func (f *microFlow) WorkflowList(ctx context.Context) ([]Workflow, error) {
}
func (f *microFlow) WorkflowCreate(ctx context.Context, id string, steps ...Step) (Workflow, error) {
w := &microWorkflow{opts: f.opts, id: id, g: &dag.DAG{}, steps: make(map[string]Step, len(steps))}
w := &microWorkflow{opts: f.opts, id: id, g: &dag.AcyclicGraph{}, steps: make(map[string]Step, len(steps))}
for _, s := range steps {
w.steps[s.String()] = s
w.g.AddVertex(s)
w.g.Add(s)
}
for _, dst := range steps {
@@ -455,11 +398,14 @@ func (f *microFlow) WorkflowCreate(ctx context.Context, id string, steps ...Step
if !ok {
return nil, ErrStepNotExists
}
w.g.AddEdge(src.String(), dst.String())
w.g.Connect(dag.BasicEdge(src, dst))
}
}
w.g.ReduceTransitively()
if err := w.g.Validate(); err != nil {
return nil, err
}
w.g.TransitiveReduction()
w.init = true

View File

@@ -1,5 +1,5 @@
// Package flow is an interface used for saga pattern microservice workflow
package flow // import "go.unistack.org/micro/v4/flow"
package flow
import (
"context"
@@ -7,7 +7,7 @@ import (
"sync"
"sync/atomic"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/metadata"
)
var (
@@ -125,6 +125,8 @@ type Workflow interface {
AppendSteps(steps ...Step) error
// Status returns workflow status
Status() Status
// Steps returns steps slice where parallel steps returned on the same level
Steps() ([][]Step, error)
// Suspend suspends execution
Suspend(ctx context.Context, id string) error
// Resume resumes execution

View File

@@ -4,11 +4,11 @@ import (
"context"
"time"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v3/tracer"
)
// Option func
@@ -123,6 +123,8 @@ type ExecuteOptions struct {
Start string
// Timeout for execution
Timeout time.Duration
// Reverse execution
Reverse bool
// Async enables async execution
Async bool
}
@@ -165,6 +167,13 @@ func ExecuteContext(ctx context.Context) ExecuteOption {
}
}
// ExecuteReverse says that dag must be run in reverse order
func ExecuteReverse(b bool) ExecuteOption {
return func(o *ExecuteOptions) {
o.Reverse = b
}
}
// ExecuteTimeout pass timeout time.Duration for execution
func ExecuteTimeout(td time.Duration) ExecuteOption {
return func(o *ExecuteOptions) {

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v3/logger"
)
func TestFSMStart(t *testing.T) {

36
go.mod
View File

@@ -1,35 +1,43 @@
module go.unistack.org/micro/v4
module go.unistack.org/micro/v3
go 1.24
go 1.22.0
require (
dario.cat/mergo v1.0.1
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/KimMachineGun/automemlimit v0.7.0
github.com/goccy/go-yaml v1.17.1
github.com/KimMachineGun/automemlimit v0.6.1
github.com/ash3in/uuidv8 v1.2.0
github.com/google/uuid v1.6.0
github.com/matoous/go-nanoid v1.5.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5
github.com/spf13/cast v1.7.1
github.com/stretchr/testify v1.10.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.unistack.org/micro-proto/v4 v4.1.0
go.unistack.org/micro-proto/v3 v3.4.1
golang.org/x/sync v0.10.0
golang.yandex/hasql/v2 v2.1.0
google.golang.org/grpc v1.69.4
google.golang.org/protobuf v1.36.3
google.golang.org/grpc v1.69.2
google.golang.org/protobuf v1.36.1
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/cilium/ebpf v0.16.0 // indirect
github.com/containerd/cgroups/v3 v3.0.4 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sys v0.29.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/testify v1.10.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/sys v0.28.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

76
go.sum
View File

@@ -2,20 +2,39 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/ash3in/uuidv8 v1.2.0 h1:2oogGdtCPwaVtyvPPGin4TfZLtOGE5F+W++E880G6SI=
github.com/ash3in/uuidv8 v1.2.0/go.mod h1:BnU0wJBxnzdEKmVg4xckBkD+VZuecTFTUP3M0dWgyY4=
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4=
github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -26,10 +45,19 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/matoous/go-nanoid v1.5.1 h1:aCjdvTyO9LLnTIi0fgdXhOPPvOHjpXN6Ik9DaNjIct4=
github.com/matoous/go-nanoid v1.5.1/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U=
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
@@ -38,34 +66,38 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5 h1:G/FZtUu7a6NTWl3KUHMV9jkLAh/Rvtf03NWMHaEDl+E=
github.com/silas/dag v0.0.0-20220518035006-a7e85ada93c5/go.mod h1:7RTUFBdIRC9nZ7/3RyRNH1bdqIShrDejd1YbLwgPS+I=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.unistack.org/micro-proto/v4 v4.1.0 h1:qPwL2n/oqh9RE3RTTDgt28XK3QzV597VugQPaw9lKUk=
go.unistack.org/micro-proto/v4 v4.1.0/go.mod h1:ArmK7o+uFvxSY3dbJhKBBX4Pm1rhWdLEFf3LxBrMtec=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.unistack.org/micro-proto/v3 v3.4.1 h1:UTjLSRz2YZuaHk9iSlVqqsA50JQNAEK2ZFboGqtEa9Q=
go.unistack.org/micro-proto/v3 v3.4.1/go.mod h1:okx/cnOhzuCX0ggl/vToatbCupi0O44diiiLLsZ93Zo=
golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU=
golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.yandex/hasql/v2 v2.1.0 h1:7CaFFWeHoK5TvA+QvZzlKHlIN5sqNpqM8NSrXskZD/k=
golang.yandex/hasql/v2 v2.1.0/go.mod h1:3Au1AxuJDCTXmS117BpbI6e+70kGWeyLR1qJAH6HdtA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -1,117 +0,0 @@
package metadata
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
)
type wrapper struct {
keys []string
client.Client
}
func NewClientWrapper(keys ...string) client.Wrapper {
return func(c client.Client) client.Client {
handler := &wrapper{
Client: c,
keys: keys,
}
return handler
}
}
func NewClientCallWrapper(keys ...string) client.CallWrapper {
return func(fn client.CallFunc) client.CallFunc {
return func(ctx context.Context, addr string, req client.Request, rsp interface{}, opts client.CallOptions) error {
if keys == nil {
return fn(ctx, addr, req, rsp, opts)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, addr, req, rsp, opts)
}
}
}
func (w *wrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if w.keys == nil {
return w.Client.Call(ctx, req, rsp, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Call(ctx, req, rsp, opts...)
}
func (w *wrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if w.keys == nil {
return w.Client.Stream(ctx, req, opts...)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range w.keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return w.Client.Stream(ctx, req, opts...)
}
func NewServerHandlerWrapper(keys ...string) server.HandlerWrapper {
return func(fn server.HandlerFunc) server.HandlerFunc {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if keys == nil {
return fn(ctx, req, rsp)
}
if imd, iok := metadata.FromIncomingContext(ctx); iok && imd != nil {
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(len(imd))
}
for _, k := range keys {
if v := imd.Get(k); v != nil {
omd.Set(k, v...)
}
}
if !ook {
ctx = metadata.NewOutgoingContext(ctx, omd)
}
}
return fn(ctx, req, rsp)
}
}
}
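For reference, a minimal usage sketch of these wrappers; the import path go.unistack.org/micro/v4/hooks/metadata is an assumption (it is not shown in this diff), and how the returned wrappers are registered depends on the client/server options in use.
package main
import (
	mdhook "go.unistack.org/micro/v4/hooks/metadata" // assumed import path
)
func main() {
	// each wrapper copies the listed keys from incoming to outgoing
	// metadata for every call / handled request
	_ = mdhook.NewClientWrapper("x-request-id", "x-real-ip")
	_ = mdhook.NewClientCallWrapper("x-request-id")
	_ = mdhook.NewServerHandlerWrapper("x-request-id")
}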


@@ -1,63 +0,0 @@
package recovery
import (
"context"
"fmt"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
func NewOptions(opts ...Option) Options {
options := Options{
ServerHandlerFn: DefaultServerHandlerFn,
}
for _, o := range opts {
o(&options)
}
return options
}
type Options struct {
ServerHandlerFn func(context.Context, server.Request, interface{}, error) error
}
type Option func(*Options)
func ServerHandlerFunc(fn func(context.Context, server.Request, interface{}, error) error) Option {
return func(o *Options) {
o.ServerHandlerFn = fn
}
}
var DefaultServerHandlerFn = func(ctx context.Context, req server.Request, rsp interface{}, err error) error {
return errors.BadRequest("", "%v", err)
}
var Hook = NewHook()
type hook struct {
opts Options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) (err error) {
defer func() {
r := recover()
switch verr := r.(type) {
case nil:
return
case error:
err = w.opts.ServerHandlerFn(ctx, req, rsp, verr)
default:
err = w.opts.ServerHandlerFn(ctx, req, rsp, fmt.Errorf("%v", r))
}
}()
err = next(ctx, req, rsp)
return err
}
}
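A minimal sketch of plugging a custom handler function into this hook; the hooks/recovery import path is an assumption, while errors.BadRequest and req.Service() are the same calls the package itself relies on.
package main
import (
	"context"
	"go.unistack.org/micro/v4/errors"
	recovery "go.unistack.org/micro/v4/hooks/recovery" // assumed import path
	"go.unistack.org/micro/v4/server"
)
func main() {
	h := recovery.NewHook(recovery.ServerHandlerFunc(
		func(_ context.Context, req server.Request, _ interface{}, err error) error {
			// surface the recovered panic as a client-visible error
			return errors.BadRequest(req.Service(), "recovered: %v", err)
		},
	))
	// wrap any server.FuncHandler; a panic inside it becomes an error
	wrapped := h.ServerHandler(func(_ context.Context, _ server.Request, _ interface{}) error {
		panic("boom")
	})
	_ = wrapped
}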


@@ -1,103 +0,0 @@
package requestid
import (
"context"
"net/textproto"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/server"
"go.unistack.org/micro/v4/util/id"
)
type XRequestIDKey struct{}
// DefaultMetadataKey contains metadata key
var DefaultMetadataKey = textproto.CanonicalMIMEHeaderKey("x-request-id")
// DefaultMetadataFunc will be used if the user does not provide their own func to fill metadata
var DefaultMetadataFunc = func(ctx context.Context) (context.Context, error) {
var xid string
cid, cok := ctx.Value(XRequestIDKey{}).(string)
if cok && cid != "" {
xid = cid
}
imd, iok := metadata.FromIncomingContext(ctx)
if !iok || imd == nil {
imd = metadata.New(1)
ctx = metadata.NewIncomingContext(ctx, imd)
}
omd, ook := metadata.FromOutgoingContext(ctx)
if !ook || omd == nil {
omd = metadata.New(1)
ctx = metadata.NewOutgoingContext(ctx, omd)
}
if xid == "" {
xid = imd.GetJoined(DefaultMetadataKey)
if xid == "" {
xid = omd.GetJoined(DefaultMetadataKey)
}
}
if xid == "" {
var err error
xid, err = id.New()
if err != nil {
return ctx, err
}
}
if !cok {
ctx = context.WithValue(ctx, XRequestIDKey{}, xid)
}
if !iok {
imd.Set(DefaultMetadataKey, xid)
}
if !ook {
omd.Set(DefaultMetadataKey, xid)
}
return ctx, nil
}
type hook struct{}
func NewHook() *hook {
return &hook{}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp)
}
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return err
}
return next(ctx, req, rsp, opts...)
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
var err error
if ctx, err = DefaultMetadataFunc(ctx); err != nil {
return nil, err
}
return next(ctx, req, opts...)
}
}
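A short usage sketch; go.unistack.org/micro/v4/hooks/requestid is the import path this diff uses elsewhere (see the sql wrapper below), and the rest is just the functions defined in this file.
package main
import (
	"context"
	"fmt"
	"go.unistack.org/micro/v4/hooks/requestid"
	"go.unistack.org/micro/v4/metadata"
)
func main() {
	// generates an id when none is present and mirrors it into both
	// the incoming and outgoing metadata under x-request-id
	ctx, err := requestid.DefaultMetadataFunc(context.Background())
	if err != nil {
		panic(err)
	}
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		fmt.Println(md.Get(requestid.DefaultMetadataKey))
	}
}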


@@ -1,34 +0,0 @@
package requestid
import (
"context"
"slices"
"testing"
"go.unistack.org/micro/v4/metadata"
)
func TestDefaultMetadataFunc(t *testing.T) {
ctx := context.TODO()
nctx, err := DefaultMetadataFunc(ctx)
if err != nil {
t.Fatalf("%v", err)
}
imd, ok := metadata.FromIncomingContext(nctx)
if !ok {
t.Fatalf("md missing in incoming context")
}
omd, ok := metadata.FromOutgoingContext(nctx)
if !ok {
t.Fatalf("md missing in outgoing context")
}
iv := imd.Get(DefaultMetadataKey)
ov := omd.Get(DefaultMetadataKey)
if !slices.Equal(iv, ov) {
t.Fatalf("missing metadata key value %v != %v", iv, ov)
}
}


@@ -1,51 +0,0 @@
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"runtime"
)
//go:generate sh -c "go run gen.go > wrap_gen.go"
// namedValueToValue converts driver arguments of NamedValue format to Value format. Implemented in the same way as in
// database/sql ctxutil.go.
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
return nil, errors.New("sql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
// namedValueToLabels converts driver arguments to an interface{} slice
func namedValueToLabels(named []driver.NamedValue) []interface{} {
largs := make([]interface{}, 0, len(named)*2)
var name string
for _, param := range named {
if param.Name != "" {
name = param.Name
} else {
name = fmt.Sprintf("$%d", param.Ordinal)
}
largs = append(largs, fmt.Sprintf("%s=%v", name, param.Value))
}
return largs
}
// getCallerName gets the name of the function A where A() -> B() -> getCallerName()
func getCallerName() string {
pc, _, _, ok := runtime.Caller(3)
details := runtime.FuncForPC(pc)
var callerName string
if ok && details != nil {
callerName = details.Name()
} else {
callerName = labelUnknown
}
return callerName
}
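As an illustration, an in-package test sketch of the label format namedValueToLabels produces for named versus positional arguments (placed in the same package because the helper is unexported):
package sql
import (
	"database/sql/driver"
	"testing"
)
func TestNamedValueToLabels(t *testing.T) {
	args := []driver.NamedValue{
		{Name: "id", Ordinal: 1, Value: 10},
		{Ordinal: 2, Value: "x"},
	}
	got := namedValueToLabels(args)
	// named args render as name=value, positional ones as $ordinal=value
	if len(got) != 2 || got[0] != "id=10" || got[1] != "$2=x" {
		t.Fatalf("unexpected labels: %v", got)
	}
}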


@@ -1,467 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
"go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Conn = (*wrapperConn)(nil)
_ driver.ConnBeginTx = (*wrapperConn)(nil)
_ driver.ConnPrepareContext = (*wrapperConn)(nil)
_ driver.Pinger = (*wrapperConn)(nil)
_ driver.Validator = (*wrapperConn)(nil)
_ driver.Queryer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.QueryerContext = (*wrapperConn)(nil)
_ driver.Execer = (*wrapperConn)(nil) // nolint:staticcheck
_ driver.ExecerContext = (*wrapperConn)(nil)
// _ driver.Connector
// _ driver.Driver
// _ driver.DriverContext
)
// wrapperConn defines a wrapper for driver.Conn
type wrapperConn struct {
d *wrapperDriver
dname string
conn driver.Conn
opts Options
ctx context.Context
//span tracer.Span
}
// Close implements driver.Conn Close
func (w *wrapperConn) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.conn.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// Begin implements driver.Conn Begin
func (w *wrapperConn) Begin() (driver.Tx, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
labels := []string{labelMethod, "Begin"}
ts := time.Now()
tx, err := w.conn.Begin() // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Begin", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx}, nil
}
// BeginTx implements driver.ConnBeginTx BeginTx
func (w *wrapperConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
name := getQueryName(ctx)
nctx, span := w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
span.AddLabels("db.method", "BeginTx")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "BeginTx", labelQuery, name}
connBeginTx, ok := w.conn.(driver.ConnBeginTx)
if !ok {
return w.Begin()
}
ts := time.Now()
tx, err := connBeginTx.BeginTx(nctx, opts)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "BeginTx", getCallerName(), td, err)...)
}
*/
return &wrapperTx{tx: tx, opts: w.opts, ctx: ctx, span: span}, nil
}
// Prepare implements driver.Conn Prepare
func (w *wrapperConn) Prepare(query string) (driver.Stmt, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Prepare", labelQuery, getCallerName()}
ts := time.Now()
stmt, err := w.conn.Prepare(query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Prepare", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// PrepareContext implements driver.ConnPrepareContext PrepareContext
func (w *wrapperConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "PrepareContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "PrepareContext", labelQuery, name}
conn, ok := w.conn.(driver.ConnPrepareContext)
if !ok {
return w.Prepare(query)
}
ts := time.Now()
stmt, err := conn.PrepareContext(nctx, query)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return nil, err
}
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "PrepareContext", getCallerName(), td, err)...)
}
*/
return wrapStmt(stmt, query, w.opts), nil
}
// Exec implements driver.Execer Exec
func (w *wrapperConn) Exec(query string, args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec", labelQuery, getCallerName()}
// nolint:staticcheck
conn, ok := w.conn.(driver.Execer)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.Exec(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// ExecContext implements driver.ExecerContext ExecContext
func (w *wrapperConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
conn, ok := w.conn.(driver.ExecerContext)
if !ok {
// nolint:staticcheck
return nil, driver.ErrSkip
}
ts := time.Now()
res, err := conn.ExecContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", getCallerName(), td, err)...)
}
*/
return res, err
}
// Ping implements driver.Pinger Ping
func (w *wrapperConn) Ping(ctx context.Context) error {
conn, ok := w.conn.(driver.Pinger)
if !ok {
// fallback path to check db alive
pc, err := w.d.Open(w.dname)
if err != nil {
return err
}
return pc.Close()
}
var nctx context.Context //nolint:gosimple
nctx = ctx
/*
var span tracer.Span
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "Ping")
defer span.Finish()
*/
labels := []string{labelMethod, "Ping"}
ts := time.Now()
err := conn.Ping(nctx)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
// span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Ping", getCallerName(), td, err)...)
}
*/
return err
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
return nil
}
// Query implements driver.Queryer Query
func (w *wrapperConn) Query(query string, args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
// nolint:staticcheck
conn, ok := w.conn.(driver.Queryer)
if !ok {
return nil, driver.ErrSkip
}
labels := []string{labelMethod, "Query", labelQuery, getCallerName()}
ts := time.Now()
rows, err := conn.Query(query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// QueryContext implements driver.QueryerContext QueryContext
func (w *wrapperConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
conn, ok := w.conn.(driver.QueryerContext)
if !ok {
return nil, driver.ErrSkip
}
ts := time.Now()
rows, err := conn.QueryContext(nctx, query, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", getCallerName(), td, err)...)
}
*/
return rows, err
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperConn) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.conn.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// IsValid implements driver.Validator
func (w *wrapperConn) IsValid() bool {
v, ok := w.conn.(driver.Validator)
if !ok {
return w.conn != nil
}
return v.IsValid()
}
func (w *wrapperConn) ResetSession(ctx context.Context) error {
s, ok := w.conn.(driver.SessionResetter)
if !ok {
return driver.ErrSkip
}
return s.ResetSession(ctx)
}


@@ -1,94 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"time"
)
var (
// _ driver.DriverContext = (*wrapperDriver)(nil)
// _ driver.Connector = (*wrapperDriver)(nil)
)
/*
type conn interface {
driver.Pinger
driver.Execer
driver.ExecerContext
driver.Queryer
driver.QueryerContext
driver.Conn
driver.ConnPrepareContext
driver.ConnBeginTx
}
*/
// wrapperDriver defines a wrapper for driver.Driver
type wrapperDriver struct {
driver driver.Driver
opts Options
ctx context.Context
}
// NewWrapper creates and returns a new SQL driver with passed capabilities
func NewWrapper(d driver.Driver, opts ...Option) driver.Driver {
return &wrapperDriver{driver: d, opts: NewOptions(opts...), ctx: context.Background()}
}
type wrappedConnector struct {
connector driver.Connector
// name string
opts Options
ctx context.Context
}
func NewWrapperConnector(c driver.Connector, opts ...Option) driver.Connector {
return &wrappedConnector{connector: c, opts: NewOptions(opts...), ctx: context.Background()}
}
// Connect implements driver.Connector Connect
func (w *wrappedConnector) Connect(ctx context.Context) (driver.Conn, error) {
return w.connector.Connect(ctx)
}
// Driver implements driver.Connector Driver
func (w *wrappedConnector) Driver() driver.Driver {
return w.connector.Driver()
}
/*
// Connect implements driver.Driver OpenConnector
func (w *wrapperDriver) OpenConnector(name string) (driver.Conn, error) {
return &wrapperConnector{driver: w.driver, name: name, opts: w.opts}, nil
}
*/
// Open implements driver.Driver Open
func (w *wrapperDriver) Open(name string) (driver.Conn, error) {
// ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // Ensure eventual timeout
// defer cancel()
/*
connector, err := w.OpenConnector(name)
if err != nil {
return nil, err
}
return connector.Connect(ctx)
*/
ts := time.Now()
c, err := w.driver.Open(name)
td := time.Since(ts)
/*
if w.opts.LoggerEnabled {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Open", getCallerName(), td, err)...)
}
*/
_ = td
if err != nil {
return nil, err
}
return wrapConn(c, w.opts), nil
}
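A sketch of putting the wrapper in front of an existing driver; baseDriver and the DSN are placeholders, the hooks/sql import path is an assumption, and NewWrapper, DatabaseHost and DatabaseName are the options defined in this package.
package main
import (
	"database/sql"
	"database/sql/driver"
	sqlhook "go.unistack.org/micro/v4/hooks/sql" // assumed import path
)
// baseDriver stands in for any real driver.Driver (postgres, mysql, ...).
var baseDriver driver.Driver
func main() {
	// register the wrapped driver under its own name, then open it as usual
	sql.Register("micro-wrapped", sqlhook.NewWrapper(baseDriver,
		sqlhook.DatabaseHost("db1"),
		sqlhook.DatabaseName("orders"),
	))
	db, err := sql.Open("micro-wrapped", "dsn-goes-here")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}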


@@ -1,167 +0,0 @@
//go:build ignore
package main
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"sort"
"strings"
)
var connIfaces = []string{
"driver.ConnBeginTx",
"driver.ConnPrepareContext",
"driver.Execer",
"driver.ExecerContext",
"driver.NamedValueChecker",
"driver.Pinger",
"driver.Queryer",
"driver.QueryerContext",
"driver.SessionResetter",
"driver.Validator",
}
var stmtIfaces = []string{
"driver.StmtExecContext",
"driver.StmtQueryContext",
"driver.ColumnConverter",
"driver.NamedValueChecker",
}
func getHash(s []string) string {
h := md5.New()
io.WriteString(h, strings.Join(s, "|"))
return fmt.Sprintf("%x", h.Sum(nil))
}
func main() {
comboConn := all(connIfaces)
sort.Slice(comboConn, func(i, j int) bool {
return len(comboConn[i]) < len(comboConn[j])
})
comboStmt := all(stmtIfaces)
sort.Slice(comboStmt, func(i, j int) bool {
return len(comboStmt[i]) < len(comboStmt[j])
})
b := bytes.NewBuffer(nil)
b.WriteString("// Code generated. DO NOT EDIT.\n\n")
b.WriteString("package sql\n\n")
b.WriteString(`import "database/sql/driver"`)
b.WriteString("\n\n")
b.WriteString("func wrapConn(dc driver.Conn, opts Options) driver.Conn {\n")
b.WriteString("\tc := &wrapperConn{conn: dc, opts: opts}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := dc.(wrapConn%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Conn\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
b.WriteString("c")
} else if idx == 0 {
b.WriteString("c")
} else {
b.WriteString("c")
}
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboConn) - 1; idx >= 0; idx-- {
ifaces := comboConn[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapConn%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
b.WriteString("func wrapStmt(stmt driver.Stmt, query string, opts Options) driver.Stmt {\n")
b.WriteString("\tc := &wrapperStmt{stmt: stmt, query: query, opts: opts}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("\tif _, ok := stmt.(wrapStmt%04d_%s); ok {\n", n, h))
b.WriteString("\treturn struct {\n")
b.WriteString("\t\tdriver.Stmt\n")
b.WriteString(fmt.Sprintf("\t\t\t%s", strings.Join(ifaces, "\n\t\t\t")))
b.WriteString("\t\t\n}{")
for idx := range ifaces {
if idx > 0 {
b.WriteString(", ")
b.WriteString("c")
} else if idx == 0 {
b.WriteString("c")
} else {
b.WriteString("c")
}
}
b.WriteString(", c}\n")
b.WriteString("}\n\n")
}
b.WriteString("return c\n")
b.WriteString("}\n")
for idx := len(comboStmt) - 1; idx >= 0; idx-- {
ifaces := comboStmt[idx]
n := len(ifaces)
if n == 0 {
continue
}
h := getHash(ifaces)
b.WriteString(fmt.Sprintf("// %s\n", strings.Join(ifaces, "|")))
b.WriteString(fmt.Sprintf("type wrapStmt%04d_%s interface {\n", n, h))
for _, iface := range ifaces {
b.WriteString(fmt.Sprintf("\t%s\n", iface))
}
b.WriteString("}\n\n")
}
fmt.Printf("%s\n", b.String())
}
// all returns all non-empty subsets of the given slice.
func all[T any](set []T) (subsets [][]T) {
length := uint(len(set))
for subsetBits := 1; subsetBits < (1 << length); subsetBits++ {
var subset []T
for object := uint(0); object < length; object++ {
if (subsetBits>>object)&1 == 1 {
subset = append(subset, set[object])
}
}
subsets = append(subsets, subset)
}
return subsets
}
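For example, the same subset enumeration on a two-element input, which is what makes the generator above emit 2^10-1 = 1023 conn interface combinations:
package main
import "fmt"
func main() {
	set := []string{"Pinger", "Queryer"}
	var subsets [][]string
	for bits := 1; bits < 1<<len(set); bits++ {
		var s []string
		for i := range set {
			if bits>>i&1 == 1 {
				s = append(s, set[i])
			}
		}
		subsets = append(subsets, s)
	}
	// prints [[Pinger] [Queryer] [Pinger Queryer]]
	fmt.Println(subsets)
}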


@@ -1,172 +0,0 @@
package sql
import (
"context"
"fmt"
"time"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/tracer"
)
var (
// DefaultMeterStatsInterval holds default stats interval
DefaultMeterStatsInterval = 5 * time.Second
// DefaultLoggerObserver used to prepare labels for logger
DefaultLoggerObserver = func(ctx context.Context, method string, query string, td time.Duration, err error) []interface{} {
labels := []interface{}{"db.method", method, "took", fmt.Sprintf("%v", td)}
if err != nil {
labels = append(labels, "error", err.Error())
}
if query != labelUnknown {
labels = append(labels, "query", query)
}
return labels
}
)
var (
MaxOpenConnections = "micro_sql_max_open_conn"
OpenConnections = "micro_sql_open_conn"
InuseConnections = "micro_sql_inuse_conn"
IdleConnections = "micro_sql_idle_conn"
WaitConnections = "micro_sql_waited_conn"
BlockedSeconds = "micro_sql_blocked_seconds"
MaxIdleClosed = "micro_sql_max_idle_closed"
MaxIdletimeClosed = "micro_sql_closed_max_idle"
MaxLifetimeClosed = "micro_sql_closed_max_lifetime"
meterRequestTotal = "micro_sql_request_total"
meterRequestLatencyMicroseconds = "micro_sql_latency_microseconds"
meterRequestDurationSeconds = "micro_sql_request_duration_seconds"
labelUnknown = "unknown"
labelQuery = "db_statement"
labelMethod = "db_method"
labelStatus = "status"
labelSuccess = "success"
labelFailure = "failure"
labelHost = "db_host"
labelDatabase = "db_name"
)
// Options struct holds wrapper options
type Options struct {
Logger logger.Logger
Meter meter.Meter
Tracer tracer.Tracer
DatabaseHost string
DatabaseName string
MeterStatsInterval time.Duration
LoggerLevel logger.Level
LoggerEnabled bool
LoggerObserver func(ctx context.Context, method string, name string, td time.Duration, err error) []interface{}
}
// Option func signature
type Option func(*Options)
// NewOptions create new Options struct from provided option slice
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
MeterStatsInterval: DefaultMeterStatsInterval,
LoggerLevel: logger.ErrorLevel,
LoggerObserver: DefaultLoggerObserver,
}
for _, o := range opts {
o(&options)
}
options.Meter = options.Meter.Clone(
meter.Labels(
labelHost, options.DatabaseHost,
labelDatabase, options.DatabaseName,
),
)
options.Logger = options.Logger.Clone(logger.WithAddCallerSkipCount(1))
return options
}
// MetricInterval specifies stats interval for *sql.DB
func MetricInterval(td time.Duration) Option {
return func(o *Options) {
o.MeterStatsInterval = td
}
}
func DatabaseHost(host string) Option {
return func(o *Options) {
o.DatabaseHost = host
}
}
func DatabaseName(name string) Option {
return func(o *Options) {
o.DatabaseName = name
}
}
// Meter passes meter.Meter to wrapper
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Logger passes logger.Logger to wrapper
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// LoggerEnabled enable sql logging
func LoggerEnabled(b bool) Option {
return func(o *Options) {
o.LoggerEnabled = b
}
}
// LoggerLevel passes logger.Level option
func LoggerLevel(lvl logger.Level) Option {
return func(o *Options) {
o.LoggerLevel = lvl
}
}
// LoggerObserver passes observer to fill logger fields
func LoggerObserver(obs func(context.Context, string, string, time.Duration, error) []interface{}) Option {
return func(o *Options) {
o.LoggerObserver = obs
}
}
// Tracer passes tracer.Tracer to wrapper
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
type queryNameKey struct{}
// QueryName passes query name to wrapper func
func QueryName(ctx context.Context, name string) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, queryNameKey{}, name)
}
func getQueryName(ctx context.Context) string {
if v, ok := ctx.Value(queryNameKey{}).(string); ok && v != labelUnknown {
return v
}
return getCallerName()
}
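On the caller side, the main knob is QueryName, which gives the meter labels and trace spans a stable db_statement instead of the runtime caller name; a sketch (hooks/sql import path assumed, db is any *sql.DB opened through the wrapped driver):
package main
import (
	"context"
	"database/sql"
	sqlhook "go.unistack.org/micro/v4/hooks/sql" // assumed import path
)
func listOrders(ctx context.Context, db *sql.DB) error {
	// the wrapped conn reads this name back via getQueryName
	ctx = sqlhook.QueryName(ctx, "orders_list")
	rows, err := db.QueryContext(ctx, "SELECT id FROM orders")
	if err != nil {
		return err
	}
	return rows.Close()
}
func main() {}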


@@ -1,41 +0,0 @@
package sql
import (
"context"
"database/sql"
"time"
)
type Statser interface {
Stats() sql.DBStats
}
func NewStatsMeter(ctx context.Context, db Statser, opts ...Option) {
options := NewOptions(opts...)
go func() {
ticker := time.NewTicker(options.MeterStatsInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if db == nil {
return
}
stats := db.Stats()
options.Meter.Counter(MaxOpenConnections).Set(uint64(stats.MaxOpenConnections))
options.Meter.Counter(OpenConnections).Set(uint64(stats.OpenConnections))
options.Meter.Counter(InuseConnections).Set(uint64(stats.InUse))
options.Meter.Counter(IdleConnections).Set(uint64(stats.Idle))
options.Meter.Counter(WaitConnections).Set(uint64(stats.WaitCount))
options.Meter.FloatCounter(BlockedSeconds).Set(stats.WaitDuration.Seconds())
options.Meter.Counter(MaxIdleClosed).Set(uint64(stats.MaxIdleClosed))
options.Meter.Counter(MaxIdletimeClosed).Set(uint64(stats.MaxIdleTimeClosed))
options.Meter.Counter(MaxLifetimeClosed).Set(uint64(stats.MaxLifetimeClosed))
}
}
}()
}
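Usage sketch: *sql.DB already satisfies Statser through its Stats() method, so the exporter can be started right after the pool is created (hooks/sql import path assumed):
package main
import (
	"context"
	"database/sql"
	"time"
	sqlhook "go.unistack.org/micro/v4/hooks/sql" // assumed import path
)
// startPoolStats exports pool statistics (open/idle/in-use connections,
// wait counts, close reasons) every 10s until ctx is cancelled.
func startPoolStats(ctx context.Context, db *sql.DB) {
	sqlhook.NewStatsMeter(ctx, db, sqlhook.MetricInterval(10*time.Second))
}
func main() {}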


@@ -1,287 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"fmt"
"time"
requestid "go.unistack.org/micro/v4/hooks/requestid"
"go.unistack.org/micro/v4/tracer"
)
var (
_ driver.Stmt = (*wrapperStmt)(nil)
_ driver.StmtQueryContext = (*wrapperStmt)(nil)
_ driver.StmtExecContext = (*wrapperStmt)(nil)
_ driver.NamedValueChecker = (*wrapperStmt)(nil)
)
// wrapperStmt defines a wrapper for driver.Stmt
type wrapperStmt struct {
stmt driver.Stmt
opts Options
query string
ctx context.Context
}
// Close implements driver.Stmt Close
func (w *wrapperStmt) Close() error {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Close"}
ts := time.Now()
err := w.stmt.Close()
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Close", getCallerName(), td, err)...)
}
*/
return err
}
// NumInput implements driver.Stmt NumInput
func (w *wrapperStmt) NumInput() int {
return w.stmt.NumInput()
}
// CheckNamedValue implements driver.NamedValueChecker
func (w *wrapperStmt) CheckNamedValue(v *driver.NamedValue) error {
s, ok := w.stmt.(driver.NamedValueChecker)
if !ok {
return driver.ErrSkip
}
return s.CheckNamedValue(v)
}
// Exec implements driver.Stmt Exec
func (w *wrapperStmt) Exec(args []driver.Value) (driver.Result, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Exec"}
ts := time.Now()
res, err := w.stmt.Exec(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Exec", getCallerName(), td, err)...)
}
*/
return res, err
}
// Query implements driver.Stmt Query
func (w *wrapperStmt) Query(args []driver.Value) (driver.Rows, error) {
var ctx context.Context
if w.ctx != nil {
ctx = w.ctx
} else {
ctx = context.Background()
}
_ = ctx
labels := []string{labelMethod, "Query"}
ts := time.Now()
rows, err := w.stmt.Query(args) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "Query", getCallerName(), td, err)...)
}
*/
return rows, err
}
// ColumnConverter implements driver.ColumnConverter
func (w *wrapperStmt) ColumnConverter(idx int) driver.ValueConverter {
s, ok := w.stmt.(driver.ColumnConverter) // nolint:staticcheck
if !ok {
return nil
}
return s.ColumnConverter(idx)
}
// ExecContext implements driver.StmtExecContext ExecContext
func (w *wrapperStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "ExecContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "ExecContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtExecContext); ok {
ts := time.Now()
res, err := conn.ExecContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
res, err := w.Exec(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "ExecContext", name, td, err)...)
}
*/
return res, err
}
// QueryContext implements driver.StmtQueryContext QueryContext
func (w *wrapperStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
var nctx context.Context
var span tracer.Span
name := getQueryName(ctx)
if w.ctx != nil {
nctx, span = w.opts.Tracer.Start(w.ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
} else {
nctx, span = w.opts.Tracer.Start(ctx, "sdk.database", tracer.WithSpanKind(tracer.SpanKindClient))
}
span.AddLabels("db.method", "QueryContext")
span.AddLabels("db.statement", name)
defer span.Finish()
if len(args) > 0 {
span.AddLabels("db.args", fmt.Sprintf("%v", namedValueToLabels(args)))
}
if id, ok := ctx.Value(requestid.XRequestIDKey{}).(string); ok {
span.AddLabels("x-request-id", id)
}
labels := []string{labelMethod, "QueryContext", labelQuery, name}
if conn, ok := w.stmt.(driver.StmtQueryContext); ok {
ts := time.Now()
rows, err := conn.QueryContext(nctx, args)
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}
values, err := namedValueToValue(args)
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, 0, err)...)
}
*/
return nil, err
}
ts := time.Now()
rows, err := w.Query(values) // nolint:staticcheck
td := time.Since(ts)
te := td.Seconds()
if err != nil {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelFailure)...).Inc()
span.SetStatus(tracer.SpanStatusError, err.Error())
} else {
w.opts.Meter.Counter(meterRequestTotal, append(labels, labelStatus, labelSuccess)...).Inc()
}
w.opts.Meter.Summary(meterRequestLatencyMicroseconds, labels...).Update(te)
w.opts.Meter.Histogram(meterRequestDurationSeconds, labels...).Update(te)
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(ctx, "QueryContext", name, td, err)...)
}
*/
return rows, err
}


@@ -1,63 +0,0 @@
package sql
import (
"context"
"database/sql/driver"
"time"
"go.unistack.org/micro/v4/tracer"
)
var _ driver.Tx = (*wrapperTx)(nil)
// wrapperTx defines a wrapper for driver.Tx
type wrapperTx struct {
tx driver.Tx
span tracer.Span
opts Options
ctx context.Context
}
// Commit implements driver.Tx Commit
func (w *wrapperTx) Commit() error {
ts := time.Now()
err := w.tx.Commit()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Commit", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}
// Rollback implements driver.Tx Rollback
func (w *wrapperTx) Rollback() error {
ts := time.Now()
err := w.tx.Rollback()
td := time.Since(ts)
_ = td
if w.span != nil {
if err != nil {
w.span.SetStatus(tracer.SpanStatusError, err.Error())
}
w.span.Finish()
}
/*
if w.opts.LoggerEnabled && w.opts.Logger.V(w.opts.LoggerLevel) {
w.opts.Logger.Log(w.ctx, w.opts.LoggerLevel, w.opts.LoggerObserver(w.ctx, "Rollback", getCallerName(), td, err)...)
}
*/
w.ctx = nil
return err
}


@@ -1,19 +0,0 @@
package sql
import (
"database/sql/driver"
)
/*
func wrapDriver(d driver.Driver, opts Options) driver.Driver {
if _, ok := d.(driver.DriverContext); ok {
return &wrapperDriver{driver: d, opts: opts}
}
return struct{ driver.Driver }{&wrapperDriver{driver: d, opts: opts}}
}
*/
// WrapConn allows an existing driver.Conn to be wrapped.
func WrapConn(c driver.Conn, opts ...Option) driver.Conn {
return wrapConn(c, NewOptions(opts...))
}
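WrapConn targets code that already holds a driver.Conn, typically a custom connector; a minimal sketch under that assumption (hooks/sql import path assumed):
package dbwrap
import (
	"context"
	"database/sql/driver"
	sqlhook "go.unistack.org/micro/v4/hooks/sql" // assumed import path
)
// wrappingConnector instruments every connection produced by a base connector.
type wrappingConnector struct {
	base driver.Connector
	opts []sqlhook.Option
}
func (c wrappingConnector) Connect(ctx context.Context) (driver.Conn, error) {
	cn, err := c.base.Connect(ctx)
	if err != nil {
		return nil, err
	}
	return sqlhook.WrapConn(cn, c.opts...), nil
}
func (c wrappingConnector) Driver() driver.Driver { return c.base.Driver() }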

File diff suppressed because it is too large


@@ -1,133 +0,0 @@
package validator
import (
"context"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/errors"
"go.unistack.org/micro/v4/server"
)
var (
DefaultClientErrorFunc = func(req client.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
DefaultServerErrorFunc = func(req server.Request, rsp interface{}, err error) error {
if rsp != nil {
return errors.BadGateway(req.Service(), "%v", err)
}
return errors.BadRequest(req.Service(), "%v", err)
}
)
type (
ClientErrorFunc func(client.Request, interface{}, error) error
ServerErrorFunc func(server.Request, interface{}, error) error
)
// Options struct holds wrapper options
type Options struct {
ClientErrorFn ClientErrorFunc
ServerErrorFn ServerErrorFunc
ClientValidateResponse bool
ServerValidateResponse bool
}
// Option func signature
type Option func(*Options)
func ClientValidateResponse(b bool) Option {
return func(o *Options) {
o.ClientValidateResponse = b
}
}
func ServerValidateResponse(b bool) Option {
return func(o *Options) {
o.ServerValidateResponse = b
}
}
func ClientReqErrorFn(fn ClientErrorFunc) Option {
return func(o *Options) {
o.ClientErrorFn = fn
}
}
func ServerErrorFn(fn ServerErrorFunc) Option {
return func(o *Options) {
o.ServerErrorFn = fn
}
}
func NewOptions(opts ...Option) Options {
options := Options{
ClientErrorFn: DefaultClientErrorFunc,
ServerErrorFn: DefaultServerErrorFunc,
}
for _, o := range opts {
o(&options)
}
return options
}
func NewHook(opts ...Option) *hook {
return &hook{opts: NewOptions(opts...)}
}
type validator interface {
Validate() error
}
type hook struct {
opts Options
}
func (w *hook) ClientCall(next client.FuncCall) client.FuncCall {
return func(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ClientErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp, opts...)
if v, ok := rsp.(validator); ok && w.opts.ClientValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ClientErrorFn(req, rsp, verr)
}
}
return err
}
}
func (w *hook) ClientStream(next client.FuncStream) client.FuncStream {
return func(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return nil, w.opts.ClientErrorFn(req, nil, err)
}
}
return next(ctx, req, opts...)
}
}
func (w *hook) ServerHandler(next server.FuncHandler) server.FuncHandler {
return func(ctx context.Context, req server.Request, rsp interface{}) error {
if v, ok := req.Body().(validator); ok {
if err := v.Validate(); err != nil {
return w.opts.ServerErrorFn(req, nil, err)
}
}
err := next(ctx, req, rsp)
if v, ok := rsp.(validator); ok && w.opts.ServerValidateResponse {
if verr := v.Validate(); verr != nil {
return w.opts.ServerErrorFn(req, rsp, verr)
}
}
return err
}
}
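A minimal sketch of wiring the hook around a handler; the hooks/validator import path is an assumption, the rest uses only the types referenced in this file. Any request (and, with the option below, response) type exposing Validate() error is checked.
package main
import (
	"context"
	validator "go.unistack.org/micro/v4/hooks/validator" // assumed import path
	"go.unistack.org/micro/v4/server"
)
func main() {
	h := validator.NewHook(validator.ServerValidateResponse(true))
	// values implementing Validate() error are rejected with
	// BadRequest/BadGateway errors before or after the handler runs
	wrapped := h.ServerHandler(func(_ context.Context, _ server.Request, _ interface{}) error {
		return nil
	})
	_ = wrapped
}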


@@ -4,20 +4,18 @@ package logger
type Level int8
const (
// TraceLevel usually used to find bugs, very verbose
// TraceLevel level usually used to find bugs, very verbose
TraceLevel Level = iota - 2
// DebugLevel used only when enabled debugging
// DebugLevel level used only when enabled debugging
DebugLevel
// InfoLevel used for general info about what's going on inside the application
// InfoLevel level used for general info about what's going on inside the application
InfoLevel
// WarnLevel used for non-critical entries
// WarnLevel level used for non-critical entries
WarnLevel
// ErrorLevel used for errors that should definitely be noted
// ErrorLevel level used for errors that should definitely be noted
ErrorLevel
// FatalLevel used for critical errors and then calls `os.Exit(1)`
// FatalLevel level used for critical errors and then calls `os.Exit(1)`
FatalLevel
// NoneLevel used to disable logging
NoneLevel
)
// String returns logger level string representation
@@ -35,8 +33,6 @@ func (l Level) String() string {
return "error"
case FatalLevel:
return "fatal"
case NoneLevel:
return "none"
}
return "info"
}
@@ -62,8 +58,6 @@ func ParseLevel(lvl string) Level {
return ErrorLevel
case FatalLevel.String():
return FatalLevel
case NoneLevel.String():
return NoneLevel
}
return InfoLevel
}
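As the String/ParseLevel pair above shows, the two functions round-trip the known level names and fall back to "info"/InfoLevel for anything unknown; a quick sketch (v3 import path assumed, matching the rest of this compare):

package main

import (
	"fmt"

	"go.unistack.org/micro/v3/logger"
)

func main() {
	fmt.Println(logger.ParseLevel("warn"))  // warn
	fmt.Println(logger.ParseLevel("bogus")) // info (fallback)
	fmt.Println(logger.ErrorLevel.String()) // error
}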

View File

@@ -2,13 +2,14 @@ package logger
import (
"context"
"fmt"
"io"
"log/slog"
"os"
"slices"
"time"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v3/meter"
)
// Option func signature
@@ -99,8 +100,11 @@ func WithAddFields(fields ...interface{}) Option {
iv, iok := o.Fields[i].(string)
jv, jok := fields[j].(string)
if iok && jok && iv == jv {
fmt.Printf("AAAA o.Fields:%v old:%v new:%v\n", o.Fields[i], o.Fields[i+1], fields[j+1])
o.Fields[i+1] = fields[j+1]
fmt.Printf("BBBB o.Fields:%v old:%v new:%v\n", o.Fields[i], o.Fields[i+1], fields[j+1])
fields = slices.Delete(fields, j, j+2)
}
}
}
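The effect is that WithAddFields overwrites the value of a key that is already present in Options.Fields instead of appending a duplicate pair. A rough sketch of that behavior, assuming Fields is the []interface{} slice manipulated above:

opts := logger.Options{Fields: []interface{}{"request_id", "abc"}}
logger.WithAddFields("request_id", "def")(&opts)
// opts.Fields is now ["request_id", "def"]: the existing key was updated in place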

View File

@@ -13,9 +13,9 @@ import (
"sync/atomic"
"time"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/semconv"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/semconv"
"go.unistack.org/micro/v3/tracer"
)
const (
@@ -34,7 +34,6 @@ var (
warnValue = slog.StringValue("warn")
errorValue = slog.StringValue("error")
fatalValue = slog.StringValue("fatal")
noneValue = slog.StringValue("none")
)
type wrapper struct {
@@ -86,8 +85,6 @@ func (s *slogLogger) renameAttr(_ []string, a slog.Attr) slog.Attr {
a.Value = errorValue
case lvl >= logger.FatalLevel:
a.Value = fatalValue
case lvl >= logger.NoneLevel:
a.Value = noneValue
default:
a.Value = infoValue
}
@@ -319,8 +316,6 @@ func loggerToSlogLevel(level logger.Level) slog.Level {
return slog.LevelDebug - 1
case logger.FatalLevel:
return slog.LevelError + 1
case logger.NoneLevel:
return slog.LevelError + 2
default:
return slog.LevelInfo
}
@@ -338,8 +333,6 @@ func slogToLoggerLevel(level slog.Level) logger.Level {
return logger.TraceLevel
case slog.LevelError + 1:
return logger.FatalLevel
case slog.LevelError + 2:
return logger.NoneLevel
default:
return logger.InfoLevel
}

View File

@@ -12,9 +12,9 @@ import (
"time"
"github.com/google/uuid"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v4/util/buffer"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/util/buffer"
)
// always first to have proper check
@@ -36,24 +36,6 @@ func TestStacktrace(t *testing.T) {
}
}
func TestNoneLevel(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.NoneLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}
l.Error(ctx, "msg1", errors.New("err"))
if buf.Len() != 0 {
t.Fatalf("logger none level not works, buf contains: %s", buf.Bytes())
}
}
func TestDelayedBuffer(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
@@ -80,7 +62,7 @@ func TestTime(t *testing.T) {
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
logger.WithTimeFunc(func() time.Time {
return time.Unix(0, 0).UTC()
return time.Unix(0, 0)
}),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
@@ -89,7 +71,8 @@ func TestTime(t *testing.T) {
l.Error(ctx, "msg1", errors.New("err"))
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
if !bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T03:00:00.000000000+03:00`)) &&
!bytes.Contains(buf.Bytes(), []byte(`timestamp=1970-01-01T00:00:00.000000000Z`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}
@@ -423,16 +406,15 @@ func TestLogger(t *testing.T) {
func Test_WithContextAttrFunc(t *testing.T) {
loggerContextAttrFuncs := []logger.ContextAttrFunc{
func(ctx context.Context) []interface{} {
md, ok := metadata.FromOutgoingContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil
}
attrs := make([]interface{}, 0, 10)
for k, v := range md {
key := strings.ToLower(k)
switch key {
case "x-request-id", "phone", "external-Id", "source-service", "x-app-install-id", "client-id", "client-ip":
attrs = append(attrs, key, v[0])
switch k {
case "X-Request-Id", "Phone", "External-Id", "Source-Service", "X-App-Install-Id", "Client-Id", "Client-Ip":
attrs = append(attrs, strings.ToLower(k), v)
}
}
return attrs
@@ -442,7 +424,7 @@ func Test_WithContextAttrFunc(t *testing.T) {
logger.DefaultContextAttrFuncs = append(logger.DefaultContextAttrFuncs, loggerContextAttrFuncs...)
ctx := context.TODO()
ctx = metadata.AppendOutgoingContext(ctx, "X-Request-Id", uuid.New().String(),
ctx = metadata.AppendIncomingContext(ctx, "X-Request-Id", uuid.New().String(),
"Source-Service", "Test-System")
buf := bytes.NewBuffer(nil)
@@ -462,9 +444,9 @@ func Test_WithContextAttrFunc(t *testing.T) {
t.Fatalf("logger info, buf %s", buf.Bytes())
}
buf.Reset()
omd, _ := metadata.FromOutgoingContext(ctx)
imd, _ := metadata.FromIncomingContext(ctx)
l.Info(ctx, "test message1")
omd.Set("Source-Service", "Test-System2")
imd.Set("Source-Service", "Test-System2")
l.Info(ctx, "test message2")
// t.Logf("xxx %s", buf.Bytes())

View File

@@ -8,7 +8,7 @@ import (
"strconv"
"strings"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v3/codec"
)
const sf = "0-+# "

View File

@@ -5,7 +5,7 @@ import (
"strings"
"testing"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v3/codec"
)
func TestUnwrap(t *testing.T) {

View File

@@ -1,185 +1,32 @@
// Package metadata is a way of defining message headers
package metadata
import (
"context"
"fmt"
"strings"
)
// In the metadata package, context and metadata are treated as immutable.
// Deep copies of metadata are made to keep things safe and correct.
// If a user takes a map and changes it across threads, it's their responsibility.
//
// 1. Incoming Context
//
// This context is provided by an external system and populated by the server or broker of the micro framework.
// It should not be modified. The idea is to extract all necessary data from it,
// validate the data, and transfer it into the current context.
// After that, only the current context should be used throughout the code.
//
// 2. Current Context
//
// This is the context used during the execution flow.
// You can add any needed metadata to it and pass it through your code.
//
// 3. Outgoing Context
//
// This context is for sending data to external systems.
// You can add what you need before sending it out.
// But it's usually better to build and prepare this context right before making the external call,
// instead of changing it in many places.
//
// Execution Flow:
//
// [External System]
// ↓
// [Incoming Context]
// ↓
// [Extract & Validate Metadata from Incoming Context]
// ↓
// [Prepare Current Context]
// ↓
// [Enrich Current Context]
// ↓
// [Business Logic]
// ↓
// [Prepare Outgoing Context]
// ↓
// [External System Call]
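A compact sketch of the flow described above, using the context helpers from this package (the handler and field names are illustrative, and the import path assumes the v3 module the rest of this compare moves to):

package example

import (
	"context"

	"go.unistack.org/micro/v3/metadata"
)

func handle(ctx context.Context) context.Context {
	// 1. read what the server/broker attached to the incoming context
	if imd, ok := metadata.FromIncomingContext(ctx); ok {
		_ = imd // validate and keep only the fields you trust
	}
	// 2. enrich the current context for the rest of the call
	ctx = metadata.AppendContext(ctx, "x-request-id", "12345")
	// 3. build the outgoing context right before calling the external system
	return metadata.AppendOutgoingContext(ctx, "source-service", "example")
}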
type (
metadataCurrentKey struct{}
metadataIncomingKey struct{}
metadataOutgoingKey struct{}
rawMetadata struct {
md Metadata
added [][]string
}
mdIncomingKey struct{}
mdOutgoingKey struct{}
mdKey struct{}
)
// NewContext creates a new context with the provided Metadata attached.
// The Metadata must not be modified after calling this function.
func NewContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md})
}
// NewIncomingContext creates a new context with the provided incoming Metadata attached.
// The Metadata must not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataIncomingKey{}, rawMetadata{md: md})
}
// NewOutgoingContext creates a new context with the provided outgoing Metadata attached.
// The Metadata must not be modified after calling this function.
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md})
}
// AppendContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataCurrentKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataCurrentKey{}, rawMetadata{md: md.md, added: added})
}
// AppendOutgoingContext returns a new context with the provided key-value pairs (kv)
// merged with any existing metadata in the context. For a description of kv,
// please refer to the Pairs documentation.
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
}
md, _ := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, metadataOutgoingKey{}, rawMetadata{md: md.md, added: added})
}
// FromContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
func FromContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
}
// MustContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
func MustContext(ctx context.Context) Metadata {
md, ok := FromContext(ctx)
if !ok {
panic("missing metadata")
}
return md
}
// FromIncomingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
// FromIncomingContext returns metadata from incoming ctx
// returned metadata should not be modified or a race condition happens
func FromIncomingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
if !ok {
if ctx == nil {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
md, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromIncomingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, true
return md.md, ok
}
// MustIncomingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
// MustIncomingContext returns metadata from incoming ctx
// returned metadata should not be modified or a race condition happens.
// It panics if the metadata does not exist.
func MustIncomingContext(ctx context.Context) Metadata {
md, ok := FromIncomingContext(ctx)
if !ok {
@@ -188,37 +35,22 @@ func MustIncomingContext(ctx context.Context) Metadata {
return md
}
// FromOutgoingContext retrieves a deep copy of the metadata from the context and returns it
// with a boolean indicating if it was found.
// FromOutgoingContext returns metadata from outgoing ctx
// returned metadata should not be modified or a race condition happens
func FromOutgoingContext(ctx context.Context) (Metadata, bool) {
raw, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
if ctx == nil {
return nil, false
}
metadataSize := len(raw.md)
for i := range raw.added {
metadataSize += len(raw.added[i]) / 2
md, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
out := make(Metadata, metadataSize)
for k, v := range raw.md {
out[k] = copyOf(v)
}
for _, added := range raw.added {
if len(added)%2 == 1 {
panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
}
for i := 0; i < len(added); i += 2 {
out[added[i]] = append(out[added[i]], added[i+1])
}
}
return out, ok
return md.md, ok
}
// MustOutgoingContext retrieves a deep copy of the metadata from the context and panics
// if the metadata is not found.
// MustOutgoingContext returns metadata from outgoing ctx
// returned metadata should not be modified or a race condition happens.
// It panics if the metadata does not exist.
func MustOutgoingContext(ctx context.Context) Metadata {
md, ok := FromOutgoingContext(ctx)
if !ok {
@@ -227,68 +59,121 @@ func MustOutgoingContext(ctx context.Context) Metadata {
return md
}
// ValueFromCurrentContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromCurrentContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataCurrentKey{}).(rawMetadata)
if !ok {
return nil
// FromContext returns metadata from the given context
// returned metadata should not be modified or a race condition happens
func FromContext(ctx context.Context) (Metadata, bool) {
if ctx == nil {
return nil, false
}
if v, ok := md.md[key]; ok {
return copyOf(v)
md, ok := ctx.Value(mdKey{}).(*rawMetadata)
if !ok || md.md == nil {
return nil, false
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
return md.md, ok
}
// ValueFromIncomingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
raw, ok := ctx.Value(metadataIncomingKey{}).(rawMetadata)
// MustContext returns metadata from the given context
// returned metadata should not be modified or a race condition happens
func MustContext(ctx context.Context) Metadata {
md, ok := FromContext(ctx)
if !ok {
return nil
panic("missing metadata")
}
if v, ok := raw.md[key]; ok {
return copyOf(v)
}
for k, v := range raw.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
return md
}
// ValueFromOutgoingContext retrieves a deep copy of the metadata for the given key
// from the context, performing a case-insensitive search if needed. Returns nil if not found.
func ValueFromOutgoingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(metadataOutgoingKey{}).(rawMetadata)
if !ok {
return nil
// NewContext creates a new context with the given metadata
func NewContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
if v, ok := md.md[key]; ok {
return copyOf(v)
}
for k, v := range md.md {
// Case-insensitive comparison: Metadata is a map, and there's no guarantee
// that the Metadata attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
return context.WithValue(ctx, mdKey{}, &rawMetadata{md})
}
// SetOutgoingContext modify outgoing context with given metadata
func SetOutgoingContext(ctx context.Context, md Metadata) bool {
if ctx == nil {
return false
}
if omd, ok := ctx.Value(mdOutgoingKey{}).(*rawMetadata); ok {
omd.md = md
return true
}
return false
}
// SetIncomingContext modify incoming context with given metadata
func SetIncomingContext(ctx context.Context, md Metadata) bool {
if ctx == nil {
return false
}
if omd, ok := ctx.Value(mdIncomingKey{}).(*rawMetadata); ok {
omd.md = md
return true
}
return false
}
// NewIncomingContext creates a new context with incoming metadata attached
func NewIncomingContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, mdIncomingKey{}, &rawMetadata{md})
}
// NewOutgoingContext creates a new context with outgoing metadata attached
func NewOutgoingContext(ctx context.Context, md Metadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, mdOutgoingKey{}, &rawMetadata{md})
}
// AppendOutgoingContext appends new md to context
func AppendOutgoingContext(ctx context.Context, kv ...string) context.Context {
md, ok := Pairs(kv...)
if !ok {
return ctx
}
omd, ok := FromOutgoingContext(ctx)
if !ok {
return NewOutgoingContext(ctx, md)
}
for k, v := range md {
omd.Set(k, v)
}
return ctx
}
// AppendIncomingContext appends new md to context
func AppendIncomingContext(ctx context.Context, kv ...string) context.Context {
md, ok := Pairs(kv...)
if !ok {
return ctx
}
omd, ok := FromIncomingContext(ctx)
if !ok {
return NewIncomingContext(ctx, md)
}
for k, v := range md {
omd.Set(k, v)
}
return ctx
}
// AppendContext appends new md to context
func AppendContext(ctx context.Context, kv ...string) context.Context {
md, ok := Pairs(kv...)
if !ok {
return ctx
}
omd, ok := FromContext(ctx)
if !ok {
return NewContext(ctx, md)
}
for k, v := range md {
omd.Set(k, v)
}
return ctx
}
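Because the context stores a *rawMetadata pointer, SetIncomingContext and SetOutgoingContext can swap the metadata in place without deriving a new context. A short sketch (v3 import path assumed):

package example

import (
	"context"

	"go.unistack.org/micro/v3/metadata"
)

func swapOutgoing() string {
	ctx := metadata.NewOutgoingContext(context.TODO(), metadata.New(1))

	md := metadata.New(1)
	md.Set("key", "val")
	metadata.SetOutgoingContext(ctx, md) // mutates the value already stored in ctx

	omd, _ := metadata.FromOutgoingContext(ctx)
	v, _ := omd.Get("key")
	return v // "val"
}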

140
metadata/context_test.go Normal file
View File

@@ -0,0 +1,140 @@
package metadata
import (
"context"
"testing"
)
func TestFromNilContext(t *testing.T) {
// nolint: staticcheck
c, ok := FromContext(nil)
if ok || c != nil {
t.Fatal("FromContext not works")
}
}
func TestNewNilContext(t *testing.T) {
// nolint: staticcheck
ctx := NewContext(nil, New(0))
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("NewContext not works")
}
}
func TestFromContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdKey{}, &rawMetadata{New(0)})
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("FromContext not works")
}
}
func TestNewContext(t *testing.T) {
ctx := NewContext(context.TODO(), New(0))
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("NewContext not works")
}
}
func TestFromIncomingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdIncomingKey{}, &rawMetadata{New(0)})
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("FromIncomingContext not works")
}
}
func TestFromOutgoingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), mdOutgoingKey{}, &rawMetadata{New(0)})
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("FromOutgoingContext not works")
}
}
func TestSetIncomingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := context.WithValue(context.TODO(), mdIncomingKey{}, &rawMetadata{})
if !SetIncomingContext(ctx, md) {
t.Fatal("SetIncomingContext not works")
}
md, ok := FromIncomingContext(ctx)
if md == nil || !ok {
t.Fatal("SetIncomingContext not works")
} else if v, ok := md.Get("key"); !ok || v != "val" {
t.Fatal("SetIncomingContext not works")
}
}
func TestSetOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := context.WithValue(context.TODO(), mdOutgoingKey{}, &rawMetadata{})
if !SetOutgoingContext(ctx, md) {
t.Fatal("SetOutgoingContext not works")
}
md, ok := FromOutgoingContext(ctx)
if md == nil || !ok {
t.Fatal("SetOutgoingContext not works")
} else if v, ok := md.Get("key"); !ok || v != "val" {
t.Fatal("SetOutgoingContext not works")
}
}
func TestNewIncomingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewIncomingContext(context.TODO(), md)
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("NewIncomingContext not works")
}
}
func TestNewOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewOutgoingContext(context.TODO(), md)
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("NewOutgoingContext not works")
}
}
func TestAppendIncomingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendIncomingContext(context.TODO(), "key2", "val2")
nmd, ok := FromIncomingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendIncomingContext not works")
}
if v, ok := nmd.Get("key2"); !ok || v != "val2" {
t.Fatal("AppendIncomingContext not works")
}
}
func TestAppendOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendOutgoingContext(context.TODO(), "key2", "val2")
nmd, ok := FromOutgoingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendOutgoingContext not works")
}
if v, ok := nmd.Get("key2"); !ok || v != "val2" {
t.Fatal("AppendOutgoingContext not works")
}
}

View File

@@ -1,19 +0,0 @@
// Package metadata is a way of defining message headers
package metadata
var (
// HeaderTopic is the header name that contains topic name.
HeaderTopic = "Micro-Topic"
// HeaderContentType specifies content type of message.
HeaderContentType = "Content-Type"
// HeaderEndpoint specifies endpoint in service.
HeaderEndpoint = "Micro-Endpoint"
// HeaderService specifies service.
HeaderService = "Micro-Service"
// HeaderTimeout specifies timeout of operation.
HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header.
HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id.
HeaderXRequestID = "X-Request-Id"
)

View File

@@ -1,7 +0,0 @@
package metadata
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}

View File

@@ -1,37 +0,0 @@
package metadata
import "sort"
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// Next advances the iterator to the next element.
func (iter *Iterator) Next(k *string, v *[]string) bool {
if iter.cur+1 > iter.cnt {
return false
}
if k != nil && v != nil {
*k = iter.keys[iter.cur]
vv := iter.md[*k]
*v = make([]string, len(vv))
copy(*v, vv)
iter.cur++
}
return true
}
// Iterator returns an iterator for iterating over metadata in sorted order.
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
sort.Strings(iter.keys)
return iter
}

View File

@@ -1,160 +1,167 @@
// Package metadata is a way of defining message headers
package metadata
import (
"fmt"
"net/textproto"
"sort"
"strings"
)
// defaultMetadataSize is used when initializing new Metadata.
var (
// HeaderTopic is the header name that contains topic name
HeaderTopic = "Micro-Topic"
// HeaderContentType specifies content type of message
HeaderContentType = "Content-Type"
// HeaderEndpoint specifies endpoint in service
HeaderEndpoint = "Micro-Endpoint"
// HeaderService specifies service
HeaderService = "Micro-Service"
// HeaderTimeout specifies timeout of operation
HeaderTimeout = "Micro-Timeout"
// HeaderAuthorization specifies Authorization header
HeaderAuthorization = "Authorization"
// HeaderXRequestID specifies request id
HeaderXRequestID = "X-Request-Id"
)
// Metadata is our way of representing request headers internally.
// They're used at the RPC level and translate back and forth
// from Transport headers.
type Metadata map[string]string
type rawMetadata struct {
md Metadata
}
// defaultMetadataSize is used when a new Metadata needs to be initialized
var defaultMetadataSize = 2
// Metadata maps keys to values. Use the New, NewWithMetadata and Pairs functions to create it.
type Metadata map[string][]string
// New creates a zero-value Metadata with the specified size.
func New(l int) Metadata {
if l == 0 {
l = defaultMetadataSize
}
md := make(Metadata, l)
return md
// Iterator is used to iterate over metadata in sorted order
type Iterator struct {
md Metadata
keys []string
cur int
cnt int
}
// NewWithMetadata creates a Metadata from the provided key-value map.
func NewWithMetadata(m map[string]string) Metadata {
md := make(Metadata, len(m))
for key, val := range m {
md[key] = append(md[key], val)
// Next advances the iterator to the next element
func (iter *Iterator) Next(k, v *string) bool {
if iter.cur+1 > iter.cnt {
return false
}
return md
*k = iter.keys[iter.cur]
*v = iter.md[*k]
iter.cur++
return true
}
// Pairs returns a Metadata formed from the key-value mapping. It panics if the length of kv is odd.
func Pairs(kv ...string) Metadata {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
// Iterator returns the iterator for metadata in sorted order
func (md Metadata) Iterator() *Iterator {
iter := &Iterator{md: md, cnt: len(md)}
iter.keys = make([]string, 0, iter.cnt)
for k := range md {
iter.keys = append(iter.keys, k)
}
md := make(Metadata, len(kv)/2)
for i := 0; i < len(kv); i += 2 {
md[kv[i]] = append(md[kv[i]], kv[i+1])
}
return md
sort.Strings(iter.keys)
return iter
}
// Join combines multiple Metadatas into a single Metadata.
// The order of values for each key is determined by the order in which the Metadatas are provided to Join.
func Join(mds ...Metadata) Metadata {
out := Metadata{}
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
// MustGet returns the value for key, panicking if the key is missing
func (md Metadata) MustGet(key string) string {
val, ok := md.Get(key)
if !ok {
panic("missing metadata key")
}
return val
}
// Get returns value from metadata by key
func (md Metadata) Get(key string) (string, bool) {
// fast path
val, ok := md[key]
if !ok {
// slow path
val, ok = md[textproto.CanonicalMIMEHeaderKey(key)]
if !ok {
val, ok = md[strings.ToLower(key)]
}
}
return out
return val, ok
}
// Copy returns a deep copy of Metadata.
func Copy(src Metadata) Metadata {
out := make(Metadata, len(src))
for k, v := range src {
out[k] = copyOf(v)
// Set is used to store value in metadata
func (md Metadata) Set(kv ...string) {
if len(kv)%2 == 1 {
kv = kv[:len(kv)-1]
}
for idx := 0; idx < len(kv); idx += 2 {
md[textproto.CanonicalMIMEHeaderKey(kv[idx])] = kv[idx+1]
}
return out
}
// Copy returns a deep copy of Metadata.
func (md Metadata) Copy() Metadata {
out := make(Metadata, len(md))
// Del is used to remove value from metadata
func (md Metadata) Del(keys ...string) {
for _, key := range keys {
// fast path
delete(md, key)
// slow path
delete(md, textproto.CanonicalMIMEHeaderKey(key))
// very slow path
delete(md, strings.ToLower(key))
}
}
// CopyTo copies the metadata into dst
func (md Metadata) CopyTo(dst Metadata) {
for k, v := range md {
out[k] = copyOf(v)
dst[k] = v
}
return out
}
// CopyTo performs a deep copy of Metadata to the out.
func (md Metadata) CopyTo(out Metadata) {
// Copy makes a copy of the metadata
func Copy(md Metadata, exclude ...string) Metadata {
nmd := New(len(md))
for k, v := range md {
out[k] = copyOf(v)
nmd[k] = v
}
nmd.Del(exclude...)
return nmd
}
// Len returns the number of items in Metadata.
func (md Metadata) Len() int {
return len(md)
// New returns new metadata with the given size
func New(size int) Metadata {
if size == 0 {
size = defaultMetadataSize
}
return make(Metadata, size)
}
// AsMap returns a deep copy of Metadata as a map[string]string
func (md Metadata) AsMap() map[string]string {
out := make(map[string]string, len(md))
for k, v := range md {
out[k] = strings.Join(v, ",")
// Merge merges metadata to existing metadata, overwriting if specified
func Merge(omd Metadata, mmd Metadata, overwrite bool) Metadata {
var ok bool
nmd := Copy(omd)
for key, val := range mmd {
_, ok = nmd[key]
switch {
case ok && !overwrite:
continue
case val != "":
nmd[key] = val
case ok && val == "":
nmd.Del(key)
}
}
return out
return nmd
}
// AsHTTP1 returns a deep copy of Metadata with keys converted to canonical MIME header key format.
func (md Metadata) AsHTTP1() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
out[textproto.CanonicalMIMEHeaderKey(k)] = copyOf(v)
// Pairs creates metadata from the given key-value pairs
func Pairs(kv ...string) (Metadata, bool) {
if len(kv)%2 == 1 {
return nil, false
}
return out
}
// AsHTTP2 returns a deep copy of Metadata with keys converted to lowercase.
func (md Metadata) AsHTTP2() map[string][]string {
out := make(map[string][]string, len(md))
for k, v := range md {
out[strings.ToLower(k)] = copyOf(v)
}
return out
}
// Get retrieves the values for a given key, checking the key in three formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Get(k string) []string {
v, ok := md[k]
if !ok {
v, ok = md[strings.ToLower(k)]
}
if !ok {
v = md[textproto.CanonicalMIMEHeaderKey(k)]
}
return v
}
// GetJoined retrieves the values for a given key and joins them into a single string, separated by commas.
func (md Metadata) GetJoined(k string) string {
return strings.Join(md.Get(k), ",")
}
// Set assigns the values to the given key.
func (md Metadata) Set(key string, vals ...string) {
if len(vals) == 0 {
return
}
md[key] = vals
}
// Append adds values to the existing values for the given key.
func (md Metadata) Append(key string, vals ...string) {
if len(vals) == 0 {
return
}
md[key] = append(md[key], vals...)
}
// Del removes the values for the given keys k. It checks and removes the keys in the following formats:
// - exact case,
// - lower case,
// - canonical MIME header key format.
func (md Metadata) Del(k ...string) {
for i := range k {
delete(md, k[i])
delete(md, strings.ToLower(k[i]))
delete(md, textproto.CanonicalMIMEHeaderKey(k[i]))
md := New(len(kv) / 2)
for idx := 0; idx < len(kv); idx += 2 {
md[kv[idx]] = kv[idx+1]
}
return md, true
}
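Taken together, Set canonicalizes keys and Get/Del fall back to the canonical and lowercase forms, so lookups are effectively case-insensitive; a rough sketch (v3 import path assumed):

package example

import "go.unistack.org/micro/v3/metadata"

func demo() metadata.Metadata {
	md := metadata.New(2)
	md.Set("x-request-id", "12345", "content-type", "application/json")

	if v, ok := md.Get("X-Request-Id"); ok {
		_ = v // "12345": Set stored it under the canonical key X-Request-Id
	}
	md.Del("Content-Type") // removed; Del tries the exact, canonical and lowercase keys

	// with overwrite=true an empty value in the second map drops the key entirely
	return metadata.Merge(md, metadata.Metadata{"X-Request-Id": ""}, true)
}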

View File

@@ -5,31 +5,11 @@ import (
"testing"
)
func TesSet(t *testing.T) {
md := Pairs("key1", "val1", "key2", "val2")
md.Set("key1", "val2", "val3")
v := md.GetJoined("X-Request-Id")
if v != "val2, val3" {
t.Fatal("set not works")
}
}
/*
func TestAppendOutgoingContextModify(t *testing.T) {
md := Pairs("key1", "val1")
ctx := NewOutgoingContext(context.TODO(), md)
nctx := AppendOutgoingContext(ctx, "key1", "val3", "key2", "val2")
_ = nctx
omd := MustOutgoingContext(nctx)
fmt.Printf("%#+v\n", omd)
}
*/
func TestLowercase(t *testing.T) {
md := New(1)
md["x-request-id"] = []string{"12345"}
v := md.GetJoined("X-Request-Id")
if v == "" {
md["x-request-id"] = "12345"
v, ok := md.Get("X-Request-Id")
if !ok || v == "" {
t.Fatalf("metadata invalid %#+v", md)
}
}
@@ -56,13 +36,50 @@ func TestMultipleUsage(t *testing.T) {
_ = omd
}
func TestPairs(t *testing.T) {
md := Pairs("key1", "val1", "key2", "val2")
if v := md.Get("key1"); v == nil {
func TestMetadataSetMultiple(t *testing.T) {
md := New(4)
md.Set("key1", "val1", "key2", "val2", "key3")
if v, ok := md.Get("key1"); !ok || v != "val1" {
t.Fatalf("invalid kv %#+v", md)
}
if v, ok := md.Get("key2"); !ok || v != "val2" {
t.Fatalf("invalid kv %#+v", md)
}
if _, ok := md.Get("key3"); ok {
t.Fatalf("invalid kv %#+v", md)
}
}
func TestAppend(t *testing.T) {
ctx := context.Background()
ctx = AppendIncomingContext(ctx, "key1", "val1", "key2", "val2")
md, ok := FromIncomingContext(ctx)
if !ok {
t.Fatal("metadata empty")
}
if _, ok := md.Get("key1"); !ok {
t.Fatal("key1 not found")
}
}
func TestPairs(t *testing.T) {
md, ok := Pairs("key1", "val1", "key2", "val2")
if !ok {
t.Fatal("odd number of kv")
}
if _, ok = md.Get("key1"); !ok {
t.Fatal("key1 not found")
}
}
func testCtx(ctx context.Context) {
md := New(2)
md.Set("Key1", "Val1_new")
md.Set("Key3", "Val3")
SetOutgoingContext(ctx, md)
}
func TestPassing(t *testing.T) {
ctx := context.TODO()
md1 := New(2)
@@ -70,63 +87,70 @@ func TestPassing(t *testing.T) {
md1.Set("Key2", "Val2")
ctx = NewIncomingContext(ctx, md1)
testCtx(ctx)
_, ok := FromOutgoingContext(ctx)
if ok {
t.Fatalf("create outgoing context")
}
ctx = NewOutgoingContext(ctx, md1)
ctx = NewOutgoingContext(ctx, New(1))
testCtx(ctx)
md, ok := FromOutgoingContext(ctx)
if !ok {
t.Fatalf("missing metadata from outgoing context")
}
if v := md.Get("Key1"); v == nil || v[0] != "Val1" {
if v, ok := md.Get("Key1"); !ok || v != "Val1_new" {
t.Fatalf("invalid metadata value %#+v", md)
}
}
func TestIterator(t *testing.T) {
md := Pairs(
"1Last", "last",
"2First", "first",
"3Second", "second",
)
iter := md.Iterator()
var k string
var v []string
chk := New(3)
for iter.Next(&k, &v) {
chk[k] = v
func TestMerge(t *testing.T) {
omd := Metadata{
"key1": "val1",
}
mmd := Metadata{
"key2": "val2",
}
for k, v := range chk {
if cv, ok := md[k]; !ok || len(cv) != len(v) || cv[0] != v[0] {
t.Fatalf("XXXX %#+v %#+v", chk, md)
}
nmd := Merge(omd, mmd, true)
if len(nmd) != 2 {
t.Fatalf("merge failed: %v", nmd)
}
}
func TestIterator(_ *testing.T) {
md := Metadata{
"1Last": "last",
"2First": "first",
"3Second": "second",
}
iter := md.Iterator()
var k, v string
for iter.Next(&k, &v) {
// fmt.Printf("k: %s, v: %s\n", k, v)
}
}
func TestMedataCanonicalKey(t *testing.T) {
md := New(1)
md.Set("x-request-id", "12345")
v := md.GetJoined("x-request-id")
if v == "" {
v, ok := md.Get("x-request-id")
if !ok {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
}
v = md.GetJoined("X-Request-Id")
if v == "" {
v, ok = md.Get("X-Request-Id")
if !ok {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
}
v = md.GetJoined("X-Request-ID")
if v == "" {
v, ok = md.Get("X-Request-ID")
if !ok {
t.Fatalf("failed to get x-request-id")
} else if v != "12345" {
t.Fatalf("invalid metadata value: %s != %s", "12345", v)
@@ -138,8 +162,8 @@ func TestMetadataSet(t *testing.T) {
md.Set("Key", "val")
val := md.GetJoined("Key")
if val == "" {
val, ok := md.Get("Key")
if !ok {
t.Fatal("key Key not found")
}
if val != "val" {
@@ -149,27 +173,36 @@ func TestMetadataSet(t *testing.T) {
func TestMetadataDelete(t *testing.T) {
md := Metadata{
"Foo": []string{"bar"},
"Baz": []string{"empty"},
"Foo": "bar",
"Baz": "empty",
}
md.Del("Baz")
v := md.Get("Baz")
if v != nil {
_, ok := md.Get("Baz")
if ok {
t.Fatal("key Baz not deleted")
}
}
func TestNilContext(t *testing.T) {
var ctx context.Context
_, ok := FromContext(ctx)
if ok {
t.Fatal("nil context")
}
}
func TestMetadataCopy(t *testing.T) {
md := Metadata{
"Foo": []string{"bar"},
"Bar": []string{"baz"},
"Foo": "bar",
"Bar": "baz",
}
cp := Copy(md)
for k, v := range md {
if cv := cp[k]; cv[0] != v[0] {
if cv := cp[k]; cv != v {
t.Fatalf("Got %s:%s for %s:%s", k, cv, k, v)
}
}
@@ -177,7 +210,7 @@ func TestMetadataCopy(t *testing.T) {
func TestMetadataContext(t *testing.T) {
md := Metadata{
"Foo": []string{"bar"},
"Foo": "bar",
}
ctx := NewContext(context.TODO(), md)
@@ -187,7 +220,7 @@ func TestMetadataContext(t *testing.T) {
t.Errorf("Unexpected error retrieving metadata, got %t", ok)
}
if emd["Foo"][0] != md["Foo"][0] {
if emd["Foo"] != md["Foo"] {
t.Errorf("Expected key: %s val: %s, got key: %s val: %s", "Foo", md["Foo"], "Foo", emd["Foo"])
}
@@ -196,74 +229,13 @@ func TestMetadataContext(t *testing.T) {
}
}
func TestFromContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), metadataCurrentKey{}, rawMetadata{md: New(0)})
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("FromContext not works")
}
}
func TestNewContext(t *testing.T) {
ctx := NewContext(context.TODO(), New(0))
c, ok := FromContext(ctx)
if c == nil || !ok {
t.Fatal("NewContext not works")
}
}
func TestFromIncomingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), metadataIncomingKey{}, rawMetadata{md: New(0)})
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("FromIncomingContext not works")
}
}
func TestFromOutgoingContext(t *testing.T) {
ctx := context.WithValue(context.TODO(), metadataOutgoingKey{}, rawMetadata{md: New(0)})
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("FromOutgoingContext not works")
}
}
func TestNewIncomingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewIncomingContext(context.TODO(), md)
c, ok := FromIncomingContext(ctx)
if c == nil || !ok {
t.Fatal("NewIncomingContext not works")
}
}
func TestNewOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key", "val")
ctx := NewOutgoingContext(context.TODO(), md)
c, ok := FromOutgoingContext(ctx)
if c == nil || !ok {
t.Fatal("NewOutgoingContext not works")
}
}
func TestAppendOutgoingContext(t *testing.T) {
md := New(1)
md.Set("key1", "val1")
ctx := AppendOutgoingContext(context.TODO(), "key2", "val2")
nmd, ok := FromOutgoingContext(ctx)
if nmd == nil || !ok {
t.Fatal("AppendOutgoingContext not works")
}
if v := nmd.GetJoined("key2"); v != "val2" {
t.Fatal("AppendOutgoingContext not works")
func TestCopy(t *testing.T) {
md := New(2)
md.Set("key1", "val1", "key2", "val2")
nmd := Copy(md, "key2")
if len(nmd) != 1 {
t.Fatal("Copy exclude not works")
} else if nmd["Key1"] != "val1" {
t.Fatal("Copy exclude not works")
}
}

View File

@@ -4,8 +4,8 @@ package meter
import (
"io"
"sort"
"strconv"
"strings"
"sync"
"time"
)
@@ -117,39 +117,6 @@ func BuildLabels(labels ...string) []string {
return labels
}
var spool = newStringsPool(500)
type stringsPool struct {
p *sync.Pool
c int
}
func newStringsPool(size int) *stringsPool {
p := &stringsPool{c: size}
p.p = &sync.Pool{
New: func() interface{} {
return &strings.Builder{}
},
}
return p
}
func (p *stringsPool) Cap() int {
return p.c
}
func (p *stringsPool) Get() *strings.Builder {
return p.p.Get().(*strings.Builder)
}
func (p *stringsPool) Put(b *strings.Builder) {
if b.Cap() > p.c {
return
}
b.Reset()
p.p.Put(b)
}
// BuildName used to combine metric with labels.
// If labels count is odd, drop last element
func BuildName(name string, labels ...string) string {
@@ -158,6 +125,8 @@ func BuildName(name string, labels ...string) string {
}
if len(labels) > 2 {
sort.Sort(byKey(labels))
idx := 0
for {
if labels[idx] == labels[idx+2] {
@@ -172,9 +141,7 @@ func BuildName(name string, labels ...string) string {
}
}
b := spool.Get()
defer spool.Put(b)
var b strings.Builder
_, _ = b.WriteString(name)
_, _ = b.WriteRune('{')
for idx := 0; idx < len(labels); idx += 2 {
@@ -182,9 +149,8 @@ func BuildName(name string, labels ...string) string {
_, _ = b.WriteRune(',')
}
_, _ = b.WriteString(labels[idx])
_, _ = b.WriteString(`="`)
_, _ = b.WriteString(labels[idx+1])
_, _ = b.WriteRune('"')
_, _ = b.WriteString(`=`)
_, _ = b.WriteString(strconv.Quote(labels[idx+1]))
}
_, _ = b.WriteRune('}')

View File

@@ -50,12 +50,11 @@ func TestBuildName(t *testing.T) {
data := map[string][]string{
`my_metric{firstlabel="value2",zerolabel="value3"}`: {
"my_metric",
"firstlabel", "value2",
"zerolabel", "value3",
"zerolabel", "value3", "firstlabel", "value2",
},
`my_metric{broker="broker2",register="mdns",server="tcp"}`: {
"my_metric",
"broker", "broker1", "broker", "broker2", "register", "mdns", "server", "http", "server", "tcp",
"broker", "broker1", "broker", "broker2", "server", "http", "server", "tcp", "register", "mdns",
},
`my_metric{aaa="aaa"}`: {
"my_metric",

View File

@@ -3,21 +3,21 @@ package micro
import (
"reflect"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/client"
"go.unistack.org/micro/v4/codec"
"go.unistack.org/micro/v4/flow"
"go.unistack.org/micro/v4/fsm"
"go.unistack.org/micro/v4/logger"
"go.unistack.org/micro/v4/meter"
"go.unistack.org/micro/v4/register"
"go.unistack.org/micro/v4/resolver"
"go.unistack.org/micro/v4/router"
"go.unistack.org/micro/v4/selector"
"go.unistack.org/micro/v4/server"
"go.unistack.org/micro/v4/store"
"go.unistack.org/micro/v4/sync"
"go.unistack.org/micro/v4/tracer"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/flow"
"go.unistack.org/micro/v3/fsm"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/register"
"go.unistack.org/micro/v3/resolver"
"go.unistack.org/micro/v3/router"
"go.unistack.org/micro/v3/selector"
"go.unistack.org/micro/v3/server"
"go.unistack.org/micro/v3/store"
"go.unistack.org/micro/v3/sync"
"go.unistack.org/micro/v3/tracer"
)
func As(b any, target any) bool {

View File

@@ -6,9 +6,8 @@ import (
"reflect"
"testing"
"go.unistack.org/micro/v4/broker"
"go.unistack.org/micro/v4/fsm"
"go.unistack.org/micro/v4/metadata"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/fsm"
)
func TestAs(t *testing.T) {
@@ -62,8 +61,6 @@ func TestAs(t *testing.T) {
}
}
var _ broker.Broker = (*bro)(nil)
type bro struct {
name string
}
@@ -90,18 +87,23 @@ func (p *bro) Connect(_ context.Context) error { return nil }
// Disconnect disconnect from broker
func (p *bro) Disconnect(_ context.Context) error { return nil }
// NewMessage creates new message
func (p *bro) NewMessage(_ context.Context, _ metadata.Metadata, _ interface{}, _ ...broker.MessageOption) (broker.Message, error) {
return nil, nil
}
// Publish message, msg can be single broker.Message or []broker.Message
func (p *bro) Publish(_ context.Context, _ string, _ ...broker.Message) error {
func (p *bro) Publish(_ context.Context, _ string, _ *broker.Message, _ ...broker.PublishOption) error {
return nil
}
// BatchPublish messages to broker with multiple topics
func (p *bro) BatchPublish(_ context.Context, _ []*broker.Message, _ ...broker.PublishOption) error {
return nil
}
// BatchSubscribe subscribes to topic messages via handler
func (p *bro) BatchSubscribe(_ context.Context, _ string, _ broker.BatchHandler, _ ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}
// Subscribe subscribes to topic message via handler
func (p *bro) Subscribe(_ context.Context, _ string, _ interface{}, _ ...broker.SubscribeOption) (broker.Subscriber, error) {
func (p *bro) Subscribe(_ context.Context, _ string, _ broker.Handler, _ ...broker.SubscribeOption) (broker.Subscriber, error) {
return nil, nil
}

55
network/network.go Normal file
View File

@@ -0,0 +1,55 @@
// Package network is for creating internetworks
package network
import (
"go.unistack.org/micro/v3/client"
"go.unistack.org/micro/v3/server"
)
// Error is network node errors
type Error interface {
// Count is current count of errors
Count() int
// Msg is last error message
Msg() string
}
// Status is node status
type Status interface {
// Error reports error status
Error() Error
}
// Node is network node
type Node interface {
// Id is node id
Id() string
// Address is node bind address
Address() string
// Peers returns node peers
Peers() []Node
// Network is the network node is in
Network() Network
// Status returns node status
Status() Status
}
// Network is micro network
type Network interface {
// Node is network node
Node
// Initialise options
Init(...Option) error
// Options returns the network options
Options() Options
// Name of the network
Name() string
// Connect starts the resolver and tunnel server
Connect() error
// Close stops the tunnel and resolving
Close() error
// Client is micro client
Client() client.Client
// Server is micro server
Server() server.Server
}
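A small sketch of how a caller might walk the graph this interface exposes (purely illustrative; it assumes some concrete Network implementation has already been constructed):

package example

import (
	"fmt"

	"go.unistack.org/micro/v3/network"
)

func printPeers(n network.Network) {
	fmt.Println("network:", n.Name(), "node:", n.Id(), n.Address())
	for _, p := range n.Peers() {
		fmt.Println("  peer:", p.Id(), p.Address())
	}
}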

135
network/options.go Normal file
View File

@@ -0,0 +1,135 @@
package network
import (
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/network/tunnel"
"go.unistack.org/micro/v3/proxy"
"go.unistack.org/micro/v3/router"
"go.unistack.org/micro/v3/tracer"
"go.unistack.org/micro/v3/util/id"
)
// Option func
type Option func(*Options)
// Options configure network
type Options struct {
// Router used for routing
Router router.Router
// Proxy holds the proxy
Proxy proxy.Proxy
// Logger used for logging
Logger logger.Logger
// Meter used for metrics
Meter meter.Meter
// Tracer used for tracing
Tracer tracer.Tracer
// Tunnel used for transfer data
Tunnel tunnel.Tunnel
// ID of the node
ID string
// Name of the network
Name string
// Address to bind to
Address string
// Advertise sets the address to advertise
Advertise string
// Nodes is a list of nodes to connect to
Nodes []string
}
// ID sets the id of the network node
func ID(id string) Option {
return func(o *Options) {
o.ID = id
}
}
// Name sets the network name
func Name(n string) Option {
return func(o *Options) {
o.Name = n
}
}
// Address sets the network address
func Address(a string) Option {
return func(o *Options) {
o.Address = a
}
}
// Advertise sets the address to advertise
func Advertise(a string) Option {
return func(o *Options) {
o.Advertise = a
}
}
// Nodes is a list of nodes to connect to
func Nodes(n ...string) Option {
return func(o *Options) {
o.Nodes = n
}
}
// Tunnel sets the network tunnel
func Tunnel(t tunnel.Tunnel) Option {
return func(o *Options) {
o.Tunnel = t
}
}
// Router sets the network router
func Router(r router.Router) Option {
return func(o *Options) {
o.Router = r
}
}
// Proxy sets the network proxy
func Proxy(p proxy.Proxy) Option {
return func(o *Options) {
o.Proxy = p
}
}
// Logger sets the network logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Meter sets the meter
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Tracer to be used for tracing
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// NewOptions returns network default options
func NewOptions(opts ...Option) Options {
options := Options{
ID: id.MustNew(),
Name: "go.micro",
Address: ":0",
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
}
for _, o := range opts {
o(&options)
}
return options
}
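Typical construction then looks something like this (all values are placeholders):

opts := network.NewOptions(
	network.Name("example.network"),
	network.Address(":8085"),
	network.Advertise("10.0.0.1:8085"),
	network.Nodes("10.0.0.2:8085", "10.0.0.3:8085"),
)
// opts.ID is generated via id.MustNew() unless ID(...) is passed explicitly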

View File

@@ -0,0 +1,34 @@
package transport
import (
"context"
)
type transportKey struct{}
// FromContext get transport from context
func FromContext(ctx context.Context) (Transport, bool) {
if ctx == nil {
return nil, false
}
c, ok := ctx.Value(transportKey{}).(Transport)
return c, ok
}
// NewContext put transport in context
func NewContext(ctx context.Context, c Transport) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, transportKey{}, c)
}
// SetOption returns a function to setup a context with given value
func SetOption(k, v interface{}) Option {
return func(o *Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, k, v)
}
}
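SetOption is how transport implementations pass their own settings through Options.Context; a hypothetical implementation-specific option could be built on it like this (BufferSize and bufferSizeKey are made up for the example):

type bufferSizeKey struct{}

// BufferSize stores a buffer size that a transport implementation could
// read back later via opts.Context.Value(bufferSizeKey{}).
func BufferSize(n int) transport.Option {
	return transport.SetOption(bufferSizeKey{}, n)
}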

258
network/transport/memory.go Normal file
View File

@@ -0,0 +1,258 @@
package transport
import (
"context"
"errors"
"fmt"
"net"
"sync"
"time"
maddr "go.unistack.org/micro/v3/util/addr"
mnet "go.unistack.org/micro/v3/util/net"
"go.unistack.org/micro/v3/util/rand"
)
type memorySocket struct {
ctx context.Context
recv chan *Message
exit chan bool
lexit chan bool
send chan *Message
local string
remote string
timeout time.Duration
sync.RWMutex
}
type memoryClient struct {
*memorySocket
opts DialOptions
}
type memoryListener struct {
lopts ListenOptions
ctx context.Context
exit chan bool
conn chan *memorySocket
addr string
topts Options
sync.RWMutex
}
type memoryTransport struct {
listeners map[string]*memoryListener
opts Options
sync.RWMutex
}
func (ms *memorySocket) Recv(m *Message) error {
ms.RLock()
defer ms.RUnlock()
ctx := ms.ctx
if ms.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ms.ctx, ms.timeout)
defer cancel()
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ms.exit:
return errors.New("connection closed")
case <-ms.lexit:
return errors.New("server connection closed")
case cm := <-ms.recv:
*m = *cm
}
return nil
}
func (ms *memorySocket) Local() string {
return ms.local
}
func (ms *memorySocket) Remote() string {
return ms.remote
}
func (ms *memorySocket) Send(m *Message) error {
ms.RLock()
defer ms.RUnlock()
ctx := ms.ctx
if ms.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ms.ctx, ms.timeout)
defer cancel()
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ms.exit:
return errors.New("connection closed")
case <-ms.lexit:
return errors.New("server connection closed")
case ms.send <- m:
}
return nil
}
func (ms *memorySocket) Close() error {
ms.Lock()
defer ms.Unlock()
select {
case <-ms.exit:
return nil
default:
close(ms.exit)
}
return nil
}
func (m *memoryListener) Addr() string {
return m.addr
}
func (m *memoryListener) Close() error {
m.Lock()
defer m.Unlock()
select {
case <-m.exit:
return nil
default:
close(m.exit)
}
return nil
}
func (m *memoryListener) Accept(fn func(Socket)) error {
for {
select {
case <-m.exit:
return nil
case c := <-m.conn:
go fn(&memorySocket{
lexit: c.lexit,
exit: c.exit,
send: c.recv,
recv: c.send,
local: c.Remote(),
remote: c.Local(),
timeout: m.topts.Timeout,
ctx: m.topts.Context,
})
}
}
}
func (m *memoryTransport) Dial(ctx context.Context, addr string, opts ...DialOption) (Client, error) {
m.RLock()
defer m.RUnlock()
listener, ok := m.listeners[addr]
if !ok {
return nil, errors.New("could not dial " + addr)
}
options := NewDialOptions(opts...)
client := &memoryClient{
&memorySocket{
send: make(chan *Message),
recv: make(chan *Message),
exit: make(chan bool),
lexit: listener.exit,
local: addr,
remote: addr,
timeout: m.opts.Timeout,
ctx: m.opts.Context,
},
options,
}
// pseudo connect
select {
case <-listener.exit:
return nil, errors.New("connection error")
case listener.conn <- client.memorySocket:
}
return client, nil
}
func (m *memoryTransport) Listen(ctx context.Context, addr string, opts ...ListenOption) (Listener, error) {
m.Lock()
defer m.Unlock()
options := NewListenOptions(opts...)
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
addr, err = maddr.Extract(host)
if err != nil {
return nil, err
}
// if zero port then randomly assign one
if len(port) > 0 && port == "0" {
var rng rand.Rand
i := rng.Intn(20000)
port = fmt.Sprintf("%d", 10000+i)
}
// set addr with port
addr = mnet.HostPort(addr, port)
if _, ok := m.listeners[addr]; ok {
return nil, errors.New("already listening on " + addr)
}
listener := &memoryListener{
lopts: options,
topts: m.opts,
addr: addr,
conn: make(chan *memorySocket),
exit: make(chan bool),
ctx: m.opts.Context,
}
m.listeners[addr] = listener
return listener, nil
}
func (m *memoryTransport) Init(opts ...Option) error {
for _, o := range opts {
o(&m.opts)
}
return nil
}
func (m *memoryTransport) Options() Options {
return m.opts
}
func (m *memoryTransport) String() string {
return "memory"
}
func (m *memoryTransport) Name() string {
return m.opts.Name
}
// NewTransport returns new memory transport with options
func NewTransport(opts ...Option) Transport {
options := NewOptions(opts...)
return &memoryTransport{
opts: options,
listeners: make(map[string]*memoryListener),
}
}

View File

@@ -0,0 +1,100 @@
package transport
import (
"context"
"os"
"testing"
)
func TestMemoryTransport(t *testing.T) {
tr := NewTransport()
ctx := context.Background()
// bind / listen
l, err := tr.Listen(ctx, "127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
cherr := make(chan error, 1)
// accept
go func() {
if nerr := l.Accept(func(sock Socket) {
for {
var m Message
if rerr := sock.Recv(&m); rerr != nil {
cherr <- rerr
return
}
if len(os.Getenv("INTEGRATION_TESTS")) == 0 {
t.Logf("Server Received %s", string(m.Body))
}
if cerr := sock.Send(&Message{
Body: []byte(`pong`),
}); cerr != nil {
cherr <- cerr
return
}
}
}); nerr != nil {
cherr <- nerr
}
}()
// dial
c, err := tr.Dial(ctx, "127.0.0.1:8080")
if err != nil {
t.Fatalf("Unexpected error dialing %v", err)
}
defer c.Close()
select {
case err := <-cherr:
t.Fatal(err)
default:
// send <=> receive
for i := 0; i < 3; i++ {
if err := c.Send(&Message{
Body: []byte(`ping`),
}); err != nil {
return
}
var m Message
if err := c.Recv(&m); err != nil {
return
}
if len(os.Getenv("INTEGRATION_TESTS")) == 0 {
t.Logf("Client Received %s", string(m.Body))
}
}
}
}
func TestListener(t *testing.T) {
tr := NewTransport()
ctx := context.Background()
// bind / listen on random port
l, err := tr.Listen(ctx, ":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l.Close()
// try again
l2, err := tr.Listen(ctx, ":0")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l2.Close()
// now make sure it still fails
l3, err := tr.Listen(ctx, ":8080")
if err != nil {
t.Fatalf("Unexpected error listening %v", err)
}
defer l3.Close()
if _, err := tr.Listen(ctx, ":8080"); err == nil {
t.Fatal("Expected error binding to :8080 got nil")
}
}

View File

@@ -0,0 +1,175 @@
package transport
import (
"context"
"crypto/tls"
"time"
"go.unistack.org/micro/v3/codec"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/meter"
"go.unistack.org/micro/v3/tracer"
)
// Options struct holds the transport options
type Options struct {
// Meter used for metrics
Meter meter.Meter
// Tracer used for tracing
Tracer tracer.Tracer
// Codec used for marshal/unmarshal messages
Codec codec.Codec
// Logger used for logging
Logger logger.Logger
// Context holds external options
Context context.Context
// TLSConfig holds tls.TLSConfig options
TLSConfig *tls.Config
// Name holds the transport name
Name string
// Addrs holds the transport addrs
Addrs []string
// Timeout holds the timeout
Timeout time.Duration
}
// NewOptions returns new options
func NewOptions(opts ...Option) Options {
options := Options{
Logger: logger.DefaultLogger,
Meter: meter.DefaultMeter,
Tracer: tracer.DefaultTracer,
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
return options
}
// DialOptions struct
type DialOptions struct {
// Context holds the external options
Context context.Context
// Timeout holds the timeout
Timeout time.Duration
// Stream flag
Stream bool
}
// NewDialOptions returns new DialOptions
func NewDialOptions(opts ...DialOption) DialOptions {
options := DialOptions{
Timeout: DefaultDialTimeout,
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
return options
}
// ListenOptions struct
type ListenOptions struct {
// TODO: add tls options when listening
// Currently set in global options
// Context holds the external options
Context context.Context
// TLSConfig holds the *tls.Config options
TLSConfig *tls.Config
}
// NewListenOptions returns new ListenOptions
func NewListenOptions(opts ...ListenOption) ListenOptions {
options := ListenOptions{
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
return options
}
// Addrs to use for transport
func Addrs(addrs ...string) Option {
return func(o *Options) {
o.Addrs = addrs
}
}
// Logger sets the logger
func Logger(l logger.Logger) Option {
return func(o *Options) {
o.Logger = l
}
}
// Meter sets the meter
func Meter(m meter.Meter) Option {
return func(o *Options) {
o.Meter = m
}
}
// Context sets the context
func Context(ctx context.Context) Option {
return func(o *Options) {
o.Context = ctx
}
}
// Codec sets the codec used for encoding where the transport
// does not support message headers
func Codec(c codec.Codec) Option {
return func(o *Options) {
o.Codec = c
}
}
// Timeout sets the timeout for Send/Recv execution
func Timeout(t time.Duration) Option {
return func(o *Options) {
o.Timeout = t
}
}
// TLSConfig to be used for the transport.
func TLSConfig(t *tls.Config) Option {
return func(o *Options) {
o.TLSConfig = t
}
}
// WithStream indicates whether this is a streaming connection
func WithStream() DialOption {
return func(o *DialOptions) {
o.Stream = true
}
}
// WithTimeout used when dialling the remote side
func WithTimeout(d time.Duration) DialOption {
return func(o *DialOptions) {
o.Timeout = d
}
}
// Tracer to be used for tracing
func Tracer(t tracer.Tracer) Option {
return func(o *Options) {
o.Tracer = t
}
}
// Name sets the name
func Name(n string) Option {
return func(o *Options) {
o.Name = n
}
}
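Putting these options together (durations and addresses are placeholders):

opts := transport.NewOptions(
	transport.Name("memory"),
	transport.Addrs(":0"),
	transport.Timeout(5*time.Second),
)

dopts := transport.NewDialOptions(
	transport.WithTimeout(2*time.Second),
	transport.WithStream(),
)
_, _ = opts, dopts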

View File

@@ -0,0 +1,63 @@
// Package transport is an interface for synchronous connection based communication
package transport
import (
"context"
"time"
"go.unistack.org/micro/v3/metadata"
)
var (
// DefaultTransport is the global default transport
DefaultTransport = NewTransport()
// DefaultDialTimeout is the default dial timeout
DefaultDialTimeout = time.Second * 5
)
// Transport is an interface which is used for communication between
// services. It uses connection-based socket send/recv semantics and
// has various implementations: http, grpc, quic.
type Transport interface {
Init(...Option) error
Options() Options
Dial(ctx context.Context, addr string, opts ...DialOption) (Client, error)
Listen(ctx context.Context, addr string, opts ...ListenOption) (Listener, error)
String() string
}
// Message is used to transfer data
type Message struct {
Header metadata.Metadata
Body []byte
}
// Socket is the socket abstraction interface
type Socket interface {
Recv(*Message) error
Send(*Message) error
Close() error
Local() string
Remote() string
}
// Client is the socket owner
type Client interface {
Socket
}
// Listener is the interface for stream-oriented messaging
type Listener interface {
Addr() string
Close() error
Accept(func(Socket)) error
}
// Option is the option signature
type Option func(*Options)
// DialOption is the option signature
type DialOption func(*DialOptions)
// ListenOption is the option signature
type ListenOption func(*ListenOptions)

View File

@@ -0,0 +1,372 @@
// Package broker is a tunnel broker
package broker
import (
"context"
"fmt"
"go.unistack.org/micro/v3/broker"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/network/transport"
"go.unistack.org/micro/v3/network/tunnel"
)
type tunBroker struct {
tunnel tunnel.Tunnel
opts broker.Options
}
type tunSubscriber struct {
listener tunnel.Listener
handler broker.Handler
closed chan bool
topic string
opts broker.SubscribeOptions
}
type tunBatchSubscriber struct {
listener tunnel.Listener
handler broker.BatchHandler
closed chan bool
topic string
opts broker.SubscribeOptions
}
type tunEvent struct {
err error
message *broker.Message
topic string
}
// keys used to access the tunnel and its address from the options context
type (
tunnelKey struct{}
tunnelAddr struct{}
)
func (t *tunBroker) Live() bool {
return true
}
func (t *tunBroker) Ready() bool {
return true
}
func (t *tunBroker) Health() bool {
return true
}
func (t *tunBroker) Init(opts ...broker.Option) error {
for _, o := range opts {
o(&t.opts)
}
return nil
}
func (t *tunBroker) Name() string {
return t.opts.Name
}
func (t *tunBroker) Options() broker.Options {
return t.opts
}
func (t *tunBroker) Address() string {
return t.tunnel.Address()
}
func (t *tunBroker) Connect(ctx context.Context) error {
return t.tunnel.Connect(ctx)
}
func (t *tunBroker) Disconnect(ctx context.Context) error {
return t.tunnel.Close(ctx)
}
func (t *tunBroker) BatchPublish(ctx context.Context, msgs []*broker.Message, _ ...broker.PublishOption) error {
// TODO: this is probably inefficient, we might want to just maintain an open connection
// it may be easier to add broadcast to the tunnel
topicMap := make(map[string]tunnel.Session)
var err error
for _, msg := range msgs {
topic, _ := msg.Header.Get(metadata.HeaderTopic)
c, ok := topicMap[topic]
if !ok {
c, err = t.tunnel.Dial(ctx, topic, tunnel.DialMode(tunnel.Multicast))
if err != nil {
return err
}
defer c.Close()
topicMap[topic] = c
}
if err = c.Send(&transport.Message{
Header: msg.Header,
Body: msg.Body,
}); err != nil {
// msg.SetError(err)
return err
}
}
return nil
}
func (t *tunBroker) Publish(ctx context.Context, topic string, m *broker.Message, _ ...broker.PublishOption) error {
// TODO: this is probably inefficient, we might want to just maintain an open connection
// it may be easier to add broadcast to the tunnel
c, err := t.tunnel.Dial(ctx, topic, tunnel.DialMode(tunnel.Multicast))
if err != nil {
return err
}
defer c.Close()
return c.Send(&transport.Message{
Header: m.Header,
Body: m.Body,
})
}
func (t *tunBroker) BatchSubscribe(ctx context.Context, topic string, h broker.BatchHandler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
l, err := t.tunnel.Listen(ctx, topic, tunnel.ListenMode(tunnel.Multicast))
if err != nil {
return nil, err
}
tunSub := &tunBatchSubscriber{
topic: topic,
handler: h,
opts: broker.NewSubscribeOptions(opts...),
closed: make(chan bool),
listener: l,
}
// start processing
go tunSub.run()
return tunSub, nil
}
func (t *tunBroker) Subscribe(ctx context.Context, topic string, h broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
l, err := t.tunnel.Listen(ctx, topic, tunnel.ListenMode(tunnel.Multicast))
if err != nil {
return nil, err
}
tunSub := &tunSubscriber{
topic: topic,
handler: h,
opts: broker.NewSubscribeOptions(opts...),
closed: make(chan bool),
listener: l,
}
// start processing
go tunSub.run()
return tunSub, nil
}
func (t *tunBroker) String() string {
return "tunnel"
}
func (t *tunBatchSubscriber) run() {
for {
// accept a new connection
c, err := t.listener.Accept()
if err != nil {
select {
case <-t.closed:
return
default:
continue
}
}
// receive message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
if err = c.Close(); err != nil {
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
}
continue
}
// close the connection
c.Close()
evts := broker.Events{&tunEvent{
topic: t.topic,
message: &broker.Message{
Header: m.Header,
Body: m.Body,
},
}}
// handle the message
go func() {
_ = t.handler(evts)
}()
}
}
func (t *tunSubscriber) run() {
for {
// accept a new connection
c, err := t.listener.Accept()
if err != nil {
select {
case <-t.closed:
return
default:
continue
}
}
// receive message
m := new(transport.Message)
if err := c.Recv(m); err != nil {
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
if err = c.Close(); err != nil {
if logger.DefaultLogger.V(logger.ErrorLevel) {
logger.DefaultLogger.Error(t.opts.Context, err.Error(), err)
}
}
continue
}
// close the connection
c.Close()
// handle the message
go func() {
_ = t.handler(&tunEvent{
topic: t.topic,
message: &broker.Message{
Header: m.Header,
Body: m.Body,
},
})
}()
}
}
func (t *tunBatchSubscriber) Options() broker.SubscribeOptions {
return t.opts
}
func (t *tunBatchSubscriber) Topic() string {
return t.topic
}
func (t *tunBatchSubscriber) Unsubscribe(ctx context.Context) error {
select {
case <-t.closed:
return nil
default:
close(t.closed)
return t.listener.Close()
}
}
func (t *tunSubscriber) Options() broker.SubscribeOptions {
return t.opts
}
func (t *tunSubscriber) Topic() string {
return t.topic
}
func (t *tunSubscriber) Unsubscribe(ctx context.Context) error {
select {
case <-t.closed:
return nil
default:
close(t.closed)
return t.listener.Close()
}
}
func (t *tunEvent) Topic() string {
return t.topic
}
func (t *tunEvent) Message() *broker.Message {
return t.message
}
func (t *tunEvent) Ack() error {
return nil
}
func (t *tunEvent) Error() error {
return t.err
}
func (t *tunEvent) SetError(err error) {
t.err = err
}
func (t *tunEvent) Context() context.Context {
return context.TODO()
}
// NewBroker returns new tunnel broker
func NewBroker(opts ...broker.Option) (broker.Broker, error) {
options := broker.NewOptions(opts...)
t, ok := options.Context.Value(tunnelKey{}).(tunnel.Tunnel)
if !ok {
return nil, fmt.Errorf("tunnel not set")
}
a, ok := options.Context.Value(tunnelAddr{}).(string)
if ok {
// initialise address
if err := t.Init(tunnel.Address(a)); err != nil {
return nil, err
}
}
if len(options.Addrs) > 0 {
// initialise nodes
if err := t.Init(tunnel.Nodes(options.Addrs...)); err != nil {
return nil, err
}
}
return &tunBroker{
opts: options,
tunnel: t,
}, nil
}
// WithAddress sets the tunnel address
func WithAddress(a string) broker.Option {
return func(o *broker.Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, tunnelAddr{}, a)
}
}
// WithTunnel sets the internal tunnel
func WithTunnel(t tunnel.Tunnel) broker.Option {
return func(o *broker.Options) {
if o.Context == nil {
o.Context = context.Background()
}
o.Context = context.WithValue(o.Context, tunnelKey{}, t)
}
}
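A hedged usage sketch for this broker follows. The import path for this package, the topic name and the listen address are assumptions for illustration (the diff does not show the file paths); what this change does establish is that NewBroker requires a tunnel injected via WithTunnel and optionally re-initialises it with WithAddress.

package main

import (
	"context"

	"go.unistack.org/micro/v3/broker"
	"go.unistack.org/micro/v3/network/tunnel"
	tunbroker "go.unistack.org/micro/v3/network/tunnel/broker" // assumed import path
)

// newTunnelBroker wires an already-constructed tunnel into the broker above.
func newTunnelBroker(ctx context.Context, tun tunnel.Tunnel) (broker.Broker, error) {
	b, err := tunbroker.NewBroker(
		tunbroker.WithTunnel(tun),      // required: read from the options context by NewBroker
		tunbroker.WithAddress(":8085"), // optional: triggers tunnel.Init(tunnel.Address(...)), illustrative port
	)
	if err != nil {
		return nil, err
	}
	if err := b.Connect(ctx); err != nil { // connects the underlying tunnel
		return nil, err
	}

	// Subscribe listens on a multicast tunnel session; each accepted connection
	// is turned into a broker.Event by tunSubscriber.run.
	if _, err := b.Subscribe(ctx, "example.topic", func(evt broker.Event) error {
		_ = evt.Message() // handle the payload here
		return evt.Ack()
	}); err != nil {
		return nil, err
	}

	// Publish dials the same multicast topic and sends one framed message.
	msg := &broker.Message{Body: []byte("hello")}
	return b, b.Publish(ctx, "example.topic", msg)
}

Unsubscribe closes the multicast listener and Disconnect tears the tunnel down, mirroring the Connect and Subscribe calls above.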

Some files were not shown because too many files have changed in this diff