Compare commits

17 Commits

| SHA1 |
|---|
| fa636ef6a9 |
| cdb81a9ba3 |
| 413c6cc2f0 |
| f56bd70136 |
| b51b4107a8 |
| 2067c9de6b |
| 3f82cb3ba4 |
| 306b7a3962 |
| a8eda9d58d |
| 7e4477dcb4 |
| d846044fc6 |
| 29d956e74e |
| fcc4faff8a |
| 5df8f83f45 |
| 27fa6e9173 |
| bd55a35dc3 |
| 653bd386cc |
@@ -26,24 +26,24 @@ jobs:

- name: test coverage
run: |
go test -v -cover ./... -coverprofile coverage.out -coverpkg ./...
go test -v -cover ./... -covermode=count -coverprofile coverage.out -coverpkg ./...
go tool cover -func coverage.out -o coverage.out

- name: coverage badge
uses: tj-actions/coverage-badge-go@v1
uses: tj-actions/coverage-badge-go@v2
with:
green: 80
filename: coverage.out

- uses: stefanzweifel/git-auto-commit-action@v4
id: auto-commit-action
name: autocommit
with:
commit_message: Apply Code Coverage Badge
skip_fetch: true
skip_checkout: true
file_pattern: ./README.md

- name: Push Changes
- name: push
if: steps.auto-commit-action.outputs.changes_detected == 'true'
uses: ad-m/github-push-action@master
with:

28 README.md

@@ -1,5 +1,9 @@
# Micro

# Micro

[](https://opensource.org/licenses/Apache-2.0)
[](https://pkg.go.dev/go.unistack.org/micro/v3?tab=overview)
[](https://git.unistack.org/unistack-org/micro/actions?query=workflow%3Abuild+branch%3Av3+event%3Apush)
[](https://goreportcard.com/report/go.unistack.org/micro/v3)

Micro is a standard library for microservices.

@@ -11,30 +15,20 @@ Micro provides the core requirements for distributed systems development includi

Micro abstracts away the details of distributed systems. Here are the main features.

- **Authentication** - Auth is built in as a first class citizen. Authentication and authorization enable secure
zero trust networking by providing every service an identity and certificates. This additionally includes rule
based access control.

- **Dynamic Config** - Load and hot reload dynamic config from anywhere. The config interface provides a way to load application
level config from any source such as env vars, file, etcd. You can merge the sources and even define fallbacks.
level config from any source such as env vars, cmdline, file, consul, vault... You can merge the sources and even define fallbacks.

- **Data Storage** - A simple data store interface to read, write and delete records. It includes support for memory, file and
CockroachDB by default. State and persistence becomes a core requirement beyond prototyping and Micro looks to build that into the framework.
s3. State and persistence becomes a core requirement beyond prototyping and Micro looks to build that into the framework.

- **Service Discovery** - Automatic service registration and name resolution. Service discovery is at the core of micro service
development. When service A needs to speak to service B it needs the location of that service.

- **Load Balancing** - Client side load balancing built on service discovery. Once we have the addresses of any number of instances
of a service we now need a way to decide which node to route to. We use random hashed load balancing to provide even distribution
across the services and retry a different node if there's a problem.

- **Message Encoding** - Dynamic message encoding based on content-type. The client and server will use codecs along with content-type
to seamlessly encode and decode Go types for you. Any variety of messages could be encoded and sent from different clients. The client
and server handle this by default.

- **Transport** - gRPC or http based request/response with support for bidirectional streaming. We provide an abstraction for synchronous communication. A request made to a service will be automatically resolved, load balanced, dialled and streamed.

- **Async Messaging** - PubSub is built in as a first class citizen for asynchronous communication and event driven architectures.
- **Async Messaging** - Pub/Sub is built in as a first class citizen for asynchronous communication and event driven architectures.
Event notifications are a core pattern in micro service development.

- **Synchronization** - Distributed systems are often built in an eventually consistent manner. Support for distributed locking and
@@ -43,10 +37,6 @@ leadership are built in as a Sync interface. When using an eventually consistent
- **Pluggable Interfaces** - Micro makes use of Go interfaces for each system abstraction. Because of this these interfaces
are pluggable and allows Micro to be runtime agnostic.

## Getting Started

To be created.

## License

Micro is Apache 2.0 licensed.

@@ -588,7 +588,6 @@ func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishO

for _, p := range ps {
md := metadata.Copy(omd)
md[metadata.HeaderContentType] = p.ContentType()
topic := p.Topic()
if len(exchange) > 0 {
topic = exchange
@@ -600,6 +599,8 @@ func (n *noopClient) publish(ctx context.Context, ps []Message, opts ...PublishO
md.Set(k, v)
}

md[metadata.HeaderContentType] = p.ContentType()

var body []byte

// passed in raw data
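
The second hunk moves the content-type assignment below the per-message metadata merge loop, so the value taken from the message itself is applied last and is no longer overwritten by merged metadata. A minimal sketch of that ordering effect, using plain string maps rather than the micro metadata type (illustrative only):

```go
package main

import "fmt"

func main() {
	merged := map[string]string{"Content-Type": "application/octet-stream", "X-Id": "1"}

	// set-before-merge: the merged metadata wins (old ordering)
	before := map[string]string{"Content-Type": "application/json"}
	for k, v := range merged {
		before[k] = v
	}

	// set-after-merge: the per-message value wins (new ordering)
	after := map[string]string{}
	for k, v := range merged {
		after[k] = v
	}
	after["Content-Type"] = "application/json"

	fmt.Println(before["Content-Type"]) // application/octet-stream
	fmt.Println(after["Content-Type"])  // application/json
}
```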

@@ -99,6 +99,7 @@ func WithAddFields(fields ...interface{}) Option {
iv, iok := o.Fields[i].(string)
jv, jok := fields[j].(string)
if iok && jok && iv == jv {
o.Fields[i+1] = fields[j+1]
fields = slices.Delete(fields, j, j+2)
}
}
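
The added slices.Delete call drops a key/value pair from the incoming fields once it has overwritten a matching existing key. A standalone sketch of the same dedup pattern on flat key/value slices (hypothetical data, not the logger's actual options struct):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	existing := []interface{}{"key1", "val1", "key2", "val2"}
	incoming := []interface{}{"key1", "val4", "key3", "val3"}

	for i := 0; i < len(existing); i += 2 {
		for j := 0; j < len(incoming); j += 2 {
			iv, iok := existing[i].(string)
			jv, jok := incoming[j].(string)
			if iok && jok && iv == jv {
				existing[i+1] = incoming[j+1]              // overwrite the duplicate key's value
				incoming = slices.Delete(incoming, j, j+2) // drop the consumed pair
			}
		}
	}

	fmt.Println(append(existing, incoming...)) // [key1 val4 key2 val2 key3 val3]
}
```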

@@ -278,7 +278,7 @@ func (s *slogLogger) printLog(ctx context.Context, lvl logger.Level, msg string,
}
}

if (s.opts.AddStacktrace || lvl == logger.FatalLevel) || (s.opts.AddStacktrace && lvl == logger.ErrorLevel) {
if s.opts.AddStacktrace && (lvl == logger.FatalLevel || lvl == logger.ErrorLevel) {
stackInfo := make([]byte, 1024*1024)
if stackSize := runtime.Stack(stackInfo, false); stackSize > 0 {
traceLines := reTrace.Split(string(stackInfo[:stackSize]), -1)
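
The rewritten condition captures a stack trace only when AddStacktrace is enabled and the level is Error or Fatal; the old expression also fired for Fatal logs with AddStacktrace disabled, and for every level once it was enabled. A plain-boolean comparison of the two expressions (outside the logger, for illustration):

```go
package main

import "fmt"

func main() {
	for _, addStacktrace := range []bool{false, true} {
		for _, lvl := range []string{"info", "error", "fatal"} {
			previous := (addStacktrace || lvl == "fatal") || (addStacktrace && lvl == "error")
			updated := addStacktrace && (lvl == "fatal" || lvl == "error")
			fmt.Printf("addStacktrace=%-5v lvl=%-5s previous=%-5v updated=%v\n",
				addStacktrace, lvl, previous, updated)
		}
	}
}
```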

@@ -14,13 +14,14 @@ import (
"github.com/google/uuid"
"go.unistack.org/micro/v3/logger"
"go.unistack.org/micro/v3/metadata"
"go.unistack.org/micro/v3/util/buffer"
)

// always first to have proper check
func TestStacktrace(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(buf),
l := NewLogger(logger.WithLevel(logger.DebugLevel), logger.WithOutput(buf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
@@ -30,11 +31,30 @@ func TestStacktrace(t *testing.T) {

l.Error(ctx, "msg1", errors.New("err"))

if !bytes.Contains(buf.Bytes(), []byte(`slog_test.go:31`)) {
if !bytes.Contains(buf.Bytes(), []byte(`slog_test.go:32`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}

func TestDelayedBuffer(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
dbuf := buffer.NewDelayedBuffer(100, 100*time.Millisecond, buf)
l := NewLogger(logger.WithLevel(logger.ErrorLevel), logger.WithOutput(dbuf),
WithHandlerFunc(slog.NewTextHandler),
logger.WithAddStacktrace(true),
)
if err := l.Init(logger.WithFields("key1", "val1")); err != nil {
t.Fatal(err)
}

l.Error(ctx, "msg1", errors.New("err"))
time.Sleep(120 * time.Millisecond)
if !bytes.Contains(buf.Bytes(), []byte(`key1=val1`)) {
t.Fatalf("logger delayed buffer not works, buf contains: %s", buf.Bytes())
}
}

func TestTime(t *testing.T) {
ctx := context.TODO()
buf := bytes.NewBuffer(nil)
@@ -104,7 +124,7 @@ func TestWithDedupKeysWithAddFields(t *testing.T) {

l.Info(ctx, "msg3")

if !bytes.Contains(buf.Bytes(), []byte(`msg=msg3 key1=val1 key2=val2`)) {
if !bytes.Contains(buf.Bytes(), []byte(`msg=msg3 key1=val4 key2=val3`)) {
t.Fatalf("logger error not works, buf contains: %s", buf.Bytes())
}
}

@@ -89,6 +89,10 @@ func (s *Span) Tracer() tracer.Tracer {
return s.tracer
}

func (s *Span) IsRecording() bool {
return true
}

type Event struct {
name string
labels []interface{}

@@ -120,6 +120,10 @@ func (s *noopSpan) SetStatus(st SpanStatus, msg string) {
s.statusMsg = msg
}

func (s *noopSpan) IsRecording() bool {
return false
}

// NewTracer returns new memory tracer
func NewTracer(opts ...Option) Tracer {
return &noopTracer{

@@ -78,4 +78,6 @@ type Span interface {
TraceID() string
// SpanID returns span id
SpanID() string
// IsRecording returns the recording state of the Span.
IsRecording() bool
}
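
IsRecording lets callers skip attribute work for spans that will never be exported, which is what the noop implementation's `return false` above enables. A self-contained sketch of that guard pattern; the local `recorder` interface mirrors only the IsRecording method and is not the real tracer.Span:

```go
package main

import "fmt"

// recorder mirrors just the IsRecording part of the Span interface.
type recorder interface {
	IsRecording() bool
}

type noop struct{}

func (noop) IsRecording() bool { return false }

type active struct{}

func (active) IsRecording() bool { return true }

// annotate skips building expensive attributes when the span is not recording.
func annotate(sp recorder, payload []byte) {
	if !sp.IsRecording() {
		return // nothing will be exported; avoid the work entirely
	}
	fmt.Printf("attaching %d bytes of attributes\n", len(payload))
}

func main() {
	annotate(noop{}, make([]byte, 1024))   // prints nothing
	annotate(active{}, make([]byte, 1024)) // prints the attribute line
}
```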

@@ -1,27 +0,0 @@
package buf

import (
	"bytes"
	"io"
)

var _ io.Closer = &Buffer{}

// Buffer bytes.Buffer wrapper to satisfie io.Closer interface
type Buffer struct {
	*bytes.Buffer
}

// Close reset buffer contents
func (b *Buffer) Close() error {
	b.Buffer.Reset()
	return nil
}

// New creates new buffer that satisfies Closer interface
func New(b *bytes.Buffer) *Buffer {
	if b == nil {
		b = bytes.NewBuffer(nil)
	}
	return &Buffer{b}
}

85 util/buffer/delayed_buffer.go Normal file

@@ -0,0 +1,85 @@
package buffer

import (
	"io"
	"sync"
	"time"
)

var _ io.WriteCloser = (*DelayedBuffer)(nil)

// DelayedBuffer is the buffer that holds items until either the buffer filled or a specified time limit is reached
type DelayedBuffer struct {
	mu        sync.Mutex
	maxWait   time.Duration
	flushTime time.Time
	buffer    chan []byte
	ticker    *time.Ticker
	w         io.Writer
	err       error
}

func NewDelayedBuffer(size int, maxWait time.Duration, w io.Writer) *DelayedBuffer {
	b := &DelayedBuffer{
		buffer:    make(chan []byte, size),
		ticker:    time.NewTicker(maxWait),
		w:         w,
		flushTime: time.Now(),
		maxWait:   maxWait,
	}
	b.loop()
	return b
}

func (b *DelayedBuffer) loop() {
	go func() {
		for range b.ticker.C {
			b.mu.Lock()
			if time.Since(b.flushTime) > b.maxWait {
				b.flush()
			}
			b.mu.Unlock()
		}
	}()
}

func (b *DelayedBuffer) flush() {
	bufLen := len(b.buffer)
	if bufLen > 0 {
		tmp := make([][]byte, bufLen)
		for i := 0; i < bufLen; i++ {
			tmp[i] = <-b.buffer
		}
		for _, t := range tmp {
			_, b.err = b.w.Write(t)
		}
		b.flushTime = time.Now()
	}
}

func (b *DelayedBuffer) Put(items ...[]byte) {
	b.mu.Lock()
	for _, item := range items {
		select {
		case b.buffer <- item:
		default:
			b.flush()
			b.buffer <- item
		}
	}
	b.mu.Unlock()
}

func (b *DelayedBuffer) Close() error {
	b.mu.Lock()
	b.flush()
	close(b.buffer)
	b.ticker.Stop()
	b.mu.Unlock()
	return b.err
}

func (b *DelayedBuffer) Write(data []byte) (int, error) {
	b.Put(data)
	return len(data), b.err
}
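
A short usage sketch of the new type, relying only on the API shown above (NewDelayedBuffer, Write, Close); the wrapped writer, queue size, and interval are arbitrary choices for illustration, and the import path matches the one added to slog_test.go in this compare:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"go.unistack.org/micro/v3/util/buffer"
)

func main() {
	// Queue writes and flush them to stdout either when the 64-slot
	// queue fills or when the background ticker fires after ~250ms.
	w := buffer.NewDelayedBuffer(64, 250*time.Millisecond, os.Stdout)

	for i := 0; i < 10; i++ {
		if _, err := w.Write([]byte(fmt.Sprintf("line %d\n", i))); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}

	// Close drains whatever is still queued and stops the ticker.
	if err := w.Close(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```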

22 util/buffer/delayed_buffer_test.go Normal file

@@ -0,0 +1,22 @@
package buffer

import (
	"bytes"
	"testing"
	"time"
)

func TestTimedBuffer(t *testing.T) {
	buf := bytes.NewBuffer(nil)
	b := NewDelayedBuffer(100, 300*time.Millisecond, buf)
	for i := 0; i < 100; i++ {
		_, _ = b.Write([]byte(`test`))
	}
	if buf.Len() != 0 {
		t.Fatal("delayed write not worked")
	}
	time.Sleep(400 * time.Millisecond)
	if buf.Len() == 0 {
		t.Fatal("delayed write not worked")
	}
}

78 util/buffer/seeker_buffer.go Normal file

@@ -0,0 +1,78 @@
package buffer

import "io"

var _ interface {
	io.ReadCloser
	io.ReadSeeker
} = (*SeekerBuffer)(nil)

// Buffer is a ReadWriteCloser that supports seeking. It's intended to
// replicate the functionality of bytes.Buffer that I use in my projects.
//
// Note that the seeking is limited to the read marker; all writes are
// append-only.
type SeekerBuffer struct {
	data []byte
	pos  int64
}

func NewSeekerBuffer(data []byte) *SeekerBuffer {
	return &SeekerBuffer{
		data: data,
	}
}

func (b *SeekerBuffer) Read(p []byte) (int, error) {
	if b.pos >= int64(len(b.data)) {
		return 0, io.EOF
	}

	n := copy(p, b.data[b.pos:])
	b.pos += int64(n)
	return n, nil
}

func (b *SeekerBuffer) Write(p []byte) (int, error) {
	b.data = append(b.data, p...)
	return len(p), nil
}

// Seek sets the read pointer to pos.
func (b *SeekerBuffer) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		b.pos = offset
	case io.SeekEnd:
		b.pos = int64(len(b.data)) + offset
	case io.SeekCurrent:
		b.pos += offset
	}

	return b.pos, nil
}

// Rewind resets the read pointer to 0.
func (b *SeekerBuffer) Rewind() error {
	if _, err := b.Seek(0, io.SeekStart); err != nil {
		return err
	}
	return nil
}

// Close clears all the data out of the buffer and sets the read position to 0.
func (b *SeekerBuffer) Close() error {
	b.data = nil
	b.pos = 0
	return nil
}

// Len returns the length of data remaining to be read.
func (b *SeekerBuffer) Len() int {
	return len(b.data[b.pos:])
}

// Bytes returns the underlying bytes from the current position.
func (b *SeekerBuffer) Bytes() []byte {
	return b.data[b.pos:]
}
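
And a usage sketch for SeekerBuffer, again assuming only the methods defined above: writes append, while Seek and Rewind move the read marker. The data here is arbitrary:

```go
package main

import (
	"fmt"
	"io"

	"go.unistack.org/micro/v3/util/buffer"
)

func main() {
	buf := buffer.NewSeekerBuffer([]byte("hello, "))
	_, _ = buf.Write([]byte("world!")) // writes are append-only

	head := make([]byte, 5)
	_, _ = io.ReadFull(buf, head)
	fmt.Printf("%s\n", head) // hello

	// Seeking only moves the read marker; go back and read everything.
	_, _ = buf.Seek(0, io.SeekStart)
	rest, _ := io.ReadAll(buf)
	fmt.Printf("%s\n", rest) // hello, world!
}
```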

55 util/buffer/seeker_buffer_test.go Normal file

@@ -0,0 +1,55 @@
package buffer

import (
	"fmt"
	"strings"
	"testing"
)

func noErrorT(t *testing.T, err error) {
	if nil != err {
		t.Fatalf("%s", err)
	}
}

func boolT(t *testing.T, cond bool, s ...string) {
	if !cond {
		what := strings.Join(s, ", ")
		if len(what) > 0 {
			what = ": " + what
		}
		t.Fatalf("assert.Bool failed%s", what)
	}
}

func TestSeeking(t *testing.T) {
	partA := []byte("hello, ")
	partB := []byte("world!")

	buf := NewSeekerBuffer(partA)

	boolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))

	b := make([]byte, 32)

	n, err := buf.Read(b)
	noErrorT(t, err)
	boolT(t, buf.Len() == 0, fmt.Sprintf("after reading 1: have length %d, want length 0", buf.Len()))
	boolT(t, n == len(partA), fmt.Sprintf("after reading 2: have length %d, want length %d", n, len(partA)))

	n, err = buf.Write(partB)
	noErrorT(t, err)
	boolT(t, n == len(partB), fmt.Sprintf("after writing: have length %d, want length %d", n, len(partB)))

	n, err = buf.Read(b)
	noErrorT(t, err)
	boolT(t, buf.Len() == 0, fmt.Sprintf("after rereading 1: have length %d, want length 0", buf.Len()))
	boolT(t, n == len(partB), fmt.Sprintf("after rereading 2: have length %d, want length %d", n, len(partB)))

	partsLen := len(partA) + len(partB)
	_ = buf.Rewind()
	boolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))

	buf.Close()
	boolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))
}

@@ -35,11 +35,11 @@ func TestUnmarshalYAML(t *testing.T) {
t.Fatalf("invalid duration %v != 10000000", v.TTL)
}

err = yaml.Unmarshal([]byte(`{"ttl":"1y"}`), v)
err = yaml.Unmarshal([]byte(`{"ttl":"1d"}`), v)
if err != nil {
t.Fatal(err)
} else if *(v.TTL) != 31622400000000000 {
t.Fatalf("invalid duration %v != 31622400000000000", v.TTL)
} else if *(v.TTL) != 86400000000000 {
t.Fatalf("invalid duration %v != 86400000000000", *v.TTL)
}
}

@@ -68,11 +68,11 @@ func TestUnmarshalJSON(t *testing.T) {
t.Fatalf("invalid duration %v != 10000000", v.TTL)
}

err = json.Unmarshal([]byte(`{"ttl":"1y"}`), v)
err = json.Unmarshal([]byte(`{"ttl":"1d"}`), v)
if err != nil {
t.Fatal(err)
} else if v.TTL != 31622400000000000 {
t.Fatalf("invalid duration %v != 31622400000000000", v.TTL)
} else if v.TTL != 86400000000000 {
t.Fatalf("invalid duration %v != 86400000000000", v.TTL)
}
}

@@ -87,11 +87,11 @@ func TestParseDuration(t *testing.T) {
if td.String() != "340h0m0s" {
t.Fatalf("ParseDuration 14d != 340h0m0s : %s", td.String())
}
td, err = ParseDuration("1y")
td, err = ParseDuration("1d")
if err != nil {
t.Fatalf("ParseDuration error: %v", err)
}
if td.String() != "8784h0m0s" {
t.Fatalf("ParseDuration 1y != 8784h0m0s : %s", td.String())
if td.String() != "24h0m0s" {
t.Fatalf("ParseDuration 1d != 24h0m0s : %s", td.String())
}
}
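
The new expected values follow from 1d = 24h: 24h is 86,400 s, i.e. 86,400,000,000,000 ns, while the old 1y expectation (8784h, a 366-day leap year) was 31,622,400,000,000,000 ns. A quick standard-library check of those constants:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	day := 24 * time.Hour
	fmt.Println(int64(day), day.String()) // 86400000000000 24h0m0s

	leapYear := 8784 * time.Hour // 366 * 24h, the old "1y" expectation
	fmt.Println(int64(leapYear)) // 31622400000000000
}
```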