Moved to google.golang.org/genproto/googleapis/api/annotations

Fixes #52
This commit is contained in:
Valerio Gheri
2017-03-31 18:01:58 +02:00
parent 024c5a4e4e
commit c40779224f
2037 changed files with 831329 additions and 1854 deletions

2
vendor/github.com/go-kit/kit/sd/dnssrv/doc.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package dnssrv provides a subscriber implementation for DNS SRV records.
package dnssrv

7
vendor/github.com/go-kit/kit/sd/dnssrv/lookup.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
package dnssrv
import "net"
// Lookup is a function that resolves a DNS SRV record to multiple addresses.
// It has the same signature as net.LookupSRV.
// Within this package it is invoked with empty service and proto arguments,
// passing the full record name directly (see Subscriber.resolve).
type Lookup func(service, proto, name string) (cname string, addrs []*net.SRV, err error)

100
vendor/github.com/go-kit/kit/sd/dnssrv/subscriber.go generated vendored Normal file
View File

@@ -0,0 +1,100 @@
package dnssrv
import (
"fmt"
"net"
"time"
"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/sd"
"github.com/go-kit/kit/sd/cache"
)
// Subscriber yields endpoints taken from the named DNS SRV record. The name is
// resolved on a fixed schedule. Priorities and weights are ignored.
type Subscriber struct {
name string // DNS SRV record name passed to the lookup function
cache *cache.Cache // endpoint cache, refreshed with each successful resolution
logger log.Logger
quit chan struct{} // closed by Stop to terminate the refresh loop
}
// NewSubscriber returns a DNS SRV subscriber that re-resolves the named
// record every ttl, building endpoints via the given factory and logging
// resolution results to the given logger.
func NewSubscriber(
	name string,
	ttl time.Duration,
	factory sd.Factory,
	logger log.Logger,
) *Subscriber {
	// Delegate to the detailed constructor with the standard resolver and
	// a ticker derived from the requested TTL.
	refresh := time.NewTicker(ttl)
	return NewSubscriberDetailed(name, refresh, net.LookupSRV, factory, logger)
}
// NewSubscriberDetailed behaves like NewSubscriber, but lets the caller supply
// the refresh ticker directly (rather than a TTL) and substitute a custom
// lookup function for net.LookupSRV.
func NewSubscriberDetailed(
	name string,
	refresh *time.Ticker,
	lookup Lookup,
	factory sd.Factory,
	logger log.Logger,
) *Subscriber {
	s := &Subscriber{
		name:   name,
		cache:  cache.New(factory, logger),
		logger: logger,
		quit:   make(chan struct{}),
	}

	// Prime the cache with one synchronous resolution before handing the
	// Subscriber back, so Endpoints has data (or a logged failure) from the
	// start. The cache is updated unconditionally: on error, instances is
	// empty and the cache is fresh, so nothing good is lost.
	instances, err := s.resolve(lookup)
	if err != nil {
		logger.Log("name", name, "err", err)
	} else {
		logger.Log("name", name, "instances", len(instances))
	}
	s.cache.Update(instances)

	// Background refresh runs until Stop closes s.quit.
	go s.loop(refresh, lookup)
	return s
}
// Stop terminates the Subscriber.
// It signals the background refresh goroutine to exit by closing quit;
// calling Stop more than once would panic (double close).
func (p *Subscriber) Stop() {
close(p.quit)
}
// loop re-resolves the SRV record on every tick, refreshing the endpoint
// cache, until Stop closes the quit channel. It owns the ticker and stops
// it on exit.
func (p *Subscriber) loop(ticker *time.Ticker, lookup Lookup) {
	defer ticker.Stop()
	for {
		// Case order within select is irrelevant: Go picks uniformly among
		// ready cases, so behavior is unchanged from any other ordering.
		select {
		case <-p.quit:
			return
		case <-ticker.C:
			instances, err := p.resolve(lookup)
			if err != nil {
				p.logger.Log("name", p.name, "err", err)
				continue // don't replace potentially-good with bad
			}
			p.cache.Update(instances)
		}
	}
}
// Endpoints implements the Subscriber interface.
// The error is always nil: endpoints are served from the in-memory cache,
// which is refreshed asynchronously by the loop goroutine.
func (p *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
return p.cache.Endpoints(), nil
}
// resolve performs one SRV lookup of p.name and renders each returned record
// as a "host:port" instance string. On lookup failure it returns an empty
// (non-nil) slice alongside the error.
func (p *Subscriber) resolve(lookup Lookup) ([]string, error) {
	// Empty service and proto pass p.name through as the literal record name.
	_, addrs, err := lookup("", "", p.name)
	if err != nil {
		return []string{}, err
	}
	instances := make([]string, 0, len(addrs))
	for _, record := range addrs {
		hostPort := net.JoinHostPort(record.Target, fmt.Sprint(record.Port))
		instances = append(instances, hostPort)
	}
	return instances, nil
}

View File

@@ -0,0 +1,85 @@
package dnssrv
import (
"io"
"net"
"sync/atomic"
"testing"
"time"
"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
)
// TestRefresh verifies that the subscriber resolves once at construction,
// then re-resolves on each tick, and that the factory is invoked once per
// discovered instance.
func TestRefresh(t *testing.T) {
name := "some.service.internal"
// Stop the real ticker and swap its channel for one we control, so ticks
// can be injected deterministically from the test.
ticker := time.NewTicker(time.Second)
ticker.Stop()
tickc := make(chan time.Time)
ticker.C = tickc
// lookups/generates are read and written from both the test goroutine and
// the subscriber's refresh goroutine, hence the atomics.
var lookups uint64
records := []*net.SRV{}
lookup := func(service, proto, name string) (string, []*net.SRV, error) {
t.Logf("lookup(%q, %q, %q)", service, proto, name)
atomic.AddUint64(&lookups, 1)
// Returns whatever the shared records slice currently holds.
return "cname", records, nil
}
var generates uint64
factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
t.Logf("factory(%q)", instance)
atomic.AddUint64(&generates, 1)
return endpoint.Nop, nopCloser{}, nil
}
subscriber := NewSubscriberDetailed(name, ticker, lookup, factory, log.NewNopLogger())
defer subscriber.Stop()
// First lookup, empty
// Construction performs one synchronous resolution: expect 1 lookup,
// 0 endpoints, and 0 factory calls (records is still empty).
endpoints, err := subscriber.Endpoints()
if err != nil {
t.Error(err)
}
if want, have := 0, len(endpoints); want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := uint64(1), atomic.LoadUint64(&lookups); want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := uint64(0), atomic.LoadUint64(&generates); want != have {
t.Errorf("want %d, have %d", want, have)
}
// Load some records and lookup again
records = []*net.SRV{
{Target: "1.0.0.1", Port: 1001},
{Target: "1.0.0.2", Port: 1002},
{Target: "1.0.0.3", Port: 1003},
}
// Inject a tick to trigger the second resolution in the loop goroutine.
tickc <- time.Now()
// There is a race condition where the subscriber.Endpoints call below
// invokes the cache before it is updated by the tick above.
// TODO(pb): solve by running the read through the loop goroutine.
time.Sleep(100 * time.Millisecond)
endpoints, err = subscriber.Endpoints()
if err != nil {
t.Error(err)
}
// After the tick: 3 endpoints, 2 total lookups, one factory call per record.
if want, have := 3, len(endpoints); want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := uint64(2), atomic.LoadUint64(&lookups); want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := uint64(len(records)), atomic.LoadUint64(&generates); want != have {
t.Errorf("want %d, have %d", want, have)
}
}
// nopCloser satisfies io.Closer with a no-op Close, for use by test factories.
type nopCloser struct{}
func (nopCloser) Close() error { return nil }