Moved to google.golang.org/genproto/googleapis/api/annotations

Fixes #52
Valerio Gheri
2017-03-31 18:01:58 +02:00
parent 024c5a4e4e
commit c40779224f
2037 changed files with 831329 additions and 1854 deletions

vendor/github.com/go-kit/kit/ratelimit/token_bucket.go generated vendored Normal file

@@ -0,0 +1,42 @@
package ratelimit

import (
	"context"
	"errors"
	"time"

	"github.com/juju/ratelimit"

	"github.com/go-kit/kit/endpoint"
)

// ErrLimited is returned in the request path when the rate limiter is
// triggered and the request is rejected.
var ErrLimited = errors.New("rate limit exceeded")

// NewTokenBucketLimiter returns an endpoint.Middleware that acts as a rate
// limiter based on a token-bucket algorithm. Requests that would exceed the
// maximum request rate are simply rejected with an error.
func NewTokenBucketLimiter(tb *ratelimit.Bucket) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			if tb.TakeAvailable(1) == 0 {
				return nil, ErrLimited
			}
			return next(ctx, request)
		}
	}
}

// NewTokenBucketThrottler returns an endpoint.Middleware that acts as a
// request throttler based on a token-bucket algorithm. Requests that would
// exceed the maximum request rate are delayed via the parameterized sleep
// function. By default you may pass time.Sleep.
func NewTokenBucketThrottler(tb *ratelimit.Bucket, sleep func(time.Duration)) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			sleep(tb.Take(1))
			return next(ctx, request)
		}
	}
}
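
For reference, a minimal sketch of how these two middlewares might be wired into a service endpoint. The endpoint body and the rate/burst values below are hypothetical, chosen only to illustrate NewTokenBucketLimiter, NewTokenBucketThrottler, and ErrLimited together with juju/ratelimit's NewBucketWithRate.

package main

import (
	"context"
	"fmt"
	"time"

	jujuratelimit "github.com/juju/ratelimit"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/ratelimit"
)

func main() {
	// Hypothetical endpoint standing in for a real service method.
	var e endpoint.Endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
		return "ok", nil
	}

	// Reject requests beyond ~2 per second, with a burst capacity of 2 (illustrative values).
	limited := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(2, 2))(e)

	// Delay rather than reject excess requests, sleeping with time.Sleep.
	throttled := ratelimit.NewTokenBucketThrottler(jujuratelimit.NewBucketWithRate(2, 2), time.Sleep)(e)

	for i := 0; i < 5; i++ {
		if _, err := limited(context.Background(), struct{}{}); err == ratelimit.ErrLimited {
			fmt.Println("limiter: request rejected")
		}
		if _, err := throttled(context.Background(), struct{}{}); err == nil {
			fmt.Println("throttler: request served, possibly after a delay")
		}
	}
}

The limiter surfaces ErrLimited to the caller immediately, while the throttler trades latency for acceptance; which to use depends on whether backpressure should be visible to clients.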

vendor/github.com/go-kit/kit/ratelimit/token_bucket_test.go generated vendored Normal file

@@ -0,0 +1,55 @@
package ratelimit_test

import (
	"context"
	"math"
	"testing"
	"time"

	jujuratelimit "github.com/juju/ratelimit"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/ratelimit"
)

func TestTokenBucketLimiter(t *testing.T) {
	e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
	for _, n := range []int{1, 2, 100} {
		tb := jujuratelimit.NewBucketWithRate(float64(n), int64(n))
		testLimiter(t, ratelimit.NewTokenBucketLimiter(tb)(e), n)
	}
}

func TestTokenBucketThrottler(t *testing.T) {
	d := time.Duration(0)
	s := func(d0 time.Duration) { d = d0 }

	e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
	e = ratelimit.NewTokenBucketThrottler(jujuratelimit.NewBucketWithRate(1, 1), s)(e)

	// First request should go through with no delay.
	e(context.Background(), struct{}{})
	if want, have := time.Duration(0), d; want != have {
		t.Errorf("want %s, have %s", want, have)
	}

	// Next request should request a ~1s sleep.
	e(context.Background(), struct{}{})
	if want, have, tol := time.Second, d, time.Millisecond; math.Abs(float64(want-have)) > float64(tol) {
		t.Errorf("want %s, have %s", want, have)
	}
}

func testLimiter(t *testing.T, e endpoint.Endpoint, rate int) {
	// First <rate> requests should succeed.
	for i := 0; i < rate; i++ {
		if _, err := e(context.Background(), struct{}{}); err != nil {
			t.Fatalf("rate=%d: request %d/%d failed: %v", rate, i+1, rate, err)
		}
	}

	// Next request should fail.
	if _, err := e(context.Background(), struct{}{}); err != ratelimit.ErrLimited {
		t.Errorf("rate=%d: want %v, have %v", rate, ratelimit.ErrLimited, err)
	}
}