package cache

import (
	"log"
	"sync"
	"time"

	"github.com/micro/go-micro/registry"
	"github.com/micro/go-micro/selector"
)

/*
Cache selector is a selector which uses the registry.Watcher to cache service entries.
It defaults to a TTL of 1 minute; an expired entry causes a cache miss on the next request.
*/
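
// Usage sketch (illustrative, not part of the original source). It assumes the
// selector.Registry option and selector.Next semantics from the parent go-micro
// packages; the service name "greeter" is a placeholder.
//
//	s := NewSelector(selector.Registry(registry.DefaultRegistry))
//	defer s.Close()
//
//	next, err := s.Select("greeter")
//	if err != nil {
//		// e.g. selector.ErrNoneAvailable when nothing matches
//	}
//	node, err := next()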

type cacheSelector struct {
	so  selector.Options
	ttl time.Duration

	// registry cache
	sync.Mutex
	cache map[string][]*registry.Service
	ttls  map[string]time.Time

	// used to close or reload watcher
	reload chan bool
	exit   chan bool
}

var (
	DefaultTTL = time.Minute
)

func (c *cacheSelector) quit() bool {
	select {
	case <-c.exit:
		return true
	default:
		return false
	}
}

// cp copies a service. Because we're caching, handing back pointers would
// create a race condition, so we copy everything instead.
// It's fast enough.
func (c *cacheSelector) cp(current []*registry.Service) []*registry.Service {
	var services []*registry.Service

	for _, service := range current {
		// copy service
		s := new(registry.Service)
		*s = *service

		// copy nodes
		var nodes []*registry.Node
		for _, node := range service.Nodes {
			n := new(registry.Node)
			*n = *node
			nodes = append(nodes, n)
		}
		s.Nodes = nodes

		// copy endpoints
		var eps []*registry.Endpoint
		for _, ep := range service.Endpoints {
			e := new(registry.Endpoint)
			*e = *ep
			eps = append(eps, e)
		}
		s.Endpoints = eps

		// append service
		services = append(services, s)
	}

	return services
}

func (c *cacheSelector) del(service string) {
	delete(c.cache, service)
	delete(c.ttls, service)
}

func (c *cacheSelector) get(service string) ([]*registry.Service, error) {
	c.Lock()
	defer c.Unlock()

	// check the cache first
	services, ok := c.cache[service]
	expiry, kk := c.ttls[service]

	// got results, copy and return
	if ok && len(services) > 0 {
		// only return if the entry hasn't expired yet
		if kk && time.Now().Before(expiry) {
			return c.cp(services), nil
		}
	}

	// cache miss or ttl expired

	// now ask the registry
	services, err := c.so.Registry.GetService(service)
	if err != nil {
		return nil, err
	}

	// cache a copy of the registry results
	c.cache[service] = c.cp(services)
	c.ttls[service] = time.Now().Add(c.ttl)
	return services, nil
}

func (c *cacheSelector) set(service string, services []*registry.Service) {
	c.cache[service] = services
	c.ttls[service] = time.Now().Add(c.ttl)
}
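
// update applies a registry watch event to the cached entries for a service.
// Illustrative summary (not in the original comments): a "create"/"update"
// result merges the event's nodes into the cached service of the same version,
// while a "delete" result removes the listed nodes and drops the version (or
// the whole cache entry) once nothing is left.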
func (c *cacheSelector) update(res *registry.Result) {
	if res == nil || res.Service == nil {
		return
	}

	c.Lock()
	defer c.Unlock()

	services, ok := c.cache[res.Service.Name]
	if !ok {
		// we're not going to cache anything
		// unless there was already a lookup
		return
	}

	if len(res.Service.Nodes) == 0 {
		switch res.Action {
		case "delete":
			c.del(res.Service.Name)
		}
		return
	}

	// existing service found
	var service *registry.Service
	var index int
	for i, s := range services {
		if s.Version == res.Service.Version {
			service = s
			index = i
		}
	}

	switch res.Action {
	case "create", "update":
		if service == nil {
			c.set(res.Service.Name, append(services, res.Service))
			return
		}

		// append old nodes to new service
		for _, cur := range service.Nodes {
			var seen bool
			for _, node := range res.Service.Nodes {
				if cur.Id == node.Id {
					seen = true
					break
				}
			}
			if !seen {
				res.Service.Nodes = append(res.Service.Nodes, cur)
			}
		}

		services[index] = res.Service
		c.set(res.Service.Name, services)
	case "delete":
		if service == nil {
			return
		}

		var nodes []*registry.Node

		// filter cur nodes to remove the dead one
		for _, cur := range service.Nodes {
			var seen bool
			for _, del := range res.Service.Nodes {
				if del.Id == cur.Id {
					seen = true
					break
				}
			}
			if !seen {
				nodes = append(nodes, cur)
			}
		}

		// still got nodes, save and return
		if len(nodes) > 0 {
			service.Nodes = nodes
			services[index] = service
			c.set(service.Name, services)
			return
		}

		// zero nodes left

		// only have one thing to delete
		// nuke the thing
		if len(services) == 1 {
			c.del(service.Name)
			return
		}

		// still have more than 1 service
		// check the version and keep what we know
		var srvs []*registry.Service
		for _, s := range services {
			if s.Version != service.Version {
				srvs = append(srvs, s)
			}
		}

		// save
		c.set(service.Name, srvs)
	}
}

// run starts the cache watcher loop
// it creates a new watcher if there's a problem
// reloads the watcher if Init is called
// and returns when Close is called
func (c *cacheSelector) run() {
	go c.tick()

	for {
		// exit early if already dead
		if c.quit() {
			return
		}

		// create new watcher
		w, err := c.so.Registry.Watch()
		if err != nil {
			log.Println(err)
			time.Sleep(time.Second)
			continue
		}

		// watch for events
		if err := c.watch(w); err != nil {
			log.Println(err)
			continue
		}
	}
}

// check cache and expire on each tick
func (c *cacheSelector) tick() {
	t := time.NewTicker(time.Minute)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			c.Lock()
			for service, expiry := range c.ttls {
				if time.Now().After(expiry) {
					// TODO: maybe refresh the cache rather than blowing it away
					c.del(service)
				}
			}
			c.Unlock()
		case <-c.exit:
			return
		}
	}
}

// watch loops the next event and calls update
// it returns if there's an error
func (c *cacheSelector) watch(w registry.Watcher) error {
	defer w.Stop()

	// manage this loop
	go func() {
		// wait for exit or reload signal
		select {
		case <-c.exit:
		case <-c.reload:
		}

		// stop the watcher
		w.Stop()
	}()

	for {
		res, err := w.Next()
		if err != nil {
			return err
		}
		c.update(res)
	}
}

func (c *cacheSelector) Init(opts ...selector.Option) error {
	for _, o := range opts {
		o(&c.so)
	}

	// reload the watcher
	go func() {
		select {
		case <-c.exit:
			return
		default:
			c.reload <- true
		}
	}()

	return nil
}

func (c *cacheSelector) Options() selector.Options {
	return c.so
}

func (c *cacheSelector) Select(service string, opts ...selector.SelectOption) (selector.Next, error) {
	sopts := selector.SelectOptions{
		Strategy: c.so.Strategy,
	}

	for _, opt := range opts {
		opt(&sopts)
	}

	// get the service
	// try the cache first
	// if that fails go directly to the registry
	services, err := c.get(service)
	if err != nil {
		return nil, err
	}

	// apply the filters
	for _, filter := range sopts.Filters {
		services = filter(services)
	}

	// if there's nothing left, return
	if len(services) == 0 {
		return nil, selector.ErrNoneAvailable
	}

	return sopts.Strategy(services), nil
}
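
// Per-call options sketch (illustrative, not part of the original source). It
// assumes the parent selector package exposes WithFilter/WithStrategy select
// options and a RoundRobin strategy, as go-micro does; myFilter is a
// hypothetical selector.Filter:
//
//	next, err := s.Select("greeter",
//		selector.WithFilter(myFilter),
//		selector.WithStrategy(selector.RoundRobin),
//	)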

// Mark is a noop here: the cache selector does not track node health.
func (c *cacheSelector) Mark(service string, node *registry.Node, err error) {
}

// Reset is a noop here for the same reason.
func (c *cacheSelector) Reset(service string) {
}

// Close stops the watcher and destroys the cache
func (c *cacheSelector) Close() error {
	c.Lock()
	c.cache = make(map[string][]*registry.Service)
	c.Unlock()

	select {
	case <-c.exit:
		return nil
	default:
		close(c.exit)
	}
	return nil
}

func (c *cacheSelector) String() string {
	return "cache"
}
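
// The constructor below reads an optional TTL from the selector Options'
// Context under ttlKey{}, which is defined elsewhere in this package. The
// sketch that follows is an assumption about how such a TTL option could be
// wired up (using a context-based option as is conventional in go-micro); it
// is not code from this file and the real option may differ:
//
//	func TTL(t time.Duration) selector.Option {
//		return func(o *selector.Options) {
//			if o.Context == nil {
//				o.Context = context.Background()
//			}
//			o.Context = context.WithValue(o.Context, ttlKey{}, t)
//		}
//	}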

func NewSelector(opts ...selector.Option) selector.Selector {
	sopts := selector.Options{
		Strategy: selector.Random,
	}

	for _, opt := range opts {
		opt(&sopts)
	}

	if sopts.Registry == nil {
		sopts.Registry = registry.DefaultRegistry
	}

	ttl := DefaultTTL

	if sopts.Context != nil {
		if t, ok := sopts.Context.Value(ttlKey{}).(time.Duration); ok {
			ttl = t
		}
	}

	c := &cacheSelector{
		so:     sopts,
		ttl:    ttl,
		cache:  make(map[string][]*registry.Service),
		ttls:   make(map[string]time.Time),
		reload: make(chan bool, 1),
		exit:   make(chan bool),
	}

	go c.run()
	return c
}