package mucp

import (
	"errors"
	"fmt"
	"hash/fnv"
	"io"
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/micro/go-micro/v3/client"
	cmucp "github.com/micro/go-micro/v3/client/mucp"
	"github.com/micro/go-micro/v3/logger"
	"github.com/micro/go-micro/v3/network"
	pb "github.com/micro/go-micro/v3/network/mucp/proto"
	"github.com/micro/go-micro/v3/network/resolver/dns"
	"github.com/micro/go-micro/v3/proxy"
	"github.com/micro/go-micro/v3/router"
	"github.com/micro/go-micro/v3/server"
	smucp "github.com/micro/go-micro/v3/server/mucp"
	"github.com/micro/go-micro/v3/transport"
	"github.com/micro/go-micro/v3/tunnel"
	bun "github.com/micro/go-micro/v3/tunnel/broker"
	tun "github.com/micro/go-micro/v3/tunnel/transport"
	"github.com/micro/go-micro/v3/util/backoff"
)

var (
	// DefaultName is default network name
	DefaultName = "go.micro"
	// DefaultAddress is default network address
	DefaultAddress = ":0"
	// ResolveTime defines time interval to periodically resolve network nodes
	ResolveTime = 1 * time.Minute
	// AnnounceTime defines time interval to periodically announce node neighbours
	AnnounceTime = 1 * time.Second
	// KeepAliveTime is the time in which we want to have sent a message to a peer
	KeepAliveTime = 30 * time.Second
	// SyncTime is the time a network node requests full sync from the network
	SyncTime = 1 * time.Minute
	// PruneTime defines time interval to periodically check nodes that need to be pruned
	// due to their not announcing their presence within this time interval
	PruneTime = 90 * time.Second
	// MaxDepth defines max depth of peer topology
	MaxDepth uint = 3
	// NetworkChannel is the name of the tunnel channel for passing network messages
	NetworkChannel = "network"
	// ControlChannel is the name of the tunnel channel for passing control messages
	ControlChannel = "control"
	// DefaultLink is default network link
	DefaultLink = "network"
	// MaxConnections is the max number of network client connections
	MaxConnections = 3
	// MaxPeerErrors is the max number of peer errors before we remove it from network graph
	MaxPeerErrors = 3
	// ErrPeerExists is returned when adding a peer which already exists
	ErrPeerExists = errors.New("peer already exists")
	// ErrPeerNotFound is returned when a peer could not be found in node topology
	ErrPeerNotFound = errors.New("peer not found")
	// ErrClientNotFound is returned when client for tunnel channel could not be found
	ErrClientNotFound = errors.New("client not found")
	// ErrPeerLinkNotFound is returned when peer link could not be found in tunnel Links
	ErrPeerLinkNotFound = errors.New("peer link not found")
	// ErrPeerMaxExceeded is returned when peer has reached its max error count limit
	ErrPeerMaxExceeded = errors.New("peer max errors exceeded")
)

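// Taken together, these defaults mean a node checks every second (AnnounceTime)
// whether peers need a fresh announcement, re-sends to peers whose link has
// been idle for more than 30 seconds (KeepAliveTime), requests a full sync
// roughly once a minute (SyncTime), and prunes peers that have not announced
// themselves within 90 seconds (PruneTime).
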
// network implements Network interface
type mucpNetwork struct {
	// node is network node
	*node
	// options configure the network
	options network.Options
	// router is network router
	router router.Router
	// proxy is network proxy
	proxy proxy.Proxy
	// tunnel is network tunnel
	tunnel tunnel.Tunnel
	// server is network server
	server server.Server
	// client is network client
	client client.Client

	// tunClient is a map of tunnel channel clients
	tunClient map[string]tunnel.Session
	// peerLinks is a map of links for each peer
	peerLinks map[string]tunnel.Link

	sync.RWMutex
	// connected marks the network as connected
	connected bool
	// closed closes the network
	closed chan bool
	// discovered is whether we've been discovered by the network
	discovered chan bool
}

// message is network message
type message struct {
	// msg is transport message
	msg *transport.Message
	// session is tunnel session
	session tunnel.Session
}

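// A minimal usage sketch (illustrative only: the option constructors and the
// Connect/Close methods are assumed from the network package interfaces this
// type implements; they are not defined in this file):
//
//	net := NewNetwork(
//		network.Name(DefaultName),
//		network.Address(DefaultAddress),
//	)
//	if err := net.Connect(); err != nil {
//		// handle the error
//	}
//	defer net.Close()
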
// NewNetwork returns a new network node
func NewNetwork(opts ...network.Option) network.Network {
	// create default options
	options := network.DefaultOptions()
	// initialize network options
	for _, o := range opts {
		o(&options)
	}

	// set the address to a hashed address
	hasher := fnv.New64()
	hasher.Write([]byte(options.Address + options.Id))
	address := fmt.Sprintf("%d", hasher.Sum64())

	// set the address to advertise
	var advertise string
	var peerAddress string

	if len(options.Advertise) > 0 {
		advertise = options.Advertise
		peerAddress = options.Advertise
	} else {
		advertise = options.Address
		peerAddress = address
	}

	// init tunnel address to the network bind address
	options.Tunnel.Init(
		tunnel.Address(options.Address),
	)

	// init router Id to the network id
	options.Router.Init(
		router.Id(options.Id),
		router.Address(peerAddress),
	)

	// create tunnel client with tunnel transport
	tunTransport := tun.NewTransport(
		tun.WithTunnel(options.Tunnel),
	)

	// create the tunnel broker
	tunBroker := bun.NewBroker(
		bun.WithTunnel(options.Tunnel),
	)

	// server is network server
	server := smucp.NewServer(
		server.Id(options.Id),
		server.Address(peerAddress),
		server.Advertise(advertise),
		server.Name(options.Name),
		server.Transport(tunTransport),
		server.Broker(tunBroker),
	)

	// client is network client
	client := cmucp.NewClient(
		client.Broker(tunBroker),
		client.Transport(tunTransport),
		client.Router(options.Router),
	)

	network := &mucpNetwork{
		node: &node{
			id:      options.Id,
			address: peerAddress,
			peers:   make(map[string]*node),
			status:  newStatus(),
		},
		options:    options,
		router:     options.Router,
		proxy:      options.Proxy,
		tunnel:     options.Tunnel,
		server:     server,
		client:     client,
		tunClient:  make(map[string]tunnel.Session),
		peerLinks:  make(map[string]tunnel.Link),
		discovered: make(chan bool, 1),
	}

	network.node.network = network

	return network
}

func (n *mucpNetwork) Init(opts ...network.Option) error {
	n.Lock()
	defer n.Unlock()

	// TODO: maybe only allow reinit of certain opts
	for _, o := range opts {
		o(&n.options)
	}

	return nil
}

// Options returns network options
func (n *mucpNetwork) Options() network.Options {
	n.RLock()
	defer n.RUnlock()

	options := n.options

	return options
}

// Name returns network name
func (n *mucpNetwork) Name() string {
	n.RLock()
	defer n.RUnlock()

	name := n.options.Name

	return name
}

// acceptNetConn accepts connections from NetworkChannel
func (n *mucpNetwork) acceptNetConn(l tunnel.Listener, recv chan *message) {
	var i int
	for {
		// accept a connection
		conn, err := l.Accept()
		if err != nil {
			sleep := backoff.Do(i)
			logger.Debugf("Network tunnel [%s] accept error: %v, backing off for %v", NetworkChannel, err, sleep)
			time.Sleep(sleep)
			i++
			continue
		}

		select {
		case <-n.closed:
			if err := conn.Close(); err != nil {
				logger.Debugf("Network tunnel [%s] failed to close connection: %v", NetworkChannel, err)
			}
			return
		default:
			// go handle NetworkChannel connection
			go n.handleNetConn(conn, recv)
		}
	}
}

// acceptCtrlConn accepts connections from ControlChannel
func (n *mucpNetwork) acceptCtrlConn(l tunnel.Listener, recv chan *message) {
	var i int
	for {
		// accept a connection
		conn, err := l.Accept()
		if err != nil {
			sleep := backoff.Do(i)
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network tunnel [%s] accept error: %v, backing off for %v", ControlChannel, err, sleep)
			}
			time.Sleep(sleep)
			i++
			continue
		}

		select {
		case <-n.closed:
			if err := conn.Close(); err != nil {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network tunnel [%s] failed to close connection: %v", ControlChannel, err)
				}
			}
			return
		default:
			// go handle ControlChannel connection
			go n.handleCtrlConn(conn, recv)
		}
	}
}

// maskRoute will mask the route so that we apply the right values
func (n *mucpNetwork) maskRoute(r *pb.Route) {
	hasher := fnv.New64()
	// the routes service address
	address := r.Address

	// only hash the address if we're advertising our own local routes
	// avoid hashing * based routes
	if r.Router == n.Id() && r.Address != "*" {
		// hash the service before advertising it
		hasher.Reset()
		// routes for multiple instances of a service will be collapsed here.
		// TODO: once we store labels in the table this may need to change
		// to include the labels in case they differ but highly unlikely
		hasher.Write([]byte(r.Service + n.Address()))
		address = fmt.Sprintf("%d", hasher.Sum64())
	}

	// calculate route metric to advertise
	metric := n.getRouteMetric(r.Router, r.Gateway, r.Link)

	// NOTE: we override Gateway, Link and Address here
	r.Address = address
	r.Gateway = n.Address()
	r.Link = DefaultLink
	r.Metric = metric
}
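
// For example (illustrative values): a route for local service "greeter" is
// re-advertised with address fnv64("greeter" + <this node's address>), so the
// routes of multiple local instances of the same service collapse into one
// advertised route whose gateway is this node and whose link is DefaultLink.
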
// advertise advertises routes to the network
func (n *mucpNetwork) advertise(advertChan <-chan *router.Advert) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		select {
		// process local adverts and randomly fire them at other nodes
		case advert := <-advertChan:
			// create a proto advert
			var events []*pb.Event

			for _, event := range advert.Events {
				// make a copy of the route
				route := &pb.Route{
					Service: event.Route.Service,
					Address: event.Route.Address,
					Gateway: event.Route.Gateway,
					Network: event.Route.Network,
					Router:  event.Route.Router,
					Link:    event.Route.Link,
					Metric:  event.Route.Metric,
				}

				// override the various values
				n.maskRoute(route)

				e := &pb.Event{
					Type:      pb.EventType(event.Type),
					Timestamp: event.Timestamp.UnixNano(),
					Route:     route,
				}

				events = append(events, e)
			}

			msg := &pb.Advert{
				Id:        advert.Id,
				Type:      pb.AdvertType(advert.Type),
				Timestamp: advert.Timestamp.UnixNano(),
				Events:    events,
			}

			// get a list of node peers
			peers := n.Peers()

			// continue if there is no one to send to
			if len(peers) == 0 {
				continue
			}

			// advertise to max 3 peers
			max := len(peers)
			if max > 3 {
				max = 3
			}

			for i := 0; i < max; i++ {
				if peer := n.node.GetPeerNode(peers[rnd.Intn(len(peers))].Id()); peer != nil {
					if err := n.sendTo("advert", ControlChannel, peer, msg); err != nil {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network failed to advertise routes to %s: %v", peer.Id(), err)
						}
					}
				}
			}
		case <-n.closed:
			return
		}
	}
}
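
// Note that each advert is fired at no more than three peers picked at random
// (the index is re-drawn on every iteration, so the same peer may be selected
// more than once), which keeps the per-advert fan-out bounded regardless of
// how many peers the node has.
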
// initNodes initializes tunnel with a list of resolved nodes
func (n *mucpNetwork) initNodes(startup bool) {
	nodes, err := n.resolveNodes()
	// NOTE: this condition never fires
	// as resolveNodes() never returns error
	if err != nil && !startup {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Network failed to init nodes: %v", err)
		}
		return
	}

	// strip self
	var init []string

	// our current address
	advertised := n.server.Options().Advertise

	for _, node := range nodes {
		// skip self
		if node == advertised {
			continue
		}
		// add the node
		init = append(init, node)
	}

	if logger.V(logger.TraceLevel, logger.DefaultLogger) {
		// initialize the tunnel
		logger.Tracef("Network initialising nodes %+v\n", init)
	}

	n.tunnel.Init(
		tunnel.Nodes(nodes...),
	)
}

// resolveNodes resolves network nodes to addresses
func (n *mucpNetwork) resolveNodes() ([]string, error) {
	// resolve the network address to network nodes
	records, err := n.options.Resolver.Resolve(n.options.Name)
	if err != nil {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Network failed to resolve nodes: %v", err)
		}
	}

	// sort by lowest priority
	if err == nil && len(records) > 0 {
		sort.Slice(records, func(i, j int) bool { return records[i].Priority < records[j].Priority })
	}

	// keep processing

	nodeMap := make(map[string]bool)

	// collect network node addresses
	//nolint:prealloc
	var nodes []string
	var i int

	for _, record := range records {
		if _, ok := nodeMap[record.Address]; ok {
			continue
		}

		nodeMap[record.Address] = true
		nodes = append(nodes, record.Address)

		i++

		// break once MaxConnections nodes have been reached
		if i == MaxConnections {
			break
		}
	}

	// use the DNS resolver to expand peers
	dns := &dns.Resolver{}

	// append seed nodes if we have them
	for _, node := range n.options.Nodes {
		// resolve anything that looks like a host name
		records, err := dns.Resolve(node)
		if err != nil {
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Failed to resolve %v %v", node, err)
			}
			continue
		}

		// add to the node map
		for _, record := range records {
			if _, ok := nodeMap[record.Address]; !ok {
				nodes = append(nodes, record.Address)
			}
		}
	}

	return nodes, nil
}
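
// In effect the tunnel is seeded from at most MaxConnections unique resolver
// records (considered in ascending Priority order), plus any configured seed
// nodes from options.Nodes, with host names expanded through the DNS resolver.
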
// handleNetConn handles network announcement messages
func (n *mucpNetwork) handleNetConn(s tunnel.Session, msg chan *message) {
	for {
		m := new(transport.Message)
		if err := s.Recv(m); err != nil {
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network tunnel [%s] receive error: %v", NetworkChannel, err)
			}
			switch err {
			case io.EOF, tunnel.ErrReadTimeout:
				s.Close()
				return
			}
			continue
		}

		// check if peer is set
		peer := m.Header["Micro-Peer"]

		// check who the message is intended for
		if len(peer) > 0 && peer != n.options.Id {
			continue
		}

		select {
		case msg <- &message{
			msg:     m,
			session: s,
		}:
		case <-n.closed:
			return
		}
	}
}

// handleCtrlConn handles ControlChannel connections
func (n *mucpNetwork) handleCtrlConn(s tunnel.Session, msg chan *message) {
	for {
		m := new(transport.Message)
		if err := s.Recv(m); err != nil {
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network tunnel [%s] receive error: %v", ControlChannel, err)
			}
			switch err {
			case io.EOF, tunnel.ErrReadTimeout:
				s.Close()
				return
			}
			continue
		}

		// check if peer is set
		peer := m.Header["Micro-Peer"]

		// check who the message is intended for
		if len(peer) > 0 && peer != n.options.Id {
			continue
		}

		select {
		case msg <- &message{
			msg:     m,
			session: s,
		}:
		case <-n.closed:
			return
		}
	}
}
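
// Note: in both handlers above, a message carrying a "Micro-Peer" header that
// names a different node id is silently dropped, so traffic on a shared tunnel
// channel is only processed by its intended recipient (or by every node when
// the header is empty).
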
// getHopCount queries network graph and returns hop count for given router
// NOTE: this should be called getHopMetric
// - Routes for local services have hop count 1
// - Routes with ID of adjacent nodes have hop count 10
// - Routes by peers of the advertiser have hop count 100
// - Routes beyond node neighbourhood have hop count 1000
func (n *mucpNetwork) getHopCount(rtr string) int {
	// make sure node.peers are not modified
	n.node.RLock()
	defer n.node.RUnlock()

	// we are the origin of the route
	if rtr == n.options.Id {
		return 1
	}

	// the route origin is our peer
	if _, ok := n.node.peers[rtr]; ok {
		return 10
	}

	// the route origin is the peer of our peer
	for _, peer := range n.node.peers {
		for id := range peer.peers {
			if rtr == id {
				return 100
			}
		}
	}
	// otherwise we are three hops away
	return 1000
}

// getRouteMetric calculates router metric and returns it
// Route metric is calculated based on link status and route hop count
func (n *mucpNetwork) getRouteMetric(router string, gateway string, link string) int64 {
	// set the route metric
	n.RLock()
	defer n.RUnlock()

	// local links are marked as 1
	if link == "local" && gateway == "" {
		return 1
	}

	// local links from other gateways as 2
	if link == "local" && gateway != "" {
		return 2
	}

	if logger.V(logger.TraceLevel, logger.DefaultLogger) {
		logger.Tracef("Network looking up %s link to gateway: %s", link, gateway)
	}
	// attempt to find link based on gateway address
	lnk, ok := n.peerLinks[gateway]
	if !ok {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Network failed to find a link to gateway: %s", gateway)
		}
		// no link found so infinite metric returned
		return math.MaxInt64
	}

	// calculating metric

	delay := lnk.Delay()
	hops := n.getHopCount(router)
	length := lnk.Length()

	// make sure delay is non-zero
	if delay == 0 {
		delay = 1
	}

	// make sure length is non-zero
	if length == 0 {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Link length is 0 %v %v", link, lnk.Length())
		}
		length = 10e9
	}

	if logger.V(logger.TraceLevel, logger.DefaultLogger) {
		logger.Tracef("Network calculated metric %v delay %v length %v distance %v", (delay*length*int64(hops))/10e6, delay, length, hops)
	}

	return (delay * length * int64(hops)) / 10e6
}
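
// A worked example of the formula above, using only values visible in this
// file (real Delay and Length values come from the tunnel link and are not
// assumed here): with delay = 1, length = 10e9 (the fallback used when the
// link reports zero length) and a hop count of 10 (a route originated by a
// direct peer), the metric is (1 * 10e9 * 10) / 10e6 = 10000.
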
// processCtrlChan processes messages received on ControlChannel
func (n *mucpNetwork) processCtrlChan(listener tunnel.Listener) {
	defer listener.Close()

	// receive control message queue
	recv := make(chan *message, 128)

	// accept ControlChannel connections
	go n.acceptCtrlConn(listener, recv)

	for {
		select {
		case m := <-recv:
			// switch on type of message and take action
			switch m.msg.Header["Micro-Method"] {
			case "advert":
				pbAdvert := &pb.Advert{}

				if err := proto.Unmarshal(m.msg.Body, pbAdvert); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed to unmarshal advert message: %v", err)
					}
					continue
				}

				// don't process your own messages
				if pbAdvert.Id == n.options.Id {
					continue
				}
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network received advert message from: %s", pbAdvert.Id)
				}

				// lookup advertising node in our peer topology
				advertNode := n.node.GetPeerNode(pbAdvert.Id)
				if advertNode == nil {
					// if we can't find the node in our topology (MaxDepth) we skip processing adverts
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network skipping advert message from unknown peer: %s", pbAdvert.Id)
					}
					continue
				}

				var events []*router.Event

				for _, event := range pbAdvert.Events {
					// for backwards compatibility reasons
					if event == nil || event.Route == nil {
						continue
					}

					// we know the advertising node is not the origin of the route
					if pbAdvert.Id != event.Route.Router {
						// if the origin router is not the advertising node peer
						// we can't rule out potential routing loops so we bail here
						if peer := advertNode.GetPeerNode(event.Route.Router); peer == nil {
							if logger.V(logger.DebugLevel, logger.DefaultLogger) {
								logger.Debugf("Network skipping advert message from peer: %s", pbAdvert.Id)
							}
							continue
						}
					}

					route := router.Route{
						Service: event.Route.Service,
						Address: event.Route.Address,
						Gateway: event.Route.Gateway,
						Network: event.Route.Network,
						Router:  event.Route.Router,
						Link:    event.Route.Link,
						Metric:  event.Route.Metric,
					}

					// calculate route metric and add to the advertised metric
					// we need to make sure we do not overflow math.MaxInt64
					metric := n.getRouteMetric(event.Route.Router, event.Route.Gateway, event.Route.Link)
					if logger.V(logger.TraceLevel, logger.DefaultLogger) {
						logger.Tracef("Network metric for router %s and gateway %s: %v", event.Route.Router, event.Route.Gateway, metric)
					}

					// check we don't overflow max int 64
					if d := route.Metric + metric; d <= 0 {
						// set to max int64 if we overflow
						route.Metric = math.MaxInt64
					} else {
						// set the combined value of metrics otherwise
						route.Metric = d
					}

					// create router event
					e := &router.Event{
						Type:      router.EventType(event.Type),
						Timestamp: time.Unix(0, pbAdvert.Timestamp),
						Route:     route,
					}
					events = append(events, e)
				}

				// if no events are eligible for processing continue
				if len(events) == 0 {
					if logger.V(logger.TraceLevel, logger.DefaultLogger) {
						logger.Tracef("Network no events to be processed by router: %s", n.options.Id)
					}
					continue
				}

				// create an advert and process it
				advert := &router.Advert{
					Id:        pbAdvert.Id,
					Type:      router.AdvertType(pbAdvert.Type),
					Timestamp: time.Unix(0, pbAdvert.Timestamp),
					TTL:       time.Duration(pbAdvert.Ttl),
					Events:    events,
				}

				if logger.V(logger.TraceLevel, logger.DefaultLogger) {
					logger.Tracef("Network router %s processing advert: %s", n.Id(), advert.Id)
				}
				if err := n.router.Process(advert); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed to process advert %s: %v", advert.Id, err)
					}
				}
			}
		case <-n.closed:
			return
		}
	}
}
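
// In short: adverts are accepted only from peers already present in the local
// topology, each advertised metric is combined with the locally computed link
// metric (saturating at math.MaxInt64 on overflow), and the surviving events
// are handed to the router for processing.
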
// processNetChan processes messages received on NetworkChannel
func (n *mucpNetwork) processNetChan(listener tunnel.Listener) {
	defer listener.Close()

	// receive network message queue
	recv := make(chan *message, 128)

	// accept NetworkChannel connections
	go n.acceptNetConn(listener, recv)

	for {
		select {
		case m := <-recv:
			// switch on type of message and take action
			switch m.msg.Header["Micro-Method"] {
			case "connect":
				// mark the time the message has been received
				now := time.Now()

				pbConnect := &pb.Connect{}
				if err := proto.Unmarshal(m.msg.Body, pbConnect); err != nil {
					logger.Debugf("Network tunnel [%s] connect unmarshal error: %v", NetworkChannel, err)
					continue
				}

				// don't process your own messages
				if pbConnect.Node.Id == n.options.Id {
					continue
				}

				logger.Debugf("Network received connect message from: %s", pbConnect.Node.Id)

				peer := &node{
					id:       pbConnect.Node.Id,
					address:  pbConnect.Node.Address,
					link:     m.msg.Header["Micro-Link"],
					peers:    make(map[string]*node),
					status:   newStatus(),
					lastSeen: now,
				}

				// update peer links

				// TODO: should we do this only if we manage to add a peer
				// What should we do if the peer links failed to be updated?
				if err := n.updatePeerLinks(peer); err != nil {
					logger.Debugf("Network failed updating peer links: %s", err)
				}

				// add peer to the list of node peers
				if err := n.AddPeer(peer); err == ErrPeerExists {
					logger.Tracef("Network peer exists, refreshing: %s", peer.id)
					// update lastSeen time for the peer
					if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
						logger.Debugf("Network failed refreshing peer %s: %v", peer.id, err)
					}
				}

				// we send the sync message because someone has sent connect
				// and wants to either connect or reconnect to the network
				// The faster it gets the network config (routes and peer graph)
				// the faster the network converges to a stable state

				go func() {
					// get node peer graph to send back to the connecting node
					node := PeersToProto(n.node, MaxDepth)

					msg := &pb.Sync{
						Peer: node,
					}

					// get a list of the best routes for each service in our routing table
					routes, err := n.getProtoRoutes()
					if err != nil {
						logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
					}
					// attach the routes to the message
					msg.Routes = routes

					// send sync message to the newly connected peer
					if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
						logger.Debugf("Network failed to send sync message: %v", err)
					}
				}()
			case "peer":
				// mark the time the message has been received
				now := time.Now()
				pbPeer := &pb.Peer{}

				if err := proto.Unmarshal(m.msg.Body, pbPeer); err != nil {
					logger.Debugf("Network tunnel [%s] peer unmarshal error: %v", NetworkChannel, err)
					continue
				}

				// don't process your own messages
				if pbPeer.Node.Id == n.options.Id {
					continue
				}

				logger.Debugf("Network received peer message from: %s %s", pbPeer.Node.Id, pbPeer.Node.Address)

				peer := &node{
					id:       pbPeer.Node.Id,
					address:  pbPeer.Node.Address,
					link:     m.msg.Header["Micro-Link"],
					peers:    make(map[string]*node),
					status:   newPeerStatus(pbPeer),
					lastSeen: now,
				}

				// update peer links

				// TODO: should we do this only if we manage to add a peer
				// What should we do if the peer links failed to be updated?
				if err := n.updatePeerLinks(peer); err != nil {
					logger.Debugf("Network failed updating peer links: %s", err)
				}

				// if it's a new peer i.e. we do not have it in our graph, we request full sync
				if err := n.node.AddPeer(peer); err == nil {
					go func() {
						// marshal node graph into protobuf
						node := PeersToProto(n.node, MaxDepth)

						msg := &pb.Sync{
							Peer: node,
						}

						// get a list of the best routes for each service in our routing table
						routes, err := n.getProtoRoutes()
						if err != nil {
							logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
						}
						// attach the routes to the message
						msg.Routes = routes

						// send sync message to the newly connected peer
						if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
							logger.Debugf("Network failed to send sync message: %v", err)
						}
					}()

					continue
					// if we already have the peer in our graph, skip further steps
				} else if err != ErrPeerExists {
					logger.Debugf("Network got error adding peer %v", err)
					continue
				}

				logger.Tracef("Network peer exists, refreshing: %s", pbPeer.Node.Id)

				// update lastSeen time for the peer
				if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
					logger.Debugf("Network failed refreshing peer %s: %v", pbPeer.Node.Id, err)
				}

				// NOTE: we don't unpack MaxDepth topology
				peer = UnpackPeerTopology(pbPeer, now, MaxDepth-1)
				// update the link
				peer.link = m.msg.Header["Micro-Link"]

				logger.Tracef("Network updating topology of node: %s", n.node.id)
				if err := n.node.UpdatePeer(peer); err != nil {
					logger.Debugf("Network failed to update peers: %v", err)
				}

				// tell the connect loop that we've been discovered
				// so it stops sending connect messages out
				select {
				case n.discovered <- true:
				default:
					// don't block here
				}
			case "sync":
				// record the timestamp of the message receipt
				now := time.Now()

				pbSync := &pb.Sync{}
				if err := proto.Unmarshal(m.msg.Body, pbSync); err != nil {
					logger.Debugf("Network tunnel [%s] sync unmarshal error: %v", NetworkChannel, err)
					continue
				}

				// don't process your own messages
				if pbSync.Peer.Node.Id == n.options.Id {
					continue
				}

				logger.Debugf("Network received sync message from: %s", pbSync.Peer.Node.Id)

				peer := &node{
					id:       pbSync.Peer.Node.Id,
					address:  pbSync.Peer.Node.Address,
					link:     m.msg.Header["Micro-Link"],
					peers:    make(map[string]*node),
					status:   newPeerStatus(pbSync.Peer),
					lastSeen: now,
				}

				// update peer links

				// TODO: should we do this only if we manage to add a peer
				// What should we do if the peer links failed to be updated?
				if err := n.updatePeerLinks(peer); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed updating peer links: %s", err)
					}
				}

				// add peer to the list of node peers
				if err := n.node.AddPeer(peer); err == ErrPeerExists {
					if logger.V(logger.TraceLevel, logger.DefaultLogger) {
						logger.Tracef("Network peer exists, refreshing: %s", peer.id)
					}
					// update lastSeen time for the existing node
					if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network failed refreshing peer %s: %v", peer.id, err)
						}
					}
				}

				// when we receive a sync message we update our routing table
				// and send a peer message back to the network to announce our presence

				// add all the routes we have received in the sync message
				for _, pbRoute := range pbSync.Routes {
					// unmarshal the routes received from remote peer
					route := ProtoToRoute(pbRoute)
					// continue if we are the originator of the route
					if route.Router == n.router.Options().Id {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network node %s skipping route addition: route already present", n.id)
						}
						continue
					}

					metric := n.getRouteMetric(route.Router, route.Gateway, route.Link)
					// check we don't overflow max int 64
					if d := route.Metric + metric; d <= 0 {
						// set to max int64 if we overflow
						route.Metric = math.MaxInt64
					} else {
						// set the combined value of metrics otherwise
						route.Metric = d
					}

					/////////////////////////////////////////////////////////////////////
					// maybe we should not be this clever ¯\_(ツ)_/¯                    //
					/////////////////////////////////////////////////////////////////////
					// lookup best routes for the services in the just received route
					q := []router.QueryOption{
						router.QueryService(route.Service),
						router.QueryStrategy(n.router.Options().Advertise),
					}

					routes, err := n.router.Table().Query(q...)
					if err != nil && err != router.ErrRouteNotFound {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network node %s failed listing best routes for %s: %v", n.id, route.Service, err)
						}
						continue
					}

					// we found no routes for the given service
					// create the new route we have just received
					if len(routes) == 0 {
						if err := n.router.Table().Create(route); err != nil && err != router.ErrDuplicateRoute {
							if logger.V(logger.DebugLevel, logger.DefaultLogger) {
								logger.Debugf("Network node %s failed to add route: %v", n.id, err)
							}
						}
						continue
					}

					// find the best route for the given service
					// from the routes that we would advertise
					bestRoute := routes[0]
					for _, r := range routes[0:] {
						if bestRoute.Metric > r.Metric {
							bestRoute = r
						}
					}

					// Take the best route to given service and:
					// only add new routes if the metric is better
					// than the metric of our best route

					if bestRoute.Metric <= route.Metric {
						continue
					}
					///////////////////////////////////////////////////////////////////////
					///////////////////////////////////////////////////////////////////////

					// add route to the routing table
					if err := n.router.Table().Create(route); err != nil && err != router.ErrDuplicateRoute {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network node %s failed to add route: %v", n.id, err)
						}
					}
				}

				// update your sync timestamp
				// NOTE: this might go away as we will be doing full table advert to random peer
				if err := n.RefreshSync(now); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed refreshing sync time: %v", err)
					}
				}

				go func() {
					// get node peer graph to send back to the syncing node
					msg := PeersToProto(n.node, MaxDepth)

					// advertise yourself to the new node
					if err := n.sendTo("peer", NetworkChannel, peer, msg); err != nil {
						if logger.V(logger.DebugLevel, logger.DefaultLogger) {
							logger.Debugf("Network failed to advertise peers: %v", err)
						}
					}
				}()
			case "close":
				pbClose := &pb.Close{}
				if err := proto.Unmarshal(m.msg.Body, pbClose); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network tunnel [%s] close unmarshal error: %v", NetworkChannel, err)
					}
					continue
				}

				// don't process your own messages
				if pbClose.Node.Id == n.options.Id {
					continue
				}

				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network received close message from: %s", pbClose.Node.Id)
				}

				peer := &node{
					id:      pbClose.Node.Id,
					address: pbClose.Node.Address,
				}

				if err := n.DeletePeerNode(peer.id); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed to delete node %s routes: %v", peer.id, err)
					}
				}

				if err := n.prunePeerRoutes(peer); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed pruning peer %s routes: %v", peer.id, err)
					}
				}

				// NOTE: we should maybe advertise this to the network so we converge faster on closed nodes
				// as opposed to our waiting until the node eventually gets pruned; something to think about

				// delete peer from the peerLinks
				n.Lock()
				delete(n.peerLinks, pbClose.Node.Address)
				n.Unlock()
			}
		case <-n.closed:
			return
		}
	}
}

// pruneRoutes prunes routes returned by given query
func (n *mucpNetwork) pruneRoutes(q ...router.QueryOption) error {
	routes, err := n.router.Table().Query(q...)
	if err != nil && err != router.ErrRouteNotFound {
		return err
	}

	for _, route := range routes {
		if err := n.router.Table().Delete(route); err != nil && err != router.ErrRouteNotFound {
			return err
		}
	}

	return nil
}

// prunePeerRoutes prunes routes that were either originated by or routable via given node
func (n *mucpNetwork) prunePeerRoutes(peer *node) error {
	// lookup all routes originated by router
	q := []router.QueryOption{
		router.QueryRouter(peer.id),
	}
	if err := n.pruneRoutes(q...); err != nil {
		return err
	}

	// lookup all routes routable via gw
	q = []router.QueryOption{
		router.QueryGateway(peer.address),
	}
	if err := n.pruneRoutes(q...); err != nil {
		return err
	}

	return nil
}

// manage the process of announcing to peers and prune any peer nodes that have not been
// seen for a period of time. Also removes all the routes either originated by or routable
// by the stale nodes. it also resolves nodes periodically and adds them to the tunnel
func (n *mucpNetwork) manage() {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	announce := time.NewTicker(AnnounceTime)
	defer announce.Stop()
	prune := time.NewTicker(PruneTime)
	defer prune.Stop()
	resolve := time.NewTicker(ResolveTime)
	defer resolve.Stop()
	netsync := time.NewTicker(SyncTime)
	defer netsync.Stop()

	// list of links we've sent to
	links := make(map[string]time.Time)

	for {
		select {
		case <-n.closed:
			return
		case <-announce.C:
			current := make(map[string]time.Time)

			// build link map of current links
			for _, link := range n.tunnel.Links() {
				if n.isLoopback(link) {
					continue
				}
				// get an existing timestamp if it exists
				current[link.Id()] = links[link.Id()]
			}

			// replace link map
			// we do this because a growing map is not
			// garbage collected
			links = current

			n.RLock()
			var i int
			// create a list of peers to send to
			var peers []*node

			// check peers to see if they need to be sent to
			for _, peer := range n.peers {
				if i >= 3 {
					break
				}

				// get last sent
				lastSent := links[peer.link]

				// check when we last sent to the peer
				// and send a peer message if we haven't
				if lastSent.IsZero() || time.Since(lastSent) > KeepAliveTime {
					link := peer.link
					id := peer.id

					// might not exist for some weird reason
					if len(link) == 0 {
						// set the link via peer links
						l, ok := n.peerLinks[peer.address]
						if !ok {
							if logger.V(logger.DebugLevel, logger.DefaultLogger) {
								logger.Debugf("Network link not found for peer %s cannot announce", peer.id)
							}
							continue
						}
						link = l.Id()
					}

					// add to the list of peers we're going to send to
					peers = append(peers, &node{
						id:   id,
						link: link,
					})

					// increment our count
					i++
				}
			}

			n.RUnlock()

			// peers to proto
			msg := PeersToProto(n.node, MaxDepth)

			// we're only going to send to max 3 peers at any given tick
			for _, peer := range peers {
				// advertise yourself to the network
				if err := n.sendTo("peer", NetworkChannel, peer, msg); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed to advertise peer %s: %v", peer.id, err)
					}
					continue
				}

				// update last sent time
				links[peer.link] = time.Now()
			}

			// now look at links we may not have sent to. this may occur
			// where a connect message was lost
			for link, lastSent := range links {
				if !lastSent.IsZero() || time.Since(lastSent) < KeepAliveTime {
					continue
				}

				peer := &node{
					// unknown id of the peer
					link: link,
				}

				// unknown link and peer so lets do the connect flow
				if err := n.sendTo("connect", NetworkChannel, peer, msg); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed to connect %s: %v", peer.id, err)
					}
					continue
				}

				links[peer.link] = time.Now()
			}
		case <-prune.C:
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network node %s pruning stale peers", n.id)
			}
			pruned := n.PruneStalePeers(PruneTime)

			for id, peer := range pruned {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network peer exceeded prune time: %s", id)
				}
				n.Lock()
				delete(n.peerLinks, peer.address)
				n.Unlock()

				if err := n.prunePeerRoutes(peer); err != nil {
					if logger.V(logger.DebugLevel, logger.DefaultLogger) {
						logger.Debugf("Network failed pruning peer %s routes: %v", id, err)
					}
				}
			}

			// get a list of all routes
			routes, err := n.options.Router.Table().List()
			if err != nil {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network failed listing routes when pruning peers: %v", err)
				}
|
2019-10-10 17:25:54 +03:00
|
|
|
continue
|
|
|
|
}
|
2019-12-07 22:54:29 +03:00
|
|
|
|
2019-10-10 17:25:54 +03:00
|
|
|
// collect all the router IDs in the routing table
|
|
|
|
routers := make(map[string]bool)
|
2019-12-07 22:54:29 +03:00
|
|
|
|
2019-10-10 17:25:54 +03:00
|
|
|
for _, route := range routes {
|
2019-12-07 22:54:29 +03:00
|
|
|
// check if its been processed
|
|
|
|
if _, ok := routers[route.Router]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// mark as processed
|
|
|
|
routers[route.Router] = true
|
|
|
|
|
2020-01-08 16:18:11 +03:00
|
|
|
// if the router is in our peer graph do NOT delete routes originated by it
|
2019-12-07 22:54:29 +03:00
|
|
|
if peer := n.node.GetPeerNode(route.Router); peer != nil {
|
|
|
|
continue
|
|
|
|
}
|
2020-01-08 16:18:11 +03:00
|
|
|
// otherwise delete all the routes originated by it
|
2019-12-07 22:54:29 +03:00
|
|
|
if err := n.pruneRoutes(router.QueryRouter(route.Router)); err != nil {
|
2020-03-11 20:55:39 +03:00
|
|
|
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
|
|
|
|
logger.Debugf("Network failed deleting routes by %s: %v", route.Router, err)
|
|
|
|
}
|
2019-10-10 17:25:54 +03:00
|
|
|
}
|
|
|
|
}
|
2020-01-10 22:02:42 +03:00
|
|
|
case <-netsync.C:
|
|
|
|
// get a list of node peers
|
|
|
|
peers := n.Peers()
|
2020-01-16 00:02:53 +03:00
|
|
|
|
|
|
|
// skip when there are no peers
|
|
|
|
if len(peers) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-01-10 22:02:42 +03:00
|
|
|
// pick a random peer from the list of peers and request full sync
|
2020-01-14 21:48:42 +03:00
|
|
|
peer := n.node.GetPeerNode(peers[rnd.Intn(len(peers))].Id())
|
2020-07-16 18:33:11 +03:00
|
|
|
// skip if we can't find randomly selected peer
|
2020-01-14 22:37:50 +03:00
|
|
|
if peer == nil {
|
2020-01-14 21:48:42 +03:00
|
|
|
continue
|
|
|
|
}
|
2020-01-10 22:02:42 +03:00
|
|
|
|
2020-01-14 21:48:42 +03:00
|
|
|
go func() {
|
|
|
|
// get node peer graph to send back to the connecting node
|
|
|
|
node := PeersToProto(n.node, MaxDepth)
|
2020-01-10 22:02:42 +03:00
|
|
|
|
2020-07-27 15:22:00 +03:00
|
|
|
msg := &pb.Sync{
|
2020-01-14 21:48:42 +03:00
|
|
|
Peer: node,
|
|
|
|
}
|
2020-01-10 22:02:42 +03:00
|
|
|
|
2020-01-16 19:08:49 +03:00
|
|
|
// get a list of the best routes for each service in our routing table
|
2020-01-16 22:43:10 +03:00
|
|
|
routes, err := n.getProtoRoutes()
|
|
|
|
if err != nil {
|
2020-03-11 20:55:39 +03:00
|
|
|
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
|
|
|
|
logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
|
|
|
|
}
|
2020-01-14 21:48:42 +03:00
|
|
|
}
|
2020-01-16 22:43:10 +03:00
|
|
|
// attached the routes to the message
|
|
|
|
msg.Routes = routes
|
2020-01-14 21:48:42 +03:00
|
|
|
|
|
|
|
// send sync message to the newly connected peer
|
|
|
|
if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
|
2020-03-11 20:55:39 +03:00
|
|
|
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
|
|
|
|
logger.Debugf("Network failed to send sync message: %v", err)
|
|
|
|
}
|
2020-01-14 21:48:42 +03:00
|
|
|
}
|
|
|
|
}()
|
2019-12-08 15:12:20 +03:00
|
|
|
case <-resolve.C:
|
2019-12-08 17:37:17 +03:00
|
|
|
n.initNodes(false)
|
2019-09-03 12:00:14 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
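
// The cadence of the loop above is controlled entirely by the package level
// timer variables declared at the top of this file (AnnounceTime, PruneTime,
// ResolveTime, SyncTime). The sketch below is illustrative only and not part
// of the package API: it simply shows that slowing the cadence is a matter of
// adjusting those variables before the network is connected; once manage() is
// running its tickers have already been created, so later changes have no effect.
func exampleTuneManageTimers() {
	AnnounceTime = 5 * time.Second // announce to peers less aggressively
	PruneTime = 3 * time.Minute    // tolerate longer peer silence before pruning
	ResolveTime = 2 * time.Minute  // re-resolve network nodes less often
	SyncTime = 2 * time.Minute     // request full syncs less often
}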

// getProtoRoutes returns a list of routes to advertise to a remote peer,
// selected according to the router's advertisement strategy and encoded in protobuf.
// It returns an error if the routes failed to be retrieved from the routing table.
func (n *mucpNetwork) getProtoRoutes() ([]*pb.Route, error) {
	// get a list of the best routes for each service in our routing table
	q := []router.QueryOption{
		router.QueryStrategy(n.router.Options().Advertise),
	}

	routes, err := n.router.Table().Query(q...)
	if err != nil && err != router.ErrRouteNotFound {
		return nil, err
	}

	// encode the routes to protobuf
	pbRoutes := make([]*pb.Route, 0, len(routes))
	for _, route := range routes {
		// generate new route proto
		pbRoute := RouteToProto(route)
		// mask the route before sending it outbound
		n.maskRoute(pbRoute)
		// add to list of routes
		pbRoutes = append(pbRoutes, pbRoute)
	}

	return pbRoutes, nil
}

func (n *mucpNetwork) sendConnect() {
	// send connect message to NetworkChannel
	// NOTE: in theory we could do this as soon as
	// Dial to NetworkChannel succeeds, but instead
	// we initialize all other node resources first
	msg := &pb.Connect{
		Node: &pb.Node{
			Id:      n.node.id,
			Address: n.node.address,
		},
	}

	if err := n.sendMsg("connect", NetworkChannel, msg); err != nil {
		if logger.V(logger.DebugLevel, logger.DefaultLogger) {
			logger.Debugf("Network failed to send connect message: %s", err)
		}
	}
}

// sendTo sends a message to a specific node as a one off.
// we need this because when links die, we have no discovery info,
// and sending to an existing multicast link doesn't immediately work
func (n *mucpNetwork) sendTo(method, channel string, peer *node, msg proto.Message) error {
	body, err := proto.Marshal(msg)
	if err != nil {
		return err
	}

	// create a unicast connection to the peer but don't do the open/accept flow
	c, err := n.tunnel.Dial(channel, tunnel.DialWait(false), tunnel.DialLink(peer.link))
	if err != nil {
		if peerNode := n.GetPeerNode(peer.id); peerNode != nil {
			// update node status when error happens
			peerNode.status.err.Update(err)
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network increment peer %v error count to: %d", peerNode, peerNode.status.Error().Count())
			}
			if count := peerNode.status.Error().Count(); count == MaxPeerErrors {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network peer %v error count exceeded %d. Pruning.", peerNode, MaxPeerErrors)
				}
				n.PrunePeer(peerNode.id)
			}
		}
		return err
	}
	defer c.Close()

	id := peer.id

	if len(id) == 0 {
		id = peer.link
	}

	if logger.V(logger.DebugLevel, logger.DefaultLogger) {
		logger.Debugf("Network sending %s message from: %s to %s", method, n.options.Id, id)
	}
	tmsg := &transport.Message{
		Header: map[string]string{
			"Micro-Method": method,
		},
		Body: body,
	}

	// set the peer header
	if len(peer.id) > 0 {
		tmsg.Header["Micro-Peer"] = peer.id
	}

	if err := c.Send(tmsg); err != nil {
		// TODO: Lookup peer in our graph
		if peerNode := n.GetPeerNode(peer.id); peerNode != nil {
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network found peer %s: %v", peer.id, peerNode)
			}
			// update node status when error happens
			peerNode.status.err.Update(err)
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network increment node peer %p %v count to: %d", peerNode, peerNode, peerNode.status.Error().Count())
			}
			if count := peerNode.status.Error().Count(); count == MaxPeerErrors {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Network node peer %v count exceeded %d: %d", peerNode, MaxPeerErrors, peerNode.status.Error().Count())
				}
				n.PrunePeer(peerNode.id)
			}
		}
		return err
	}

	return nil
}

// sendMsg sends a message to the tunnel channel
func (n *mucpNetwork) sendMsg(method, channel string, msg proto.Message) error {
	body, err := proto.Marshal(msg)
	if err != nil {
		return err
	}

	// check if the channel client is initialized
	n.RLock()
	client, ok := n.tunClient[channel]
	if !ok || client == nil {
		n.RUnlock()
		return ErrClientNotFound
	}
	n.RUnlock()

	if logger.V(logger.DebugLevel, logger.DefaultLogger) {
		logger.Debugf("Network sending %s message from: %s", method, n.options.Id)
	}

	return client.Send(&transport.Message{
		Header: map[string]string{
			"Micro-Method": method,
		},
		Body: body,
	})
}
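
// For reference, sendMsg and sendTo above frame every outbound message the same
// way: the protobuf payload is the transport.Message body and the message type
// travels in the "Micro-Method" header, with sendTo additionally setting
// "Micro-Peer" when it targets a known peer id. A "connect" announcement, for
// example, ends up on the wire roughly as:
//
//	&transport.Message{
//		Header: map[string]string{
//			"Micro-Method": "connect",
//			// "Micro-Peer": "<peer id>" (unicast sends only)
//		},
//		Body: <proto.Marshal'd *pb.Connect>,
//	}
//
// This is a descriptive sketch of the framing used in this file, not an
// additional API.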

// updatePeerLinks updates the link for a given peer
func (n *mucpNetwork) updatePeerLinks(peer *node) error {
	n.Lock()
	defer n.Unlock()

	linkId := peer.link

	if logger.V(logger.TraceLevel, logger.DefaultLogger) {
		logger.Tracef("Network looking up link %s in the peer links", linkId)
	}

	// lookup the peer link
	var peerLink tunnel.Link

	for _, link := range n.tunnel.Links() {
		if link.Id() == linkId {
			peerLink = link
			break
		}
	}

	if peerLink == nil {
		return ErrPeerLinkNotFound
	}

	// the peerLink was found in the returned links, so update peerLinks
	if logger.V(logger.TraceLevel, logger.DefaultLogger) {
		logger.Tracef("Network updating peer links for peer %s", peer.address)
	}

	// lookup the existing link and update it if a better link is available
	if link, ok := n.peerLinks[peer.address]; ok {
		// if the existing link has better Length than the new one, replace it
		if link.Length() < peerLink.Length() {
			n.peerLinks[peer.address] = peerLink
		}
		return nil
	}

	// add peerLink to the peerLinks map
	n.peerLinks[peer.address] = peerLink

	return nil
}

// isLoopback checks if a link is a loopback to ourselves
func (n *mucpNetwork) isLoopback(link tunnel.Link) bool {
	// skip loopback
	if link.Loopback() {
		return true
	}

	// our advertise address
	loopback := n.server.Options().Advertise
	// actual address
	address := n.tunnel.Address()

	// if remote is ourselves
	switch link.Remote() {
	case loopback, address:
		return true
	}

	return false
}

// connect will wait for a link to be established and send the connect
// message. We're trying to ensure convergence pretty quickly. So we want
// to hear back. In the case we become completely disconnected we'll
// connect again once a new link is established
func (n *mucpNetwork) connect() {
	// discovered lets us know whether we received a peer message back
	var discovered bool
	var attempts int

	for {
		// connected is used to define if the link is connected
		var connected bool

		// check the links state
		for _, link := range n.tunnel.Links() {
			// skip loopback
			if n.isLoopback(link) {
				continue
			}

			if link.State() == "connected" {
				connected = true
				break
			}
		}

		// if we're not connected wait
		if !connected {
			// reset discovered
			discovered = false
			// sleep for a second
			time.Sleep(time.Second)
			// now try again
			continue
		}

		// we're connected but are we discovered?
		if !discovered {
			// recreate the clients because all the tunnel links are gone
			// so we haven't sent discovery yet
			// NOTE: when starting the tunnel for the first time we might be recreating potentially
			// well functioning tunnel clients as "discovered" will be false until the
			// n.discovered channel is read at some point later on.
			if err := n.createClients(); err != nil {
				if logger.V(logger.DebugLevel, logger.DefaultLogger) {
					logger.Debugf("Failed to recreate network/control clients: %v", err)
				}
				continue
			}

			// send the connect message
			n.sendConnect()
		}

		// check if we've been discovered
		select {
		case <-n.discovered:
			discovered = true
			attempts = 0
		case <-n.closed:
			return
		case <-time.After(time.Second + backoff.Do(attempts)):
			// we have to try again
			attempts++
		}
	}
}

// Connect connects the network
func (n *mucpNetwork) Connect() error {
	n.Lock()
	defer n.Unlock()

	// connect network tunnel
	if err := n.tunnel.Connect(); err != nil {
		return err
	}

	// return if already connected
	if n.connected {
		// initialise the nodes
		n.initNodes(false)
		// send the connect message
		go n.sendConnect()
		return nil
	}

	// initialise the nodes
	n.initNodes(true)

	// set our internal node address
	// if advertise address is not set
	if len(n.options.Advertise) == 0 {
		n.server.Init(server.Advertise(n.tunnel.Address()))
	}

	// listen on NetworkChannel
	netListener, err := n.tunnel.Listen(
		NetworkChannel,
		tunnel.ListenMode(tunnel.Multicast),
	)
	if err != nil {
		return err
	}

	// listen on ControlChannel
	ctrlListener, err := n.tunnel.Listen(
		ControlChannel,
		tunnel.ListenMode(tunnel.Multicast),
	)
	if err != nil {
		return err
	}

	// dial into ControlChannel to send route adverts
	ctrlClient, err := n.tunnel.Dial(
		ControlChannel,
		tunnel.DialMode(tunnel.Multicast),
	)
	if err != nil {
		return err
	}

	n.tunClient[ControlChannel] = ctrlClient

	// dial into NetworkChannel to send network messages
	netClient, err := n.tunnel.Dial(
		NetworkChannel,
		tunnel.DialMode(tunnel.Multicast),
	)
	if err != nil {
		return err
	}

	n.tunClient[NetworkChannel] = netClient

	// create closed channel
	n.closed = make(chan bool)

	// start advertising routes
	advertChan, err := n.options.Router.Advertise()
	if err != nil {
		return err
	}

	// start the server
	if err := n.server.Start(); err != nil {
		return err
	}

	// advertise service routes
	go n.advertise(advertChan)
	// listen to network messages
	go n.processNetChan(netListener)
	// accept and process routes
	go n.processCtrlChan(ctrlListener)
	// manage connection once links are established
	go n.connect()
	// resolve nodes, broadcast announcements and prune stale nodes
	go n.manage()

	// we're now connected
	n.connected = true

	return nil
}
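
// A minimal lifecycle sketch for callers of this package: connect the network,
// use its client, and close it on shutdown. It assumes the package constructor
// is NewNetwork (defined elsewhere in this package, not shown in this excerpt),
// so treat that name as an assumption; Connect, Client and Close are the
// methods defined in this file. Illustrative only, not part of the package API.
//
//	net := NewNetwork()
//	if err := net.Connect(); err != nil {
//		// handle the error
//	}
//	defer net.Close()
//
//	// the network client can now be used to call services across the network
//	_ = net.Client()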

// close stops the server and closes the router and the network tunnel
func (n *mucpNetwork) close() error {
	// stop the server
	if err := n.server.Stop(); err != nil {
		return err
	}

	// close the router
	if err := n.router.Close(); err != nil {
		return err
	}

	// close the tunnel
	if err := n.tunnel.Close(); err != nil {
		return err
	}

	return nil
}

// createClients is used to create new clients in the event we lose all the tunnels
func (n *mucpNetwork) createClients() error {
	// dial into ControlChannel to send route adverts
	ctrlClient, err := n.tunnel.Dial(ControlChannel, tunnel.DialMode(tunnel.Multicast))
	if err != nil {
		return err
	}

	// dial into NetworkChannel to send network messages
	netClient, err := n.tunnel.Dial(NetworkChannel, tunnel.DialMode(tunnel.Multicast))
	if err != nil {
		return err
	}

	n.Lock()
	defer n.Unlock()

	// set the control client
	c, ok := n.tunClient[ControlChannel]
	if ok {
		c.Close()
	}
	n.tunClient[ControlChannel] = ctrlClient

	// set the network client
	c, ok = n.tunClient[NetworkChannel]
	if ok {
		c.Close()
	}
	n.tunClient[NetworkChannel] = netClient

	return nil
}

// Close closes network connection
func (n *mucpNetwork) Close() error {
	n.Lock()

	if !n.connected {
		n.Unlock()
		return nil
	}

	select {
	case <-n.closed:
		n.Unlock()
		return nil
	default:
		close(n.closed)

		// set connected to false
		n.connected = false

		// unlock the lock otherwise we'll deadlock sending the close
		n.Unlock()

		msg := &pb.Close{
			Node: &pb.Node{
				Id:      n.node.id,
				Address: n.node.address,
			},
		}

		if err := n.sendMsg("close", NetworkChannel, msg); err != nil {
			if logger.V(logger.DebugLevel, logger.DefaultLogger) {
				logger.Debugf("Network failed to send close message: %s", err)
			}
		}
		<-time.After(time.Millisecond * 100)
	}

	return n.close()
}

// Client returns network client
func (n *mucpNetwork) Client() client.Client {
	return n.client
}

// Server returns network server
func (n *mucpNetwork) Server() server.Server {
	return n.server
}

// RouteToProto encodes route into protobuf and returns it
func RouteToProto(route router.Route) *pb.Route {
	return &pb.Route{
		Service: route.Service,
		Address: route.Address,
		Gateway: route.Gateway,
		Network: route.Network,
		Router:  route.Router,
		Link:    route.Link,
		Metric:  int64(route.Metric),
	}
}

// ProtoToRoute decodes protobuf route into router route and returns it
func ProtoToRoute(route *pb.Route) router.Route {
	return router.Route{
		Service: route.Service,
		Address: route.Address,
		Gateway: route.Gateway,
		Network: route.Network,
		Router:  route.Router,
		Link:    route.Link,
		Metric:  route.Metric,
	}
}
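
// An illustrative round trip through the two helpers above. This is a sketch,
// not part of the package API: the field values (service name, addresses,
// router id) are made up purely for demonstration, while the field set matches
// what RouteToProto and ProtoToRoute copy.
func exampleRouteRoundTrip() router.Route {
	route := router.Route{
		Service: "go.micro.service.greeter", // hypothetical service name
		Address: "10.0.0.1:8080",            // hypothetical service address
		Gateway: "10.0.0.254:8085",          // hypothetical gateway address
		Network: DefaultName,
		Router:  "router-1", // hypothetical router id
		Link:    DefaultLink,
		Metric:  10,
	}

	// encode to the protobuf form used on the wire, then decode it back
	return ProtoToRoute(RouteToProto(route))
}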