micro/network/mucp/mucp.go

package mucp
import (
"errors"
"fmt"
"hash/fnv"
"io"
"math"
"math/rand"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/unistack-org/micro/v3/client"
cmucp "github.com/unistack-org/micro/v3/client/mucp"
"github.com/unistack-org/micro/v3/logger"
"github.com/unistack-org/micro/v3/network"
pb "github.com/unistack-org/micro/v3/network/mucp/proto"
"github.com/unistack-org/micro/v3/proxy"
"github.com/unistack-org/micro/v3/registry/noop"
"github.com/unistack-org/micro/v3/resolver/dns"
"github.com/unistack-org/micro/v3/router"
"github.com/unistack-org/micro/v3/server"
smucp "github.com/unistack-org/micro/v3/server/mucp"
"github.com/unistack-org/micro/v3/transport"
"github.com/unistack-org/micro/v3/tunnel"
bun "github.com/unistack-org/micro/v3/tunnel/broker"
tun "github.com/unistack-org/micro/v3/tunnel/transport"
"github.com/unistack-org/micro/v3/util/backoff"
)
var (
// DefaultName is default network name
DefaultName = "go.micro"
// DefaultAddress is default network address
DefaultAddress = ":0"
// AnnounceTime defines time interval to periodically announce node neighbours
AnnounceTime = 1 * time.Second
// KeepAliveTime is the time in which we want to have sent a message to a peer
KeepAliveTime = 30 * time.Second
// SyncTime is the time a network node requests full sync from the network
SyncTime = 1 * time.Minute
// PruneTime defines the interval at which we check for and prune nodes
// that have not announced their presence within this time
PruneTime = 90 * time.Second
// MaxDepth defines max depth of peer topology
MaxDepth uint = 3
// NetworkChannel is the name of the tunnel channel for passing network messages
NetworkChannel = "network"
// ControlChannel is the name of the tunnel channel for passing control messages
ControlChannel = "control"
// DefaultLink is default network link
DefaultLink = "network"
// MaxConnections is the max number of network client connections
MaxConnections = 3
// MaxPeerErrors is the max number of peer errors before we remove it from network graph
MaxPeerErrors = 3
// ErrPeerExists is returned when adding a peer which already exists
ErrPeerExists = errors.New("peer already exists")
// ErrPeerNotFound is returned when a peer could not be found in node topology
ErrPeerNotFound = errors.New("peer not found")
// ErrClientNotFound is returned when client for tunnel channel could not be found
ErrClientNotFound = errors.New("client not found")
// ErrPeerLinkNotFound is returned when peer link could not be found in tunnel Links
ErrPeerLinkNotFound = errors.New("peer link not found")
// ErrPeerMaxExceeded is returned when peer has reached its max error count limit
ErrPeerMaxExceeded = errors.New("peer max errors exceeded")
)
// network implements Network interface
type mucpNetwork struct {
// node is network node
*node
// options configure the network
options network.Options
// router is network router
router router.Router
// proxy is network proxy
proxy proxy.Proxy
// tunnel is network tunnel
tunnel tunnel.Tunnel
// server is network server
server server.Server
// client is network client
client client.Client
// tunClient is a map of tunnel channel clients
tunClient map[string]tunnel.Session
// peerLinks is a map of links for each peer
peerLinks map[string]tunnel.Link
sync.RWMutex
// connected marks the network as connected
connected bool
// closed closes the network
closed chan bool
// whether we've been discovered by the network
discovered chan bool
}
// message is network message
type message struct {
// msg is transport message
msg *transport.Message
// session is tunnel session
session tunnel.Session
}
// NewNetwork returns a new network node
func NewNetwork(opts ...network.Option) network.Network {
// create default options
options := network.DefaultOptions()
// initialize network options
for _, o := range opts {
o(&options)
}
// set the address to a hashed address
hasher := fnv.New64()
hasher.Write([]byte(options.Address + options.Id))
address := fmt.Sprintf("%d", hasher.Sum64())
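// note: the hashed address is a plain decimal string identifying this node over the
// tunnel transport (the tunnel itself binds to options.Address below); it serves as
// an identity rather than a dialable host:port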
// set the address to advertise
var advertise string
var peerAddress string
if len(options.Advertise) > 0 {
advertise = options.Advertise
peerAddress = options.Advertise
} else {
advertise = options.Address
peerAddress = address
}
// init tunnel address to the network bind address
options.Tunnel.Init(
tunnel.Address(options.Address),
)
// init router Id to the network id
options.Router.Init(
router.Id(options.Id),
router.Address(peerAddress),
)
// create tunnel client with tunnel transport
tunTransport := tun.NewTransport(
tun.WithTunnel(options.Tunnel),
)
// create the tunnel broker
tunBroker := bun.NewBroker(
bun.WithTunnel(options.Tunnel),
)
// server is network server
// TODO: use the real registry
server := smucp.NewServer(
server.Id(options.Id),
server.Address(peerAddress),
server.Advertise(advertise),
server.Name(options.Name),
server.Transport(tunTransport),
server.Broker(tunBroker),
server.Registry(noop.NewRegistry()),
)
// client is network client
client := cmucp.NewClient(
client.Broker(tunBroker),
client.Transport(tunTransport),
client.Router(options.Router),
)
network := &mucpNetwork{
node: &node{
id: options.Id,
address: peerAddress,
peers: make(map[string]*node),
status: newStatus(),
},
options: options,
router: options.Router,
proxy: options.Proxy,
tunnel: options.Tunnel,
server: server,
client: client,
tunClient: make(map[string]tunnel.Session),
peerLinks: make(map[string]tunnel.Link),
discovered: make(chan bool, 1),
}
network.node.network = network
return network
}
func (n *mucpNetwork) Init(opts ...network.Option) error {
n.Lock()
defer n.Unlock()
// TODO: maybe only allow reinit of certain opts
for _, o := range opts {
o(&n.options)
}
return nil
}
// Options returns network options
func (n *mucpNetwork) Options() network.Options {
n.RLock()
defer n.RUnlock()
options := n.options
return options
}
// Name returns network name
func (n *mucpNetwork) Name() string {
n.RLock()
defer n.RUnlock()
name := n.options.Name
return name
}
// acceptNetConn accepts connections from NetworkChannel
func (n *mucpNetwork) acceptNetConn(l tunnel.Listener, recv chan *message) {
var i int
for {
// accept a connection
conn, err := l.Accept()
if err != nil {
sleep := backoff.Do(i)
logger.Debugf("Network tunnel [%s] accept error: %v, backing off for %v", ControlChannel, err, sleep)
time.Sleep(sleep)
i++
continue
}
select {
case <-n.closed:
if err := conn.Close(); err != nil {
logger.Debugf("Network tunnel [%s] failed to close connection: %v", NetworkChannel, err)
}
return
default:
// go handle NetworkChannel connection
go n.handleNetConn(conn, recv)
}
}
}
// acceptCtrlConn accepts connections from ControlChannel
func (n *mucpNetwork) acceptCtrlConn(l tunnel.Listener, recv chan *message) {
var i int
for {
// accept a connection
conn, err := l.Accept()
if err != nil {
sleep := backoff.Do(i)
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network tunnel [%s] accept error: %v, backing off for %v", ControlChannel, err, sleep)
}
time.Sleep(sleep)
i++
continue
}
select {
case <-n.closed:
if err := conn.Close(); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network tunnel [%s] failed to close connection: %v", ControlChannel, err)
}
}
return
default:
// go handle ControlChannel connection
go n.handleCtrlConn(conn, recv)
}
}
}
// maskRoute will mask the route so that we apply the right values
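// For routes originated locally the service address is replaced with a hash of the
// service name and this node's address, collapsing multiple instances of a service
// into a single advertised route; Gateway and Link are rewritten to point back at
// this node and the Metric is recalculated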
func (n *mucpNetwork) maskRoute(r *pb.Route) {
hasher := fnv.New64()
// the routes service address
address := r.Address
// only hash the address if we're advertising our own local routes
// avoid hashing * based routes
if r.Router == n.Id() && r.Address != "*" {
// hash the service before advertising it
hasher.Reset()
// routes for multiple instances of a service will be collapsed here.
// TODO: once we store labels in the table this may need to change
// to include the labels in case they differ but highly unlikely
hasher.Write([]byte(r.Service + n.Address()))
address = fmt.Sprintf("%d", hasher.Sum64())
}
// calculate route metric to advertise
metric := n.getRouteMetric(r.Router, r.Gateway, r.Link)
// NOTE: we override Gateway, Link and Address here
r.Address = address
r.Gateway = n.Address()
r.Link = DefaultLink
r.Metric = metric
}
// advertise advertises routes to the network
func (n *mucpNetwork) advertise(eventChan <-chan *router.Event) {
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
for {
select {
// process local events and randomly fire them at other nodes
case event := <-eventChan:
// create a proto advert
var pbEvents []*pb.Event
// make a copy of the route
route := &pb.Route{
Service: event.Route.Service,
Address: event.Route.Address,
Gateway: event.Route.Gateway,
Network: event.Route.Network,
Router: event.Route.Router,
Link: event.Route.Link,
Metric: event.Route.Metric,
}
// override the various values
n.maskRoute(route)
e := &pb.Event{
Type: pb.EventType(event.Type),
Timestamp: event.Timestamp.UnixNano(),
Route: route,
}
pbEvents = append(pbEvents, e)
msg := &pb.Advert{
Id: n.Id(),
Type: pb.AdvertType(event.Type),
Timestamp: event.Timestamp.UnixNano(),
Events: pbEvents,
}
// get a list of node peers
peers := n.Peers()
// continue if there is no one to send to
if len(peers) == 0 {
continue
}
// advertise to max 3 peers
max := len(peers)
if max > 3 {
max = 3
}
for i := 0; i < max; i++ {
if peer := n.node.GetPeerNode(peers[rnd.Intn(len(peers))].Id()); peer != nil {
if err := n.sendTo("advert", ControlChannel, peer, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to advertise routes to %s: %v", peer.Id(), err)
}
}
}
}
case <-n.closed:
return
}
}
}
// initNodes initializes tunnel with a list of resolved nodes
func (n *mucpNetwork) initNodes(startup bool) {
nodes, err := n.resolveNodes()
// NOTE: this condition never fires
// as resolveNodes() never returns error
if err != nil && !startup {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to init nodes: %v", err)
}
return
}
// strip self
var init []string
// our current address
advertised := n.server.Options().Advertise
for _, node := range nodes {
// skip self
if node == advertised {
continue
}
// add the node
init = append(init, node)
}
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
// initialize the tunnel
logger.Tracef("Network initialising nodes %+v\n", init)
}
n.tunnel.Init(
tunnel.Nodes(nodes...),
)
}
// resolveNodes resolves network nodes to addresses
func (n *mucpNetwork) resolveNodes() ([]string, error) {
nodeMap := make(map[string]bool)
// collect network node addresses
//nolint:prealloc
var nodes []string
// use the DNS resolver to expand peers
dns := &dns.Resolver{}
// append seed nodes if we have them
for _, node := range n.options.Nodes {
// resolve anything that looks like a host name
records, err := dns.Resolve(node)
if err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Failed to resolve %v %v", node, err)
}
continue
}
// add to the node map
for _, record := range records {
if _, ok := nodeMap[record.Address]; !ok {
nodes = append(nodes, record.Address)
}
nodeMap[record.Address] = true
}
}
return nodes, nil
}
// handleNetConn handles network announcement messages
func (n *mucpNetwork) handleNetConn(s tunnel.Session, msg chan *message) {
for {
m := new(transport.Message)
if err := s.Recv(m); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network tunnel [%s] receive error: %v", NetworkChannel, err)
}
switch err {
case io.EOF, tunnel.ErrReadTimeout:
s.Close()
return
}
continue
}
// check if peer is set
peer := m.Header["Micro-Peer"]
// check who the message is intended for
if len(peer) > 0 && peer != n.options.Id {
continue
}
select {
case msg <- &message{
msg: m,
session: s,
}:
case <-n.closed:
return
}
}
}
// handleCtrlConn handles ControlChannel connections
func (n *mucpNetwork) handleCtrlConn(s tunnel.Session, msg chan *message) {
for {
m := new(transport.Message)
if err := s.Recv(m); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network tunnel [%s] receive error: %v", ControlChannel, err)
}
switch err {
case io.EOF, tunnel.ErrReadTimeout:
s.Close()
return
}
continue
}
// check if peer is set
peer := m.Header["Micro-Peer"]
// check who the message is intended for
if len(peer) > 0 && peer != n.options.Id {
continue
}
select {
case msg <- &message{
msg: m,
session: s,
}:
case <-n.closed:
return
}
}
}
// getHopCount queries network graph and returns hop count for given router
// NOTE: this should be called getHopMetric
// - Routes for local services have hop count 1
// - Routes with ID of adjacent nodes have hop count 10
// - Routes by peers of the advertiser have hop count 100
// - Routes beyond node neighbourhood have hop count 1000
func (n *mucpNetwork) getHopCount(rtr string) int {
// make sure node.peers are not modified
n.node.RLock()
defer n.node.RUnlock()
// we are the origin of the route
if rtr == n.options.Id {
return 1
}
// the route origin is our peer
if _, ok := n.node.peers[rtr]; ok {
return 10
}
// the route origin is the peer of our peer
for _, peer := range n.node.peers {
for id := range peer.peers {
if rtr == id {
return 100
}
}
}
// otherwise we are three hops away
return 1000
}
// getRouteMetric calculates the route metric and returns it
// Route metric is calculated based on link status and route hop count
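// For example (illustrative values): a route advertised by a direct peer (hop count 10)
// over a link with Delay 1e6 and Length 1e6 yields (1e6 * 1e6 * 10) / 10e6 = 1e6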
func (n *mucpNetwork) getRouteMetric(router string, gateway string, link string) int64 {
// set the route metric
n.RLock()
defer n.RUnlock()
// local links are marked as 1
if link == "local" && gateway == "" {
return 1
}
// local links from other gateways as 2
if link == "local" && gateway != "" {
return 2
}
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
logger.Tracef("Network looking up %s link to gateway: %s", link, gateway)
}
// attempt to find link based on gateway address
lnk, ok := n.peerLinks[gateway]
if !ok {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to find a link to gateway: %s", gateway)
}
// no link found so infinite metric returned
return math.MaxInt64
}
// calculating metric
delay := lnk.Delay()
hops := n.getHopCount(router)
length := lnk.Length()
// make sure delay is non-zero
if delay == 0 {
delay = 1
}
// make sure length is non-zero
if length == 0 {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Link length is 0 %v %v", link, lnk.Length())
}
length = 10e9
}
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
logger.Tracef("Network calculated metric %v delay %v length %v distance %v", (delay*length*int64(hops))/10e6, delay, length, hops)
}
return (delay * length * int64(hops)) / 10e6
}
// processCtrlChan processes messages received on ControlChannel
func (n *mucpNetwork) processCtrlChan(listener tunnel.Listener) {
defer listener.Close()
// receive control message queue
recv := make(chan *message, 128)
// accept ControlChannel connections
go n.acceptCtrlConn(listener, recv)
for {
select {
case m := <-recv:
// switch on type of message and take action
switch m.msg.Header["Micro-Method"] {
case "advert":
pbAdvert := &pb.Advert{}
if err := proto.Unmarshal(m.msg.Body, pbAdvert); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network fail to unmarshal advert message: %v", err)
}
continue
}
// don't process your own messages
if pbAdvert.Id == n.Id() {
continue
}
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network received advert message from: %s", pbAdvert.Id)
}
// lookup advertising node in our peer topology
advertNode := n.node.GetPeerNode(pbAdvert.Id)
if advertNode == nil {
// if we can't find the node in our topology (MaxDepth) we skip processing adverts
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network skipping advert message from unknown peer: %s", pbAdvert.Id)
}
continue
}
for _, event := range pbAdvert.Events {
// for backwards compatibility reasons
if event == nil || event.Route == nil {
continue
}
// we know the advertising node is not the origin of the route
if pbAdvert.Id != event.Route.Router {
// if the origin router is not the advertising node peer
// we can't rule out potential routing loops so we bail here
if peer := advertNode.GetPeerNode(event.Route.Router); peer == nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network skipping advert message from peer: %s", pbAdvert.Id)
}
continue
}
}
route := router.Route{
Service: event.Route.Service,
Address: event.Route.Address,
Gateway: event.Route.Gateway,
Network: event.Route.Network,
Router: event.Route.Router,
Link: event.Route.Link,
Metric: event.Route.Metric,
}
// calculate route metric and add to the advertised metric
// we need to make sure we do not overflow math.MaxInt64
metric := n.getRouteMetric(event.Route.Router, event.Route.Gateway, event.Route.Link)
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
logger.Tracef("Network metric for router %s and gateway %s: %v", event.Route.Router, event.Route.Gateway, metric)
}
// check we don't overflow max int 64
if d := route.Metric + metric; d <= 0 {
// set to max int64 if we overflow
route.Metric = math.MaxInt64
} else {
// set the combined value of metrics otherwise
route.Metric = d
}
// update the local table
if err := n.router.Table().Update(route); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to process advert %s: %v", event.Id, err)
}
}
}
}
case <-n.closed:
return
}
}
}
// processNetChan processes messages received on NetworkChannel
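// Four message types are handled here: "connect" (a node joining the network),
// "peer" (periodic peer announcements), "sync" (full peer graph and route exchange)
// and "close" (a node leaving the network)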
func (n *mucpNetwork) processNetChan(listener tunnel.Listener) {
defer listener.Close()
// receive network message queue
recv := make(chan *message, 128)
// accept NetworkChannel connections
go n.acceptNetConn(listener, recv)
for {
select {
case m := <-recv:
// switch on type of message and take action
switch m.msg.Header["Micro-Method"] {
case "connect":
// mark the time the message has been received
now := time.Now()
pbConnect := &pb.Connect{}
if err := proto.Unmarshal(m.msg.Body, pbConnect); err != nil {
logger.Debugf("Network tunnel [%s] connect unmarshal error: %v", NetworkChannel, err)
continue
}
// don't process your own messages
if pbConnect.Node.Id == n.options.Id {
continue
}
logger.Debugf("Network received connect message from: %s", pbConnect.Node.Id)
peer := &node{
id: pbConnect.Node.Id,
address: pbConnect.Node.Address,
link: m.msg.Header["Micro-Link"],
peers: make(map[string]*node),
status: newStatus(),
lastSeen: now,
}
// update peer links
// TODO: should we do this only if we manage to add a peer
// What should we do if the peer links failed to be updated?
if err := n.updatePeerLinks(peer); err != nil {
logger.Debugf("Network failed updating peer links: %s", err)
}
// add peer to the list of node peers
if err := n.AddPeer(peer); err == ErrPeerExists {
logger.Tracef("Network peer exists, refreshing: %s", peer.id)
// update lastSeen time for the peer
if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
logger.Debugf("Network failed refreshing peer %s: %v", peer.id, err)
}
}
// we send the sync message because someone has sent connect
// and wants to either connect or reconnect to the network
// The faster it gets the network config (routes and peer graph)
// the faster the network converges to a stable state
go func() {
// get node peer graph to send back to the connecting node
node := PeersToProto(n.node, MaxDepth)
msg := &pb.Sync{
Peer: node,
}
// get a list of the best routes for each service in our routing table
routes, err := n.getProtoRoutes()
if err != nil {
logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
}
// attach the routes to the message
msg.Routes = routes
// send sync message to the newly connected peer
if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
logger.Debugf("Network failed to send sync message: %v", err)
}
}()
case "peer":
// mark the time the message has been received
now := time.Now()
pbPeer := &pb.Peer{}
if err := proto.Unmarshal(m.msg.Body, pbPeer); err != nil {
logger.Debugf("Network tunnel [%s] peer unmarshal error: %v", NetworkChannel, err)
continue
}
// don't process your own messages
if pbPeer.Node.Id == n.options.Id {
continue
}
logger.Debugf("Network received peer message from: %s %s", pbPeer.Node.Id, pbPeer.Node.Address)
peer := &node{
id: pbPeer.Node.Id,
address: pbPeer.Node.Address,
link: m.msg.Header["Micro-Link"],
peers: make(map[string]*node),
status: newPeerStatus(pbPeer),
lastSeen: now,
}
// update peer links
// TODO: should we do this only if we manage to add a peer
// What should we do if the peer links failed to be updated?
if err := n.updatePeerLinks(peer); err != nil {
logger.Debugf("Network failed updating peer links: %s", err)
}
// if it's a new peer i.e. we do not have it in our graph, we request full sync
if err := n.node.AddPeer(peer); err == nil {
go func() {
// marshal node graph into protobuf
node := PeersToProto(n.node, MaxDepth)
msg := &pb.Sync{
Peer: node,
}
// get a list of the best routes for each service in our routing table
routes, err := n.getProtoRoutes()
if err != nil {
logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
}
// attach the routes to the message
msg.Routes = routes
// send sync message to the newly connected peer
if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
logger.Debugf("Network failed to send sync message: %v", err)
}
}()
continue
// if we already have the peer in our graph, skip further steps
} else if err != ErrPeerExists {
logger.Debugf("Network got error adding peer %v", err)
continue
}
logger.Tracef("Network peer exists, refreshing: %s", pbPeer.Node.Id)
// update lastSeen time for the peer
if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
logger.Debugf("Network failed refreshing peer %s: %v", pbPeer.Node.Id, err)
}
// NOTE: we don't unpack MaxDepth topology
peer = UnpackPeerTopology(pbPeer, now, MaxDepth-1)
// update the link
peer.link = m.msg.Header["Micro-Link"]
logger.Tracef("Network updating topology of node: %s", n.node.id)
if err := n.node.UpdatePeer(peer); err != nil {
logger.Debugf("Network failed to update peers: %v", err)
}
// tell the connect loop that we've been discovered
// so it stops sending connect messages out
select {
case n.discovered <- true:
default:
// don't block here
}
case "sync":
// record the timestamp of the message receipt
now := time.Now()
pbSync := &pb.Sync{}
if err := proto.Unmarshal(m.msg.Body, pbSync); err != nil {
logger.Debugf("Network tunnel [%s] sync unmarshal error: %v", NetworkChannel, err)
continue
}
// don't process your own messages
if pbSync.Peer.Node.Id == n.options.Id {
continue
}
logger.Debugf("Network received sync message from: %s", pbSync.Peer.Node.Id)
peer := &node{
id: pbSync.Peer.Node.Id,
address: pbSync.Peer.Node.Address,
link: m.msg.Header["Micro-Link"],
peers: make(map[string]*node),
status: newPeerStatus(pbSync.Peer),
lastSeen: now,
}
// update peer links
// TODO: should we do this only if we manage to add a peer
// What should we do if the peer links failed to be updated?
if err := n.updatePeerLinks(peer); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed updating peer links: %s", err)
}
}
// add peer to the list of node peers
if err := n.node.AddPeer(peer); err == ErrPeerExists {
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
logger.Tracef("Network peer exists, refreshing: %s", peer.id)
}
// update lastSeen time for the existing node
if err := n.RefreshPeer(peer.id, peer.link, now); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed refreshing peer %s: %v", peer.id, err)
}
}
}
// when we receive a sync message we update our routing table
// and send a peer message back to the network to announce our presence
// add all the routes we have received in the sync message
for _, pbRoute := range pbSync.Routes {
// unmarshal the routes received from remote peer
route := ProtoToRoute(pbRoute)
// continue if we are the originator of the route
if route.Router == n.router.Options().Id {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s skipping route addition: route already present", n.id)
}
continue
}
metric := n.getRouteMetric(route.Router, route.Gateway, route.Link)
// check we don't overflow max int 64
if d := route.Metric + metric; d <= 0 {
// set to max int64 if we overflow
route.Metric = math.MaxInt64
} else {
// set the combined value of metrics otherwise
route.Metric = d
}
q := []router.QueryOption{
router.QueryService(route.Service),
router.QueryLink(route.Link),
}
routes, err := n.router.Table().Query(q...)
if err != nil && err != router.ErrRouteNotFound {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s failed listing best routes for %s: %v", n.id, route.Service, err)
}
continue
}
// we found no routes for the given service
// create the new route we have just received
if len(routes) == 0 {
if err := n.router.Table().Create(route); err != nil && err != router.ErrDuplicateRoute {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s failed to add route: %v", n.id, err)
}
}
continue
}
// find the best route for the given service
// from the routes that we would advertise
bestRoute := routes[0]
for _, r := range routes[0:] {
if bestRoute.Metric > r.Metric {
bestRoute = r
}
}
// Take the best route to given service and:
// only add new routes if the metric is better
// than the metric of our best route
if bestRoute.Metric <= route.Metric {
continue
}
// add route to the routing table
if err := n.router.Table().Create(route); err != nil && err != router.ErrDuplicateRoute {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s failed to add route: %v", n.id, err)
}
}
}
// update your sync timestamp
// NOTE: this might go away as we will be doing full table advert to random peer
if err := n.RefreshSync(now); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed refreshing sync time: %v", err)
}
}
go func() {
// get node peer graph to send back to the syncing node
msg := PeersToProto(n.node, MaxDepth)
// advertise yourself to the new node
if err := n.sendTo("peer", NetworkChannel, peer, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to advertise peers: %v", err)
}
}
}()
case "close":
pbClose := &pb.Close{}
if err := proto.Unmarshal(m.msg.Body, pbClose); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network tunnel [%s] close unmarshal error: %v", NetworkChannel, err)
}
continue
}
// don't process your own messages
if pbClose.Node.Id == n.options.Id {
continue
}
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network received close message from: %s", pbClose.Node.Id)
}
peer := &node{
id: pbClose.Node.Id,
address: pbClose.Node.Address,
}
if err := n.DeletePeerNode(peer.id); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to delete node %s routes: %v", peer.id, err)
}
}
if err := n.prunePeerRoutes(peer); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed pruning peer %s routes: %v", peer.id, err)
}
}
// NOTE: we should maybe advertise this to the network so we converge faster on closed nodes
// as opposed to our waiting until the node eventually gets pruned; something to think about
// delete peer from the peerLinks
n.Lock()
delete(n.peerLinks, pbClose.Node.Address)
n.Unlock()
}
case <-n.closed:
return
}
}
}
// pruneRoutes prunes routes returned by the given query
func (n *mucpNetwork) pruneRoutes(q ...router.QueryOption) error {
routes, err := n.router.Table().Query(q...)
if err != nil && err != router.ErrRouteNotFound {
return err
}
for _, route := range routes {
if err := n.router.Table().Delete(route); err != nil && err != router.ErrRouteNotFound {
return err
}
}
return nil
}
// prunePeerRoutes prunes routes that were either originated by or routable via given node
func (n *mucpNetwork) prunePeerRoutes(peer *node) error {
// lookup all routes originated by router
q := []router.QueryOption{
router.QueryRouter(peer.id),
router.QueryLink("*"),
}
if err := n.pruneRoutes(q...); err != nil {
return err
}
// lookup all routes routable via gw
q = []router.QueryOption{
router.QueryGateway(peer.address),
router.QueryLink("*"),
}
if err := n.pruneRoutes(q...); err != nil {
return err
}
return nil
}
// manage announces the node to its peers and prunes any peer nodes that have not been
// seen for a period of time, removing all routes either originated by or routable via
// those stale nodes. It also periodically resolves nodes and adds them to the tunnel
func (n *mucpNetwork) manage() {
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
announce := time.NewTicker(AnnounceTime)
defer announce.Stop()
prune := time.NewTicker(PruneTime)
defer prune.Stop()
netsync := time.NewTicker(SyncTime)
defer netsync.Stop()
// list of links we've sent to
links := make(map[string]time.Time)
for {
select {
case <-n.closed:
return
case <-announce.C:
current := make(map[string]time.Time)
// build link map of current links
for _, link := range n.tunnel.Links() {
if n.isLoopback(link) {
continue
}
// get an existing timestamp if it exists
current[link.Id()] = links[link.Id()]
}
// replace link map
// we do this because a growing map is not
// garbage collected
links = current
n.RLock()
var i int
// create a list of peers to send to
var peers []*node
// check peers to see if they need to be sent to
for _, peer := range n.peers {
if i >= 3 {
break
}
// get last sent
lastSent := links[peer.link]
// check when we last sent to the peer
// and send a peer message if we haven't
if lastSent.IsZero() || time.Since(lastSent) > KeepAliveTime {
link := peer.link
id := peer.id
// might not exist for some weird reason
if len(link) == 0 {
// set the link via peer links
l, ok := n.peerLinks[peer.address]
if !ok {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network link not found for peer %s cannot announce", peer.id)
}
continue
}
link = l.Id()
}
// add to the list of peers we're going to send to
peers = append(peers, &node{
id: id,
link: link,
})
// increment our count
i++
}
}
n.RUnlock()
// peers to proto
msg := PeersToProto(n.node, MaxDepth)
// we're only going to send to max 3 peers at any given tick
for _, peer := range peers {
// advertise yourself to the network
if err := n.sendTo("peer", NetworkChannel, peer, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to advertise peer %s: %v", peer.id, err)
}
continue
}
// update last sent time
links[peer.link] = time.Now()
}
// now look at links we may not have sent to. this may occur
// where a connect message was lost
for link, lastSent := range links {
if !lastSent.IsZero() || time.Since(lastSent) < KeepAliveTime {
continue
}
peer := &node{
// unknown id of the peer
link: link,
}
// unknown link and peer so lets do the connect flow
if err := n.sendTo("connect", NetworkChannel, peer, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to connect %s: %v", peer.id, err)
}
continue
}
links[peer.link] = time.Now()
}
case <-prune.C:
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s pruning stale peers", n.id)
}
pruned := n.PruneStalePeers(PruneTime)
for id, peer := range pruned {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network peer exceeded prune time: %s", id)
}
n.Lock()
delete(n.peerLinks, peer.address)
n.Unlock()
if err := n.prunePeerRoutes(peer); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed pruning peer %s routes: %v", id, err)
}
}
}
// get a list of all routes
routes, err := n.options.Router.Table().List()
if err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed listing routes when pruning peers: %v", err)
}
continue
}
// collect all the router IDs in the routing table
routers := make(map[string]bool)
for _, route := range routes {
// don't process routes originated by ourselves
if route.Router == n.Id() {
continue
}
// check if its been processed
if _, ok := routers[route.Router]; ok {
continue
}
// mark as processed
routers[route.Router] = true
// if the router is in our peer graph do NOT delete routes originated by it
if peer := n.node.GetPeerNode(route.Router); peer != nil {
continue
}
// otherwise delete all the routes originated by it
if err := n.pruneRoutes(router.QueryRouter(route.Router)); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed deleting routes by %s: %v", route.Router, err)
}
}
}
case <-netsync.C:
// get a list of node peers
peers := n.Peers()
// skip when there are no peers
if len(peers) == 0 {
continue
}
// pick a random peer from the list of peers and request full sync
peer := n.node.GetPeerNode(peers[rnd.Intn(len(peers))].Id())
// skip if we can't find randomly selected peer
if peer == nil {
continue
}
go func() {
// get node peer graph to send back to the connecting node
node := PeersToProto(n.node, MaxDepth)
msg := &pb.Sync{
Peer: node,
}
// get a list of the best routes for each service in our routing table
routes, err := n.getProtoRoutes()
if err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node %s failed listing routes: %v", n.id, err)
}
}
// attach the routes to the message
msg.Routes = routes
// send sync message to the newly connected peer
if err := n.sendTo("sync", NetworkChannel, peer, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to send sync message: %v", err)
}
}
}()
}
}
}
// getProtoRoutes returns a list of routes to advertise to remote peer
// based on the advertisement strategy encoded in protobuf
// It returns error if the routes failed to be retrieved from the routing table
func (n *mucpNetwork) getProtoRoutes() ([]*pb.Route, error) {
routes, err := n.router.Table().List()
if err != nil && err != router.ErrRouteNotFound {
return nil, err
}
// encode the routes to protobuf
pbRoutes := make([]*pb.Route, 0, len(routes))
for _, route := range routes {
// generate new route proto
pbRoute := RouteToProto(route)
// mask the route before sending it out
n.maskRoute(pbRoute)
// add to list of routes
pbRoutes = append(pbRoutes, pbRoute)
}
return pbRoutes, nil
}
func (n *mucpNetwork) sendConnect() {
// send connect message to NetworkChannel
// NOTE: in theory we could do this as soon as
// Dial to NetworkChannel succeeds, but instead
// we initialize all other node resources first
msg := &pb.Connect{
Node: &pb.Node{
Id: n.node.id,
Address: n.node.address,
},
}
if err := n.sendMsg("connect", NetworkChannel, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to send connect message: %s", err)
}
}
}
// sendTo sends a message to a specific node as a one off.
// we need this because when links die, we have no discovery info,
// and sending to an existing multicast link doesn't immediately work
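// The outgoing message carries a Micro-Method header (e.g. "connect", "peer", "sync")
// and, when the target id is known, a Micro-Peer header which receivers use to discard
// messages not addressed to them (see handleNetConn/handleCtrlConn)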
func (n *mucpNetwork) sendTo(method, channel string, peer *node, msg proto.Message) error {
body, err := proto.Marshal(msg)
if err != nil {
return err
}
// Create a unicast connection to the peer but don't do the open/accept flow
c, err := n.tunnel.Dial(channel, tunnel.DialWait(false), tunnel.DialLink(peer.link))
if err != nil {
if peerNode := n.GetPeerNode(peer.id); peerNode != nil {
// update node status when error happens
peerNode.status.err.Update(err)
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network increment peer %v error count to: %d", peerNode, peerNode, peerNode.status.Error().Count())
}
if count := peerNode.status.Error().Count(); count == MaxPeerErrors {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network peer %v error count exceeded %d. Prunning.", peerNode, MaxPeerErrors)
}
n.PrunePeer(peerNode.id)
}
}
return err
}
defer c.Close()
id := peer.id
if len(id) == 0 {
id = peer.link
}
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network sending %s message from: %s to %s", method, n.options.Id, id)
}
tmsg := &transport.Message{
Header: map[string]string{
"Micro-Method": method,
},
Body: body,
}
// setting the peer header
if len(peer.id) > 0 {
tmsg.Header["Micro-Peer"] = peer.id
}
if err := c.Send(tmsg); err != nil {
// TODO: Lookup peer in our graph
if peerNode := n.GetPeerNode(peer.id); peerNode != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network found peer %s: %v", peer.id, peerNode)
}
// update node status when error happens
peerNode.status.err.Update(err)
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network increment node peer %p %v count to: %d", peerNode, peerNode, peerNode.status.Error().Count())
}
if count := peerNode.status.Error().Count(); count == MaxPeerErrors {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network node peer %v count exceeded %d: %d", peerNode, MaxPeerErrors, peerNode.status.Error().Count())
}
n.PrunePeer(peerNode.id)
}
}
return err
}
return nil
}
// sendMsg sends a message to the tunnel channel
func (n *mucpNetwork) sendMsg(method, channel string, msg proto.Message) error {
body, err := proto.Marshal(msg)
if err != nil {
return err
}
// check if the channel client is initialized
n.RLock()
client, ok := n.tunClient[channel]
if !ok || client == nil {
n.RUnlock()
return ErrClientNotFound
}
n.RUnlock()
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network sending %s message from: %s", method, n.options.Id)
}
return client.Send(&transport.Message{
Header: map[string]string{
"Micro-Method": method,
},
Body: body,
})
}
// updatePeerLinks updates link for a given peer
func (n *mucpNetwork) updatePeerLinks(peer *node) error {
n.Lock()
defer n.Unlock()
linkId := peer.link
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
logger.Tracef("Network looking up link %s in the peer links", linkId)
}
// lookup the peer link
var peerLink tunnel.Link
for _, link := range n.tunnel.Links() {
if link.Id() == linkId {
peerLink = link
break
}
}
if peerLink == nil {
return ErrPeerLinkNotFound
}
if logger.V(logger.TraceLevel, logger.DefaultLogger) {
// if the peerLink is found in the returned links update peerLinks
logger.Tracef("Network updating peer links for peer %s", peer.address)
}
// lookup a link and update it if better link is available
if link, ok := n.peerLinks[peer.address]; ok {
// if the existing has better Length than the new, replace it
if link.Length() < peerLink.Length() {
n.peerLinks[peer.address] = peerLink
}
return nil
}
// add peerLink to the peerLinks map
n.peerLinks[peer.address] = peerLink
return nil
}
// isLoopback checks if a link is a loopback to ourselves
func (n *mucpNetwork) isLoopback(link tunnel.Link) bool {
// skip loopback
if link.Loopback() {
return true
}
// our advertise address
loopback := n.server.Options().Advertise
// actual address
address := n.tunnel.Address()
// if remote is ourselves
switch link.Remote() {
case loopback, address:
return true
}
return false
}
// connect will wait for a link to be established and send the connect
// message. We're trying to ensure convergence pretty quickly. So we want
// to hear back. In the case we become completely disconnected we'll
// connect again once a new link is established
func (n *mucpNetwork) connect() {
// discovered lets us know whether we received a peer message back
var discovered bool
var attempts int
for {
// connected is used to define if the link is connected
var connected bool
// check the links state
for _, link := range n.tunnel.Links() {
// skip loopback
if n.isLoopback(link) {
continue
}
if link.State() == "connected" {
connected = true
break
}
}
// if we're not connected wait
if !connected {
// reset discovered
discovered = false
// sleep for a second
time.Sleep(time.Second)
// now try again
continue
}
// we're connected but are we discovered?
if !discovered {
// recreate the clients because all the tunnel links are gone
// so the discovery message sent below can't have gone out yet
// NOTE: when starting the tunnel for the first time we might be recreating potentially
// well functioning tunnel clients as "discovered" will be false until the
// n.discovered channel is read at some point later on.
if err := n.createClients(); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Failed to recreate network/control clients: %v", err)
}
continue
}
// send the connect message
n.sendConnect()
}
// check if we've been discovered
select {
case <-n.discovered:
discovered = true
attempts = 0
case <-n.closed:
return
case <-time.After(time.Second + backoff.Do(attempts)):
// we have to try again
attempts++
}
}
}
// Connect connects the network
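// The sequence below: connect the tunnel, initialise the resolved nodes, listen on the
// NetworkChannel and ControlChannel in multicast mode, dial multicast clients for both
// channels, start the server, then spawn the advertise, message processing, connect
// and manage loops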
func (n *mucpNetwork) Connect() error {
n.Lock()
defer n.Unlock()
// connect network tunnel
if err := n.tunnel.Connect(); err != nil {
return err
}
// return if already connected
if n.connected {
// initialise the nodes
n.initNodes(false)
// send the connect message
go n.sendConnect()
return nil
}
// initialise the nodes
n.initNodes(true)
// set our internal node address
// if advertise address is not set
if len(n.options.Advertise) == 0 {
n.server.Init(server.Advertise(n.tunnel.Address()))
}
// listen on NetworkChannel
netListener, err := n.tunnel.Listen(
NetworkChannel,
tunnel.ListenMode(tunnel.Multicast),
)
if err != nil {
return err
}
// listen on ControlChannel
ctrlListener, err := n.tunnel.Listen(
ControlChannel,
tunnel.ListenMode(tunnel.Multicast),
)
if err != nil {
return err
}
// dial into ControlChannel to send route adverts
ctrlClient, err := n.tunnel.Dial(
ControlChannel,
tunnel.DialMode(tunnel.Multicast),
)
if err != nil {
return err
}
n.tunClient[ControlChannel] = ctrlClient
// dial into NetworkChannel to send network messages
netClient, err := n.tunnel.Dial(
NetworkChannel,
tunnel.DialMode(tunnel.Multicast),
)
if err != nil {
return err
}
n.tunClient[NetworkChannel] = netClient
// create closed channel
n.closed = make(chan bool)
// start advertising routes
watcher, err := n.options.Router.Watch()
if err != nil {
return err
}
advertChan, err := watcher.Chan()
if err != nil {
return err
}
// start the server
if err := n.server.Start(); err != nil {
return err
}
// advertise service routes
go n.advertise(advertChan)
// listen to network messages
go n.processNetChan(netListener)
// accept and process routes
go n.processCtrlChan(ctrlListener)
// manage connection once links are established
go n.connect()
// resolve nodes, broadcast announcements and prune stale nodes
go n.manage()
// we're now connected
n.connected = true
return nil
}
func (n *mucpNetwork) close() error {
// stop the server
if err := n.server.Stop(); err != nil {
return err
}
// close the router
if err := n.router.Close(); err != nil {
return err
}
// close the tunnel
if err := n.tunnel.Close(); err != nil {
return err
}
return nil
}
// createClients is used to create new clients in the event we lose all the tunnels
func (n *mucpNetwork) createClients() error {
// dial into ControlChannel to send route adverts
ctrlClient, err := n.tunnel.Dial(ControlChannel, tunnel.DialMode(tunnel.Multicast))
if err != nil {
return err
}
// dial into NetworkChannel to send network messages
netClient, err := n.tunnel.Dial(NetworkChannel, tunnel.DialMode(tunnel.Multicast))
if err != nil {
return err
}
n.Lock()
defer n.Unlock()
// set the control client
c, ok := n.tunClient[ControlChannel]
if ok {
c.Close()
}
n.tunClient[ControlChannel] = ctrlClient
// set the network client
c, ok = n.tunClient[NetworkChannel]
if ok {
c.Close()
}
n.tunClient[NetworkChannel] = netClient
return nil
}
// Close closes network connection
func (n *mucpNetwork) Close() error {
n.Lock()
if !n.connected {
n.Unlock()
return nil
}
select {
case <-n.closed:
n.Unlock()
return nil
default:
close(n.closed)
// set connected to false
n.connected = false
// unlock the lock otherwise we'll deadlock sending the close
n.Unlock()
msg := &pb.Close{
Node: &pb.Node{
Id: n.node.id,
Address: n.node.address,
},
}
if err := n.sendMsg("close", NetworkChannel, msg); err != nil {
if logger.V(logger.DebugLevel, logger.DefaultLogger) {
logger.Debugf("Network failed to send close message: %s", err)
}
}
<-time.After(time.Millisecond * 100)
}
return n.close()
}
// Client returns network client
func (n *mucpNetwork) Client() client.Client {
return n.client
}
// Server returns network server
func (n *mucpNetwork) Server() server.Server {
return n.server
}
// RouteToProto encodes route into protobuf and returns it
func RouteToProto(route router.Route) *pb.Route {
return &pb.Route{
Service: route.Service,
Address: route.Address,
Gateway: route.Gateway,
Network: route.Network,
Router: route.Router,
Link: route.Link,
Metric: int64(route.Metric),
}
}
// ProtoToRoute decodes protobuf route into router route and returns it
func ProtoToRoute(route *pb.Route) router.Route {
return router.Route{
Service: route.Service,
Address: route.Address,
Gateway: route.Gateway,
Network: route.Network,
Router: route.Router,
Link: route.Link,
Metric: route.Metric,
}
}