further cleanup of tunnel/network

This commit is contained in:
Asim Aslam 2019-12-08 12:12:20 +00:00
parent 283c85d256
commit 6307d6ba51
5 changed files with 98 additions and 119 deletions

View File

@ -202,6 +202,18 @@ func (n *network) Name() string {
return n.options.Name return n.options.Name
} }
// initNodes resolves the network nodes and (re)initializes the network
// tunnel with the resolved addresses. On resolution failure the error is
// logged at debug level and the tunnel is left untouched.
func (n *network) initNodes() {
	nodes, err := n.resolveNodes()
	if err != nil {
		// best effort: keep whatever nodes the tunnel already has
		log.Debugf("Network failed to resolve nodes: %v", err)
		return
	}
	// initialize the tunnel with the freshly resolved node addresses
	n.tunnel.Init(
		tunnel.Nodes(nodes...),
	)
}
// resolveNodes resolves network nodes to addresses // resolveNodes resolves network nodes to addresses
func (n *network) resolveNodes() ([]string, error) { func (n *network) resolveNodes() ([]string, error) {
// resolve the network address to network nodes // resolve the network address to network nodes
@ -253,29 +265,6 @@ func (n *network) resolveNodes() ([]string, error) {
return nodes, err return nodes, err
} }
// resolve continuously resolves network nodes and initializes network tunnel with resolved addresses.
// It ticks every ResolveTime and runs until the network's closed channel is closed.
func (n *network) resolve() {
	resolve := time.NewTicker(ResolveTime)
	defer resolve.Stop()
	for {
		select {
		case <-n.closed:
			// network shut down; stop resolving
			return
		case <-resolve.C:
			nodes, err := n.resolveNodes()
			if err != nil {
				// best effort: keep the previously configured tunnel nodes
				log.Debugf("Network failed to resolve nodes: %v", err)
				continue
			}
			// initialize the tunnel with the resolved node addresses
			n.tunnel.Init(
				tunnel.Nodes(nodes...),
			)
		}
	}
}
// handleNetConn handles network announcement messages // handleNetConn handles network announcement messages
func (n *network) handleNetConn(s tunnel.Session, msg chan *message) { func (n *network) handleNetConn(s tunnel.Session, msg chan *message) {
for { for {
@ -565,12 +554,14 @@ func (n *network) prunePeerRoutes(peer *node) error {
// manage the process of announcing to peers and prune any peer nodes that have not been // manage the process of announcing to peers and prune any peer nodes that have not been
// seen for a period of time. Also removes all the routes either originated by or routable // seen for a period of time. Also removes all the routes either originated by or routable
//by the stale nodes //by the stale nodes. It also resolves nodes periodically and adds them to the tunnel
func (n *network) manage() { func (n *network) manage() {
announce := time.NewTicker(AnnounceTime) announce := time.NewTicker(AnnounceTime)
defer announce.Stop() defer announce.Stop()
prune := time.NewTicker(PruneTime) prune := time.NewTicker(PruneTime)
defer prune.Stop() defer prune.Stop()
resolve := time.NewTicker(ResolveTime)
defer resolve.Stop()
for { for {
select { select {
@ -584,11 +575,14 @@ func (n *network) manage() {
} }
case <-prune.C: case <-prune.C:
pruned := n.PruneStalePeers(PruneTime) pruned := n.PruneStalePeers(PruneTime)
for id, peer := range pruned { for id, peer := range pruned {
log.Debugf("Network peer exceeded prune time: %s", id) log.Debugf("Network peer exceeded prune time: %s", id)
n.Lock() n.Lock()
delete(n.peerLinks, peer.address) delete(n.peerLinks, peer.address)
n.Unlock() n.Unlock()
if err := n.prunePeerRoutes(peer); err != nil { if err := n.prunePeerRoutes(peer); err != nil {
log.Debugf("Network failed pruning peer %s routes: %v", id, err) log.Debugf("Network failed pruning peer %s routes: %v", id, err)
} }
@ -622,6 +616,8 @@ func (n *network) manage() {
log.Debugf("Network failed deleting routes by %s: %v", route.Router, err) log.Debugf("Network failed deleting routes by %s: %v", route.Router, err)
} }
} }
case <-resolve.C:
n.initNodes()
} }
} }
} }
@ -1165,25 +1161,19 @@ func (n *network) Connect() error {
n.Lock() n.Lock()
defer n.Unlock() defer n.Unlock()
// try to resolve network nodes
nodes, err := n.resolveNodes()
if err != nil {
log.Debugf("Network failed to resolve nodes: %v", err)
}
// initialize the tunnel to resolved nodes
n.tunnel.Init(
tunnel.Nodes(nodes...),
)
// connect network tunnel // connect network tunnel
if err := n.tunnel.Connect(); err != nil { if err := n.tunnel.Connect(); err != nil {
n.Unlock()
return err return err
} }
// initialise the nodes
n.initNodes()
// return if already connected // return if already connected
if n.connected { if n.connected {
// immediately resolve
initNodes()
// send the connect message // send the connect message
n.sendConnect() n.sendConnect()
return nil return nil
@ -1250,11 +1240,9 @@ func (n *network) Connect() error {
return err return err
} }
// send connect after there's a link established // manage connection once links are established
go n.connect() go n.connect()
// resolve network nodes and re-init the tunnel // resolve nodes, broadcast announcements and prune stale nodes
go n.resolve()
// broadcast announcements and prune stale nodes
go n.manage() go n.manage()
// advertise service routes // advertise service routes
go n.advertise(advertChan) go n.advertise(advertChan)

View File

@ -202,7 +202,7 @@ func (t *tun) manage() {
reconnect := time.NewTicker(ReconnectTime) reconnect := time.NewTicker(ReconnectTime)
defer reconnect.Stop() defer reconnect.Stop()
// do it immediately // do immediately
t.manageLinks() t.manageLinks()
for { for {
@ -215,6 +215,47 @@ func (t *tun) manage() {
} }
} }
// manageLink sends channel discover requests periodically and
// keepalive messages to link. It runs until either the tunnel or the
// link is closed; if a keepalive send fails the link is removed from
// the tunnel and the loop exits.
func (t *tun) manageLink(link *link) {
	// two independent tickers drive the periodic messages
	keepalive := time.NewTicker(KeepAliveTime)
	defer keepalive.Stop()
	discover := time.NewTicker(DiscoverTime)
	defer discover.Stop()
	for {
		select {
		case <-t.closed:
			// tunnel shut down; stop managing this link
			return
		case <-link.closed:
			// link was closed elsewhere; nothing left to manage
			return
		case <-discover.C:
			// send a discovery message to all links
			if err := link.Send(&transport.Message{
				Header: map[string]string{
					"Micro-Tunnel":    "discover",
					"Micro-Tunnel-Id": t.id,
				},
			}); err != nil {
				// discovery is best effort; a failed send does not drop the link
				log.Debugf("Tunnel failed to send discover to link %s: %v", link.Remote(), err)
			}
		case <-keepalive.C:
			// send keepalive message
			log.Debugf("Tunnel sending keepalive to link: %v", link.Remote())
			if err := link.Send(&transport.Message{
				Header: map[string]string{
					"Micro-Tunnel":    "keepalive",
					"Micro-Tunnel-Id": t.id,
				},
			}); err != nil {
				// a failed keepalive means the link is unusable: drop it and exit
				log.Debugf("Tunnel error sending keepalive to link %v: %v", link.Remote(), err)
				t.delLink(link.Remote())
				return
			}
		}
	}
}
// manageLinks is a function that can be called immediately to set up links // manageLinks is a function that can be called immediately to set up links
func (t *tun) manageLinks() { func (t *tun) manageLinks() {
var delLinks []string var delLinks []string
@ -278,8 +319,15 @@ func (t *tun) manageLinks() {
// save the link // save the link
t.Lock() t.Lock()
defer t.Unlock()
// just check nothing else was setup in the interim
if _, ok := t.links[node]; ok {
link.Close()
return
}
// save the link
t.links[node] = link t.links[node] = link
t.Unlock()
}(node) }(node)
} }
@ -723,59 +771,6 @@ func (t *tun) listen(link *link) {
} }
} }
// discover sends channel discover requests periodically.
// It ticks every DiscoverTime and runs until either the link or the
// tunnel is closed. Send failures are logged but do not drop the link.
func (t *tun) discover(link *link) {
	tick := time.NewTicker(DiscoverTime)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			// send a discovery message to all links
			if err := link.Send(&transport.Message{
				Header: map[string]string{
					"Micro-Tunnel":    "discover",
					"Micro-Tunnel-Id": t.id,
				},
			}); err != nil {
				// discovery is best effort
				log.Debugf("Tunnel failed to send discover to link %s: %v", link.Remote(), err)
			}
		case <-link.closed:
			// link closed elsewhere; stop discovering
			return
		case <-t.closed:
			// tunnel shut down
			return
		}
	}
}
// keepalive periodically sends keepalive messages to link.
// It ticks every KeepAliveTime and runs until the tunnel or link is
// closed; a failed send removes the link from the tunnel and exits.
func (t *tun) keepalive(link *link) {
	keepalive := time.NewTicker(KeepAliveTime)
	defer keepalive.Stop()
	for {
		select {
		case <-t.closed:
			// tunnel shut down; stop sending keepalives
			return
		case <-link.closed:
			// link closed elsewhere
			return
		case <-keepalive.C:
			// send keepalive message
			log.Debugf("Tunnel sending keepalive to link: %v", link.Remote())
			if err := link.Send(&transport.Message{
				Header: map[string]string{
					"Micro-Tunnel":    "keepalive",
					"Micro-Tunnel-Id": t.id,
				},
			}); err != nil {
				// a failed keepalive means the link is dead: drop it and exit
				log.Debugf("Tunnel error sending keepalive to link %v: %v", link.Remote(), err)
				t.delLink(link.Remote())
				return
			}
		}
	}
}
// setupLink connects to node and returns link if successful // setupLink connects to node and returns link if successful
// It returns error if the link failed to be established // It returns error if the link failed to be established
func (t *tun) setupLink(node string) (*link, error) { func (t *tun) setupLink(node string) (*link, error) {
@ -812,11 +807,8 @@ func (t *tun) setupLink(node string) (*link, error) {
// process incoming messages // process incoming messages
go t.listen(link) go t.listen(link)
// start keepalive monitor // manage keepalives and discovery messages
go t.keepalive(link) go t.manageLink(link)
// discover things on the remote side
go t.discover(link)
return link, nil return link, nil
} }
@ -839,11 +831,8 @@ func (t *tun) connect() error {
// create a new link // create a new link
link := newLink(sock) link := newLink(sock)
// start keepalive monitor // manage the link
go t.keepalive(link) go t.manageLink(link)
// discover things on the remote side
go t.discover(link)
// listen for inbound messages. // listen for inbound messages.
// only save the link once connected. // only save the link once connected.
@ -870,6 +859,8 @@ func (t *tun) Connect() error {
// already connected // already connected
if t.connected { if t.connected {
// do it immediately
t.manageLinks()
// setup links // setup links
return nil return nil
} }
@ -884,13 +875,13 @@ func (t *tun) Connect() error {
// create new close channel // create new close channel
t.closed = make(chan bool) t.closed = make(chan bool)
// manage the links
go t.manage()
// process outbound messages to be sent // process outbound messages to be sent
// process sends to all links // process sends to all links
go t.process() go t.process()
// manage the links
go t.manage()
return nil return nil
} }

View File

@ -9,15 +9,19 @@ import (
) )
func TestReconnectTunnel(t *testing.T) { func TestReconnectTunnel(t *testing.T) {
// we manually override the tunnel.ReconnectTime value here
// this is so that we make the reconnects faster than the default 5s
ReconnectTime = 100 * time.Millisecond
// create a new tunnel client // create a new tunnel client
tunA := NewTunnel( tunA := NewTunnel(
Address("127.0.0.1:9096"), Address("127.0.0.1:9098"),
Nodes("127.0.0.1:9097"), Nodes("127.0.0.1:9099"),
) )
// create a new tunnel server // create a new tunnel server
tunB := NewTunnel( tunB := NewTunnel(
Address("127.0.0.1:9097"), Address("127.0.0.1:9099"),
) )
// start tunnel // start tunnel
@ -27,10 +31,6 @@ func TestReconnectTunnel(t *testing.T) {
} }
defer tunB.Close() defer tunB.Close()
// we manually override the tunnel.ReconnectTime value here
// this is so that we make the reconnects faster than the default 5s
ReconnectTime = 200 * time.Millisecond
// start tunnel // start tunnel
err = tunA.Connect() err = tunA.Connect()
if err != nil { if err != nil {
@ -48,7 +48,7 @@ func TestReconnectTunnel(t *testing.T) {
wg.Add(1) wg.Add(1)
// start tunnel sender // start tunnel sender
go testBrokenTunSend(t, tunA, wait, &wg) go testBrokenTunSend(t, tunA, wait, &wg, ReconnectTime)
// wait until done // wait until done
wg.Wait() wg.Wait()

View File

@ -420,7 +420,7 @@ func (s *session) Close() error {
default: default:
close(s.closed) close(s.closed)
// don't send close on multicast // don't send close on multicast or broadcast
if s.mode != Unicast { if s.mode != Unicast {
return nil return nil
} }

View File

@ -206,7 +206,7 @@ func testBrokenTunAccept(t *testing.T, tun Tunnel, wait chan bool, wg *sync.Wait
wait <- true wait <- true
} }
func testBrokenTunSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup) { func testBrokenTunSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup, reconnect time.Duration) {
defer wg.Done() defer wg.Done()
// wait for the listener to get ready // wait for the listener to get ready
@ -234,7 +234,7 @@ func testBrokenTunSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGr
<-wait <-wait
// give it time to reconnect // give it time to reconnect
time.Sleep(5 * ReconnectTime) time.Sleep(10 * reconnect)
// send the message // send the message
if err := c.Send(&m); err != nil { if err := c.Send(&m); err != nil {