nebula/lighthouse.go

package nebula

import (
"errors"
"fmt"
"net"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
)
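// ErrHostNotKnown is returned when we have no addresses cached for the queried vpn ip.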
var ErrHostNotKnown = errors.New("host not known")
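// LightHouse tracks the udp addresses known for each vpn ip. When
// amLighthouse is true this node answers HostQuery messages itself;
// otherwise it queries the configured lighthouses and caches their
// answers in addrMap.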
type LightHouse struct {
sync.RWMutex //Because we concurrently read and write to our maps
amLighthouse bool
myIp uint32
punchConn *udpConn
// Local cache of answers from lighthouses
addrMap map[uint32][]*udpAddr
// filters remote addresses allowed for each host
// - When we are a lighthouse, this filters what addresses we store and
// respond with.
// - When we are not a lighthouse, this filters which addresses we accept
// from lighthouses.
remoteAllowList *AllowList
// filters local addresses that we advertise to lighthouses
localAllowList *AllowList
// used to trigger the HandshakeManager when we receive HostQueryReply
handshakeTrigger chan<- uint32
// staticList exists to avoid having a bool in each addrMap entry
// since static should be rare
staticList map[uint32]struct{}
lighthouses map[uint32]struct{}
interval int
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
punchBack bool
punchDelay time.Duration
metrics *MessageMetrics
metricHolepunchTx metrics.Counter
l *logrus.Logger
}
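// EncWriter is the writer interface the lighthouse uses to send nebula
// messages to a single vpn ip or to all hosts.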
type EncWriter interface {
SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte)
SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte)
}
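// NewLightHouse creates a LightHouse. ips is the set of lighthouse vpn ips to
// query, interval is the update interval in seconds, and pc is the udp
// connection used to send hole punch packets.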
func NewLightHouse(l *logrus.Logger, amLighthouse bool, myIp uint32, ips []uint32, interval int, nebulaPort uint32, pc *udpConn, punchBack bool, punchDelay time.Duration, metricsEnabled bool) *LightHouse {
h := LightHouse{
amLighthouse: amLighthouse,
myIp: myIp,
addrMap: make(map[uint32][]*udpAddr),
nebulaPort: nebulaPort,
lighthouses: make(map[uint32]struct{}),
staticList: make(map[uint32]struct{}),
interval: interval,
punchConn: pc,
punchBack: punchBack,
punchDelay: punchDelay,
l: l,
}
if metricsEnabled {
h.metrics = newLighthouseMetrics()
h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
} else {
h.metricHolepunchTx = metrics.NilCounter{}
}
for _, ip := range ips {
h.lighthouses[ip] = struct{}{}
}
return &h
}
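// SetRemoteAllowList sets the allow list applied to remote addresses
// (lighthouse.remoteAllowList). Per the rules introduced in #217: CIDRs map
// to true (allow) or false (deny), the most specific matching CIDR wins, and
// with no rules configured every remote address is allowed.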
func (lh *LightHouse) SetRemoteAllowList(allowList *AllowList) {
lh.Lock()
defer lh.Unlock()
lh.remoteAllowList = allowList
}
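// SetLocalAllowList sets the allow list applied to the local addresses we
// advertise to lighthouses (lighthouse.localAllowList), which can also
// filter by interface name.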
func (lh *LightHouse) SetLocalAllowList(allowList *AllowList) {
lh.Lock()
defer lh.Unlock()
lh.localAllowList = allowList
}
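// ValidateLHStaticEntries returns an error if any configured lighthouse is
// missing from the static_host_map.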
func (lh *LightHouse) ValidateLHStaticEntries() error {
for lhIP := range lh.lighthouses {
if _, ok := lh.staticList[lhIP]; !ok {
return fmt.Errorf("Lighthouse %s does not have a static_host_map entry", IntIp(lhIP))
}
}
return nil
}
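// Query returns the known addresses for ip. If ip is not a lighthouse, a
// HostQuery is also sent to the lighthouses so a later call may succeed.
// ErrHostNotKnown is returned when nothing is cached yet.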
func (lh *LightHouse) Query(ip uint32, f EncWriter) ([]*udpAddr, error) {
if !lh.IsLighthouseIP(ip) {
lh.QueryServer(ip, f)
}
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
lh.RUnlock()
return v, nil
}
lh.RUnlock()
return nil, ErrHostNotKnown
}
// This is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip uint32, f EncWriter) {
if !lh.amLighthouse {
// Send a query to the lighthouses and hope for the best next time
query, err := proto.Marshal(NewLhQueryByInt(ip))
if err != nil {
lh.l.WithError(err).WithField("vpnIp", IntIp(ip)).Error("Failed to marshal lighthouse query payload")
return
}
lh.metricTx(NebulaMeta_HostQuery, int64(len(lh.lighthouses)))
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for n := range lh.lighthouses {
f.SendMessageToVpnIp(lightHouse, 0, n, query, nb, out)
}
}
}
// QueryCache returns whatever we have cached locally for ip, without querying the lighthouses
func (lh *LightHouse) QueryCache(ip uint32) []*udpAddr {
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
lh.RUnlock()
return v
}
lh.RUnlock()
return nil
}
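// DeleteVpnIP removes the cached addresses for vpnIP unless it is a static
// host map entry.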
func (lh *LightHouse) DeleteVpnIP(vpnIP uint32) {
// First we check the static mapping
// and do nothing if it is there
if _, ok := lh.staticList[vpnIP]; ok {
return
}
lh.Lock()
//l.Debugln(lh.addrMap)
delete(lh.addrMap, vpnIP)
lh.l.Debugf("deleting %s from lighthouse.", IntIp(vpnIP))
lh.Unlock()
}
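// AddRemote records toIp as a known address for vpnIP. Hosts covered by the
// static host map are only updated when static is true, and addresses
// rejected by the remote allow list are dropped.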
func (lh *LightHouse) AddRemote(vpnIP uint32, toIp *udpAddr, static bool) {
// First we check if the sender thinks this is a static entry
// and do nothing if it is not, but should be considered static
if !static {
if _, ok := lh.staticList[vpnIP]; ok {
return
}
}
lh.Lock()
defer lh.Unlock()
for _, v := range lh.addrMap[vpnIP] {
if v.Equals(toIp) {
return
}
}
allow := lh.remoteAllowList.Allow(toIp.IP)
lh.l.WithField("remoteIp", toIp).WithField("allow", allow).Debug("remoteAllowList.Allow")
if !allow {
return
}
//l.Debugf("Adding reply of %s as %s\n", IntIp(vpnIP), toIp)
if static {
lh.staticList[vpnIP] = struct{}{}
}
lh.addrMap[vpnIP] = append(lh.addrMap[vpnIP], toIp.Copy())
}
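// AddRemoteAndReset replaces all cached addresses for vpnIP with toIp. It
// only does anything when we are a lighthouse.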
func (lh *LightHouse) AddRemoteAndReset(vpnIP uint32, toIp *udpAddr) {
if lh.amLighthouse {
lh.DeleteVpnIP(vpnIP)
lh.AddRemote(vpnIP, toIp, false)
}
}
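// IsLighthouseIP reports whether vpnIP is one of our configured lighthouses.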
func (lh *LightHouse) IsLighthouseIP(vpnIP uint32) bool {
if _, ok := lh.lighthouses[vpnIP]; ok {
return true
}
return false
}
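// NewLhQueryByInt builds a HostQuery message for the given vpn ip.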
func NewLhQueryByInt(VpnIp uint32) *NebulaMeta {
return &NebulaMeta{
Type: NebulaMeta_HostQuery,
Details: &NebulaMetaDetails{
VpnIp: VpnIp,
},
}
}
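// ip4Or6 holds either an IPv4 or an IPv6 address and port for the protobuf
// IpAndPorts/Ip6AndPorts fields; only one of the two is populated.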
type ip4Or6 struct {
v4 IpAndPort
v6 Ip6AndPort
}
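// NewIpAndPort packs ip and port into an ip4Or6, choosing the v4 or v6
// representation based on the address family.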
func NewIpAndPort(ip net.IP, port uint32) ip4Or6 {
ipp := ip4Or6{}
if ipv4 := ip.To4(); ipv4 != nil {
ipp.v4 = IpAndPort{Port: port}
ipp.v4.Ip = ip2int(ip)
} else {
ipp.v6 = Ip6AndPort{Port: port}
ipp.v6.Ip = make([]byte, len(ip))
copy(ipp.v6.Ip, ip)
}
return ipp
}
func NewIpAndPortFromUDPAddr(addr *udpAddr) ip4Or6 {
return NewIpAndPort(addr.IP, uint32(addr.Port))
}
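// NewUDPAddrFromLH4 converts a protobuf IpAndPort (IPv4) back into a udpAddr.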
func NewUDPAddrFromLH4(ipp *IpAndPort) *udpAddr {
ip := ipp.Ip
return NewUDPAddr(
net.IPv4(byte(ip&0xff000000>>24), byte(ip&0x00ff0000>>16), byte(ip&0x0000ff00>>8), byte(ip&0x000000ff)),
uint16(ipp.Port),
)
}
func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udpAddr {
return NewUDPAddr(ipp.Ip, uint16(ipp.Port))
}
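// LhUpdateWorker periodically sends our local addresses to the lighthouses.
// It is a no-op when we are a lighthouse or updates are disabled (interval 0).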
func (lh *LightHouse) LhUpdateWorker(f EncWriter) {
if lh.amLighthouse || lh.interval == 0 {
return
}
for {
lh.SendUpdate(f)
time.Sleep(time.Second * time.Duration(lh.interval))
}
}
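// SendUpdate advertises our local addresses (filtered by the local allow
// list and excluding our own vpn ip) to every configured lighthouse.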
func (lh *LightHouse) SendUpdate(f EncWriter) {
var v4 []*IpAndPort
var v6 []*Ip6AndPort
for _, e := range *localIps(lh.l, lh.localAllowList) {
// Only add IPs that aren't my VPN/tun IP
if ip2int(e) != lh.myIp {
ipp := NewIpAndPort(e, lh.nebulaPort)
if len(ipp.v6.Ip) > 0 {
v6 = append(v6, &ipp.v6)
} else {
v4 = append(v4, &ipp.v4)
}
}
}
m := &NebulaMeta{
Type: NebulaMeta_HostUpdateNotification,
Details: &NebulaMetaDetails{
VpnIp: lh.myIp,
IpAndPorts: v4,
Ip6AndPorts: v6,
},
}
lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lh.lighthouses)))
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
mm, err := proto.Marshal(m)
if err != nil {
lh.l.WithError(err).Error("Invalid marshal to update")
return
}
for vpnIp := range lh.lighthouses {
//l.Error("LIGHTHOUSE PACKET SEND", mm)
f.SendMessageToVpnIp(lightHouse, 0, vpnIp, mm, nb, out)
}
}
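// LightHouseHandler holds the per-handler scratch buffers used while
// processing inbound lighthouse packets, so they can be reused between
// requests instead of reallocated.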
type LightHouseHandler struct {
lh *LightHouse
nb []byte
out []byte
meta *NebulaMeta
iap []ip4Or6
iapp []*ip4Or6
}
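// NewRequestHandler returns a LightHouseHandler with its buffers preallocated.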
func (lh *LightHouse) NewRequestHandler() *LightHouseHandler {
lhh := &LightHouseHandler{
lh: lh,
nb: make([]byte, 12, 12),
out: make([]byte, mtu),
meta: &NebulaMeta{
Details: &NebulaMetaDetails{},
},
}
lhh.resizeIpAndPorts(10)
return lhh
}
// This method is similar to Reset(), but it re-uses the pointer structs
// so that we don't have to re-allocate them
func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
details := lhh.meta.Details
details.Reset()
lhh.meta.Reset()
lhh.meta.Details = details
return lhh.meta
}
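// resizeIpAndPorts grows the reusable ip4Or6 slices to hold at least n
// entries and truncates them to exactly n.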
func (lhh *LightHouseHandler) resizeIpAndPorts(n int) {
if cap(lhh.iap) < n {
lhh.iap = make([]ip4Or6, n)
lhh.iapp = make([]*ip4Or6, n)
for i := range lhh.iap {
lhh.iapp[i] = &lhh.iap[i]
}
}
lhh.iap = lhh.iap[:n]
lhh.iapp = lhh.iapp[:n]
}
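// setIpAndPortsFromNetIps copies ips into the reusable ip4Or6 slice and
// returns the matching slice of pointers.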
func (lhh *LightHouseHandler) setIpAndPortsFromNetIps(ips []*udpAddr) []*ip4Or6 {
lhh.resizeIpAndPorts(len(ips))
for i, e := range ips {
lhh.iap[i] = NewIpAndPortFromUDPAddr(e)
}
return lhh.iapp
}
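// HandleRequest processes a single inbound lighthouse packet from vpnIp,
// dispatching on the NebulaMeta message type.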
func (lhh *LightHouseHandler) HandleRequest(rAddr *udpAddr, vpnIp uint32, p []byte, c *cert.NebulaCertificate, f EncWriter) {
lh := lhh.lh
n := lhh.resetMeta()
err := proto.UnmarshalMerge(p, n)
if err != nil {
lh.l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).WithField("udpAddr", rAddr).
Error("Failed to unmarshal lighthouse packet")
//TODO: send recv_error?
return
}
if n.Details == nil {
lh.l.WithField("vpnIp", IntIp(vpnIp)).WithField("udpAddr", rAddr).
Error("Invalid lighthouse update")
//TODO: send recv_error?
return
}
lh.metricRx(n.Type, 1)
switch n.Type {
case NebulaMeta_HostQuery:
// Exit if we don't answer queries
if !lh.amLighthouse {
lh.l.Debugln("I don't answer queries, but received from: ", rAddr)
return
}
//l.Debugln("Got Query")
ips, err := lh.Query(n.Details.VpnIp, f)
if err != nil {
//l.Debugf("Can't answer query %s from %s because error: %s", IntIp(n.Details.VpnIp), rAddr, err)
return
} else {
reqVpnIP := n.Details.VpnIp
n = lhh.resetMeta()
n.Type = NebulaMeta_HostQueryReply
n.Details.VpnIp = reqVpnIP
v4s := make([]*IpAndPort, 0)
v6s := make([]*Ip6AndPort, 0)
for _, v := range lhh.setIpAndPortsFromNetIps(ips) {
if len(v.v6.Ip) > 0 {
v6s = append(v6s, &v.v6)
} else {
v4s = append(v4s, &v.v4)
}
}
if len(v4s) > 0 {
n.Details.IpAndPorts = v4s
}
if len(v6s) > 0 {
n.Details.Ip6AndPorts = v6s
}
reply, err := proto.Marshal(n)
if err != nil {
lh.l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).Error("Failed to marshal lighthouse host query reply")
return
}
lh.metricTx(NebulaMeta_HostQueryReply, 1)
f.SendMessageToVpnIp(lightHouse, 0, vpnIp, reply, lhh.nb, lhh.out[:0])
// This signals the other side to punch some zero byte udp packets
ips, err = lh.Query(vpnIp, f)
if err != nil {
lh.l.WithField("vpnIp", IntIp(vpnIp)).Debugln("Can't notify host to punch")
return
} else {
//l.Debugln("Notify host to punch", iap)
n = lhh.resetMeta()
n.Type = NebulaMeta_HostPunchNotification
n.Details.VpnIp = vpnIp
v4s := make([]*IpAndPort, 0)
v6s := make([]*Ip6AndPort, 0)
for _, v := range lhh.setIpAndPortsFromNetIps(ips) {
if len(v.v6.Ip) > 0 {
v6s = append(v6s, &v.v6)
} else {
v4s = append(v4s, &v.v4)
}
}
if len(v4s) > 0 {
n.Details.IpAndPorts = v4s
}
if len(v6s) > 0 {
n.Details.Ip6AndPorts = v6s
}
reply, _ := proto.Marshal(n)
lh.metricTx(NebulaMeta_HostPunchNotification, 1)
f.SendMessageToVpnIp(lightHouse, 0, reqVpnIP, reply, lhh.nb, lhh.out[:0])
}
//fmt.Println(reply, remoteaddr)
}
case NebulaMeta_HostQueryReply:
if !lh.IsLighthouseIP(vpnIp) {
return
}
for _, a := range n.Details.IpAndPorts {
ans := NewUDPAddrFromLH4(a)
if ans != nil {
lh.AddRemote(n.Details.VpnIp, ans, false)
}
}
for _, a := range n.Details.Ip6AndPorts {
ans := NewUDPAddrFromLH6(a)
if ans != nil {
lh.AddRemote(n.Details.VpnIp, ans, false)
}
}
// Non-blocking attempt to trigger, skip if it would block
select {
case lh.handshakeTrigger <- n.Details.VpnIp:
default:
}
case NebulaMeta_HostUpdateNotification:
// Simple check that the host sent this, not someone else
if n.Details.VpnIp != vpnIp {
lh.l.WithField("vpnIp", IntIp(vpnIp)).WithField("answer", IntIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
return
}
for _, a := range n.Details.IpAndPorts {
ans := NewUDPAddrFromLH4(a)
if ans != nil {
lh.AddRemote(n.Details.VpnIp, ans, false)
}
}
for _, a := range n.Details.Ip6AndPorts {
ans := NewUDPAddrFromLH6(a)
if ans != nil {
lh.AddRemote(n.Details.VpnIp, ans, false)
}
}
case NebulaMeta_HostMovedNotification:
case NebulaMeta_HostPunchNotification:
if !lh.IsLighthouseIP(vpnIp) {
return
}
empty := []byte{0}
for _, a := range n.Details.IpAndPorts {
vpnPeer := NewUDPAddrFromLH4(a)
if vpnPeer == nil {
continue
}
go func() {
time.Sleep(lh.punchDelay)
lh.metricHolepunchTx.Inc(1)
lh.punchConn.WriteTo(empty, vpnPeer)
}()
if lh.l.Level >= logrus.DebugLevel {
//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
lh.l.Debugf("Punching on %d for %s", a.Port, IntIp(n.Details.VpnIp))
}
}
for _, a := range n.Details.Ip6AndPorts {
vpnPeer := NewUDPAddrFromLH6(a)
if vpnPeer == nil {
continue
}
go func() {
time.Sleep(lh.punchDelay)
lh.metricHolepunchTx.Inc(1)
lh.punchConn.WriteTo(empty, vpnPeer)
}()
if lh.l.Level >= logrus.DebugLevel {
//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
lh.l.Debugf("Punching on %d for %s", a.Port, IntIp(n.Details.VpnIp))
}
}
// This sends a nebula test packet to the host trying to contact us. In the case
// of a double nat or other difficult scenario, this may help establish
// a tunnel.
if lh.punchBack {
go func() {
time.Sleep(time.Second * 5)
lh.l.Debugf("Sending a nebula test packet to vpn ip %s", IntIp(n.Details.VpnIp))
// TODO we have to allocate a new output buffer here since we are spawning a new goroutine
// for each punchBack packet. We should move this into a timerwheel or a single goroutine
// managed by a channel.
f.SendMessageToVpnIp(test, testRequest, n.Details.VpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}()
}
}
}
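// metricRx and metricTx record lighthouse protocol message counts by type.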
func (lh *LightHouse) metricRx(t NebulaMeta_MessageType, i int64) {
lh.metrics.Rx(NebulaMessageType(t), 0, i)
}
func (lh *LightHouse) metricTx(t NebulaMeta_MessageType, i int64) {
lh.metrics.Tx(NebulaMessageType(t), 0, i)
}
/*
func (f *Interface) sendPathCheck(ci *ConnectionState, endpoint *net.UDPAddr, counter int) {
c := ci.messageCounter
b := HeaderEncode(nil, Version, uint8(path_check), 0, ci.remoteIndex, c)
ci.messageCounter++
if ci.eKey != nil {
msg := ci.eKey.EncryptDanger(b, nil, []byte(strconv.Itoa(counter)), c)
//msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c)
f.outside.WriteTo(msg, endpoint)
l.Debugf("path_check sent, remote index: %d, pathCounter %d", ci.remoteIndex, counter)
}
}
func (f *Interface) sendPathCheckReply(ci *ConnectionState, endpoint *net.UDPAddr, counter []byte) {
c := ci.messageCounter
b := HeaderEncode(nil, Version, uint8(path_check_reply), 0, ci.remoteIndex, c)
ci.messageCounter++
if ci.eKey != nil {
msg := ci.eKey.EncryptDanger(b, nil, counter, c)
f.outside.WriteTo(msg, endpoint)
l.Debugln("path_check sent, remote index: ", ci.remoteIndex)
}
}
*/