diff --git a/node/config/version.go b/node/config/version.go
index aff6922..338b501 100644
--- a/node/config/version.go
+++ b/node/config/version.go
@@ -36,5 +36,5 @@ func FormatVersion(version []byte) string {
 }
 
 func GetPatchNumber() byte {
-  return 0x07
+  return 0x08
 }
diff --git a/node/consensus/data/consensus_frames.go b/node/consensus/data/consensus_frames.go
index 4af1eb4..14029bf 100644
--- a/node/consensus/data/consensus_frames.go
+++ b/node/consensus/data/consensus_frames.go
@@ -225,7 +225,7 @@ func (e *DataClockConsensusEngine) sync(
   response, err := client.GetDataFrame(
     context.TODO(),
     &protobufs.GetDataFrameRequest{
-      FrameNumber: 0,
+      FrameNumber: currentLatest.FrameNumber + 1,
     },
     grpc.MaxCallRecvMsgSize(600*1024*1024),
   )
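Note on the sync change above: instead of re-requesting history from genesis (`FrameNumber: 0`), the node now asks a peer only for frames past its own head. A minimal, runnable sketch of the idea with stand-in types (not the node's actual protobufs or gRPC client):

```go
package main

import "fmt"

type frame struct{ number uint64 }

// serveFrom stands in for a peer's GetDataFrame handler: it returns the
// frames with numbers >= from, i.e. only what the requester is missing.
func serveFrom(peer []frame, from uint64) []frame {
	var out []frame
	for _, f := range peer {
		if f.number >= from {
			out = append(out, f)
		}
	}
	return out
}

func main() {
	local := []frame{{1}, {2}, {3}}
	peer := []frame{{1}, {2}, {3}, {4}, {5}}

	// Old behavior: FrameNumber 0 re-fetched the entire history.
	// New behavior: request currentLatest.FrameNumber + 1 onward.
	head := local[len(local)-1].number
	local = append(local, serveFrom(peer, head+1)...)
	fmt.Println("synced through frame", local[len(local)-1].number)
}
```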
diff --git a/node/consensus/data/data_clock_consensus_engine.go b/node/consensus/data/data_clock_consensus_engine.go
index 466744d..8f1b9d5 100644
--- a/node/consensus/data/data_clock_consensus_engine.go
+++ b/node/consensus/data/data_clock_consensus_engine.go
@@ -315,17 +315,27 @@ func (e *DataClockConsensusEngine) Start() <-chan error {
 
   go func() {
     thresholdBeforeConfirming := 4
-
+    frame, err := e.dataTimeReel.Head()
+    if err != nil {
+      panic(err)
+    }
     for {
-      list := &protobufs.DataPeerListAnnounce{
-        PeerList: []*protobufs.DataPeer{},
-      }
-
-      frame, err := e.dataTimeReel.Head()
+      nextFrame, err := e.dataTimeReel.Head()
       if err != nil {
         panic(err)
       }
 
+      if frame.FrameNumber >= nextFrame.FrameNumber {
+        time.Sleep(30 * time.Second)
+        continue
+      }
+
+      frame = nextFrame
+
+      list := &protobufs.DataPeerListAnnounce{
+        PeerList: []*protobufs.DataPeer{},
+      }
+
       e.latestFrameReceived = frame.FrameNumber
       e.logger.Info(
         "preparing peer announce",
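The announce loop above now only does work when the data time reel's head actually advances, sleeping 30 seconds otherwise. A runnable sketch of that gate, with an atomic counter standing in for `e.dataTimeReel.Head()`:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// head stands in for e.dataTimeReel.Head(); another goroutine advances it.
var head atomic.Uint64

func main() {
	head.Store(1)
	go func() {
		time.Sleep(20 * time.Millisecond)
		head.Store(2) // simulate a new frame arriving
	}()

	last := head.Load()
	for {
		next := head.Load()
		if last >= next {
			// Nothing new: sleep instead of re-announcing the same frame.
			// The real loop sleeps 30 seconds.
			time.Sleep(5 * time.Millisecond)
			continue
		}
		last = next
		fmt.Println("preparing peer announce at frame", last)
		return
	}
}
```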
diff --git a/node/consensus/data/execution_registration.go b/node/consensus/data/execution_registration.go
index ec89090..7534c2a 100644
--- a/node/consensus/data/execution_registration.go
+++ b/node/consensus/data/execution_registration.go
@@ -15,18 +15,18 @@ func (e *DataClockConsensusEngine) RegisterExecutor(
 
   go func() {
     for {
-      masterFrame, err := e.masterTimeReel.Head()
+      dataFrame, err := e.dataTimeReel.Head()
       if err != nil {
         panic(err)
       }
 
       logger.Info(
         "awaiting frame",
-        zap.Uint64("current_frame", masterFrame.FrameNumber),
+        zap.Uint64("current_frame", dataFrame.FrameNumber),
         zap.Uint64("target_frame", frame),
       )
 
-      newFrame := masterFrame.FrameNumber
+      newFrame := dataFrame.FrameNumber
       if newFrame >= frame {
         logger.Info(
           "injecting execution engine at frame",
@@ -57,18 +57,18 @@ func (e *DataClockConsensusEngine) UnregisterExecutor(
 
   go func() {
     for {
-      masterFrame, err := e.masterTimeReel.Head()
+      dataFrame, err := e.dataTimeReel.Head()
       if err != nil {
         panic(err)
       }
 
       logger.Info(
         "awaiting frame",
-        zap.Uint64("current_frame", masterFrame.FrameNumber),
+        zap.Uint64("current_frame", dataFrame.FrameNumber),
         zap.Uint64("target_frame", frame),
       )
 
-      newFrame := masterFrame.FrameNumber
+      newFrame := dataFrame.FrameNumber
       if newFrame >= frame {
         logger.Info(
           "removing execution engine at frame",
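Both registration paths above now key off the data clock rather than the master clock. A small sketch of the wait-then-act pattern they share; `pollHead` and `hook` are stand-ins for `e.dataTimeReel.Head()` and the engine injection/removal step:

```go
package main

import (
	"fmt"
	"time"
)

// waitForFrame polls a head function until it reaches target, then runs
// hook once. This mirrors the shape of RegisterExecutor/UnregisterExecutor.
func waitForFrame(target uint64, pollHead func() uint64, hook func()) {
	for pollHead() < target {
		time.Sleep(time.Millisecond) // the node would wait between polls
	}
	hook()
}

func main() {
	h := uint64(0)
	waitForFrame(3, func() uint64 { h++; return h }, func() {
		fmt.Println("injecting execution engine at frame", h)
	})
}
```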
diff --git a/node/consensus/data/message_handler.go b/node/consensus/data/message_handler.go
index 9a431a1..893f481 100644
--- a/node/consensus/data/message_handler.go
+++ b/node/consensus/data/message_handler.go
@@ -81,8 +81,6 @@ func (e *DataClockConsensusEngine) runMessageHandler() {
           return nil
         }()
       }
-    } else {
-      return
     }
 
     any := &anypb.Any{}
@@ -273,7 +271,6 @@ func (e *DataClockConsensusEngine) handleDataPeerListAnnounce(
       }
       e.peerMapMx.Unlock()
     }
-
   return nil
 }
@@ -296,40 +293,42 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin(
   address []byte,
   any *anypb.Any,
 ) error {
-  announce := &protobufs.AnnounceProverJoin{}
-  if err := any.UnmarshalTo(announce); err != nil {
-    return errors.Wrap(err, "handle data announce prover join")
-  }
+  if e.GetFrameProverTries()[0].Contains(e.parentSelector) {
+    announce := &protobufs.AnnounceProverJoin{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover join")
+    }
 
-  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-    return errors.Wrap(
-      errors.New("invalid data"),
-      "handle data announce prover join",
-    )
-  }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover join",
+      )
+    }
 
-  address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-  if err != nil {
-    return errors.Wrap(err, "handle data announce prover join")
-  }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover join")
+    }
 
-  msg := []byte("join")
-  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-  msg = append(msg, announce.Filter...)
-  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
-    return errors.Wrap(err, "handle data announce prover join")
-  }
+    msg := []byte("join")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover join")
+    }
 
-  e.proverTrieRequestsMx.Lock()
-  if len(announce.Filter) != len(e.filter) {
-    return errors.Wrap(
-      errors.New("filter width mismatch"),
-      "handle data announce prover join",
-    )
-  }
+    e.proverTrieRequestsMx.Lock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover join",
+      )
+    }
 
-  e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
-  e.proverTrieRequestsMx.Unlock()
+    e.proverTrieJoinRequests[string(address)] = string(announce.Filter)
+    e.proverTrieRequestsMx.Unlock()
+  }
 
   return nil
 }
@@ -338,41 +337,43 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
   address []byte,
   any *anypb.Any,
 ) error {
-  announce := &protobufs.AnnounceProverLeave{}
-  if err := any.UnmarshalTo(announce); err != nil {
-    return errors.Wrap(err, "handle data announce prover leave")
+  if e.GetFrameProverTries()[0].Contains(e.parentSelector) {
+    announce := &protobufs.AnnounceProverLeave{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
+
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover leave",
+      )
+    }
+
+    e.proverTrieRequestsMx.Lock()
+
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover leave",
+      )
+    }
+
+    msg := []byte("leave")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
+
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover leave")
+    }
+
+    e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
+    e.proverTrieRequestsMx.Unlock()
   }
-
-  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-    return errors.Wrap(
-      errors.New("invalid data"),
-      "handle data announce prover leave",
-    )
-  }
-
-  e.proverTrieRequestsMx.Lock()
-
-  if len(announce.Filter) != len(e.filter) {
-    return errors.Wrap(
-      errors.New("filter width mismatch"),
-      "handle data announce prover leave",
-    )
-  }
-
-  msg := []byte("leave")
-  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-  msg = append(msg, announce.Filter...)
-  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
-    return errors.Wrap(err, "handle data announce prover leave")
-  }
-
-  address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-  if err != nil {
-    return errors.Wrap(err, "handle data announce prover leave")
-  }
-
-  e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
-  e.proverTrieRequestsMx.Unlock()
 
   return nil
 }
@@ -381,40 +382,42 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
   address []byte,
   any *anypb.Any,
 ) error {
-  announce := &protobufs.AnnounceProverPause{}
-  if err := any.UnmarshalTo(announce); err != nil {
-    return errors.Wrap(err, "handle data announce prover pause")
-  }
+  if e.GetFrameProverTries()[0].Contains(e.parentSelector) {
+    announce := &protobufs.AnnounceProverPause{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-    return errors.Wrap(
-      errors.New("invalid data"),
-      "handle data announce prover leave",
-    )
-  }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover leave",
+      )
+    }
 
-  e.proverTrieRequestsMx.Lock()
-  if len(announce.Filter) != len(e.filter) {
-    return errors.Wrap(
-      errors.New("filter width mismatch"),
-      "handle data announce prover pause",
-    )
-  }
+    e.proverTrieRequestsMx.Lock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover pause",
+      )
+    }
 
-  msg := []byte("pause")
-  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-  msg = append(msg, announce.Filter...)
-  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
-    return errors.Wrap(err, "handle data announce prover pause")
-  }
+    msg := []byte("pause")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-  address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-  if err != nil {
-    return errors.Wrap(err, "handle data announce prover pause")
-  }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover pause")
+    }
 
-  e.proverTriePauseRequests[string(address)] = string(announce.Filter)
-  e.proverTrieRequestsMx.Unlock()
+    e.proverTriePauseRequests[string(address)] = string(announce.Filter)
+    e.proverTrieRequestsMx.Unlock()
+  }
 
   return nil
 }
@@ -423,104 +426,108 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
   address []byte,
   any *anypb.Any,
 ) error {
-  announce := &protobufs.AnnounceProverResume{}
-  if err := any.UnmarshalTo(announce); err != nil {
-    return errors.Wrap(err, "handle data announce prover resume")
-  }
+  if e.GetFrameProverTries()[0].Contains(e.parentSelector) {
+    announce := &protobufs.AnnounceProverResume{}
+    if err := any.UnmarshalTo(announce); err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
-    return errors.Wrap(
-      errors.New("invalid data"),
-      "handle data announce prover resume",
-    )
-  }
+    if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+      return errors.Wrap(
+        errors.New("invalid data"),
+        "handle data announce prover resume",
+      )
+    }
 
-  e.proverTrieRequestsMx.Lock()
-  if len(announce.Filter) != len(e.filter) {
-    return errors.Wrap(
-      errors.New("filter width mismatch"),
-      "handle data announce prover resume",
-    )
-  }
+    e.proverTrieRequestsMx.Lock()
+    if len(announce.Filter) != len(e.filter) {
+      return errors.Wrap(
+        errors.New("filter width mismatch"),
+        "handle data announce prover resume",
+      )
+    }
 
-  address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
-  if err != nil {
-    return errors.Wrap(err, "handle data announce prover resume")
-  }
+    address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
+    if err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-  msg := []byte("resume")
-  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
-  msg = append(msg, announce.Filter...)
-  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
-    return errors.Wrap(err, "handle data announce prover resume")
-  }
+    msg := []byte("resume")
+    msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+    msg = append(msg, announce.Filter...)
+    if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+      return errors.Wrap(err, "handle data announce prover resume")
+    }
 
-  e.proverTrieResumeRequests[string(address)] = string(announce.Filter)
-  e.proverTrieRequestsMx.Unlock()
+    e.proverTrieResumeRequests[string(address)] = string(announce.Filter)
+    e.proverTrieRequestsMx.Unlock()
+  }
 
   return nil
 }
 
 func (e *DataClockConsensusEngine) handleTokenRequest(
   transition *protobufs.TokenRequest,
 ) error {
-  e.stagedTransactionsMx.Lock()
-  if e.stagedTransactions == nil {
-    e.stagedTransactions = &protobufs.TokenRequests{}
-  }
+  if e.GetFrameProverTries()[0].Contains(e.parentSelector) {
+    e.stagedTransactionsMx.Lock()
+    if e.stagedTransactions == nil {
+      e.stagedTransactions = &protobufs.TokenRequests{}
+    }
 
-  found := false
-  for _, ti := range e.stagedTransactions.Requests {
-    switch t := ti.Request.(type) {
-    case *protobufs.TokenRequest_Transfer:
-      switch r := transition.Request.(type) {
+    found := false
+    for _, ti := range e.stagedTransactions.Requests {
+      switch t := ti.Request.(type) {
       case *protobufs.TokenRequest_Transfer:
-        if bytes.Equal(r.Transfer.OfCoin.Address, t.Transfer.OfCoin.Address) {
-          found = true
+        switch r := transition.Request.(type) {
+        case *protobufs.TokenRequest_Transfer:
+          if bytes.Equal(r.Transfer.OfCoin.Address, t.Transfer.OfCoin.Address) {
+            found = true
+          }
         }
-      }
-    case *protobufs.TokenRequest_Split:
-      switch r := transition.Request.(type) {
       case *protobufs.TokenRequest_Split:
-        if bytes.Equal(r.Split.OfCoin.Address, r.Split.OfCoin.Address) {
-          found = true
+        switch r := transition.Request.(type) {
+        case *protobufs.TokenRequest_Split:
+          if bytes.Equal(r.Split.OfCoin.Address, r.Split.OfCoin.Address) {
+            found = true
+          }
         }
-      }
-    case *protobufs.TokenRequest_Merge:
-      switch r := transition.Request.(type) {
       case *protobufs.TokenRequest_Merge:
-      checkmerge:
-        for i := range t.Merge.Coins {
-          for j := range r.Merge.Coins {
-            if bytes.Equal(t.Merge.Coins[i].Address, r.Merge.Coins[j].Address) {
-              found = true
-              break checkmerge
+        switch r := transition.Request.(type) {
+        case *protobufs.TokenRequest_Merge:
+        checkmerge:
+          for i := range t.Merge.Coins {
+            for j := range r.Merge.Coins {
+              if bytes.Equal(t.Merge.Coins[i].Address, r.Merge.Coins[j].Address) {
+                found = true
+                break checkmerge
+              }
             }
           }
         }
-      }
-    case *protobufs.TokenRequest_Mint:
-      switch r := transition.Request.(type) {
       case *protobufs.TokenRequest_Mint:
-      checkmint:
-        for i := range t.Mint.Proofs {
-          for j := range r.Mint.Proofs {
-            if bytes.Equal(t.Mint.Proofs[i], r.Mint.Proofs[j]) {
-              found = true
-              break checkmint
+        switch r := transition.Request.(type) {
+        case *protobufs.TokenRequest_Mint:
+        checkmint:
+          for i := range t.Mint.Proofs {
+            for j := range r.Mint.Proofs {
+              if bytes.Equal(t.Mint.Proofs[i], r.Mint.Proofs[j]) {
+                found = true
+                break checkmint
+              }
             }
           }
         }
       }
     }
-  }
 
-  if !found {
-    e.stagedTransactions.Requests = append(
-      e.stagedTransactions.Requests,
-      transition,
-    )
+    if !found {
+      e.stagedTransactions.Requests = append(
+        e.stagedTransactions.Requests,
+        transition,
+      )
+    }
+    e.stagedTransactionsMx.Unlock()
   }
-  e.stagedTransactionsMx.Unlock()
 
   return nil
 }
@@ -547,8 +554,7 @@ func (e *DataClockConsensusEngine) handleClockFrameData(
   }
 
   for _, trie := range e.GetFrameProverTries() {
-    prover := trie.FindNearest(addr.Bytes())
-    if !bytes.Equal(prover.External.Key, addr.Bytes()) {
+    if trie.Contains(addr.Bytes()) {
       e.logger.Info(
         "prover not in trie at frame, address may be in fork",
         zap.Binary("address", address),
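The common thread in the handler changes above: prover join/leave/pause/resume announces and token requests are now only staged by nodes whose own selector sits in the first frame-prover trie (`e.GetFrameProverTries()[0].Contains(e.parentSelector)`). A runnable sketch of that gate, with a map standing in for the trie; the early return is equivalent to the patch's wrapping `if`:

```go
package main

import "fmt"

// set stands in for the first frame-prover trie; Contains mirrors the
// trie.Contains membership check the handlers now gate on.
type set map[string]bool

func (s set) Contains(sel string) bool { return s[sel] }

type engine struct {
	tries          []set
	parentSelector string
	joinRequests   map[string]string
}

// handleJoin only stages the request when this node's selector is in the
// active prover set; non-provers drop the announce instead of tracking it.
func (e *engine) handleJoin(addr, filter string) {
	if !e.tries[0].Contains(e.parentSelector) {
		return
	}
	e.joinRequests[addr] = filter
}

func main() {
	e := &engine{
		tries:          []set{{"self": true}},
		parentSelector: "self",
		joinRequests:   map[string]string{},
	}
	e.handleJoin("addr-1", "filter-1")
	fmt.Println(e.joinRequests)
}
```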
diff --git a/node/consensus/data/peer_messaging.go b/node/consensus/data/peer_messaging.go
index 1e082d5..d3b40f8 100644
--- a/node/consensus/data/peer_messaging.go
+++ b/node/consensus/data/peer_messaging.go
@@ -133,7 +133,7 @@ func (e *DataClockConsensusEngine) decompressAndStoreCandidates(
       delete(e.peerMap, string(peerId))
     }
     e.peerMapMx.Unlock()
-    return nil, errors.New("invalid continuity for compressed sync response")
+    return nil, errors.New("invalid continuity for compressed x response")
   }
 
   var final *protobufs.ClockFrame
diff --git a/node/consensus/time/data_time_reel.go b/node/consensus/time/data_time_reel.go
index d1af85d..97fbb67 100644
--- a/node/consensus/time/data_time_reel.go
+++ b/node/consensus/time/data_time_reel.go
@@ -277,71 +277,128 @@ func (d *DataTimeReel) runLoop() {
   for {
     select {
     case frame := <-d.frames:
+      rawFrame, err := d.clockStore.GetStagedDataClockFrame(
+        d.filter,
+        frame.frameNumber,
+        frame.selector.FillBytes(make([]byte, 32)),
+        false,
+      )
+      if err != nil {
+        panic(err)
+      }
+
+      d.logger.Debug(
+        "processing frame",
+        zap.Uint64("frame_number", rawFrame.FrameNumber),
+        zap.String("output_tag", hex.EncodeToString(rawFrame.Output[:64])),
+        zap.Uint64("head_number", d.head.FrameNumber),
+        zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
+      )
       // Most common scenario: in order – new frame is higher number
-      if d.head.FrameNumber < frame.frameNumber {
-        d.logger.Debug(
-          "frame is higher",
-          zap.Uint64("head_frame_number", d.head.FrameNumber),
-          zap.Uint64("frame_number", frame.frameNumber),
-        )
+      if d.head.FrameNumber < rawFrame.FrameNumber {
+        d.logger.Debug("frame is higher")
 
-        rawFrame, err := d.clockStore.GetStagedDataClockFrame(
-          d.filter,
-          frame.frameNumber,
-          frame.selector.FillBytes(make([]byte, 32)),
-          false,
-        )
+        parent := new(big.Int).SetBytes(rawFrame.ParentSelector)
+        selector, err := rawFrame.GetSelector()
         if err != nil {
           panic(err)
         }
 
         distance, err := d.GetDistance(rawFrame)
         if err != nil {
-          panic(err)
+          if !errors.Is(err, store.ErrNotFound) {
+            panic(err)
+          }
+
+          d.addPending(selector, parent, frame.frameNumber)
+          d.processPending(d.head, frame)
+          continue
         }
 
-        // Otherwise set it as the next and process all pending
-        d.setHead(rawFrame, distance)
-      } else if d.head.FrameNumber == frame.frameNumber {
-        // frames are equivalent, no need to act
         headSelector, err := d.head.GetSelector()
         if err != nil {
           panic(err)
         }
 
-        if headSelector.Cmp(frame.selector) == 0 {
-          d.logger.Debug("equivalent frame")
+        // If the frame has a gap from the head or is not descendent, mark it as
+        // pending:
+        if rawFrame.FrameNumber-d.head.FrameNumber != 1 {
+          d.logger.Debug(
+            "frame has has gap, fork choice",
+            zap.Bool("has_gap", rawFrame.FrameNumber-d.head.FrameNumber != 1),
+            zap.String("parent_selector", parent.Text(16)),
+            zap.String("head_selector", headSelector.Text(16)),
+          )
+
+          d.forkChoice(rawFrame, distance)
+          d.processPending(d.head, frame)
           continue
         }
 
-        rawFrame, err := d.clockStore.GetStagedDataClockFrame(
-          d.filter,
-          frame.frameNumber,
-          frame.selector.FillBytes(make([]byte, 32)),
-          false,
-        )
-        if err != nil {
-          panic(err)
+        // Otherwise set it as the next and process all pending
+        d.setHead(rawFrame, distance)
+        d.processPending(d.head, frame)
+      } else if d.head.FrameNumber == rawFrame.FrameNumber {
+        // frames are equivalent, no need to act
+        if bytes.Equal(d.head.Output, rawFrame.Output) {
+          d.logger.Debug("equivalent frame")
+          d.processPending(d.head, frame)
+          continue
         }
 
         distance, err := d.GetDistance(rawFrame)
         if err != nil {
          panic(err)
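The reworked `runLoop` above fetches the staged frame once up front, then branches: a direct descendant advances the head, a frame with a gap (or an unknown distance) is deferred via `addPending`/`forkChoice`, an equal-height competitor goes through distance comparison, and a lower-height fork is stashed until it catches up or dies off. A compact, runnable sketch of that branching shape (distances and fork choice elided; all types are stand-ins for the real ClockFrame plumbing):

```go
package main

import "fmt"

type frame struct {
	number         uint64
	output         string
	parentSelector string
}

type reel struct {
	head    frame
	pending []frame
}

// process mirrors the shape of the reworked runLoop: advance on a direct
// descendant, defer frames that arrive with a gap, and stash lower forks.
func (r *reel) process(f frame) string {
	switch {
	case f.number == r.head.number+1 && f.parentSelector == r.head.output:
		r.head = f
		return "set head"
	case f.number > r.head.number:
		r.pending = append(r.pending, f)
		return "gap: add pending, fork choice"
	case f.number == r.head.number && f.output == r.head.output:
		return "equivalent frame"
	case f.number == r.head.number:
		return "same height: compare distances / fork choice"
	default:
		r.pending = append(r.pending, f)
		return "lower fork: add pending"
	}
}

func main() {
	r := &reel{head: frame{1, "out1", ""}}
	fmt.Println(r.process(frame{2, "out2", "out1"}))  // set head
	fmt.Println(r.process(frame{5, "out5", "out4"}))  // gap
	fmt.Println(r.process(frame{2, "out2x", "out1"})) // same height
}
```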
        }
+
+        d.logger.Debug(
+          "frame is same height",
+          zap.String("head_distance", d.headDistance.Text(16)),
+          zap.String("distance", distance.Text(16)),
+        )
 
         // Optimization: if competing frames share a parent we can short-circuit
         // fork choice
-        if new(big.Int).SetBytes(d.head.ParentSelector).Cmp(
-          frame.parentSelector,
-        ) == 0 && distance.Cmp(d.headDistance) < 0 {
+        if bytes.Equal(d.head.ParentSelector, rawFrame.ParentSelector) &&
+          distance.Cmp(d.headDistance) < 0 {
           d.logger.Debug(
             "frame shares parent, has shorter distance, short circuit",
           )
+          d.totalDistance.Sub(d.totalDistance, d.headDistance)
           d.setHead(rawFrame, distance)
+          d.processPending(d.head, frame)
           continue
         }
+
+        // Choose fork
+        d.forkChoice(rawFrame, distance)
+        d.processPending(d.head, frame)
       } else {
         d.logger.Debug("frame is lower height")
+
+        // tag: dusk – we should have some kind of check here to avoid brutal
+        // thrashing
+        existing, _, err := d.clockStore.GetDataClockFrame(
+          d.filter,
+          rawFrame.FrameNumber,
+          true,
+        )
+        if err != nil {
+          // if this returns an error it's either not found (which shouldn't
+          // happen without corruption) or pebble is borked, either way, panic
+          panic(err)
+        }
+
+        // It's a fork, but it's behind. We need to stash it until it catches
+        // up (or dies off)
+        if !bytes.Equal(existing.Output, rawFrame.Output) {
+          d.logger.Debug("is fork, add pending")
+          parent, selector, err := rawFrame.GetParentAndSelector()
+          if err != nil {
+            panic(err)
+          }
+
+          d.addPending(selector, parent, frame.frameNumber)
+          d.processPending(d.head, frame)
+        }
       }
     case <-d.done:
       return
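The `main.go` change that follows caps each core-pinned data worker at two OS-scheduler threads and effectively disables routine GC (a 9999% heap-growth target), trading collection frequency for memory headroom inside the worker's budget. The same calls in isolation; the tie to `DataWorkerMemoryLimit` (the 1.75GiB default below) is the patch's config default, not something this sketch enforces:

```go
package main

import (
	"fmt"
	"runtime"
	rdebug "runtime/debug"
)

func main() {
	// Two scheduler threads for a worker pinned to a core: avoids
	// oversubscribing the host when many workers run side by side.
	runtime.GOMAXPROCS(2)

	// GC only after the heap grows ~100x, so the worker spends its time
	// proving rather than collecting.
	rdebug.SetGCPercent(9999)

	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}
```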
diff --git a/node/main.go b/node/main.go
index 50bfa72..7919902 100644
--- a/node/main.go
+++ b/node/main.go
@@ -332,6 +332,9 @@ func main() {
   }
 
   if *core != 0 {
+    runtime.GOMAXPROCS(2)
+    rdebug.SetGCPercent(9999)
+
     if nodeConfig.Engine.DataWorkerMemoryLimit == 0 {
       nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB
     }
diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go
index 7ca7bfc..d56bec1 100644
--- a/node/p2p/blossomsub.go
+++ b/node/p2p/blossomsub.go
@@ -5,10 +5,15 @@ import (
   "bytes"
   "context"
   "crypto/rand"
   "encoding/hex"
+  "encoding/json"
   "fmt"
+  "io"
   "math/big"
   "math/bits"
   "net"
+  "net/http"
+  "strconv"
+  "strings"
   "sync"
   "time"
@@ -28,6 +33,7 @@ import (
   "github.com/mr-tron/base58"
   ma "github.com/multiformats/go-multiaddr"
   madns "github.com/multiformats/go-multiaddr-dns"
+  mn "github.com/multiformats/go-multiaddr/net"
   "github.com/pkg/errors"
   "github.com/prometheus/client_golang/prometheus"
   "go.uber.org/zap"
@@ -207,6 +213,8 @@ func NewBlossomSub(
   routingDiscovery := routing.NewRoutingDiscovery(kademliaDHT)
   util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network))
 
+  verifyReachability(p2pConfig)
+
   discoverPeers(p2pConfig, ctx, logger, h, routingDiscovery)
 
   // TODO: turn into an option flag for console logging, this is too noisy for
@@ -473,10 +481,19 @@ func initDHT(
   var kademliaDHT *dht.IpfsDHT
   var err error
   if isBootstrapPeer {
-    panic(
-      "this release is for normal peers only, if you are running a " +
-        "bootstrap node, please use v2.0-bootstrap",
-    )
+    if p2pConfig.Network == 0 {
+      panic(
+        "this release is for normal peers only, if you are running a " +
+          "bootstrap node, please use v2.0-bootstrap",
+      )
+    } else {
+      kademliaDHT, err = dht.New(
+        ctx,
+        h,
+        dht.Mode(dht.ModeServer),
+        dht.BootstrapPeers(bootstrappers...),
+      )
+    }
   } else {
     kademliaDHT, err = dht.New(
       ctx,
@@ -520,7 +537,9 @@ func initDHT(
 
   go func() {
     for {
       time.Sleep(30 * time.Second)
-      reconnect()
+      if len(h.Network().Peers()) == 0 {
+        reconnect()
+      }
     }
   }()
@@ -675,6 +694,91 @@ func (b *BlossomSub) SignMessage(msg []byte) ([]byte, error) {
   return sig, errors.Wrap(err, "sign message")
 }
 
+type ReachabilityRequest struct {
+  Port uint16 `json:"port"`
+  Type string `json:"type"`
+}
+
+type ReachabilityResponse struct {
+  Reachable bool   `json:"reachable"`
+  Error     string `json:"error"`
+}
+
+func verifyReachability(cfg *config.P2PConfig) bool {
+  a, err := ma.NewMultiaddr(cfg.ListenMultiaddr)
+  if err != nil {
+    return false
+  }
+
+  transport, addr, err := mn.DialArgs(a)
+  if err != nil {
+    return false
+  }
+
+  addrparts := strings.Split(addr, ":")
+  if len(addrparts) != 2 {
+    return false
+  }
+
+  port, err := strconv.ParseUint(addrparts[1], 10, 0)
+  if err != nil {
+    return false
+  }
+
+  if !strings.Contains(transport, "tcp") {
+    transport = "quic"
+  } else {
+    transport = "tcp"
+  }
+
+  req := &ReachabilityRequest{
+    Port: uint16(port),
+    Type: transport,
+  }
+
+  b, err := json.Marshal(req)
+  if err != nil {
+    return false
+  }
+
+  resp, err := http.Post(
+    "https://rpc.quilibrium.com/connectivity-check",
+    "application/json",
+    bytes.NewBuffer(b),
+  )
+  if err != nil {
+    fmt.Println("Reachability check not currently available, skipping test.")
+    return true
+  }
+  defer resp.Body.Close()
+
+  if resp.StatusCode != 200 {
+    fmt.Println("Reachability check not currently available, skipping test.")
+    return true
+  }
+
+  bodyBytes, err := io.ReadAll(resp.Body)
+  if err != nil {
+    fmt.Println("Reachability check not currently available, skipping test.")
+    return true
+  }
+
+  r := &ReachabilityResponse{}
+  err = json.Unmarshal(bodyBytes, r)
+  if err != nil {
+    fmt.Println("Reachability check not currently available, skipping test.")
+    return true
+  }
+
+  if r.Error != "" {
+    fmt.Println("Reachability check failed: " + r.Error)
+    return false
+  }
+
+  fmt.Println("Node passed reachability check.")
+  return true
+}
+
 func discoverPeers(
   p2pConfig *config.P2PConfig,
   ctx context.Context,
@@ -691,6 +795,7 @@ func discoverPeers(
     )
     if err != nil {
       logger.Error("could not find peers", zap.Error(err))
+      return
     }
 
     for peer := range peerChan {
@@ -714,6 +819,9 @@ func discoverPeers(
         "connected to peer",
         zap.String("peer_id", peer.ID.String()),
       )
+      if len(h.Network().Peers()) >= 6 {
+        break
+      }
     }
   }
 }
@@ -722,8 +830,8 @@ func discoverPeers(
 
   go func() {
     for {
-      time.Sleep(5 * time.Minute)
-      if len(h.Network().Peers()) < 16 {
+      time.Sleep(5 * time.Second)
+      if len(h.Network().Peers()) < 6 {
         discover()
       }
     }
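`verifyReachability` above derives the port and transport to probe from the configured listen multiaddr before POSTing to `rpc.quilibrium.com/connectivity-check`. A standalone sketch of just that parsing step, using the same `go-multiaddr` calls as the patch; the example address is illustrative, the node reads `cfg.ListenMultiaddr`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	ma "github.com/multiformats/go-multiaddr"
	mn "github.com/multiformats/go-multiaddr/net"
)

func main() {
	// Example listen address; a QUIC deployment would use a /udp/.../quic
	// multiaddr instead.
	a, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/8336")
	if err != nil {
		panic(err)
	}

	// DialArgs yields a network name ("tcp4", "udp4", ...) and "host:port".
	network, addr, err := mn.DialArgs(a)
	if err != nil {
		panic(err)
	}

	parts := strings.Split(addr, ":")
	port, err := strconv.ParseUint(parts[len(parts)-1], 10, 16)
	if err != nil {
		panic(err)
	}

	// As in the patch, anything non-TCP is reported to the checker as QUIC.
	transport := "quic"
	if strings.Contains(network, "tcp") {
		transport = "tcp"
	}

	fmt.Printf("would POST {\"port\": %d, \"type\": %q}\n", port, transport)
}
```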