v1.2.2 - sneaky sync troubles (#35)

Cassandra Heart 2024-01-10 00:58:38 -06:00 committed by GitHub
parent 443612ac9b
commit f48f177a10
6 changed files with 84 additions and 28 deletions


@@ -818,11 +818,11 @@ func logoVersion(width int) string {
out += " ####################################### ########\n"
out += " ############################# ##\n"
out += " \n"
out += " Quilibrium Node - v1.2.1 Dawn\n"
out += " Quilibrium Node - v1.2.2 Dawn\n"
out += " \n"
out += " DB Console\n"
} else {
out = "Quilibrium Node - v1.2.1 Dawn - DB Console\n"
out = "Quilibrium Node - v1.2.2 Dawn - DB Console\n"
}
return out
}


@@ -249,6 +249,8 @@ func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
}
go func() {
thresholdBeforeConfirming := 4
for {
time.Sleep(30 * time.Second)
@@ -291,7 +293,8 @@ func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
if v == nil {
continue
}
if v.timestamp <= time.Now().UnixMilli()-UNCOOPERATIVE_PEER_INFO_TTL {
if v.timestamp <= time.Now().UnixMilli()-UNCOOPERATIVE_PEER_INFO_TTL ||
thresholdBeforeConfirming > 0 {
deletes = append(deletes, v)
}
}
@@ -303,11 +306,17 @@ func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
if err := e.publishMessage(e.filter, list); err != nil {
e.logger.Debug("error publishing message", zap.Error(err))
}
if thresholdBeforeConfirming > 0 {
thresholdBeforeConfirming--
}
}
}()
go func() {
e.logger.Info("waiting for peer list mappings")
// We need to re-tune this so that libp2p's peerstore activation threshold
// considers DHT peers to be correct:
time.Sleep(30 * time.Second)
for e.state < consensus.EngineStateStopping {
peerCount := e.pubSub.GetNetworkPeersCount()
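
The new thresholdBeforeConfirming counter amounts to a startup grace period: for the first four passes of the 30-second loop (roughly two minutes), every entry in the uncooperative-peers map is purged regardless of its TTL, so peers flagged while the node is still settling get a clean slate. A minimal, self-contained sketch of that pattern, with a stand-in peerInfo record and an illustrative TTL in place of the engine's internals:

package main

import (
	"fmt"
	"time"
)

// peerInfo is a stand-in for the engine's uncooperative-peer record.
type peerInfo struct {
	timestamp int64
}

// Illustrative TTL in milliseconds; the real UNCOOPERATIVE_PEER_INFO_TTL
// lives in the engine package.
const uncooperativePeerInfoTTL = 10 * 60 * 1000

func main() {
	uncooperative := map[string]*peerInfo{
		"peer-a": {timestamp: time.Now().UnixMilli()},
	}
	// Grace period: while the countdown is positive, every entry is
	// deleted whether or not its TTL has expired.
	thresholdBeforeConfirming := 4
	for pass := 0; pass < 5; pass++ {
		deletes := []string{}
		for k, v := range uncooperative {
			if v.timestamp <= time.Now().UnixMilli()-uncooperativePeerInfoTTL ||
				thresholdBeforeConfirming > 0 {
				deletes = append(deletes, k)
			}
		}
		for _, k := range deletes {
			delete(uncooperative, k)
		}
		if thresholdBeforeConfirming > 0 {
			thresholdBeforeConfirming--
		}
		fmt.Printf("pass %d: %d entries\n", pass, len(uncooperative))
	}
}
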


@ -12,6 +12,7 @@ import (
"math/big"
"os"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/ff"
"github.com/iden3/go-iden3-crypto/poseidon"
@@ -913,21 +914,17 @@ func (e *CeremonyDataClockConsensusEngine) GetMostAheadPeer() (
max := e.frame.FrameNumber
var peer []byte = nil
for _, v := range e.peerMap {
if v.maxFrame > max {
_, ok := e.uncooperativePeersMap[string(v.peerId)]
if v.maxFrame > max && !ok {
peer = v.peerId
max = v.maxFrame
}
}
size := len(e.peerMap)
e.peerMapMx.Unlock()
if peer == nil {
if size > 1 {
return nil, 0, nil
} else {
return nil, 0, p2p.ErrNoPeersAvailable
}
}
return peer, max, nil
}
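
GetMostAheadPeer now skips peers already flagged as uncooperative, and it splits two outcomes that previously shared an error: a nil peer with a nil error means "nobody is ahead of us", while p2p.ErrNoPeersAvailable is reserved for the case where the peer map has at most one entry. A sketch of the selection logic under those assumptions (the types and the error variable are stand-ins):

package main

import (
	"errors"
	"fmt"
)

// Stand-in for p2p.ErrNoPeersAvailable.
var errNoPeersAvailable = errors.New("no peers available")

type peerInfo struct {
	peerId   []byte
	maxFrame uint64
}

// mostAheadPeer mirrors the new logic: uncooperative peers are skipped,
// and a nil peer with a nil error means "up to date", not "no peers".
func mostAheadPeer(
	current uint64,
	peers map[string]*peerInfo,
	uncooperative map[string]*peerInfo,
) ([]byte, uint64, error) {
	max := current
	var peer []byte
	for _, v := range peers {
		_, bad := uncooperative[string(v.peerId)]
		if v.maxFrame > max && !bad {
			peer = v.peerId
			max = v.maxFrame
		}
	}
	if peer == nil {
		if len(peers) > 1 {
			return nil, 0, nil // peers exist, but none are ahead
		}
		return nil, 0, errNoPeersAvailable
	}
	return peer, max, nil
}

func main() {
	peers := map[string]*peerInfo{
		"a": {peerId: []byte("a"), maxFrame: 10},
		"b": {peerId: []byte("b"), maxFrame: 42},
	}
	id, frame, err := mostAheadPeer(20, peers, map[string]*peerInfo{})
	fmt.Println(string(id), frame, err) // b 42 <nil>
}
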
@@ -948,10 +945,11 @@ func (e *CeremonyDataClockConsensusEngine) sync(
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return latest, errors.Wrap(err, "reverse optimistic sync")
return latest, errors.Wrap(err, "sync")
}
client := protobufs.NewCeremonyServiceClient(cc)
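
Every failure path in sync now stamps the peer's record with the current time before moving it into uncooperativePeersMap; without that timestamp, the TTL sweep in Start() would have nothing to expire. Since the same lock, stamp, move, delete sequence recurs several times below, here it is once as a hypothetical helper (markUncooperative does not exist in the commit; it only illustrates the pattern):

package main

import (
	"fmt"
	"sync"
	"time"
)

type peerInfo struct {
	timestamp int64
	maxFrame  uint64
}

// markUncooperative captures the sequence repeated in the sync error
// paths: under the peer-map mutex, stamp the record so the TTL sweep can
// expire it, move it to the uncooperative map, and drop it from the
// active map. The mutex is assumed to guard both maps, as e.peerMapMx
// does in the engine.
func markUncooperative(
	mx *sync.Mutex,
	peers, uncooperative map[string]*peerInfo,
	peerId []byte,
) {
	mx.Lock()
	defer mx.Unlock()
	if info, ok := peers[string(peerId)]; ok {
		info.timestamp = time.Now().UnixMilli()
		uncooperative[string(peerId)] = info
		delete(peers, string(peerId))
	}
}

func main() {
	var mx sync.Mutex
	peers := map[string]*peerInfo{"p": {maxFrame: 7}}
	uncooperative := map[string]*peerInfo{}
	markUncooperative(&mx, peers, uncooperative, []byte("p"))
	fmt.Println(len(peers), len(uncooperative)) // 0 1
}
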
@@ -981,11 +979,15 @@ func (e *CeremonyDataClockConsensusEngine) sync(
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return latest, errors.Wrap(err, "reverse optimistic sync")
return latest, errors.Wrap(err, "sync")
}
firstPass := true
var syncMsg *protobufs.CeremonyCompressedSync
for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
e.logger.Info(
@@ -995,6 +997,31 @@ func (e *CeremonyDataClockConsensusEngine) sync(
zap.Int("frames", len(syncMsg.TruncatedClockFrames)),
zap.Int("proofs", len(syncMsg.Proofs)),
)
// This can only happen if we get a peer with state that was initially
// farther ahead, but something happened. However, this has a sticking
// effect that doesn't go away for them until they're caught up again,
// so let's not penalize their score and make everyone else suffer,
// let's just move on:
if syncMsg.FromFrameNumber == 0 &&
syncMsg.ToFrameNumber == 0 &&
firstPass {
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().
UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return currentLatest, errors.Wrap(ErrNoNewFrames, "sync")
}
var next *protobufs.ClockFrame
if next, err = e.decompressAndStoreCandidates(
peerId,
@@ -1008,6 +1035,8 @@ func (e *CeremonyDataClockConsensusEngine) sync(
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().
UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
@@ -1016,18 +1045,28 @@ func (e *CeremonyDataClockConsensusEngine) sync(
e.logger.Error("error while closing connection", zap.Error(err))
}
return currentLatest, errors.Wrap(err, "reverse optimistic sync")
return currentLatest, errors.Wrap(err, "sync")
}
if next != nil {
latest = next
}
}
if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
e.logger.Error("error while receiving sync", zap.Error(err))
if err := cc.Close(); err != nil {
e.logger.Error("error while closing connection", zap.Error(err))
}
e.logger.Error("error while receiving sync", zap.Error(err))
return latest, errors.Wrap(err, "reverse optimistic sync")
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()
return latest, errors.Wrap(err, "sync")
}
e.logger.Info(
@@ -1052,13 +1091,20 @@ func (e *CeremonyDataClockConsensusEngine) collect(
latest = e.previousHead
e.syncingStatus = SyncStatusNotSyncing
}
for {
peerId, maxFrame, err := e.GetMostAheadPeer()
if err != nil {
e.logger.Warn("no peers available, skipping sync")
break
} else if peerId == nil {
e.logger.Info("currently up to date, skipping sync")
break
} else if maxFrame-2 > latest.FrameNumber {
latest, err = e.sync(latest, maxFrame, peerId)
if err == nil {
break
}
}
}
e.logger.Info(
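
collect no longer gives up after a single attempt: each pass of the new loop re-queries the most-ahead peer, stops when no peers remain or the node is already up to date, and only exits the retry loop once a sync succeeds. Because a failed sync moves the offending peer into the uncooperative map, the next iteration naturally falls through to the next-best candidate. A toy version of the retry loop follows; the final break for a too-small lead is an added assumption so the sketch terminates:

package main

import (
	"errors"
	"fmt"
)

var errNoPeersAvailable = errors.New("no peers available")

// mostAhead returns the peer with the highest frame above latest, an
// empty id when nobody is ahead, or an error when no candidates remain.
func mostAhead(latest uint64, peers map[string]uint64) (string, uint64, error) {
	if len(peers) == 0 {
		return "", 0, errNoPeersAvailable
	}
	best, max := "", latest
	for id, frame := range peers {
		if frame > max {
			best, max = id, frame
		}
	}
	return best, max, nil
}

func main() {
	latest := uint64(100)
	peers := map[string]uint64{"flaky": 140, "good": 130}
	failing := map[string]bool{"flaky": true}

	for {
		peerId, maxFrame, err := mostAhead(latest, peers)
		if err != nil {
			fmt.Println("no peers available, skipping sync")
			break
		} else if peerId == "" {
			fmt.Println("currently up to date, skipping sync")
			break
		} else if maxFrame-2 > latest {
			if failing[peerId] {
				// A failed sync flags the peer; try the next-best one.
				delete(peers, peerId)
				continue
			}
			latest = maxFrame // simulated successful sync
			break
		} else {
			break // ahead by too little to be worth syncing
		}
	}
	fmt.Println("latest frame:", latest) // latest frame: 130
}
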
@@ -1068,7 +1114,7 @@ func (e *CeremonyDataClockConsensusEngine) collect(
e.logger.Info("selecting leader")
latest, err = e.commitLongestPath(latest)
latest, err := e.commitLongestPath(latest)
if err != nil {
e.logger.Error("could not collect longest path", zap.Error(err))
latest, _, err = e.clockStore.GetDataClockFrame(e.filter, 0)


@@ -85,7 +85,7 @@ func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
)
if err != nil {
from = 1
e.logger.Info("peer fully out of sync, rewinding sync head to start")
e.logger.Debug("peer fully out of sync, rewinding sync head to start")
break
}
@@ -95,8 +95,8 @@ func (e *CeremonyDataClockConsensusEngine) GetCompressedSyncFrames(
parent,
)
if err != nil {
from = 1
e.logger.Info("peer fully out of sync, rewinding sync head to start")
from = frame.FrameNumber - 16
e.logger.Debug("peer fully out of sync, rewinding sync head to min")
break
}
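
On the serving side, a requester whose parent frame cannot be found is no longer rewound all the way to frame 1: the stream now restarts 16 frames back from the current one, re-sending a recent window instead of the entire history, and the log line is demoted to Debug since this is routine. A sketch of the bounded rewind; the underflow clamp for frame numbers at or below 16 is an added safety assumption, not part of the commit:

package main

import "fmt"

// rewindSyncHead picks where to restart a sync stream after a
// missing-parent error. The commit rewinds 16 frames; the clamp below is
// an illustrative guard for small frame numbers.
func rewindSyncHead(frameNumber uint64) uint64 {
	if frameNumber <= 16 {
		return 1
	}
	return frameNumber - 16
}

func main() {
	fmt.Println(rewindSyncHead(1000)) // 984
	fmt.Println(rewindSyncHead(10))   // 1
}
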
@@ -167,6 +167,7 @@ func (e *CeremonyDataClockConsensusEngine) decompressAndStoreCandidates(
e.peerMapMx.Lock()
if _, ok := e.peerMap[string(peerId)]; ok {
e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
delete(e.peerMap, string(peerId))
}
e.peerMapMx.Unlock()


@@ -59,5 +59,5 @@ func GetMinimumVersion() []byte {
}
func GetVersion() []byte {
return []byte{0x01, 0x02, 0x01}
return []byte{0x01, 0x02, 0x02}
}
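
The three bytes returned by GetVersion map onto the major, minor, and patch digits of the displayed version, so {0x01, 0x02, 0x02} reads as v1.2.2. A tiny decoding sketch (the formatting helper is illustrative, not from the repository):

package main

import "fmt"

// formatVersion renders the three version bytes as the display string;
// this helper is an illustration, not part of the repo.
func formatVersion(v []byte) string {
	return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
}

func main() {
	fmt.Println(formatVersion([]byte{0x01, 0x02, 0x02})) // v1.2.2
}
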


@@ -249,5 +249,5 @@ func printLogo() {
func printVersion() {
fmt.Println(" ")
fmt.Println(" Quilibrium Node - v1.2.1 Dawn")
fmt.Println(" Quilibrium Node - v1.2.2 Dawn")
}