ceremonyclient/node/consensus/master/consensus_frames.go

package master

import (
	"context"
	"time"

	"github.com/mr-tron/base58"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"source.quilibrium.com/quilibrium/monorepo/node/consensus"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
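
// prove generates the next master clock frame by running the VDF prover
// over the previous frame at the engine's current difficulty, then marks
// the engine as ready to publish the result.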
func (e *MasterClockConsensusEngine) prove(
	previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	e.logger.Debug("proving new frame")

	frame, err := e.frameProver.ProveMasterClockFrame(
		previousFrame,
		time.Now().UnixMilli(),
		e.difficulty,
	)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.state = consensus.EngineStatePublishing
	e.logger.Debug("returning new proven frame")
	return frame, nil
}
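
// GetMostAheadPeers returns up to thirty peers whose reported master head
// frame is more than ten frames ahead of the local head, i.e. peers far
// enough ahead that syncing from them is worthwhile. It returns
// p2p.ErrNoPeersAvailable when no such peer is known.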
func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
	[][]byte,
	error,
) {
	frame, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	// Needs to be enough to make the sync worthwhile:
	max := frame.FrameNumber + 10

	peers := [][]byte{}
	e.peerMapMx.Lock()
	for peerId, v := range e.peerMap {
		if v.MasterHeadFrame > max {
			peers = append(peers, []byte(peerId))
		}

		if len(peers) >= 30 {
			break
		}
	}
	e.peerMapMx.Unlock()

	if len(peers) == 0 {
		return nil, p2p.ErrNoPeersAvailable
	}

	return peers, nil
}
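
// collect catches the local master time reel up to the network by streaming
// frames from the most-ahead peers, verifying each frame before inserting
// it, and returns the latest frame observed.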
func (e *MasterClockConsensusEngine) collect(
	currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	e.logger.Debug("collecting vdf proofs")

	latest, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	// As the network grows, constrain the candidate set to the top thirty
	// most-ahead peers.
	peers, err := e.GetMostAheadPeers()
	if err != nil {
		return latest, nil
	}

	for _, peer := range peers {
		e.logger.Debug("setting syncing target", zap.Binary("peer_id", peer))

		cc, err := e.pubSub.GetDirectChannel(peer, "validation")
		if err != nil {
			e.logger.Error(
				"could not connect for sync",
				zap.String("peer_id", base58.Encode(peer)),
			)
			continue
		}
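
		// Request every frame from our current head onward; a ToFrameNumber
		// of 0 appears to leave the upper bound open so the peer streams
		// everything it has.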
		client := protobufs.NewValidationServiceClient(cc)
		syncClient, err := client.Sync(
			context.Background(),
			&protobufs.SyncRequest{
				FramesRequest: &protobufs.ClockFramesRequest{
					Filter:          e.filter,
					FromFrameNumber: latest.FrameNumber,
					ToFrameNumber:   0,
				},
			},
		)
		if err != nil {
			cc.Close()
			continue
		}
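
		// Drain the stream, admitting frames to the time reel only after
		// they pass verification; stop on an empty response or a bad frame.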
		for {
			msg, err := syncClient.Recv()
			if msg == nil || err != nil {
				break
			}

			if msg.FramesResponse == nil {
				break
			}

			for _, frame := range msg.FramesResponse.ClockFrames {
				frame := frame
				if frame.FrameNumber < latest.FrameNumber {
					continue
				}

				if e.difficulty != frame.Difficulty {
					e.logger.Debug(
						"frame difficulty mismatched",
						zap.Uint32("difficulty", frame.Difficulty),
					)
					break
				}

				if err := e.frameProver.VerifyMasterClockFrame(frame); err != nil {
					// Heavily penalize peers that serve unverifiable frames.
					e.logger.Error(
						"peer returned invalid frame",
						zap.String("peer_id", base58.Encode(peer)),
					)
					e.pubSub.SetPeerScore(peer, -1000)
					break
				}

				e.masterTimeReel.Insert(frame)
				latest = frame
			}
		}

		// Syncing from one reachable peer is sufficient; close the channel
		// and stop trying further peers.
		cc.Close()
		break
	}

	return latest, nil
}