ceremonyclient/node/consensus/master/consensus_frames.go
package master

import (
	"time"

	"github.com/pkg/errors"

	"source.quilibrium.com/quilibrium/monorepo/node/consensus"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
// prove extends previousFrame with a newly proven master clock frame and
// moves the engine into the publishing state.
func (e *MasterClockConsensusEngine) prove(
	previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	e.logger.Debug("proving new frame")
	frame, err := e.frameProver.ProveMasterClockFrame(
		previousFrame,
		time.Now().UnixMilli(),
		e.difficulty,
	)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.state = consensus.EngineStatePublishing
	e.logger.Debug("returning new proven frame")
	return frame, nil
}
// GetMostAheadPeers returns up to 30 peers whose reported master head frame
// is more than ten frames ahead of our own head, making a sync worthwhile.
// It returns p2p.ErrNoPeersAvailable if no peer qualifies.
func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
	[][]byte,
	error,
) {
	frame, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	// Needs to be enough to make the sync worthwhile:
	max := frame.FrameNumber + 10
	peers := [][]byte{}
	peerMap := e.peerInfoManager.GetPeerMap()
	for peerId, v := range peerMap {
		if v.MasterHeadFrame > max {
			peers = append(peers, []byte(peerId))
		}
		if len(peers) >= 30 {
			break
		}
	}

	if len(peers) == 0 {
		return nil, p2p.ErrNoPeersAvailable
	}

	return peers, nil
}
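
// The selection heuristic above is easier to see in isolation. The helper
// below is a hypothetical sketch, not part of the original source: it applies
// the same "more than `window` frames ahead, capped at `limit` candidates"
// filter to a plain map of peer head frames. The name, signature, and
// parameters are illustrative assumptions only.
func aheadPeers(
	head uint64,
	peerHeads map[string]uint64,
	window uint64,
	limit int,
) [][]byte {
	threshold := head + window
	selected := [][]byte{}
	// Map iteration order is randomized in Go, so which peers are chosen
	// when more than `limit` qualify is effectively arbitrary, mirroring
	// the behavior of GetMostAheadPeers.
	for peerId, headFrame := range peerHeads {
		if headFrame > threshold {
			selected = append(selected, []byte(peerId))
		}
		if len(selected) >= limit {
			break
		}
	}
	return selected
}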
// collect returns the current head of the master time reel. The
// currentFramePublished argument is unused here; the latest head is read
// back from the time reel rather than derived from the published frame.
func (e *MasterClockConsensusEngine) collect(
	currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	latest, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	return latest, nil
}
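
// A hypothetical composition of the two steps, not in the original source,
// showing how prove and collect fit together: prove extends the current
// head, and collect re-reads the head from the time reel. The real engine
// publishes the proven frame between these steps; that is elided here, so
// this is a sketch only.
func (e *MasterClockConsensusEngine) proveAndCollectOnce() (
	*protobufs.ClockFrame,
	error,
) {
	head, err := e.masterTimeReel.Head()
	if err != nil {
		return nil, errors.Wrap(err, "prove and collect once")
	}

	proven, err := e.prove(head)
	if err != nil {
		return nil, err
	}

	return e.collect(proven)
}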