ceremonyclient/node/consensus/ceremony/consensus_frames.go

package ceremony

import (
	"bytes"
	"context"
	"io"
	"time"

	"github.com/pkg/errors"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"

	"source.quilibrium.com/quilibrium/monorepo/node/consensus"
	qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
	"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
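
// prove applies the staged ceremony lobby state transitions to the application
// state materialized from the previous frame, commits the resulting execution
// output, and proves the next data clock frame. If this node is not in the
// prover trie, the staged transitions are discarded and the previous frame is
// returned unchanged while the engine drops back to collecting.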
func (e *CeremonyDataClockConsensusEngine) prove(
	previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	if !e.frameProverTrie.Contains(e.provingKeyAddress) {
		e.stagedLobbyStateTransitionsMx.Lock()
		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
		e.stagedLobbyStateTransitionsMx.Unlock()

		e.state = consensus.EngineStateCollecting
		return previousFrame, nil
	}

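	// Take the staged lobby state transitions under lock and apply them to the
	// application state materialized from the previous frame.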
	e.stagedLobbyStateTransitionsMx.Lock()
	executionOutput := &protobufs.IntrinsicExecutionOutput{}
	app, err := application.MaterializeApplicationFromFrame(previousFrame)
	if err != nil {
		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
		e.stagedLobbyStateTransitionsMx.Unlock()
		return nil, errors.Wrap(err, "prove")
	}

	if e.stagedLobbyStateTransitions == nil {
		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
	}

	e.logger.Info(
		"proving new frame",
		zap.Int("state_transitions", len(e.stagedLobbyStateTransitions.TypeUrls)),
	)

	var validLobbyTransitions *protobufs.CeremonyLobbyStateTransition
	var skippedTransition *protobufs.CeremonyLobbyStateTransition
	app, validLobbyTransitions, skippedTransition, err = app.ApplyTransition(
		previousFrame.FrameNumber,
		e.stagedLobbyStateTransitions,
		true,
	)
	if err != nil {
		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
		e.stagedLobbyStateTransitionsMx.Unlock()
		return nil, errors.Wrap(err, "prove")
	}

	e.stagedLobbyStateTransitions = skippedTransition
	defer e.stagedLobbyStateTransitionsMx.Unlock()

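	// Serialize the resulting lobby state and the applied transitions into an
	// IntrinsicExecutionOutput; its encoding is the data committed into the
	// next frame.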
	lobbyState, err := app.MaterializeLobbyStateFromApplication()
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	executionOutput.Address = application.CEREMONY_ADDRESS
	executionOutput.Output, err = proto.Marshal(lobbyState)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	executionOutput.Proof, err = proto.Marshal(validLobbyTransitions)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	data, err := proto.Marshal(executionOutput)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.logger.Debug("encoded execution output")

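	// Commit to the encoded execution output and produce an aggregate
	// inclusion (KZG) proof over that single commitment.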
	commitment, err := e.inclusionProver.Commit(
		data,
		protobufs.IntrinsicExecutionOutputType,
	)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.logger.Debug("creating kzg proof")
	proof, err := e.inclusionProver.ProveAggregate(
		[]*qcrypto.InclusionCommitment{commitment},
	)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.logger.Debug("finalizing execution proof")

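	// Prove the next data clock frame over the previous one, embedding the
	// aggregate commitment and its inclusion proof, using the proving key and
	// the current difficulty.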
	frame, err := e.frameProver.ProveDataClockFrame(
		previousFrame,
		[][]byte{proof.AggregateCommitment},
		[]*protobufs.InclusionAggregateProof{
			{
				Filter:      e.filter,
				FrameNumber: previousFrame.FrameNumber + 1,
				InclusionCommitments: []*protobufs.InclusionCommitment{
					{
						Filter:      e.filter,
						FrameNumber: previousFrame.FrameNumber + 1,
						TypeUrl:     proof.InclusionCommitments[0].TypeUrl,
						Commitment:  proof.InclusionCommitments[0].Commitment,
						Data:        data,
						Position:    0,
					},
				},
				Proof: proof.Proof,
			},
		},
		e.provingKey,
		time.Now().UnixMilli(),
		e.difficulty,
	)
	if err != nil {
		return nil, errors.Wrap(err, "prove")
	}

	e.logger.Info(
		"returning new proven frame",
		zap.Uint64("frame_number", frame.FrameNumber),
		zap.Int("proof_count", len(frame.AggregateProofs)),
		zap.Int("commitment_count", len(frame.Input[516:])/74),
	)

	return frame, nil
}
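
// GetMostAheadPeer returns the peer id and reported max frame number of the
// best-known peer ahead of the local head. It returns p2p.ErrNoPeersAvailable
// when no cooperative, sufficiently up-to-date peer is ahead of the local
// head.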
func (e *CeremonyDataClockConsensusEngine) GetMostAheadPeer() (
	[]byte,
	uint64,
	error,
) {
	frame, err := e.dataTimeReel.Head()
	if err != nil {
		panic(err)
	}

	e.logger.Info(
		"checking peer list",
		zap.Int("peers", len(e.peerMap)),
		zap.Int("uncooperative_peers", len(e.uncooperativePeersMap)),
		zap.Uint64("current_head_frame", frame.FrameNumber),
	)

	max := frame.FrameNumber
	var peer []byte = nil

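	// Select the peer reporting the highest frame number, skipping peers that
	// are marked uncooperative, report a version below the minimum, or whose
	// last announcement predates the minimum version cutoff.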
	e.peerMapMx.Lock()
	for _, v := range e.peerMap {
		_, ok := e.uncooperativePeersMap[string(v.peerId)]
		if v.maxFrame > max &&
			v.timestamp > consensus.GetMinimumVersionCutoff().UnixMilli() &&
			bytes.Compare(v.version, consensus.GetMinimumVersion()) >= 0 && !ok {
			peer = v.peerId
			max = v.maxFrame
		}
	}
	e.peerMapMx.Unlock()

	if peer == nil {
		return nil, 0, p2p.ErrNoPeersAvailable
	}

	return peer, max, nil
}
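
// sync opens a direct channel to the given peer and streams compressed clock
// frames from it. A preflight exchange of parent selectors is used to agree on
// a common starting frame; each response batch is then decompressed and stored
// as candidate frames. Peers that fail the exchange are moved to the
// uncooperative peer map. It returns the newest frame obtained from the peer,
// falling back to the frame passed in when nothing new was applied.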
func (e *CeremonyDataClockConsensusEngine) sync(
	currentLatest *protobufs.ClockFrame,
	maxFrame uint64,
	peerId []byte,
) (*protobufs.ClockFrame, error) {
	latest := currentLatest
	e.logger.Info("polling peer for new frames", zap.Binary("peer_id", peerId))

	cc, err := e.pubSub.GetDirectChannel(peerId, "")
	if err != nil {
		e.logger.Error(
			"could not establish direct channel",
			zap.Error(err),
		)
		e.peerMapMx.Lock()
		if _, ok := e.peerMap[string(peerId)]; ok {
			e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
			e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
			delete(e.peerMap, string(peerId))
		}
		e.peerMapMx.Unlock()
		return latest, errors.Wrap(err, "sync")
	}

	client := protobufs.NewCeremonyServiceClient(cc)

	from := latest.FrameNumber
	if from == 0 {
		from = 1
	}

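	// When far enough along, gather parent selectors at exponentially
	// increasing distances behind our head (16, 32, 64, ... frames back) so
	// both sides can negotiate a common starting point during preflight.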
	rangeParentSelectors := []*protobufs.ClockFrameParentSelectors{}
	if from > 128 {
		rangeSubtract := uint64(16)
		for {
			if from <= rangeSubtract {
				break
			}

			parentNumber := from - uint64(rangeSubtract)
			rangeSubtract *= 2
			parent, _, err := e.clockStore.GetDataClockFrame(
				e.filter,
				parentNumber,
				true,
			)
			if err != nil {
				break
			}

			parentSelector, err := parent.GetSelector()
			if err != nil {
				panic(err)
			}

			rangeParentSelectors = append(
				rangeParentSelectors,
				&protobufs.ClockFrameParentSelectors{
					FrameNumber:    parentNumber,
					ParentSelector: parentSelector.FillBytes(make([]byte, 32)),
				},
			)
		}
	}

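	// Open the compressed sync stream with a large receive limit, since a
	// single response can carry many frames and proofs.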
	s, err := client.NegotiateCompressedSyncFrames(
		context.Background(),
		grpc.MaxCallRecvMsgSize(600*1024*1024),
	)
	if err != nil {
		e.logger.Debug(
			"received error from peer",
			zap.Error(err),
		)
		e.peerMapMx.Lock()
		if _, ok := e.peerMap[string(peerId)]; ok {
			e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
			e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
			delete(e.peerMap, string(peerId))
		}
		e.peerMapMx.Unlock()
		return latest, errors.Wrap(err, "sync")
	}

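	// Preflight: send our known parent selectors and expect the peer's
	// preflight in return before requesting any frame ranges.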
	err = s.Send(&protobufs.CeremonyCompressedSyncRequestMessage{
		SyncMessage: &protobufs.CeremonyCompressedSyncRequestMessage_Preflight{
			Preflight: &protobufs.ClockFramesPreflight{
				RangeParentSelectors: rangeParentSelectors,
			},
		},
	})
	if err != nil {
		return latest, errors.Wrap(err, "sync")
	}

	syncMsg, err := s.Recv()
	if err != nil {
		return latest, errors.Wrap(err, "sync")
	}

	preflight, ok := syncMsg.
		SyncMessage.(*protobufs.CeremonyCompressedSyncResponseMessage_Preflight)
	if !ok {
		s.CloseSend()
		return latest, errors.Wrap(
			errors.New("preflight message invalid"),
			"sync",
		)
	}

	// Loop through the peer's parent selectors: set found to the first frame we
	// can match locally; if a subsequent selector fails to match, cancel the
	// search and start from frame 1.
	found := uint64(0)
	parentSelector := make([]byte, 32)
	for _, selector := range preflight.Preflight.RangeParentSelectors {
		match, err := e.clockStore.GetParentDataClockFrame(
			e.filter,
			selector.FrameNumber,
			selector.ParentSelector,
			true,
		)
		if err != nil && found == 0 {
			continue
		}

		if err != nil && found != 0 {
			found = 1
			e.logger.Info("could not find interstitial frame, setting search to 1")
			break
		}

		if match != nil && found == 0 {
			found = match.FrameNumber
			parentSelector = match.ParentSelector
		}
	}

	if found != 0 {
		from = found
	}

	err = s.Send(&protobufs.CeremonyCompressedSyncRequestMessage{
		SyncMessage: &protobufs.CeremonyCompressedSyncRequestMessage_Request{
			Request: &protobufs.ClockFramesRequest{
				Filter:          e.filter,
				FromFrameNumber: from,
				ToFrameNumber:   0,
				ParentSelector:  parentSelector,
			},
		},
	})
	if err != nil {
		return latest, errors.Wrap(err, "sync")
	}

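	// Stream response batches until the peer closes the stream, decompressing
	// and storing each batch of candidate frames as it arrives.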
	for syncMsg, err = s.Recv(); err == nil; syncMsg, err = s.Recv() {
		sync, ok := syncMsg.
			SyncMessage.(*protobufs.CeremonyCompressedSyncResponseMessage_Response)
		if !ok {
			return latest, errors.Wrap(
				errors.New("response message invalid"),
				"sync",
			)
		}

		response := sync.Response
		e.logger.Info(
			"received compressed sync frame",
			zap.Uint64("from", response.FromFrameNumber),
			zap.Uint64("to", response.ToFrameNumber),
			zap.Int("frames", len(response.TruncatedClockFrames)),
			zap.Int("proofs", len(response.Proofs)),
		)

		// This can only happen if we get a peer with state that was initially
		// farther ahead, but something happened. However, this has a sticking
		// effect that doesn't go away for them until they're caught up again,
		// so let's not penalize their score and make everyone else suffer;
		// let's just move on:
		if response.FromFrameNumber == 0 && response.ToFrameNumber == 0 {
			if err := cc.Close(); err != nil {
				e.logger.Error("error while closing connection", zap.Error(err))
			}

			return currentLatest, errors.Wrap(ErrNoNewFrames, "sync")
		}

		var next *protobufs.ClockFrame
		if next, err = e.decompressAndStoreCandidates(
			peerId,
			response,
		); err != nil && !errors.Is(err, ErrNoNewFrames) {
			e.logger.Error(
				"could not decompress and store candidate",
				zap.Error(err),
			)
			e.peerMapMx.Lock()
			if _, ok := e.peerMap[string(peerId)]; ok {
				e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
				e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().
					UnixMilli()
				delete(e.peerMap, string(peerId))
			}
			e.peerMapMx.Unlock()

			if err := cc.Close(); err != nil {
				e.logger.Error("error while closing connection", zap.Error(err))
			}

			return currentLatest, errors.Wrap(err, "sync")
		}

		if next != nil {
			latest = next
		}
	}

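	// io.EOF and ErrNoNewFrames indicate a clean end of the stream; any other
	// receive error marks the peer as uncooperative before giving up.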
	if err != nil && err != io.EOF && !errors.Is(err, ErrNoNewFrames) {
		e.logger.Debug("error while receiving sync", zap.Error(err))
		if err := cc.Close(); err != nil {
			e.logger.Error("error while closing connection", zap.Error(err))
		}

		e.peerMapMx.Lock()
		if _, ok := e.peerMap[string(peerId)]; ok {
			e.uncooperativePeersMap[string(peerId)] = e.peerMap[string(peerId)]
			e.uncooperativePeersMap[string(peerId)].timestamp = time.Now().UnixMilli()
			delete(e.peerMap, string(peerId))
		}
		e.peerMapMx.Unlock()
		return latest, errors.Wrap(err, "sync")
	}

	e.logger.Info(
		"received new leading frame",
		zap.Uint64("frame_number", latest.FrameNumber),
	)

	if err := cc.Close(); err != nil {
		e.logger.Error("error while closing connection", zap.Error(err))
	}

	return latest, nil
}
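
// collect attempts to catch the local data clock up to the network by
// repeatedly syncing from the most-ahead peer, then returns the frame to build
// on, which is never older than the frame that was just published.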
func (e *CeremonyDataClockConsensusEngine) collect(
	currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
	e.logger.Info("collecting vdf proofs")

	latest := currentFramePublished
	if e.syncingStatus == SyncStatusFailed {
		e.syncingStatus = SyncStatusNotSyncing
	}

	// With the increase of network size, constrain sync attempts to the top
	// thirty most-ahead peers.
	for i := 0; i < 30; i++ {
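		// Only sync when the best peer is more than two frames ahead of our
		// latest frame; a successful sync (or no available peer) ends the loop.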
		peerId, maxFrame, err := e.GetMostAheadPeer()
		if err != nil {
			e.logger.Warn("no peers available, skipping sync")
			break
		} else if peerId == nil {
			e.logger.Info("currently up to date, skipping sync")
			break
		} else if maxFrame-2 > latest.FrameNumber {
			latest, err = e.sync(latest, maxFrame, peerId)
			if err == nil {
				break
			}
		}
	}

	if latest.FrameNumber < currentFramePublished.FrameNumber {
		latest = currentFramePublished
	}

	e.logger.Info(
		"returning leader frame",
		zap.Uint64("frame_number", latest.FrameNumber),
	)

	return latest, nil
}