Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git
QOL: logging
commit 732bbe1c3a
parent 0274332cdc
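The bulk of this change is a logging quality-of-life pass: routine per-frame and per-message chatter in the consensus and execution engines is demoted from zap's Info level to Debug, a few failure paths that only concern remote peers now log and return nil rather than propagate an error, and several log fields report counts instead of dumping whole structures. A minimal sketch of the level change, assuming a stock zap production logger (the identifiers below are illustrative, not taken from this diff):

package main

import "go.uber.org/zap"

func main() {
	// zap.NewProduction logs Info and above by default, so messages
	// demoted to Debug disappear from normal node output.
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// Before this commit: routine events were logged at Info level.
	logger.Info("received key bundle")

	// After: the same event logs at Debug, visible only when a
	// debug-level logger (e.g. zap.NewDevelopment()) is configured.
	logger.Debug("received key bundle", zap.Uint64("frame_number", 42))
}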
@@ -173,7 +173,7 @@ func (e *CeremonyDataClockConsensusEngine) handleKeyBundle(
 	address []byte,
 	any *anypb.Any,
 ) error {
-	e.logger.Info("received key bundle")
+	e.logger.Debug("received key bundle")
 	keyBundleAnnouncement := &protobufs.KeyBundleAnnouncement{}
 	if err := any.UnmarshalTo(keyBundleAnnouncement); err != nil {
		return errors.Wrap(err, "handle key bundle")
@@ -243,24 +243,24 @@ func (e *CeremonyDataClockConsensusEngine) handleKeyBundle(

 	// We have a matching proving key, we can set this up to be committed.
 	if provingKey != nil {
-		e.logger.Info("verifying key bundle announcement")
+		e.logger.Debug("verifying key bundle announcement")
 		if err := keyBundleAnnouncement.Verify(provingKey); err != nil {
-			e.logger.Error(
+			e.logger.Debug(
 				"could not verify key bundle announcement",
 				zap.Error(err),
 			)
-			return errors.Wrap(err, "handle key bundle")
+			return nil
 		}

 		go func() {
-			e.logger.Info("adding key bundle announcement to pending commits")
+			e.logger.Debug("adding key bundle announcement to pending commits")

 			e.pendingCommits <- any
 		}()

 		return nil
 	} else {
-		e.logger.Info("proving key not found, requesting from peers")
+		e.logger.Debug("proving key not found, requesting from peers")

 		if err = e.publishMessage(e.filter, &protobufs.ProvingKeyRequest{
 			ProvingKeyBytes: keyBundleAnnouncement.ProvingKeyBytes,
@@ -281,7 +281,7 @@ func (e *CeremonyDataClockConsensusEngine) handleProvingKey(
 	address []byte,
 	any *anypb.Any,
 ) error {
-	e.logger.Info("received proving key")
+	e.logger.Debug("received proving key")

 	provingKeyAnnouncement := &protobufs.ProvingKeyAnnouncement{}
 	if err := any.UnmarshalTo(provingKeyAnnouncement); err != nil {
@@ -298,7 +298,7 @@ func (e *CeremonyDataClockConsensusEngine) handleProvingKey(

 	provingKey := provingKeyAnnouncement.PublicKey()

-	e.logger.Info(
+	e.logger.Debug(
 		"proving key staged",
 		zap.Binary("proving_key", provingKey),
 	)
@@ -345,8 +345,15 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 		return errors.Wrap(err, "handle clock frame data")
 	}

-	earliestFrame, _, count := e.frameProverTrie.Get(address)
-	_, latestFrame, _ := e.frameSeenProverTrie.Get(address)
+	addr, err := poseidon.HashBytes(
+		frame.GetPublicKeySignatureEd448().PublicKey.KeyValue,
+	)
+	if err != nil {
+		return errors.Wrap(err, "handle clock frame data")
+	}
+
+	earliestFrame, _, count := e.frameProverTrie.Get(addr.Bytes())
+	_, latestFrame, _ := e.frameSeenProverTrie.Get(addr.Bytes())
 	if frame.FrameNumber == latestFrame {
 		e.logger.Info(
 			"already received frame from address",
@@ -397,7 +404,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 		for _, commit := range proof.GetInclusionCommitments() {
 			switch commit.TypeUrl {
 			case protobufs.IntrinsicExecutionOutputType:
-				e.logger.Info("confirming inclusion in aggregate")
+				e.logger.Debug("confirming inclusion in aggregate")
 				digest := sha3.NewShake256()
 				_, err := digest.Write(commit.Data)
 				if err != nil {
@@ -442,7 +449,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 					)
 					return errors.Wrap(err, "handle clock frame data")
 				}
-				e.logger.Info(
+				e.logger.Debug(
 					"created fft of polynomial",
 					zap.Int("poly_size", len(evalPoly)),
 				)
@@ -458,7 +465,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 				}
 				commitments = append(commitments, c.(curves.PairingPoint))
 			default:
-				e.logger.Info("confirming inclusion in aggregate")
+				e.logger.Debug("confirming inclusion in aggregate")
 				poly, err := e.prover.BytesToPolynomial(commit.Data)
 				if err != nil {
 					e.logger.Error(
@@ -490,7 +497,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 					)
 					return errors.Wrap(err, "handle clock frame data")
 				}
-				e.logger.Info(
+				e.logger.Debug(
 					"created fft of polynomial",
 					zap.Int("poly_size", len(evalPoly)),
 				)
@@ -548,7 +555,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
 	if err != nil {
 		return errors.Wrap(err, "handle clock frame data")
 	}
-	e.logger.Info(
+	e.logger.Debug(
 		"difference between selector/discriminator",
 		zap.Binary("difference", distance.Bytes()),
 	)
@@ -612,7 +619,7 @@ func (e *CeremonyDataClockConsensusEngine) publishProof(
 	frame *protobufs.ClockFrame,
 ) error {
 	if e.state == consensus.EngineStatePublishing {
-		e.logger.Info(
+		e.logger.Debug(
 			"publishing frame and aggregations",
 			zap.Uint64("frame_number", frame.FrameNumber),
 		)
@@ -668,7 +675,7 @@ func (e *CeremonyDataClockConsensusEngine) publishMessage(
 }

 func (e *CeremonyDataClockConsensusEngine) announceKeyBundle() error {
-	e.logger.Info("announcing key bundle")
+	e.logger.Debug("announcing key bundle")
 	idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
 	if err != nil {
 		if errors.Is(err, keys.KeyNotFoundErr) {
@@ -11,6 +11,7 @@ import (

 	"github.com/iden3/go-iden3-crypto/ff"
 	"github.com/iden3/go-iden3-crypto/poseidon"
+	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
 	"golang.org/x/crypto/sha3"
@@ -31,18 +32,18 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 	previousFrame *protobufs.ClockFrame,
 ) (*protobufs.ClockFrame, error) {
 	if e.state == consensus.EngineStateProving {
-		e.logger.Info("proving new frame")
 		if !e.frameProverTrie.Contains(e.provingKeyAddress) {
 			e.state = consensus.EngineStateCollecting
 			return previousFrame, nil
 		}
+		e.logger.Info("proving new frame")

 		commitments := [][]byte{}
 		aggregations := []*protobufs.InclusionAggregateProof{}

 		e.stagedKeyCommitsMx.Lock()
 		if len(e.stagedKeyCommits) > 0 && len(e.stagedKeyPolynomials) > 0 {
-			e.logger.Info(
+			e.logger.Debug(
 				"adding staged key commits to frame",
 				zap.Uint64("frame_number", previousFrame.FrameNumber+1),
 			)
@@ -52,7 +53,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			i := uint32(0)

 			for commit, inclusion := range e.stagedKeyCommits {
-				e.logger.Info(
+				e.logger.Debug(
 					"adding staged key commit to aggregate proof",
 					zap.Uint64("frame_number", previousFrame.FrameNumber+1),
 					zap.Uint32("position", i),
@@ -148,7 +149,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			return nil, errors.Wrap(err, "prove")
 		}

-		e.logger.Info("encoded execution output")
+		e.logger.Debug("encoded execution output")

 		// Execution data in the ceremony is plaintext, we do not need to leverage
 		// full encoding for commit/proof reference.
@@ -178,7 +179,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			return nil, errors.Wrap(err, "prove")
 		}

-		e.logger.Info("proving execution output for inclusion")
+		e.logger.Debug("proving execution output for inclusion")
 		polys, err := qcrypto.FFT(
 			poly,
 			*curves.BLS48581(
@@ -193,9 +194,9 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			return nil, errors.Wrap(err, "prove")
 		}

-		e.logger.Info("converted execution output chunk to evaluation form")
+		e.logger.Debug("converted execution output chunk to evaluation form")

-		e.logger.Info("creating kzg commitment")
+		e.logger.Debug("creating kzg commitment")
 		commitment, err := e.prover.Commit(polys)
 		if err != nil {
 			e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
@@ -203,7 +204,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			return nil, errors.Wrap(err, "prove")
 		}

-		e.logger.Info("creating kzg proof")
+		e.logger.Debug("creating kzg proof")
 		proof, aggregate, err := e.prover.ProveAggregate(
 			[][]curves.PairingScalar{polys},
 			[]curves.PairingPoint{commitment},
@@ -219,7 +220,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(

 		commitments = append(commitments, aggregate.ToAffineCompressed())

-		e.logger.Info("finalizing execution proof")
+		e.logger.Debug("finalizing execution proof")

 		e.stagedLobbyStateTransitions = &protobufs.CeremonyLobbyStateTransition{}
 		e.stagedLobbyStateTransitionsMx.Unlock()
@@ -254,7 +255,7 @@ func (e *CeremonyDataClockConsensusEngine) prove(
 			return nil, errors.Wrap(err, "prove")
 		}
 		e.state = consensus.EngineStatePublishing
-		e.logger.Info(
+		e.logger.Debug(
 			"returning new proven frame",
 			zap.Int("proof_count", len(aggregations)),
 			zap.Int("commitment_count", len(commitments)),
@@ -306,7 +307,7 @@ func (e *CeremonyDataClockConsensusEngine) setFrame(
 	if err != nil {
 		panic(errors.Wrap(err, "set frame"))
 	}
-	e.logger.Info("set frame", zap.Uint64("frame_number", frame.FrameNumber))
+	e.logger.Debug("set frame", zap.Uint64("frame_number", frame.FrameNumber))
 	e.currentDistance = distance
 	e.frame = frame.FrameNumber
 	e.parentSelector = parent.Bytes()
@@ -666,7 +667,6 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
 				e.logger.Info(
 					"adding candidate",
 					zap.Uint64("frame_number", value.FrameNumber),
-					zap.Binary("output", value.Output),
 				)

 				nextRunningFrames = append(
@@ -697,7 +697,7 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
 			e.logger.Info(
 				"committing candidate",
 				zap.Uint64("frame_number", s.FrameNumber),
-				zap.Binary("output", s.Output),
+				zap.Binary("prover", s.GetPublicKeySignatureEd448().PublicKey.KeyValue),
 			)

 			addr, err := s.GetAddress()
@@ -720,13 +720,13 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
 				return nil, errors.Wrap(err, "commit longest path")
 			}

-			e.logger.Info(
+			e.logger.Debug(
 				"committing aggregate proofs",
 				zap.Int("proof_count", len(s.AggregateProofs)),
 			)

 			for _, p := range s.AggregateProofs {
-				e.logger.Info(
+				e.logger.Debug(
 					"committing inclusions",
 					zap.Int("inclusions_count", len(p.InclusionCommitments)),
 				)
@@ -745,7 +745,7 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
 					return nil, errors.Wrap(err, "commit longest path")
 				}

-				e.logger.Info(
+				e.logger.Debug(
 					"committing proving key",
 					zap.Uint64("frame_number", s.FrameNumber),
 					zap.Binary("commitment", c.Commitment),
@@ -772,7 +772,7 @@ func (e *CeremonyDataClockConsensusEngine) commitLongestPath() (
 					return nil, errors.Wrap(err, "commit longest path")
 				}

-				e.logger.Info(
+				e.logger.Debug(
 					"committing key bundle",
 					zap.Uint64("frame_number", s.FrameNumber),
 					zap.Binary("commitment", c.Commitment),
@@ -842,7 +842,7 @@ func (e *CeremonyDataClockConsensusEngine) collect(
 	}

 	if e.syncingStatus == SyncStatusNotSyncing {
-		peer, err := e.pubSub.GetRandomPeer(e.filter)
+		peerId, err := e.pubSub.GetRandomPeer(e.filter)
 		if err != nil {
 			if errors.Is(err, p2p.ErrNoPeersAvailable) {
 				e.logger.Warn("no peers available, skipping sync")
@@ -851,17 +851,16 @@ func (e *CeremonyDataClockConsensusEngine) collect(
 			}
 		} else {
 			e.syncingStatus = SyncStatusAwaitingResponse
-			e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
-			e.syncingTarget = peer
-			channel := e.createPeerReceiveChannel(peer)
 			e.logger.Info(
-				"listening on peer receive channel",
-				zap.Binary("channel", channel),
+				"setting syncing target",
+				zap.String("peer_id", peer.ID(peerId).String()),
 			)
+			channel := e.createPeerReceiveChannel(peerId)
 			e.pubSub.Subscribe(channel, e.handleSync, true)
+			e.syncingTarget = peerId

 			e.pubSub.Subscribe(
-				append(append([]byte{}, e.filter...), peer...),
+				append(append([]byte{}, e.filter...), peerId...),
 				func(message *pb.Message) error { return nil },
 				true,
 			)
@@ -869,7 +868,7 @@ func (e *CeremonyDataClockConsensusEngine) collect(
 			go func() {
 				time.Sleep(2 * time.Second)
 				if err := e.publishMessage(
-					append(append([]byte{}, e.filter...), peer...),
+					append(append([]byte{}, e.filter...), peerId...),
 					&protobufs.ClockFramesRequest{
 						Filter: e.filter,
 						FromFrameNumber: latest.FrameNumber + 1,
@@ -7,7 +7,6 @@ import (
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
 	"golang.org/x/crypto/sha3"
-	"golang.org/x/sync/errgroup"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/types/known/anypb"
 	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@@ -27,6 +26,10 @@ func (e *CeremonyDataClockConsensusEngine) handleSync(
 		zap.Binary("from", message.From),
 		zap.Binary("signature", message.Signature),
 	)
+	if bytes.Equal(message.From, e.pubSub.GetPeerID()) {
+		return nil
+	}
+
 	msg := &protobufs.Message{}

 	if err := proto.Unmarshal(message.Data, msg); err != nil {
@@ -38,35 +41,15 @@ func (e *CeremonyDataClockConsensusEngine) handleSync(
 		return errors.Wrap(err, "handle sync")
 	}

-	eg := errgroup.Group{}
-	eg.SetLimit(len(e.executionEngines))
-
-	for name := range e.executionEngines {
-		name := name
-		eg.Go(func() error {
-			// if message,err := e.executionEngines[name].ProcessMessage(
-			if _, err := e.executionEngines[name].ProcessMessage(
-				msg.Address,
-				msg,
-			); err != nil {
-				e.logger.Error(
-					"could not process message for engine",
-					zap.Error(err),
-					zap.String("engine_name", name),
-				)
-				return err
-			}
-
-			return nil
-		})
-	}
-
-	if err := eg.Wait(); err != nil {
-		e.logger.Error("rejecting invalid message", zap.Error(err))
-		return errors.Wrap(err, "handle sync")
-	}
-
 	switch any.TypeUrl {
+	case protobufs.ClockFrameType:
+		if err := e.handleClockFrameData(
+			message.From,
+			msg.Address,
+			any,
+		); err != nil {
+			return errors.Wrap(err, "handle sync")
+		}
 	case protobufs.ClockFramesResponseType:
 		if err := e.handleClockFramesResponse(
 			message.From,
@@ -127,7 +110,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesResponse(
 	address []byte,
 	any *anypb.Any,
 ) error {
-	if bytes.Equal(address, e.provingKeyAddress) {
+	if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
 		return nil
 	}

@@ -208,7 +191,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesResponse(
 		for _, commit := range proof.GetInclusionCommitments() {
 			switch commit.TypeUrl {
 			case protobufs.IntrinsicExecutionOutputType:
-				e.logger.Info("confirming inclusion in aggregate")
+				e.logger.Debug("confirming inclusion in aggregate")
 				digest := sha3.NewShake256()
 				_, err := digest.Write(commit.Data)
 				if err != nil {
@@ -253,7 +236,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesResponse(
 					)
 					return errors.Wrap(err, "handle clock frame response")
 				}
-				e.logger.Info(
+				e.logger.Debug(
 					"created fft of polynomial",
 					zap.Int("poly_size", len(evalPoly)),
 				)
@@ -355,7 +338,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesResponse(
 		if err != nil {
 			return errors.Wrap(err, "handle clock frame data")
 		}
-		e.logger.Info(
+		e.logger.Debug(
 			"difference between selector/discriminator",
 			zap.Binary("difference", distance.Bytes()),
 		)
@@ -398,31 +381,28 @@ func (e *CeremonyDataClockConsensusEngine) handleProvingKeyRequest(
 	address []byte,
 	any *anypb.Any,
 ) error {
-	if bytes.Equal(address, e.provingKeyAddress) {
+	if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
 		return nil
 	}

 	request := &protobufs.ProvingKeyRequest{}
 	if err := any.UnmarshalTo(request); err != nil {
-		return errors.Wrap(err, "handle proving key request")
+		return nil
 	}

 	if len(request.ProvingKeyBytes) == 0 {
-		e.logger.Warn(
+		e.logger.Debug(
 			"received proving key request for empty key",
 			zap.Binary("peer_id", peerID),
 			zap.Binary("address", address),
 		)
-		return errors.Wrap(
-			errors.New("empty proving key"),
-			"handle proving key request",
-		)
+		return nil
 	}

 	channel := e.createPeerSendChannel(peerID)
 	e.pubSub.Subscribe(channel, e.handleSync, true)

-	e.logger.Info(
+	e.logger.Debug(
 		"received proving key request",
 		zap.Binary("peer_id", peerID),
 		zap.Binary("address", address),
@@ -433,26 +413,26 @@ func (e *CeremonyDataClockConsensusEngine) handleProvingKeyRequest(
 	inclusion, err := e.keyStore.GetProvingKey(request.ProvingKeyBytes)
 	if err != nil {
 		if !errors.Is(err, store.ErrNotFound) {
-			e.logger.Error(
+			e.logger.Debug(
 				"peer asked for proving key that returned error",
 				zap.Binary("peer_id", peerID),
 				zap.Binary("address", address),
 				zap.Binary("proving_key", request.ProvingKeyBytes),
 			)
-			return errors.Wrap(err, "handle proving key request")
+			return nil
 		}

 		provingKey, err = e.keyStore.GetStagedProvingKey(request.ProvingKeyBytes)
 		if !errors.Is(err, store.ErrNotFound) {
-			e.logger.Error(
+			e.logger.Debug(
 				"peer asked for proving key that returned error",
 				zap.Binary("peer_id", peerID),
 				zap.Binary("address", address),
 				zap.Binary("proving_key", request.ProvingKeyBytes),
 			)
-			return errors.Wrap(err, "handle proving key request")
+			return nil
 		} else if err != nil {
-			e.logger.Warn(
+			e.logger.Debug(
 				"peer asked for unknown proving key",
 				zap.Binary("peer_id", peerID),
 				zap.Binary("address", address),
@@ -463,13 +443,13 @@ func (e *CeremonyDataClockConsensusEngine) handleProvingKeyRequest(
 	} else {
 		err := proto.Unmarshal(inclusion.Data, provingKey)
 		if err != nil {
-			e.logger.Error(
+			e.logger.Debug(
 				"inclusion commitment could not be deserialized",
 				zap.Binary("peer_id", peerID),
 				zap.Binary("address", address),
 				zap.Binary("proving_key", request.ProvingKeyBytes),
 			)
-			return errors.Wrap(err, "handle proving key request")
+			return nil
 		}
 	}

@@ -485,7 +465,7 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesRequest(
 	address []byte,
 	any *anypb.Any,
 ) error {
-	if bytes.Equal(address, e.provingKeyAddress) {
+	if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
 		return nil
 	}

@@ -543,8 +523,8 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesRequest(
 	}

 	to := request.ToFrameNumber
-	if to == 0 || to-request.FromFrameNumber > 128 {
-		to = request.FromFrameNumber + 127
+	if to == 0 || to-request.FromFrameNumber > 32 {
+		to = request.FromFrameNumber + 31
 	}

 	set := []*protobufs.ClockFrame{base}
@@ -586,6 +566,12 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesRequest(
 				return errors.Wrap(err, "handle clock frame request")
 			}
 		} else {
+			if err = e.publishMessage(
+				append(append([]byte{}, e.filter...), peerID...),
+				frame,
+			); err != nil {
+				return errors.Wrap(err, "handle clock frame request")
+			}
 			nextSpan = append(nextSpan, frame)
 			set = append(set, frame)
 		}
@@ -622,6 +608,12 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFramesRequest(
 			return errors.Wrap(err, "handle clock frame request")
 		}

+		if err = e.publishMessage(
+			append(append([]byte{}, e.filter...), peerID...),
+			frame,
+		); err != nil {
+			return errors.Wrap(err, "handle clock frame request")
+		}
 		nextSpan = append(nextSpan, frame)
 		set = append(set, frame)
 	}
@@ -96,7 +96,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
 	}

 	if e.frame > frame.FrameNumber {
-		e.logger.Info(
+		e.logger.Debug(
 			"received anachronistic frame",
 			zap.Binary("sender", peerID),
 			zap.Binary("filter", frame.Filter),
@@ -107,7 +107,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
 	}

 	if e.difficulty != frame.Difficulty {
-		e.logger.Info(
+		e.logger.Debug(
 			"frame difficulty mismatched",
 			zap.Uint32("difficulty", frame.Difficulty),
 		)
@@ -117,7 +117,7 @@ func (e *MasterClockConsensusEngine) handleClockFrameData(
 		)
 	}

-	e.logger.Info(
+	e.logger.Debug(
 		"got clock frame",
 		zap.Binary("sender", peerID),
 		zap.Binary("filter", frame.Filter),
@@ -154,7 +154,7 @@ func (e *MasterClockConsensusEngine) enqueueSeenFrame(
 		}
 	}
 	if !found {
-		e.logger.Info(
+		e.logger.Debug(
 			"enqueuing frame for consensus",
 			zap.Uint64("frame_number", frame.FrameNumber),
 		)
@@ -169,7 +169,7 @@ func (e *MasterClockConsensusEngine) publishProof(
 	frame *protobufs.ClockFrame,
 ) error {
 	if e.state == consensus.EngineStatePublishing {
-		e.logger.Info(
+		e.logger.Debug(
 			"publishing frame",
 			zap.Uint64("frame_number", frame.FrameNumber),
 		)
@@ -20,7 +20,7 @@ func (e *MasterClockConsensusEngine) prove(
 	previousFrame *protobufs.ClockFrame,
 ) (*protobufs.ClockFrame, error) {
 	if e.state == consensus.EngineStateProving {
-		e.logger.Info("proving new frame")
+		e.logger.Debug("proving new frame")

 		frame, err := protobufs.ProveMasterClockFrame(
 			previousFrame,
@@ -31,7 +31,7 @@ func (e *MasterClockConsensusEngine) prove(
 		}

 		e.state = consensus.EngineStatePublishing
-		e.logger.Info("returning new proven frame")
+		e.logger.Debug("returning new proven frame")
 		return frame, nil
 	}

@@ -42,7 +42,7 @@ func (e *MasterClockConsensusEngine) setFrame(frame *protobufs.ClockFrame) {
 	previousSelectorBytes := [516]byte{}
 	copy(previousSelectorBytes[:], frame.Output[:516])

-	e.logger.Info("set frame", zap.Uint64("frame_number", frame.FrameNumber))
+	e.logger.Debug("set frame", zap.Uint64("frame_number", frame.FrameNumber))
 	e.frame = frame.FrameNumber
 	e.latestFrame = frame

@@ -54,7 +54,7 @@ func (e *MasterClockConsensusEngine) setFrame(frame *protobufs.ClockFrame) {
 func (
 	e *MasterClockConsensusEngine,
 ) createGenesisFrame() *protobufs.ClockFrame {
-	e.logger.Info("creating genesis frame")
+	e.logger.Debug("creating genesis frame")
 	b := sha3.Sum256(e.input)
 	v := vdf.New(e.difficulty, b)

@@ -62,7 +62,7 @@ func (
 	o := v.GetOutput()
 	inputMessage := o[:]

-	e.logger.Info("proving genesis frame")
+	e.logger.Debug("proving genesis frame")
 	input := []byte{}
 	input = append(input, e.filter...)
 	input = binary.BigEndian.AppendUint64(input, e.frame)
@@ -105,7 +105,7 @@ func (e *MasterClockConsensusEngine) collect(
 	currentFramePublished *protobufs.ClockFrame,
 ) (*protobufs.ClockFrame, error) {
 	if e.state == consensus.EngineStateCollecting {
-		e.logger.Info("collecting vdf proofs")
+		e.logger.Debug("collecting vdf proofs")

 		latest := e.latestFrame

@@ -119,11 +119,11 @@ func (e *MasterClockConsensusEngine) collect(
 			}
 		} else {
 			e.syncingStatus = SyncStatusAwaitingResponse
-			e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
+			e.logger.Debug("setting syncing target", zap.Binary("peer_id", peer))
 			e.syncingTarget = peer

 			channel := e.createPeerReceiveChannel(peer)
-			e.logger.Info(
+			e.logger.Debug(
 				"listening on peer receive channel",
 				zap.Binary("channel", channel),
 			)
@@ -151,7 +151,7 @@ func (e *MasterClockConsensusEngine) collect(

 		waitDecay := time.Duration(2000)
 		for e.syncingStatus != SyncStatusNotSyncing {
-			e.logger.Info(
+			e.logger.Debug(
 				"waiting for sync to complete...",
 				zap.Duration("wait_decay", waitDecay),
 			)
@@ -161,7 +161,7 @@ func (e *MasterClockConsensusEngine) collect(
 			waitDecay = waitDecay * 2
 			if waitDecay >= (100 * (2 << 6)) {
 				if e.syncingStatus == SyncStatusAwaitingResponse {
-					e.logger.Info("maximum wait for sync response, skipping sync")
+					e.logger.Debug("maximum wait for sync response, skipping sync")
 					e.syncingStatus = SyncStatusNotSyncing
 					break
 				} else {
@@ -170,14 +170,14 @@ func (e *MasterClockConsensusEngine) collect(
 			}
 		}

-		e.logger.Info("selecting leader")
+		e.logger.Debug("selecting leader")
 		latestFrame, err := e.confirmLatestFrame()
 		if err != nil {
 			e.logger.Error("could not confirm latest frame", zap.Error(err))
 			return nil, errors.Wrap(err, "collect")
 		}

-		e.logger.Info(
+		e.logger.Debug(
 			"returning leader frame",
 			zap.Uint64("frame_number", latestFrame.FrameNumber),
 		)
@@ -210,14 +210,14 @@ func (
 		curr := e.seenFrames[0]
 		e.seenFrames = e.seenFrames[1:]

-		e.logger.Info(
+		e.logger.Debug(
 			"checking continuity for frame",
 			zap.Uint64("frame_number", curr.FrameNumber),
 		)

 		if prev.FrameNumber+1 < curr.FrameNumber ||
 			prev.FrameNumber > curr.FrameNumber {
-			e.logger.Info(
+			e.logger.Debug(
 				"continuity break found",
 				zap.Uint64("prev_frame_number", prev.FrameNumber),
 				zap.Uint64("curr_frame_number", curr.FrameNumber),
@@ -229,7 +229,7 @@ func (
 				prev = curr
 				committedSet = append(committedSet, prev)
 			} else {
-				e.logger.Info("frame mismatch on input/output")
+				e.logger.Debug("frame mismatch on input/output")
 			}
 		}

@@ -251,7 +251,7 @@ func (
 		return nil, errors.Wrap(err, "confirm latest frame")
 	}

-	e.logger.Info("stored frames", zap.Int("frame_count", len(committedSet)))
+	e.logger.Debug("stored frames", zap.Int("frame_count", len(committedSet)))

 	e.historicFramesMx.Lock()

@@ -128,7 +128,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesResponse(
 	}

 	for _, frame := range response.ClockFrames {
-		e.logger.Info(
+		e.logger.Debug(
 			"processing clock frame",
 			zap.Binary("sender", peerID),
 			zap.Binary("filter", frame.Filter),
@@ -140,7 +140,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesResponse(
 			return errors.Wrap(err, "handle clock frame response")
 		}

-		e.logger.Info(
+		e.logger.Debug(
 			"clock frame was valid",
 			zap.Binary("sender", peerID),
 			zap.Binary("filter", frame.Filter),
@@ -175,7 +175,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesRequest(

 	e.pubSub.Subscribe(channel, e.handleSync, true)

-	e.logger.Info(
+	e.logger.Debug(
 		"received clock frame request",
 		zap.Binary("peer_id", peerID),
 		zap.Uint64("from_frame_number", request.FromFrameNumber),
@@ -185,7 +185,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesRequest(
 	from := request.FromFrameNumber

 	if e.frame < from || len(e.historicFrames) == 0 {
-		e.logger.Info(
+		e.logger.Debug(
 			"peer asked for undiscovered frame",
 			zap.Binary("peer_id", peerID),
 			zap.Uint64("frame_number", request.FromFrameNumber),
@@ -212,7 +212,7 @@ func (e *MasterClockConsensusEngine) handleClockFramesRequest(
 		to = e.latestFrame.FrameNumber
 	}

-	e.logger.Info(
+	e.logger.Debug(
 		"sending response",
 		zap.Binary("peer_id", peerID),
 		zap.Uint64("from", from),
@@ -282,12 +282,6 @@ func (e *CeremonyExecutionEngine) RunWorker() {

 		switch app.LobbyState {
 		case application.CEREMONY_APPLICATION_STATE_OPEN:
-			e.logger.Info(
-				"lobby open for joins",
-				zap.Any("lobby_joins", app.LobbyJoins),
-				zap.Any("preferred_participants", app.NextRoundPreferredParticipants),
-				zap.Uint64("state_count", app.StateCount),
-			)
 			e.alreadyPublishedShare = false
 			e.alreadyPublishedTranscript = false
 			alreadyJoined := false
@@ -301,6 +295,14 @@ func (e *CeremonyExecutionEngine) RunWorker() {
 				}
 			}

+			e.logger.Info(
+				"lobby open for joins",
+				zap.Int("joined_participants", len(app.LobbyJoins)),
+				zap.Int("preferred_participants", len(app.NextRoundPreferredParticipants)),
+				zap.Bool("in_lobby", alreadyJoined),
+				zap.Uint64("state_count", app.StateCount),
+			)
+
 			if !alreadyJoined {
 				e.logger.Info(
 					"joining lobby",
@@ -318,33 +320,37 @@ func (e *CeremonyExecutionEngine) RunWorker() {
 				e.ensureSecrets(app)
 			}
 		case application.CEREMONY_APPLICATION_STATE_IN_PROGRESS:
+			inRound := false
+			for _, p := range app.ActiveParticipants {
+				if bytes.Equal(p.KeyValue, e.proverPublicKey) {
+					inRound = true
+					break
+				}
+			}
+
+			if len(e.activeSecrets) == 0 && inRound {
+				// If we ended up in the scenario where we do not have any secrets
+				// available but we're in the round, we should politely leave.
+				e.publishDroppedParticipant(e.proverPublicKey)
+				continue
+			}
+
 			e.logger.Info(
 				"round in progress",
 				zap.Any("participants", app.ActiveParticipants),
-				zap.Any("current_seen_attestations", app.LatestSeenProverAttestations),
+				zap.Any("current_seen_attestations", len(app.LatestSeenProverAttestations)),
 				zap.Any(
 					"current_dropped_attestations",
-					app.DroppedParticipantAttestations,
+					len(app.DroppedParticipantAttestations),
 				),
 				zap.Any(
 					"preferred_participants_for_next_round",
-					app.NextRoundPreferredParticipants,
+					len(app.NextRoundPreferredParticipants),
 				),
-				zap.Uint64("current_round", app.RoundCount),
+				zap.Bool("in_round", inRound),
+				zap.Uint64("current_sub_round", app.RoundCount),
 			)

-			if len(e.activeSecrets) == 0 {
-				// If we ended up in the scenario where we do not have any secrets
-				// available but we're in the round, we should politely leave.
-				for _, p := range app.ActiveParticipants {
-					if bytes.Equal(p.KeyValue, e.proverPublicKey) {
-						e.publishDroppedParticipant(e.proverPublicKey)
-						break
-					}
-				}
-				continue
-			}
-
 			shouldConnect := false
 			position := 0
 			if len(e.peerChannels) == 0 && app.RoundCount == 1 &&
@@ -405,15 +411,15 @@ func (e *CeremonyExecutionEngine) RunWorker() {
 		case application.CEREMONY_APPLICATION_STATE_FINALIZING:
 			e.logger.Info(
 				"round contribution finalizing",
-				zap.Any("participants", app.ActiveParticipants),
-				zap.Any("current_seen_attestations", app.LatestSeenProverAttestations),
+				zap.Any("participants", len(app.ActiveParticipants)),
+				zap.Any("current_seen_attestations", len(app.LatestSeenProverAttestations)),
 				zap.Any(
 					"current_dropped_attestations",
-					app.DroppedParticipantAttestations,
+					len(app.DroppedParticipantAttestations),
 				),
 				zap.Any(
 					"preferred_participants_for_next_round",
-					app.NextRoundPreferredParticipants,
+					len(app.NextRoundPreferredParticipants),
 				),
 				zap.Int("finalized_shares", len(app.TranscriptShares)),
 			)
@@ -218,7 +218,7 @@ func (b *BlossomSub) GetRandomPeer(bitmask []byte) ([]byte, error) {
 			"get random peer",
 		)
 	}
-	b.logger.Info("selecting from peers", zap.Any("peer_ids", peers))
+	b.logger.Debug("selecting from peers", zap.Any("peer_ids", peers))
 	sel, err := rand.Int(rand.Reader, big.NewInt(int64(len(peers))))
 	if err != nil {
 		return nil, errors.Wrap(err, "get random peer")