ceremonyclient/node/consensus/master/master_clock_consensus_engine.go
Cassandra Heart 367566ea88
Squashed commit of the following:
commit 8e57cb3c50417665495617721687da33f7ae2a33
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Mon May 27 00:06:39 2024 -0500

    remove binaries, release ready

commit a032474e5f420707ae1b61f2cb1bcf87a7de113c
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Mon May 27 00:04:25 2024 -0500

    Signatory #8 added

commit 86ab72ea75a366045052daa2a5c71f4f1e2de717
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 23:55:02 2024 -0500

    Signatory #1 added

commit 9853bbff1c18bb941b4563acf3afbbc72846e57a
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 23:52:43 2024 -0500

    Signatory #16 added

commit d1eb0bd2b2e0aab92cbe1c9ca8c43dc19d93eb22
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 23:24:51 2024 -0500

    Signatory #2 added

commit 270591416ba2817c879471ea27bc93769bb22819
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 22:33:53 2024 -0500

    Signatory #3 added

commit ea767f9eaa5de1e7e00e3e87ff2760a23377205b
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 22:10:50 2024 -0500

    Signatory #12 added

commit e73a0a005a01b8045859673781f3e2d6360f13f8
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 22:09:07 2024 -0500

    Signatory #17 added

commit 07be249c52682c66c91a07df6eeb0eadf41603b5
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 22:04:40 2024 -0500

    Signatory #14 added

commit dbc014b8127e6452e31609b130ea68759a3c1f4a
Author: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Date:   Mon May 27 05:55:21 2024 +0300

    Signatory #4 added (#223)

commit 13407f6ff3347bd57fbacabda42d60d0285a927f
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 21:34:26 2024 -0500

    Signatory #13 added

commit 3731de7b66bd403c92bb7d66db25890567652e9d
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 21:33:50 2024 -0500

    add digests

commit b0a3493dd2f6162d7e02e9cb10aa482fb3ff6e9b
Author: Cassandra Heart <cassandra@quilibrium.com>
Date:   Sun May 26 21:26:54 2024 -0500

    replace binaries with patch build

commit 6a20b44441cba01189050ccf45b53c99f9a218ab
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sun May 26 21:15:07 2024 -0500

    fix: switch RPC for peer and node info (#222)

commit 72d730d23f91ce9a2bdd55617fb70f7a1193645e
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sun May 26 20:53:38 2024 -0500

    feat: recalibrate self-test on the fly (#221)

commit 99702af0b7afd3b556bb20e9bebc238b3e52b53a
Author: Marius Scurtescu <marius.scurtescu@gmail.com>
Date:   Sun May 26 19:31:39 2024 -0400

    Signer related fixes (#220)

    * add pems 16 and 17

    * remove .bin extension from generated binaries

    * no more json files to copy to docker image

commit 88d704ab16dd3a18985a7179f88fb39908110532
Author: Marius Scurtescu <marius.scurtescu@gmail.com>
Date:   Sun May 26 19:07:52 2024 -0400

    Docker split take 2 (#219)

    * split runtime docker files into a docker subfolder

    * split DOCKER-README.md

    * updated docker instructions

    * add restore command

    * add image update related tasks

    * add command to test if P2P port is visible

    * Remove bootstrap peer (#189)

    * Change bootstrap servers to DHT-only peers (#187)

    * support voucher file-based claims (#183)

    * Change bootstrap servers to DHT-only peers

    Changing my bootstrap servers to DHT-only peers with somewhat lower
    specs. One of the new ones is in the US and the other one is in
    Switzerland. Both use reliable providers and have 10Gbps network
    interfaces.

    ---------

    Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

    * Don't run self-test in DHT-only mode (#186)

    * support voucher file-based claims (#183)

    * Don't run self-test in DHT-only mode

    The node tries to create a self-test when run with the `-dht-only`
    flag, but it doesn't load the KZG ceremony data in DHT-only mode,
    which leads to a crash.

    Don't run self-test when the `-dht-only` flag is set.

    I tested by starting a node locally with and without existing
    self-test and with the `-dht-only` flag.

    ---------

    Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

    * Embed json files in binary (#182)

    * Embed ceremony.json in binary

    * Embed retroactive_peers.json in binary

    * Signers build and verification tasks (#181)

    * add signers specific Taskfile

    * add verify tasks

    * move signer task under signer folder

    * create docker image specific for signers

    * map current user into docker image and container

    * ignore node-tmp-*

    * add verify:build:internal

    * prevent tasks with docker commands from being run inside a container

    * rename *:internal to *:container

    * add README.md

    * add pem files to git

    * Updating Q Guide link (#173)

    * Update README.md

    Updated link to Quilibrium guide to new website

    * Update README.md

    ---------

    Co-authored-by: littleblackcloud <163544315+littleblackcloud@users.noreply.github.com>
    Co-authored-by: Agost Biro <5764438+agostbiro@users.noreply.github.com>
    Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
    Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>

commit 20560176dcb8ace8ff4fe5a3bb0a8a188c11add2
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sun May 26 17:17:36 2024 -0500

    Revert "Change volume mapping so .config folder is created inside node folder…" (#218)

    This reverts commit 27f50a92c6f5e340fd4106da828c6e8cdc12116b.

commit b9ea4c158e4657c09976fa6b2e625b3809119687
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sun May 26 14:46:40 2024 -0500

    fix: keys file remains null (#217)

commit 6ed6728bfdb6825470cb772ee44a6a468887e3eb
Author: luk <luk@luktech.dev>
Date:   Sun May 26 22:38:50 2024 +0300

    switched get node info response to use masterClock frame for maxFrame field (#212)

commit 2bc8ab6a0a243a28300f999af695fe3b42db5e3e
Author: Ravish Ahmad <ravishahmad16@gmail.com>
Date:   Mon May 27 01:07:53 2024 +0530

    Update main.go to fix Q logo (#213)

    The Q logo was not appearing correctly in the terminal while running the node. Added a newline character after "Signature check passed" to fix it.

commit 27f50a92c6f5e340fd4106da828c6e8cdc12116b
Author: AvAcalho <158583728+AvAcalho@users.noreply.github.com>
Date:   Sun May 26 20:37:14 2024 +0100

    Change volume mapping so .config folder is created inside node folder and not on root (#214)

commit 4656dedc2a2de608b9d69d0a6b4559b7169b03be
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sun May 26 14:27:55 2024 -0500

    experiment: verify in channel (#215)

commit 2bbd1e0690
Author: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>
Date:   Sat May 25 00:22:50 2024 -0500

    v1.4.18 (#193)

    * Remove bootstrap peer (#189)

    * Change bootstrap servers to DHT-only peers (#187)

    * support voucher file-based claims (#183)

    * Change bootstrap servers to DHT-only peers

    Changing my bootstrap servers to DHT-only peers with somewhat lower
    specs. One of the new ones is in the US and the other one is in
    Switzerland. Both use reliable providers and have 10Gbps network
    interfaces.

    ---------

    Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

    * Don't run self-test in DHT-only mode (#186)

    * support voucher file-based claims (#183)

    * Don't run self-test in DHT-only mode

    The node tries to create a self-test when run with the `-dht-only`
    flag, but it doesn't load the KZG ceremony data in DHT-only mode,
    which leads to a crash.

    Don't run self-test when the `-dht-only` flag is set.

    I tested by starting a node locally with and without existing
    self-test and with the `-dht-only` flag.

    ---------

    Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

    * Embed json files in binary (#182)

    * Embed ceremony.json in binary

    * Embed retroactive_peers.json in binary

    * Signers build and verification tasks (#181)

    * add signers specific Taskfile

    * add verify tasks

    * move signer task under signer folder

    * create docker image specific for signers

    * map current user into docker image and container

    * ignore node-tmp-*

    * add verify:build:internal

    * prevent tasks with docker commands from being run inside a container

    * rename *:internal to *:container

    * add README.md

    * add pem files to git

    * Updating Q Guide link (#173)

    * Update README.md

    Updated link to Quilibrium guide to new website

    * Update README.md

    * feat: network switching and namespaced announce strings/bitmasks (#190)

    * feat: network switching and namespaced announce strings/bitmasks

    * bump version name and logo

    * feat: mini pomw proofs as part of peer manifest (#191)

    * shift default config directory under current folder (#176)

    * feat: signature check (#192)

    * feat: signature check

    * adjust docker command so it doesn't invoke sigcheck

    * remove old version

    * add binaries and digests

    * fix bug, revert build

    * shasum has weird byte at end

    * proper binaries and digests

    * Signatory #13 added

    * Signatory #3 added

    * Signer 4 (#194)

    * Signatory #5 added

    * Signatory #9 added (#195)

    * Signatory #1 added

    * added sig.6 files (#196)

    * Signatories #8 and #16 added

    * Signatory #12 added

    * Add signature (#197)

    * reset build for v1.4.18 after testnet bug

    * updated build, resigned by #13

    * Signatory #16 added

    * added sig.6 files (#198)

    * Signatory #8 added

    * Signatory #17 added

    * Signatory #1 added

    * Signatory #7 added

    * Signatory #4 added

    * Signatory #14 added

    * remove binaries, ready to ship

    ---------

    Co-authored-by: littleblackcloud <163544315+littleblackcloud@users.noreply.github.com>
    Co-authored-by: Agost Biro <5764438+agostbiro@users.noreply.github.com>
    Co-authored-by: Marius Scurtescu <marius.scurtescu@gmail.com>
    Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
    Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
    Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>

package master

import (
	"bytes"
	"context"
	gcrypto "crypto"
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"io"
	"math/big"
	"sync"
	"time"

	"github.com/iden3/go-iden3-crypto/poseidon"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/mr-tron/base58"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"source.quilibrium.com/quilibrium/monorepo/node/config"
	"source.quilibrium.com/quilibrium/monorepo/node/consensus"
	qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
	"source.quilibrium.com/quilibrium/monorepo/node/crypto"
	"source.quilibrium.com/quilibrium/monorepo/node/execution"
	"source.quilibrium.com/quilibrium/monorepo/node/keys"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
	"source.quilibrium.com/quilibrium/monorepo/node/store"
)

type SyncStatusType int

const (
	SyncStatusNotSyncing = iota
	SyncStatusAwaitingResponse
	SyncStatusSynchronizing
)
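
// MasterClockConsensusEngine produces and validates master clock frames,
// runs and verifies peer self-tests, and maintains peer manifest data.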
type MasterClockConsensusEngine struct {
	*protobufs.UnimplementedValidationServiceServer
	difficulty uint32
	logger *zap.Logger
	state consensus.EngineState
	pubSub p2p.PubSub
	keyManager keys.KeyManager
	frameProver crypto.FrameProver
	lastFrameReceivedAt time.Time
	frameChan chan *protobufs.ClockFrame
	executionEngines map[string]execution.ExecutionEngine
	filter []byte
	input []byte
	syncingStatus SyncStatusType
	syncingTarget []byte
	engineMx sync.Mutex
	seenFramesMx sync.Mutex
	historicFramesMx sync.Mutex
	seenFrames []*protobufs.ClockFrame
	historicFrames []*protobufs.ClockFrame
	clockStore store.ClockStore
	masterTimeReel *qtime.MasterTimeReel
	peerInfoManager p2p.PeerInfoManager
	report *protobufs.SelfTestReport
	frameValidationCh chan *protobufs.ClockFrame
	bandwidthTestCh chan []byte
	verifyTestCh chan verifyChallenge
	currentReceivingSyncPeers int
	currentReceivingSyncPeersMx sync.Mutex
}

var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil)
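
// NewMasterClockConsensusEngine assembles the engine from its dependencies,
// panicking if any required dependency or configuration value is missing or
// invalid.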
func NewMasterClockConsensusEngine(
	engineConfig *config.EngineConfig,
	logger *zap.Logger,
	clockStore store.ClockStore,
	keyManager keys.KeyManager,
	pubSub p2p.PubSub,
	frameProver crypto.FrameProver,
	masterTimeReel *qtime.MasterTimeReel,
	peerInfoManager p2p.PeerInfoManager,
	report *protobufs.SelfTestReport,
) *MasterClockConsensusEngine {
	if logger == nil {
		panic(errors.New("logger is nil"))
	}

	if engineConfig == nil {
		panic(errors.New("engine config is nil"))
	}

	if keyManager == nil {
		panic(errors.New("key manager is nil"))
	}

	if pubSub == nil {
		panic(errors.New("pubsub is nil"))
	}

	if frameProver == nil {
		panic(errors.New("frame prover is nil"))
	}

	if masterTimeReel == nil {
		panic(errors.New("master time reel is nil"))
	}

	seed, err := hex.DecodeString(engineConfig.GenesisSeed)
	if err != nil {
		panic(errors.New("genesis seed is nil"))
	}

	e := &MasterClockConsensusEngine{
		difficulty: 10000,
		logger: logger,
		state: consensus.EngineStateStopped,
		keyManager: keyManager,
		pubSub: pubSub,
		executionEngines: map[string]execution.ExecutionEngine{},
		frameChan: make(chan *protobufs.ClockFrame),
		input: seed,
		lastFrameReceivedAt: time.Time{},
		syncingStatus: SyncStatusNotSyncing,
		clockStore: clockStore,
		frameProver: frameProver,
		masterTimeReel: masterTimeReel,
		peerInfoManager: peerInfoManager,
		report: report,
		frameValidationCh: make(chan *protobufs.ClockFrame),
		bandwidthTestCh: make(chan []byte),
		verifyTestCh: make(chan verifyChallenge),
	}

	e.addPeerManifestReport(e.pubSub.GetPeerID(), report)

	if e.filter, err = hex.DecodeString(engineConfig.Filter); err != nil {
		panic(errors.Wrap(err, "could not parse filter value"))
	}

	e.getProvingKey(engineConfig)

	if err := e.createCommunicationKeys(); err != nil {
		panic(err)
	}

	logger.Info("constructing consensus engine")

	return e
}
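
// Start launches the master time reel, the background validation, self-test,
// and frame production workers, subscribes to pubsub, and returns a channel
// used to report startup errors.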
func (e *MasterClockConsensusEngine) Start() <-chan error {
	e.logger.Info("starting master consensus engine")
	e.state = consensus.EngineStateStarting
	errChan := make(chan error)

	e.peerInfoManager.Start()

	e.state = consensus.EngineStateLoading
	e.logger.Info("syncing last seen state")

	err := e.masterTimeReel.Start()
	if err != nil {
		panic(err)
	}

	frame, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	e.buildHistoricFrameCache(frame)
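
	// Background worker: validate incoming clock frames that are within range
	// of the current head, and service queued bandwidth and proof-verification
	// tests.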
	go func() {
		for {
			select {
			case newFrame := <-e.frameValidationCh:
				head, err := e.masterTimeReel.Head()
				if err != nil {
					panic(err)
				}

				if head.FrameNumber > newFrame.FrameNumber ||
					newFrame.FrameNumber-head.FrameNumber > 128 {
					e.logger.Debug(
						"frame out of range, ignoring",
						zap.Uint64("number", newFrame.FrameNumber),
					)
					continue
				}

				if err := e.frameProver.VerifyMasterClockFrame(newFrame); err != nil {
					e.logger.Error("could not verify clock frame", zap.Error(err))
					continue
				}

				e.masterTimeReel.Insert(newFrame, false)
			case peerId := <-e.bandwidthTestCh:
				e.performBandwidthTest(peerId)
			case verifyTest := <-e.verifyTestCh:
				e.performVerifyTest(verifyTest)
			}
		}
	}()

	e.logger.Info("subscribing to pubsub messages")
	e.pubSub.Subscribe(e.filter, e.handleMessage, true)

	e.state = consensus.EngineStateCollecting
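
	// Serve the validation gRPC service over a direct libp2p channel so peers
	// can run bandwidth tests against this node.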
	go func() {
		server := grpc.NewServer(
			grpc.MaxSendMsgSize(600*1024*1024),
			grpc.MaxRecvMsgSize(600*1024*1024),
		)
		protobufs.RegisterValidationServiceServer(server, e)

		if err := e.pubSub.StartDirectChannelListener(
			e.pubSub.GetPeerID(),
			"validation",
			server,
		); err != nil {
			panic(err)
		}
	}()
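
	// Log peer store and network peer counts every ten seconds.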
	go func() {
		for {
			e.logger.Info(
				"peers in store",
				zap.Int("peer_store_count", e.pubSub.GetPeerstoreCount()),
				zap.Int("network_peer_count", e.pubSub.GetNetworkPeersCount()),
			)
			time.Sleep(10 * time.Second)
		}
	}()
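
	// Self-test loop: prove a challenge derived from the current head frame and
	// this node's peer ID, recalibrate the difficulty metric from the result,
	// and broadcast the refreshed report.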
	go func() {
		// Let it sit until we at least have a few more peers inbound
		time.Sleep(30 * time.Second)
		difficultyMetric := int64(100000)
		skew := (difficultyMetric * 12) / 10

		for {
			head, err := e.masterTimeReel.Head()
			if err != nil {
				panic(err)
			}

			e.report.MasterHeadFrame = head.FrameNumber
			e.report.DifficultyMetric = difficultyMetric

			parallelism := e.report.Cores - 1

			challenge := binary.BigEndian.AppendUint64(
				[]byte{},
				e.report.MasterHeadFrame,
			)
			challenge = append(challenge, e.pubSub.GetPeerID()...)

			ts, proofs, nextDifficultyMetric, err :=
				e.frameProver.CalculateChallengeProof(
					challenge,
					parallelism,
					skew,
				)
			if err != nil {
				panic(err)
			}

			e.logger.Info(
				"recalibrating difficulty metric",
				zap.Int64("previous_difficulty_metric", difficultyMetric),
				zap.Int64("next_difficulty_metric", nextDifficultyMetric),
			)
			difficultyMetric = nextDifficultyMetric
			skew = (nextDifficultyMetric * 12) / 10

			proof := binary.BigEndian.AppendUint64([]byte{}, uint64(ts))
			for i := 0; i < len(proofs); i++ {
				proof = append(proof, proofs[i]...)
			}
			e.report.Proof = proof

			e.logger.Info(
				"broadcasting self-test info",
				zap.Uint64("current_frame", e.report.MasterHeadFrame),
			)

			if err := e.publishMessage(e.filter, e.report); err != nil {
				e.logger.Debug("error publishing message", zap.Error(err))
			}
		}
	}()
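
	// Frame production loop: collect, prove, and publish frames as the time
	// reel advances; if no new frame arrives within 20 seconds, re-prove and
	// publish the current head.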
	go func() {
		newFrameCh := e.masterTimeReel.NewFrameCh()

		for e.state < consensus.EngineStateStopping {
			var err error
			select {
			case frame := <-newFrameCh:
				currentFrame := frame
				latestFrame := frame
				if latestFrame, err = e.collect(currentFrame); err != nil {
					e.logger.Error("could not collect", zap.Error(err))
					latestFrame = currentFrame
					continue
				}
				if latestFrame, err = e.prove(latestFrame); err != nil {
					e.logger.Error("could not prove", zap.Error(err))
					latestFrame = currentFrame
				}
				if err = e.publishProof(latestFrame); err != nil {
					e.logger.Error("could not publish", zap.Error(err))
				}
			case <-time.After(20 * time.Second):
				frame, err := e.masterTimeReel.Head()
				if err != nil {
					panic(err)
				}

				if frame, err = e.prove(frame); err != nil {
					e.logger.Error("could not prove", zap.Error(err))
					continue
				}

				if err = e.publishProof(frame); err != nil {
					e.logger.Error("could not publish", zap.Error(err))
				}
			}
		}
	}()

	go func() {
		errChan <- nil
	}()

	return errChan
}
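
// PerformValidation implements the ValidationService: it echoes the payload
// back to the caller, which peers use to measure round-trip bandwidth.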
func (e *MasterClockConsensusEngine) PerformValidation(
	ctx context.Context,
	msg *protobufs.ValidationMessage,
) (*protobufs.ValidationMessage, error) {
	return msg, nil
}
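
// Stop shuts the engine down: it unregisters all execution engines, stops the
// time reel and peer info manager, and reports completion on the returned
// channel.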
func (e *MasterClockConsensusEngine) Stop(force bool) <-chan error {
	e.logger.Info("stopping consensus engine")
	e.state = consensus.EngineStateStopping
	errChan := make(chan error)

	wg := sync.WaitGroup{}
	wg.Add(len(e.executionEngines))
	for name := range e.executionEngines {
		name := name
		go func(name string) {
			frame, err := e.masterTimeReel.Head()
			if err != nil {
				errChan <- err
				return
			}

			err = <-e.UnregisterExecutor(name, frame.FrameNumber, force)
			if err != nil {
				errChan <- err
			}
			wg.Done()
		}(name)
	}

	e.logger.Info("waiting for execution engines to stop")
	wg.Wait()
	e.logger.Info("execution engines stopped")

	e.masterTimeReel.Stop()
	e.peerInfoManager.Stop()
	e.state = consensus.EngineStateStopped

	go func() {
		errChan <- nil
	}()
	return errChan
}
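
// verifyChallenge carries a peer's self-test proof so that it can be verified
// off the message handling path.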
type verifyChallenge struct {
	peerID []byte
	challenge []byte
	timestamp int64
	difficultyMetric int64
	proofs [][]byte
}
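
// performVerifyTest verifies a peer's challenge proof, lowering the peer's
// score on failure and refreshing its last-seen timestamp on success.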
func (e *MasterClockConsensusEngine) performVerifyTest(
	challenge verifyChallenge,
) {
	if !e.frameProver.VerifyChallengeProof(
		challenge.challenge,
		challenge.timestamp,
		challenge.difficultyMetric,
		challenge.proofs,
	) {
		e.logger.Warn(
			"received invalid proof from peer",
			zap.String("peer_id", peer.ID(challenge.peerID).String()),
		)
		e.pubSub.SetPeerScore(challenge.peerID, -1000)
	} else {
		e.logger.Debug(
			"received valid proof from peer",
			zap.String("peer_id", peer.ID(challenge.peerID).String()),
		)

		info := e.peerInfoManager.GetPeerInfo(challenge.peerID)
		info.LastSeen = time.Now().UnixMilli()
	}
}
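
// performBandwidthTest opens a direct validation channel to the peer,
// round-trips 1 MiB of random data, and records the measured bandwidth in the
// peer's manifest; unreachable, invalid, or slow peers are scored down.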
func (e *MasterClockConsensusEngine) performBandwidthTest(peerID []byte) {
	result := e.pubSub.GetMultiaddrOfPeer(peerID)
	if result == "" {
		return
	}

	cc, err := e.pubSub.GetDirectChannel(peerID, "validation")
	if err != nil {
		e.logger.Debug(
			"could not connect to peer for validation",
			zap.String("peer_id", base58.Encode(peerID)),
		)

		// tag: dusk nuke this peer for now
		e.pubSub.SetPeerScore(peerID, -1000)
		return
	}

	client := protobufs.NewValidationServiceClient(cc)
	verification := make([]byte, 1048576)
	rand.Read(verification)
	start := time.Now().UnixMilli()
	validation, err := client.PerformValidation(
		context.Background(),
		&protobufs.ValidationMessage{
			Validation: verification,
		},
	)
	end := time.Now().UnixMilli()
	if err != nil && err != io.EOF {
		cc.Close()
		e.logger.Debug(
			"peer returned error",
			zap.String("peer_id", base58.Encode(peerID)),
			zap.Error(err),
		)

		// tag: dusk nuke this peer for now
		e.pubSub.SetPeerScore(peerID, -1000)
		return
	}
	cc.Close()

	if !bytes.Equal(verification, validation.Validation) {
		e.logger.Debug(
			"peer provided invalid verification",
			zap.String("peer_id", base58.Encode(peerID)),
		)

		// tag: dusk nuke this peer for now
		e.pubSub.SetPeerScore(peerID, -1000)
		return
	}

	if end-start > 2000 {
		e.logger.Debug(
			"peer has slow bandwidth, scoring out",
			zap.String("peer_id", base58.Encode(peerID)),
		)

		// tag: dusk nuke this peer for now
		e.pubSub.SetPeerScore(peerID, -1000)
		return
	}

	duration := end - start
	bandwidth := uint64(1048576*1000) / uint64(duration)

	manifest := e.peerInfoManager.GetPeerInfo(peerID)
	if manifest == nil {
		return
	}

	peerManifest := &p2p.PeerManifest{
		PeerId: peerID,
		Difficulty: manifest.Difficulty,
		DifficultyMetric: manifest.DifficultyMetric,
		Commit_16Metric: manifest.Commit_16Metric,
		Commit_128Metric: manifest.Commit_128Metric,
		Commit_1024Metric: manifest.Commit_1024Metric,
		Commit_65536Metric: manifest.Commit_65536Metric,
		Proof_16Metric: manifest.Proof_16Metric,
		Proof_128Metric: manifest.Proof_128Metric,
		Proof_1024Metric: manifest.Proof_1024Metric,
		Proof_65536Metric: manifest.Proof_65536Metric,
		Cores: manifest.Cores,
		Memory: manifest.Memory,
		Storage: manifest.Storage,
		Capabilities: []p2p.Capability{},
		MasterHeadFrame: manifest.MasterHeadFrame,
		Bandwidth: bandwidth,
	}

	for _, capability := range manifest.Capabilities {
		metadata := make([]byte, len(capability.AdditionalMetadata))
		copy(metadata[:], capability.AdditionalMetadata[:])
		peerManifest.Capabilities = append(
			peerManifest.Capabilities,
			p2p.Capability{
				ProtocolIdentifier: capability.ProtocolIdentifier,
				AdditionalMetadata: metadata,
			},
		)
	}

	e.peerInfoManager.AddPeerInfo(manifest)
}
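
// GetPeerManifests returns a snapshot of all currently known peer manifests.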
func (
	e *MasterClockConsensusEngine,
) GetPeerManifests() *protobufs.PeerManifestsResponse {
	response := &protobufs.PeerManifestsResponse{
		PeerManifests: []*protobufs.PeerManifest{},
	}
	peerMap := e.peerInfoManager.GetPeerMap()
	for peerId, peerManifest := range peerMap {
		peerId := peerId
		peerManifest := peerManifest
		manifest := &protobufs.PeerManifest{
			PeerId: []byte(peerId),
			Difficulty: peerManifest.Difficulty,
			DifficultyMetric: peerManifest.DifficultyMetric,
			Commit_16Metric: peerManifest.Commit_16Metric,
			Commit_128Metric: peerManifest.Commit_128Metric,
			Commit_1024Metric: peerManifest.Commit_1024Metric,
			Commit_65536Metric: peerManifest.Commit_65536Metric,
			Proof_16Metric: peerManifest.Proof_16Metric,
			Proof_128Metric: peerManifest.Proof_128Metric,
			Proof_1024Metric: peerManifest.Proof_1024Metric,
			Proof_65536Metric: peerManifest.Proof_65536Metric,
			Cores: peerManifest.Cores,
			Memory: new(big.Int).SetBytes(peerManifest.Memory).Bytes(),
			Storage: new(big.Int).SetBytes(peerManifest.Storage).Bytes(),
			MasterHeadFrame: peerManifest.MasterHeadFrame,
			LastSeen: peerManifest.LastSeen,
		}

		for _, capability := range peerManifest.Capabilities {
			metadata := make([]byte, len(capability.AdditionalMetadata))
			copy(metadata[:], capability.AdditionalMetadata[:])
			manifest.Capabilities = append(
				manifest.Capabilities,
				&protobufs.Capability{
					ProtocolIdentifier: capability.ProtocolIdentifier,
					AdditionalMetadata: metadata,
				},
			)
		}

		response.PeerManifests = append(
			response.PeerManifests,
			manifest,
		)
	}

	return response
}

func (e *MasterClockConsensusEngine) GetDifficulty() uint32 {
	return e.difficulty
}

func (e *MasterClockConsensusEngine) GetFrame() *protobufs.ClockFrame {
	frame, err := e.masterTimeReel.Head()
	if err != nil {
		panic(err)
	}

	return frame
}

func (e *MasterClockConsensusEngine) GetState() consensus.EngineState {
	return e.state
}

func (
	e *MasterClockConsensusEngine,
) GetFrameChannel() <-chan *protobufs.ClockFrame {
	return e.frameChan
}
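
// buildHistoricFrameCache loads up to the 256 most recent master clock frames
// from the clock store into the in-memory cache.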
func (e *MasterClockConsensusEngine) buildHistoricFrameCache(
	latestFrame *protobufs.ClockFrame,
) {
	e.historicFrames = []*protobufs.ClockFrame{}

	if latestFrame.FrameNumber != 0 {
		min := uint64(0)
		if latestFrame.FrameNumber-255 > min && latestFrame.FrameNumber > 255 {
			min = latestFrame.FrameNumber - 255
		}

		iter, err := e.clockStore.RangeMasterClockFrames(
			e.filter,
			min,
			latestFrame.FrameNumber-1,
		)
		if err != nil {
			panic(err)
		}

		for iter.First(); iter.Valid(); iter.Next() {
			frame, err := iter.Value()
			if err != nil {
				panic(err)
			}

			e.historicFrames = append(e.historicFrames, frame)
		}

		if err = iter.Close(); err != nil {
			panic(err)
		}
	}

	e.historicFrames = append(e.historicFrames, latestFrame)
}
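
// addPeerManifestReport converts a self-test report into a peer manifest and
// registers it with the peer info manager.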
func (e *MasterClockConsensusEngine) addPeerManifestReport(
	peerId []byte,
	report *protobufs.SelfTestReport,
) {
	manifest := &p2p.PeerManifest{
		PeerId: peerId,
		Difficulty: report.Difficulty,
		DifficultyMetric: report.DifficultyMetric,
		Commit_16Metric: report.Commit_16Metric,
		Commit_128Metric: report.Commit_128Metric,
		Commit_1024Metric: report.Commit_1024Metric,
		Commit_65536Metric: report.Commit_65536Metric,
		Proof_16Metric: report.Proof_16Metric,
		Proof_128Metric: report.Proof_128Metric,
		Proof_1024Metric: report.Proof_1024Metric,
		Proof_65536Metric: report.Proof_65536Metric,
		Cores: report.Cores,
		Memory: report.Memory,
		Storage: report.Storage,
		Capabilities: []p2p.Capability{},
		MasterHeadFrame: report.MasterHeadFrame,
		LastSeen: time.Now().UnixMilli(),
	}

	for _, capability := range manifest.Capabilities {
		metadata := make([]byte, len(capability.AdditionalMetadata))
		copy(metadata[:], capability.AdditionalMetadata[:])
		manifest.Capabilities = append(
			manifest.Capabilities,
			p2p.Capability{
				ProtocolIdentifier: capability.ProtocolIdentifier,
				AdditionalMetadata: metadata,
			},
		)
	}

	e.peerInfoManager.AddPeerInfo(manifest)
}
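
// getProvingKey loads the configured proving key, creating an Ed448 signing
// key if none exists, and returns the signer, key type, public key, and the
// Poseidon-derived proving key address.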
func (e *MasterClockConsensusEngine) getProvingKey(
	engineConfig *config.EngineConfig,
) (gcrypto.Signer, keys.KeyType, []byte, []byte) {
	provingKey, err := e.keyManager.GetSigningKey(engineConfig.ProvingKeyId)
	if errors.Is(err, keys.KeyNotFoundErr) {
		e.logger.Info("could not get proving key, generating")
		provingKey, err = e.keyManager.CreateSigningKey(
			engineConfig.ProvingKeyId,
			keys.KeyTypeEd448,
		)
	}

	if err != nil {
		e.logger.Error("could not get proving key", zap.Error(err))
		panic(err)
	}

	rawKey, err := e.keyManager.GetRawKey(engineConfig.ProvingKeyId)
	if err != nil {
		e.logger.Error("could not get proving key type", zap.Error(err))
		panic(err)
	}

	provingKeyType := rawKey.Type

	h, err := poseidon.HashBytes(rawKey.PublicKey)
	if err != nil {
		e.logger.Error("could not hash proving key", zap.Error(err))
		panic(err)
	}

	provingKeyAddress := h.Bytes()
	provingKeyAddress = append(
		make([]byte, 32-len(provingKeyAddress)),
		provingKeyAddress...,
	)

	return provingKey, provingKeyType, rawKey.PublicKey, provingKeyAddress
}
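
// createCommunicationKeys ensures the X448 agreement keys used for direct
// channels ("q-ratchet-idk" and "q-ratchet-spk") exist, creating them if
// missing.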
func (e *MasterClockConsensusEngine) createCommunicationKeys() error {
	_, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
	if err != nil {
		if errors.Is(err, keys.KeyNotFoundErr) {
			_, err = e.keyManager.CreateAgreementKey(
				"q-ratchet-idk",
				keys.KeyTypeX448,
			)
			if err != nil {
				return errors.Wrap(err, "create communication keys")
			}
		} else {
			return errors.Wrap(err, "create communication keys")
		}
	}

	_, err = e.keyManager.GetAgreementKey("q-ratchet-spk")
	if err != nil {
		if errors.Is(err, keys.KeyNotFoundErr) {
			_, err = e.keyManager.CreateAgreementKey(
				"q-ratchet-spk",
				keys.KeyTypeX448,
			)
			if err != nil {
				return errors.Wrap(err, "create communication keys")
			}
		} else {
			return errors.Wrap(err, "create communication keys")
		}
	}

	return nil
}