From 7c9a820fc49bff381453b96834d71eccb6330a54 Mon Sep 17 00:00:00 2001 From: Cassandra Heart Date: Wed, 16 Oct 2024 23:51:02 -0500 Subject: [PATCH] v2.0.0-p5 --- node/app/node.go | 6 +- node/app/wire_gen.go | 12 +- node/config/config.go | 16 +- node/config/version.go | 2 +- node/consensus/data/consensus_frames.go | 16 - .../data/data_clock_consensus_engine.go | 247 ++++---- node/consensus/data/message_handler.go | 65 ++- node/consensus/master/broadcast_messaging.go | 14 +- node/consensus/master/consensus_frames.go | 2 +- .../master/master_clock_consensus_engine.go | 29 +- node/consensus/time/data_time_reel.go | 53 +- node/consensus/time/master_time_reel.go | 34 +- node/crypto/wesolowski_frame_prover.go | 7 +- .../token/application/token_application.go | 547 +++++++++++------- .../token/token_execution_engine.go | 293 +++++----- node/main.go | 19 +- node/p2p/bloom_utils.go | 11 +- node/p2p/bloom_utils_test.go | 24 + node/p2p/blossomsub.go | 4 + node/rpc/data_worker_ipc_server.go | 44 +- node/store/clock.go | 26 +- 21 files changed, 866 insertions(+), 605 deletions(-) diff --git a/node/app/node.go b/node/app/node.go index 7d2f039..ea0f13a 100644 --- a/node/app/node.go +++ b/node/app/node.go @@ -11,6 +11,7 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/consensus/master" "source.quilibrium.com/quilibrium/monorepo/node/crypto" "source.quilibrium.com/quilibrium/monorepo/node/execution" + "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/node/keys" "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/node/store" @@ -48,7 +49,7 @@ func newNode( coinStore store.CoinStore, keyManager keys.KeyManager, pubSub p2p.PubSub, - // execution engines wire in here + tokenExecutionEngine *token.TokenExecutionEngine, engine consensus.ConsensusEngine, ) (*Node, error) { if engine == nil { @@ -56,6 +57,9 @@ func newNode( } execEngines := make(map[string]execution.ExecutionEngine) + if tokenExecutionEngine != nil { + execEngines[tokenExecutionEngine.GetName()] = tokenExecutionEngine + } return &Node{ logger, diff --git a/node/app/wire_gen.go b/node/app/wire_gen.go index c2386ac..e64fcce 100644 --- a/node/app/wire_gen.go +++ b/node/app/wire_gen.go @@ -46,12 +46,14 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes p2PConfig := configConfig.P2P blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger) engineConfig := configConfig.Engine - kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger) + kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver) inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger) + pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger) + tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, engineConfig, fileKeyManager, blossomSub, wesolowskiFrameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport) masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport) - node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, 
fileKeyManager, blossomSub, masterClockConsensusEngine) + node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine) if err != nil { return nil, err } @@ -70,12 +72,14 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo p2PConfig := configConfig.P2P blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger) engineConfig := configConfig.Engine - kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger) + kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger) masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver) inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger) + pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger) + tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, engineConfig, fileKeyManager, blossomSub, wesolowskiFrameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport) masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport) - node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, masterClockConsensusEngine) + node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine) if err != nil { return nil, err } diff --git a/node/config/config.go b/node/config/config.go index 5011e08..99d8d92 100644 --- a/node/config/config.go +++ b/node/config/config.go @@ -132,12 +132,12 @@ var Signatories = []string{ var unlock *SignedGenesisUnlock -func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) { +func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) { if unlock != nil { return unlock, nil } - resp, err := http.Get("https://releases.quilibrium.com/stasis") + resp, err := http.Get("https://releases.quilibrium.com/genesisunlock") if err != nil || resp.StatusCode != 200 { fmt.Println("Stasis lock not yet released.") return nil, errors.New("stasis lock not yet released") @@ -185,14 +185,14 @@ func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) { opensslMsg := "SHA3-256(genesis)= " + hex.EncodeToString(digest[:]) if !ed448.Verify(pubkey, append([]byte(opensslMsg), 0x0a), sig, "") { fmt.Printf("Failed signature check for signatory #%d\n", i) - return nil, err + return nil, errors.New("failed signature check") } count++ } - if count < len(Signatories)/2 { + if count < len(Signatories)/2+len(Signatories)%2 { fmt.Printf("Quorum on signatures not met") - return nil, err + return nil, errors.New("quorum on signatures not met") } fmt.Println("Stasis lock released. 
Welcome to 2.0.") @@ -200,6 +200,10 @@ func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) { return unlock, err } +func GetGenesis() *SignedGenesisUnlock { + return unlock +} + var StasisSeed = "737461736973" func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) ( @@ -235,7 +239,7 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) ( genesisSeed := StasisSeed if !skipGenesisCheck { - output, err := DownloadAndVerifyGenesis() + output, err := DownloadAndVerifyGenesis(0) if err == nil { genesisSeed = output.GenesisSeedHex } diff --git a/node/config/version.go b/node/config/version.go index 160b363..892ac27 100644 --- a/node/config/version.go +++ b/node/config/version.go @@ -36,5 +36,5 @@ func FormatVersion(version []byte) string { } func GetPatchNumber() byte { - return 0x04 + return 0x05 } diff --git a/node/consensus/data/consensus_frames.go b/node/consensus/data/consensus_frames.go index ea67b40..4af1eb4 100644 --- a/node/consensus/data/consensus_frames.go +++ b/node/consensus/data/consensus_frames.go @@ -283,22 +283,6 @@ func (e *DataClockConsensusEngine) collect( e.logger.Info("no peers available for sync, waiting") time.Sleep(5 * time.Second) } else if maxFrame > latest.FrameNumber { - masterHead, err := e.masterTimeReel.Head() - if err != nil { - panic(err) - } - - if masterHead.FrameNumber < maxFrame { - e.logger.Info( - "master frame synchronization needed to continue, waiting", - zap.Uint64("master_frame_head", masterHead.FrameNumber), - zap.Uint64("max_data_frame_target", maxFrame), - ) - - time.Sleep(30 * time.Second) - continue - } - latest, err = e.sync(latest, maxFrame, peerId) if err == nil { break diff --git a/node/consensus/data/data_clock_consensus_engine.go b/node/consensus/data/data_clock_consensus_engine.go index ce2ec81..466744d 100644 --- a/node/consensus/data/data_clock_consensus_engine.go +++ b/node/consensus/data/data_clock_consensus_engine.go @@ -34,6 +34,8 @@ import ( const PEER_INFO_TTL = 60 * 60 * 1000 const UNCOOPERATIVE_PEER_INFO_TTL = 5 * 60 * 1000 +var ErrNoApplicableChallenge = errors.New("no applicable challenge") + type SyncStatusType int const ( @@ -214,7 +216,7 @@ func NewDataClockConsensusEngine( difficulty := engineConfig.Difficulty if difficulty == 0 { - difficulty = 100000 + difficulty = 200000 } e := &DataClockConsensusEngine{ @@ -250,6 +252,7 @@ func NewDataClockConsensusEngine( peerInfoManager: peerInfoManager, peerSeniority: newFromMap(peerSeniority), messageProcessorCh: make(chan *pb.Message), + engineConfig: engineConfig, } logger.Info("constructing consensus engine") @@ -456,8 +459,35 @@ func (e *DataClockConsensusEngine) PerformTimeProof( wg := sync.WaitGroup{} wg.Add(len(clients)) for i, client := range clients { + client := client go func() { for j := 3; j >= 0; j-- { + var err error + if client == nil { + if len(e.engineConfig.DataWorkerMultiaddrs) != 0 { + e.logger.Error( + "client failed, reconnecting after 50ms", + zap.Uint32("client", uint32(i)), + ) + time.Sleep(50 * time.Millisecond) + client, err = e.createParallelDataClientsFromListAndIndex(uint32(i)) + if err != nil { + e.logger.Error("failed to reconnect", zap.Error(err)) + } + } else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 { + e.logger.Error( + "client failed, reconnecting after 50ms", + ) + time.Sleep(50 * time.Millisecond) + client, err = + e.createParallelDataClientsFromBaseMultiaddrAndIndex(uint32(i)) + if err != nil { + e.logger.Error("failed to reconnect", zap.Error(err)) + } + } + clients[i] = client + 
continue + } resp, err := client.CalculateChallengeProof( context.Background(), @@ -467,8 +497,11 @@ func (e *DataClockConsensusEngine) PerformTimeProof( }, ) if err != nil { + if errors.Is(err, ErrNoApplicableChallenge) { + break + } if j == 0 { - e.logger.Error("unable to get a response in time from worker") + e.logger.Error("unable to get a response in time from worker", zap.Error(err)) } if len(e.engineConfig.DataWorkerMultiaddrs) != 0 { e.logger.Error( @@ -532,7 +565,7 @@ func ( len(e.frameProverTries), ) - for _, trie := range e.frameProverTries { + for i, trie := range e.frameProverTries { newTrie := &tries.RollingFrecencyCritbitTrie{} b, err := trie.Serialize() if err != nil { @@ -543,7 +576,7 @@ func ( if err != nil { panic(err) } - frameProverTries = append(frameProverTries, newTrie) + frameProverTries[i] = newTrie } e.frameProverTriesMx.RUnlock() @@ -553,11 +586,6 @@ func ( func (e *DataClockConsensusEngine) runLoop() { dataFrameCh := e.dataTimeReel.NewFrameCh() - e.logger.Info("waiting for peer list mappings") - // We need to re-tune this so that libp2p's peerstore activation threshold - // considers DHT peers to be correct: - time.Sleep(30 * time.Second) - for e.state < consensus.EngineStateStopping { peerCount := e.pubSub.GetNetworkPeersCount() if peerCount < e.minimumPeersRequired { @@ -597,93 +625,92 @@ func (e *DataClockConsensusEngine) runLoop() { }() } - for _, trie := range e.GetFrameProverTries() { - if bytes.Equal( - trie.FindNearest(e.provingKeyAddress).External.Key, - e.provingKeyAddress, - ) { - var nextFrame *protobufs.ClockFrame - if nextFrame, err = e.prove(latestFrame); err != nil { - e.logger.Error("could not prove", zap.Error(err)) - e.state = consensus.EngineStateCollecting - continue - } - - e.proverTrieRequestsMx.Lock() - joinAddrs := tries.NewMinHeap[peerSeniorityItem]() - leaveAddrs := tries.NewMinHeap[peerSeniorityItem]() - for _, addr := range e.proverTrieJoinRequests { - if _, ok := (*e.peerSeniority)[addr]; !ok { - joinAddrs.Push(peerSeniorityItem{ - addr: addr, - seniority: 0, - }) - } else { - joinAddrs.Push((*e.peerSeniority)[addr]) - } - } - for _, addr := range e.proverTrieLeaveRequests { - if _, ok := (*e.peerSeniority)[addr]; !ok { - leaveAddrs.Push(peerSeniorityItem{ - addr: addr, - seniority: 0, - }) - } else { - leaveAddrs.Push((*e.peerSeniority)[addr]) - } - } - for _, addr := range e.proverTrieResumeRequests { - if _, ok := e.proverTriePauseRequests[addr]; ok { - delete(e.proverTriePauseRequests, addr) - } - } - - joinReqs := make([]peerSeniorityItem, len(joinAddrs.All())) - copy(joinReqs, joinAddrs.All()) - slices.Reverse(joinReqs) - leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All())) - copy(leaveReqs, leaveAddrs.All()) - slices.Reverse(leaveReqs) - - e.proverTrieJoinRequests = make(map[string]string) - e.proverTrieLeaveRequests = make(map[string]string) - e.proverTrieRequestsMx.Unlock() - - e.frameProverTriesMx.Lock() - for _, addr := range joinReqs { - rings := len(e.frameProverTries) - last := e.frameProverTries[rings-1] - set := last.FindNearestAndApproximateNeighbors(make([]byte, 32)) - if len(set) == 8 { - e.frameProverTries = append( - e.frameProverTries, - &tries.RollingFrecencyCritbitTrie{}, - ) - last = e.frameProverTries[rings] - } - last.Add([]byte(addr.addr), nextFrame.FrameNumber) - } - for _, addr := range leaveReqs { - for _, t := range e.frameProverTries { - if bytes.Equal( - t.FindNearest([]byte(addr.addr)).External.Key, - []byte(addr.addr), - ) { - t.Remove([]byte(addr.addr)) - break - } - } - } - 
e.frameProverTriesMx.Unlock() - - e.dataTimeReel.Insert(nextFrame, false) - - if err = e.publishProof(nextFrame); err != nil { - e.logger.Error("could not publish", zap.Error(err)) - e.state = consensus.EngineStateCollecting - } - break + trie := e.GetFrameProverTries()[0] + if bytes.Equal( + trie.FindNearest(e.provingKeyAddress).External.Key, + e.provingKeyAddress, + ) { + var nextFrame *protobufs.ClockFrame + if nextFrame, err = e.prove(latestFrame); err != nil { + e.logger.Error("could not prove", zap.Error(err)) + e.state = consensus.EngineStateCollecting + continue } + + e.proverTrieRequestsMx.Lock() + joinAddrs := tries.NewMinHeap[peerSeniorityItem]() + leaveAddrs := tries.NewMinHeap[peerSeniorityItem]() + for _, addr := range e.proverTrieJoinRequests { + if _, ok := (*e.peerSeniority)[addr]; !ok { + joinAddrs.Push(peerSeniorityItem{ + addr: addr, + seniority: 0, + }) + } else { + joinAddrs.Push((*e.peerSeniority)[addr]) + } + } + for _, addr := range e.proverTrieLeaveRequests { + if _, ok := (*e.peerSeniority)[addr]; !ok { + leaveAddrs.Push(peerSeniorityItem{ + addr: addr, + seniority: 0, + }) + } else { + leaveAddrs.Push((*e.peerSeniority)[addr]) + } + } + for _, addr := range e.proverTrieResumeRequests { + if _, ok := e.proverTriePauseRequests[addr]; ok { + delete(e.proverTriePauseRequests, addr) + } + } + + joinReqs := make([]peerSeniorityItem, len(joinAddrs.All())) + copy(joinReqs, joinAddrs.All()) + slices.Reverse(joinReqs) + leaveReqs := make([]peerSeniorityItem, len(leaveAddrs.All())) + copy(leaveReqs, leaveAddrs.All()) + slices.Reverse(leaveReqs) + + e.proverTrieJoinRequests = make(map[string]string) + e.proverTrieLeaveRequests = make(map[string]string) + e.proverTrieRequestsMx.Unlock() + + e.frameProverTriesMx.Lock() + for _, addr := range joinReqs { + rings := len(e.frameProverTries) + last := e.frameProverTries[rings-1] + set := last.FindNearestAndApproximateNeighbors(make([]byte, 32)) + if len(set) == 8 { + e.frameProverTries = append( + e.frameProverTries, + &tries.RollingFrecencyCritbitTrie{}, + ) + last = e.frameProverTries[rings] + } + last.Add([]byte(addr.addr), nextFrame.FrameNumber) + } + for _, addr := range leaveReqs { + for _, t := range e.frameProverTries { + if bytes.Equal( + t.FindNearest([]byte(addr.addr)).External.Key, + []byte(addr.addr), + ) { + t.Remove([]byte(addr.addr)) + break + } + } + } + e.frameProverTriesMx.Unlock() + + e.dataTimeReel.Insert(nextFrame, false) + + if err = e.publishProof(nextFrame); err != nil { + e.logger.Error("could not publish", zap.Error(err)) + e.state = consensus.EngineStateCollecting + } + break } case <-time.After(20 * time.Second): dataFrame, err := e.dataTimeReel.Head() @@ -812,6 +839,25 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error { e.state = consensus.EngineStateStopping errChan := make(chan error) + msg := []byte("pause") + msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber) + msg = append(msg, e.filter...) 
+ sig, err := e.pubSub.SignMessage(msg) + if err != nil { + panic(err) + } + + e.publishMessage(e.filter, &protobufs.AnnounceProverPause{ + Filter: e.filter, + FrameNumber: e.GetFrame().FrameNumber, + PublicKeySignatureEd448: &protobufs.Ed448Signature{ + PublicKey: &protobufs.Ed448PublicKey{ + KeyValue: e.pubSub.GetPublicKey(), + }, + Signature: sig, + }, + }) + wg := sync.WaitGroup{} wg.Add(len(e.executionEngines)) for name := range e.executionEngines { @@ -1057,7 +1103,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() ( _, addr, err := mn.DialArgs(ma) if err != nil { - panic(err) + e.logger.Error("could not get dial args", zap.Error(err)) + continue } conn, err := grpc.Dial( @@ -1071,7 +1118,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() ( ), ) if err != nil { - panic(err) + e.logger.Error("could not dial", zap.Error(err)) + continue } clients[i] = protobufs.NewDataIPCServiceClient(conn) @@ -1115,7 +1163,7 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr( _, addr, err := mn.DialArgs(ma) if err != nil { - e.logger.Warn("could not connect to client", zap.String("addr", addr)) + e.logger.Error("could not get dial args", zap.Error(err)) continue } @@ -1130,7 +1178,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr( ), ) if err != nil { - e.logger.Warn("could not dial to client", zap.String("addr", addr)) + e.logger.Error("could not dial", zap.Error(err)) + continue } clients[i] = protobufs.NewDataIPCServiceClient(conn) diff --git a/node/consensus/data/message_handler.go b/node/consensus/data/message_handler.go index 6c4bebc..9a431a1 100644 --- a/node/consensus/data/message_handler.go +++ b/node/consensus/data/message_handler.go @@ -280,6 +280,9 @@ func (e *DataClockConsensusEngine) handleDataPeerListAnnounce( func (e *DataClockConsensusEngine) getAddressFromSignature( sig *protobufs.Ed448Signature, ) ([]byte, error) { + if sig.PublicKey == nil || sig.PublicKey.KeyValue == nil { + return nil, errors.New("invalid data") + } addrBI, err := poseidon.HashBytes(sig.PublicKey.KeyValue) if err != nil { return nil, errors.Wrap(err, "get address from signature") @@ -298,11 +301,25 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin( return errors.Wrap(err, "handle data announce prover join") } + if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { + return errors.Wrap( + errors.New("invalid data"), + "handle data announce prover join", + ) + } + address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448) if err != nil { return errors.Wrap(err, "handle data announce prover join") } + msg := []byte("join") + msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) + msg = append(msg, announce.Filter...) 
+ if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil { + return errors.Wrap(err, "handle data announce prover join") + } + e.proverTrieRequestsMx.Lock() if len(announce.Filter) != len(e.filter) { return errors.Wrap( @@ -325,7 +342,16 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave( if err := any.UnmarshalTo(announce); err != nil { return errors.Wrap(err, "handle data announce prover leave") } + + if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { + return errors.Wrap( + errors.New("invalid data"), + "handle data announce prover leave", + ) + } + e.proverTrieRequestsMx.Lock() + if len(announce.Filter) != len(e.filter) { return errors.Wrap( errors.New("filter width mismatch"), @@ -333,6 +359,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave( ) } + msg := []byte("leave") + msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) + msg = append(msg, announce.Filter...) + if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil { + return errors.Wrap(err, "handle data announce prover leave") + } + address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448) if err != nil { return errors.Wrap(err, "handle data announce prover leave") @@ -353,6 +386,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverPause( return errors.Wrap(err, "handle data announce prover pause") } + if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { + return errors.Wrap( + errors.New("invalid data"), + "handle data announce prover leave", + ) + } + e.proverTrieRequestsMx.Lock() if len(announce.Filter) != len(e.filter) { return errors.Wrap( @@ -361,12 +401,19 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverPause( ) } + msg := []byte("pause") + msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) + msg = append(msg, announce.Filter...) + if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil { + return errors.Wrap(err, "handle data announce prover pause") + } + address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448) if err != nil { return errors.Wrap(err, "handle data announce prover pause") } - e.proverTrieLeaveRequests[string(address)] = string(announce.Filter) + e.proverTriePauseRequests[string(address)] = string(announce.Filter) e.proverTrieRequestsMx.Unlock() return nil } @@ -381,6 +428,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverResume( return errors.Wrap(err, "handle data announce prover resume") } + if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil { + return errors.Wrap( + errors.New("invalid data"), + "handle data announce prover resume", + ) + } + e.proverTrieRequestsMx.Lock() if len(announce.Filter) != len(e.filter) { return errors.Wrap( @@ -394,7 +448,14 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverResume( return errors.Wrap(err, "handle data announce prover resume") } - e.proverTrieLeaveRequests[string(address)] = string(announce.Filter) + msg := []byte("resume") + msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber) + msg = append(msg, announce.Filter...) 
+ if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil { + return errors.Wrap(err, "handle data announce prover resume") + } + + e.proverTrieResumeRequests[string(address)] = string(announce.Filter) e.proverTrieRequestsMx.Unlock() return nil } diff --git a/node/consensus/master/broadcast_messaging.go b/node/consensus/master/broadcast_messaging.go index c3669a4..2dd30d4 100644 --- a/node/consensus/master/broadcast_messaging.go +++ b/node/consensus/master/broadcast_messaging.go @@ -159,14 +159,14 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport( func (e *MasterClockConsensusEngine) publishProof( frame *protobufs.ClockFrame, ) error { - e.logger.Debug( - "publishing frame", - zap.Uint64("frame_number", frame.FrameNumber), - ) - - e.masterTimeReel.Insert(frame, false) - if bytes.Equal(e.pubSub.GetPeerID(), []byte(e.beacon)) { + e.logger.Debug( + "publishing frame", + zap.Uint64("frame_number", frame.FrameNumber), + ) + + e.masterTimeReel.Insert(frame, false) + err := e.publishMessage(e.filter, frame) if err != nil { return errors.Wrap(err, "publish proof") diff --git a/node/consensus/master/consensus_frames.go b/node/consensus/master/consensus_frames.go index 3214d1a..57680e0 100644 --- a/node/consensus/master/consensus_frames.go +++ b/node/consensus/master/consensus_frames.go @@ -13,8 +13,8 @@ import ( func (e *MasterClockConsensusEngine) prove( previousFrame *protobufs.ClockFrame, ) (*protobufs.ClockFrame, error) { - e.logger.Debug("proving new frame") if bytes.Equal(e.pubSub.GetPeerID(), []byte(e.beacon)) { + e.logger.Debug("proving new frame") e.collectedProverSlotsMx.Lock() collectedProverSlots := e.collectedProverSlots e.collectedProverSlots = []*protobufs.InclusionAggregateProof{} diff --git a/node/consensus/master/master_clock_consensus_engine.go b/node/consensus/master/master_clock_consensus_engine.go index eb837fe..4eea18d 100644 --- a/node/consensus/master/master_clock_consensus_engine.go +++ b/node/consensus/master/master_clock_consensus_engine.go @@ -8,6 +8,7 @@ import ( "time" "github.com/iden3/go-iden3-crypto/poseidon" + pcrypto "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "go.uber.org/zap" @@ -69,7 +70,7 @@ type MasterClockConsensusEngine struct { var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil) -var MASTER_CLOCK_RATE = uint32(10000000) +var MASTER_CLOCK_RATE = uint32(1000000) func NewMasterClockConsensusEngine( engineConfig *config.EngineConfig, @@ -167,26 +168,19 @@ func (e *MasterClockConsensusEngine) Start() <-chan error { e.state = consensus.EngineStateLoading e.logger.Info("syncing last seen state") - var genesis *config.SignedGenesisUnlock - var err error - - for { - genesis, err := config.DownloadAndVerifyGenesis() - if err != nil { - time.Sleep(10 * time.Minute) - continue - } - - e.engineConfig.GenesisSeed = genesis.GenesisSeedHex - break - } - - err = e.masterTimeReel.Start() + err := e.masterTimeReel.Start() if err != nil { panic(err) } - e.beacon, err = peer.IDFromBytes(genesis.Beacon) + beaconPubKey, err := pcrypto.UnmarshalEd448PublicKey( + config.GetGenesis().Beacon, + ) + if err != nil { + panic(err) + } + + e.beacon, err = peer.IDFromPublicKey(beaconPubKey) if err != nil { panic(err) } @@ -196,6 +190,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error { panic(err) } + e.logger.Info("building historic frame cache") e.buildHistoricFrameCache(frame) go func() { diff --git a/node/consensus/time/data_time_reel.go 
b/node/consensus/time/data_time_reel.go index e97fbb4..d1af85d 100644 --- a/node/consensus/time/data_time_reel.go +++ b/node/consensus/time/data_time_reel.go @@ -17,13 +17,6 @@ import ( "source.quilibrium.com/quilibrium/monorepo/node/tries" ) -var allBitmaskFilter = []byte{ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -} - var unknownDistance = new(big.Int).SetBytes([]byte{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -41,12 +34,11 @@ type DataTimeReel struct { rwMutex sync.RWMutex running bool - filter []byte - engineConfig *config.EngineConfig - logger *zap.Logger - clockStore store.ClockStore - frameProver crypto.FrameProver - parentTimeReel TimeReel + filter []byte + engineConfig *config.EngineConfig + logger *zap.Logger + clockStore store.ClockStore + frameProver crypto.FrameProver origin []byte initialInclusionProof *crypto.InclusionAggregateProof @@ -227,7 +219,7 @@ func (d *DataTimeReel) createGenesisFrame() ( difficulty := d.engineConfig.Difficulty if difficulty == 0 || difficulty == 10000 { - difficulty = 100000 + difficulty = 200000 } frame, tries, err := d.frameProver.CreateDataGenesisFrame( @@ -293,22 +285,6 @@ func (d *DataTimeReel) runLoop() { zap.Uint64("frame_number", frame.frameNumber), ) - // tag: equinox – master filter changes - _, err := d.clockStore.GetMasterClockFrame( - allBitmaskFilter, - frame.frameNumber) - if err != nil { - d.logger.Debug("no master, add pending") - - // If the frame arrived ahead of a master, e.g. the master data is not - // synced, we'll go ahead and mark it as pending and process it when - // we can, but if we had a general fault, panic: - if !errors.Is(err, store.ErrNotFound) { - panic(err) - } - continue - } - rawFrame, err := d.clockStore.GetStagedDataClockFrame( d.filter, frame.frameNumber, @@ -617,21 +593,26 @@ func (d *DataTimeReel) GetDistance(frame *protobufs.ClockFrame) ( *big.Int, error, ) { - // tag: equinox – master filter changes - master, err := d.clockStore.GetMasterClockFrame( - allBitmaskFilter, - frame.FrameNumber) + if frame.FrameNumber == 0 { + return big.NewInt(0), nil + } + + prev, _, err := d.clockStore.GetDataClockFrame( + d.filter, + frame.FrameNumber-1, + false, + ) if err != nil { return unknownDistance, errors.Wrap(err, "get distance") } - masterSelector, err := master.GetSelector() + prevSelector, err := prev.GetSelector() if err != nil { return unknownDistance, errors.Wrap(err, "get distance") } discriminatorNode := - d.proverTries[0].FindNearest(masterSelector.FillBytes(make([]byte, 32))) + d.proverTries[0].FindNearest(prevSelector.FillBytes(make([]byte, 32))) discriminator := discriminatorNode.External.Key addr, err := frame.GetAddress() if err != nil { diff --git a/node/consensus/time/master_time_reel.go b/node/consensus/time/master_time_reel.go index 9b0e125..f916d8c 100644 --- a/node/consensus/time/master_time_reel.go +++ b/node/consensus/time/master_time_reel.go @@ -5,11 +5,12 @@ import ( "errors" "math/big" "sync" - "time" "go.uber.org/zap" "source.quilibrium.com/quilibrium/monorepo/node/config" "source.quilibrium.com/quilibrium/monorepo/node/crypto" + "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application" + "source.quilibrium.com/quilibrium/monorepo/node/p2p" "source.quilibrium.com/quilibrium/monorepo/node/protobufs" 
"source.quilibrium.com/quilibrium/monorepo/node/store" ) @@ -53,7 +54,9 @@ func NewMasterTimeReel( panic("frame prover is nil") } - filter, err := hex.DecodeString(engineConfig.Filter) + filter, err := hex.DecodeString( + "0000000000000000000000000000000000000000000000000000000000000000", + ) if err != nil { panic(err) } @@ -74,32 +77,26 @@ func NewMasterTimeReel( // Start implements TimeReel. func (m *MasterTimeReel) Start() error { + m.logger.Debug("starting master time reel") frame, err := m.clockStore.GetLatestMasterClockFrame(m.filter) if err != nil && !errors.Is(err, store.ErrNotFound) { panic(err) } + m.logger.Debug("fetching genesis frame") genesis, err := m.clockStore.GetMasterClockFrame(m.filter, 0) if err != nil && !errors.Is(err, store.ErrNotFound) { panic(err) } - for { - genesis, err := config.DownloadAndVerifyGenesis() - if err != nil { - time.Sleep(10 * time.Minute) - continue - } - - m.engineConfig.GenesisSeed = genesis.GenesisSeedHex - break - } - rebuildGenesisFrame := false - if genesis != nil && len(m.engineConfig.GenesisSeed) != 74 { - m.logger.Warn("rebuilding genesis frame") + if genesis != nil && genesis.Difficulty != 1000000 { + m.logger.Info("rewinding time reel to genesis") err = m.clockStore.ResetMasterClockFrames(m.filter) + err = m.clockStore.ResetDataClockFrames( + p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3), + ) if err != nil { panic(err) } @@ -107,7 +104,8 @@ func (m *MasterTimeReel) Start() error { rebuildGenesisFrame = true } - if frame == nil || rebuildGenesisFrame { + if genesis == nil || rebuildGenesisFrame { + m.logger.Info("creating genesis frame") m.head = m.createGenesisFrame() } else { m.head = frame @@ -158,8 +156,8 @@ func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame { } difficulty := m.engineConfig.Difficulty - if difficulty != 10000000 { - difficulty = 10000000 + if difficulty != 1000000 { + difficulty = 1000000 } frame, err := m.frameProver.CreateMasterGenesisFrame( diff --git a/node/crypto/wesolowski_frame_prover.go b/node/crypto/wesolowski_frame_prover.go index 3bec58d..bc2e539 100644 --- a/node/crypto/wesolowski_frame_prover.go +++ b/node/crypto/wesolowski_frame_prover.go @@ -308,11 +308,16 @@ func (w *WesolowskiFrameProver) CreateDataGenesisFrame( addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...) 
frameProverTrie.Add(addrBytes, 0) - if i%8 == 0 && i != 0 { + if i%8 == 0 { frameProverTries = append(frameProverTries, frameProverTrie) frameProverTrie = &tries.RollingFrecencyCritbitTrie{} } } + if len(frameProverTrie.FindNearestAndApproximateNeighbors( + make([]byte, 32), + )) != 0 { + frameProverTries = append(frameProverTries, frameProverTrie) + } w.logger.Info("proving genesis frame") input := []byte{} diff --git a/node/execution/intrinsics/token/application/token_application.go b/node/execution/intrinsics/token/application/token_application.go index 3ac4d44..a710611 100644 --- a/node/execution/intrinsics/token/application/token_application.go +++ b/node/execution/intrinsics/token/application/token_application.go @@ -67,8 +67,10 @@ func GetOutputsFromClockFrame( } transition := &protobufs.TokenRequests{} - if err := proto.Unmarshal(associatedProof, transition); err != nil { - return nil, nil, errors.Wrap(err, "get outputs from clock frame") + if frame.FrameNumber != 0 { + if err := proto.Unmarshal(associatedProof, transition); err != nil { + return nil, nil, errors.Wrap(err, "get outputs from clock frame") + } } return transition, tokenOutputs, nil @@ -114,7 +116,35 @@ func (a *TokenApplication) ApplyTransitions( case *protobufs.TokenRequest_Announce: var primary *protobufs.Ed448Signature payload := []byte{} + + if t.Announce == nil || t.Announce.PublicKeySignaturesEd448 == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } for i, p := range t.Announce.PublicKeySignaturesEd448 { + if p.PublicKey == nil || p.Signature == nil || + p.PublicKey.KeyValue == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } if i == 0 { primary = p } else { @@ -161,80 +191,13 @@ func (a *TokenApplication) ApplyTransitions( break req } - payload = []byte("mint") - for _, p := range t.Announce.InitialProof.Proofs { - payload = append(payload, p...) - } - if err := t.Announce.InitialProof.Signature.Verify(payload); err != nil { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) + if t.Announce.InitialProof != nil && + t.Announce.InitialProof.Proofs != nil { + payload = []byte("mint") + for _, p := range t.Announce.InitialProof.Proofs { + payload = append(payload, p...) 
} - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - pk, err := pcrypto.UnmarshalEd448PublicKey( - t.Announce.InitialProof.Signature.PublicKey.KeyValue, - ) - if err != nil { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - peerId, err := peer.IDFromPublicKey(pk) - if err != nil { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - addr, err := poseidon.HashBytes( - t.Announce.InitialProof.Signature.PublicKey.KeyValue, - ) - if err != nil { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - if len(t.Announce.InitialProof.Proofs) == 3 && - bytes.Equal( - t.Announce.InitialProof.Proofs[0], - []byte("pre-dusk"), - ) && bytes.Equal(t.Announce.InitialProof.Proofs[1], make([]byte, 32)) && - currentFrameNumber < 604800 { - delete := []*protobufs.TokenOutput{} - if !bytes.Equal(t.Announce.InitialProof.Proofs[1], make([]byte, 32)) { + if err := t.Announce.InitialProof.Signature.Verify(payload); err != nil { if !skipFailures { return nil, nil, nil, errors.Wrap( errors.New("invalid data"), @@ -247,91 +210,8 @@ func (a *TokenApplication) ApplyTransitions( ) break req } - - data := t.Announce.InitialProof.Proofs[2] - if len(data) < 28 { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - increment := binary.BigEndian.Uint32(data[:4]) - parallelism := binary.BigEndian.Uint32(data[8:12]) - inputLen := binary.BigEndian.Uint64(data[12:20]) - - if len(delete) != 0 { - if delete[0].GetDeletedProof().Difficulty-1 != increment { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - } - - if uint64(len(data[20:])) < inputLen+8 { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - input := make([]byte, inputLen) - copy(input[:], data[20:20+inputLen]) - - outputLen := binary.BigEndian.Uint64(data[20+inputLen : 20+inputLen+8]) - - if uint64(len(data[20+inputLen+8:])) < outputLen { - if !skipFailures { - return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), - "apply transitions", - ) - } - failedTransitions.Requests = append( - failedTransitions.Requests, - transition, - ) - break req - } - - output := make([]byte, outputLen) - copy(output[:], data[20+inputLen+8:]) - dataProver := crypto.NewKZGInclusionProver(a.Logger) - wesoProver := crypto.NewWesolowskiFrameProver(a.Logger) - index := binary.BigEndian.Uint32(output[:4]) - indexProof := output[4:520] - kzgCommitment := output[520:594] - kzgProof := output[594:668] - ip := sha3.Sum512(indexProof) - - v, err := dataProver.VerifyRaw( - ip[:], - kzgCommitment, - int(index), - 
kzgProof, - nearestApplicablePowerOfTwo(uint64(parallelism)), + pk, err := pcrypto.UnmarshalEd448PublicKey( + t.Announce.InitialProof.Signature.PublicKey.KeyValue, ) if err != nil { if !skipFailures { @@ -347,7 +227,8 @@ func (a *TokenApplication) ApplyTransitions( break req } - if !v { + peerId, err := peer.IDFromPublicKey(pk) + if err != nil { if !skipFailures { return nil, nil, nil, errors.Wrap( errors.New("invalid data"), @@ -361,16 +242,10 @@ func (a *TokenApplication) ApplyTransitions( break req } - wp := []byte{} - wp = append(wp, peerId...) - wp = append(wp, input...) - v = wesoProver.VerifyPreDuskChallengeProof( - wp, - increment, - index, - indexProof, + addr, err := poseidon.HashBytes( + t.Announce.InitialProof.Signature.PublicKey.KeyValue, ) - if !v { + if err != nil { if !skipFailures { return nil, nil, nil, errors.Wrap( errors.New("invalid data"), @@ -384,63 +259,239 @@ func (a *TokenApplication) ApplyTransitions( break req } - pomwBasis := big.NewInt(1200000) + if len(t.Announce.InitialProof.Proofs) == 3 && + bytes.Equal( + t.Announce.InitialProof.Proofs[0], + []byte("pre-dusk"), + ) && bytes.Equal(t.Announce.InitialProof.Proofs[1], make([]byte, 32)) && + currentFrameNumber < 604800 { + delete := []*protobufs.TokenOutput{} + if !bytes.Equal(t.Announce.InitialProof.Proofs[1], make([]byte, 32)) { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } - reward := new(big.Int).Mul(pomwBasis, big.NewInt(int64(parallelism))) - if len(delete) != 0 { - reward.Add( - reward, - new(big.Int).SetBytes(delete[0].GetDeletedProof().Amount), + data := t.Announce.InitialProof.Proofs[2] + if len(data) < 28 { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + + increment := binary.BigEndian.Uint32(data[:4]) + parallelism := binary.BigEndian.Uint32(data[8:12]) + inputLen := binary.BigEndian.Uint64(data[12:20]) + + if len(delete) != 0 { + if delete[0].GetDeletedProof().Difficulty-1 != increment { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + } + + if uint64(len(data[20:])) < inputLen+8 { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + + input := make([]byte, inputLen) + copy(input[:], data[20:20+inputLen]) + + outputLen := binary.BigEndian.Uint64(data[20+inputLen : 20+inputLen+8]) + + if uint64(len(data[20+inputLen+8:])) < outputLen { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + + output := make([]byte, outputLen) + copy(output[:], data[20+inputLen+8:]) + dataProver := crypto.NewKZGInclusionProver(a.Logger) + wesoProver := crypto.NewWesolowskiFrameProver(a.Logger) + index := binary.BigEndian.Uint32(output[:4]) + indexProof := output[4:520] + kzgCommitment := output[520:594] + kzgProof := output[594:668] + ip := sha3.Sum512(indexProof) + + 
v, err := dataProver.VerifyRaw( + ip[:], + kzgCommitment, + int(index), + kzgProof, + nearestApplicablePowerOfTwo(uint64(parallelism)), ) - } + if err != nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } - if increment != 0 { - add := &protobufs.PreCoinProof{ - Amount: reward.FillBytes(make([]byte, 32)), - Index: index, - IndexProof: indexProof, - Commitment: kzgCommitment, - Proof: append(append([]byte{}, kzgProof...), indexProof...), - Parallelism: parallelism, - Difficulty: increment, - Owner: &protobufs.AccountRef{ - Account: &protobufs.AccountRef_ImplicitAccount{ - ImplicitAccount: &protobufs.ImplicitAccount{ - ImplicitType: 0, - Address: addr.FillBytes(make([]byte, 32)), + if !v { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + + wp := []byte{} + wp = append(wp, peerId...) + wp = append(wp, input...) + v = wesoProver.VerifyPreDuskChallengeProof( + wp, + increment, + index, + indexProof, + ) + if !v { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } + + pomwBasis := big.NewInt(1200000) + + reward := new(big.Int).Mul(pomwBasis, big.NewInt(int64(parallelism))) + if len(delete) != 0 { + reward.Add( + reward, + new(big.Int).SetBytes(delete[0].GetDeletedProof().Amount), + ) + } + + if increment != 0 { + add := &protobufs.PreCoinProof{ + Amount: reward.FillBytes(make([]byte, 32)), + Index: index, + IndexProof: indexProof, + Commitment: kzgCommitment, + Proof: append(append([]byte{}, kzgProof...), indexProof...), + Parallelism: parallelism, + Difficulty: increment, + Owner: &protobufs.AccountRef{ + Account: &protobufs.AccountRef_ImplicitAccount{ + ImplicitAccount: &protobufs.ImplicitAccount{ + ImplicitType: 0, + Address: addr.FillBytes(make([]byte, 32)), + }, }, }, - }, + } + outputs.Outputs = append(outputs.Outputs, &protobufs.TokenOutput{ + Output: &protobufs.TokenOutput_Proof{ + Proof: add, + }, + }) + } else { + add := &protobufs.Coin{ + Amount: reward.FillBytes(make([]byte, 32)), + Intersection: make([]byte, 1024), + Owner: &protobufs.AccountRef{ + Account: &protobufs.AccountRef_ImplicitAccount{ + ImplicitAccount: &protobufs.ImplicitAccount{ + ImplicitType: 0, + Address: addr.FillBytes(make([]byte, 32)), + }, + }, + }, + } + outputs.Outputs = append(outputs.Outputs, &protobufs.TokenOutput{ + Output: &protobufs.TokenOutput_Coin{ + Coin: add, + }, + }) } - outputs.Outputs = append(outputs.Outputs, &protobufs.TokenOutput{ - Output: &protobufs.TokenOutput_Proof{ - Proof: add, - }, - }) + outputs.Outputs = append(outputs.Outputs, delete...) 
} else { - add := &protobufs.Coin{ - Amount: reward.FillBytes(make([]byte, 32)), - Intersection: make([]byte, 1024), - Owner: &protobufs.AccountRef{ - Account: &protobufs.AccountRef_ImplicitAccount{ - ImplicitAccount: &protobufs.ImplicitAccount{ - ImplicitType: 0, - Address: addr.FillBytes(make([]byte, 32)), - }, - }, - }, + if !skipFailures { + return nil, nil, nil, errors.Wrap( + errors.New("invalid data"), + "apply transitions", + ) } - outputs.Outputs = append(outputs.Outputs, &protobufs.TokenOutput{ - Output: &protobufs.TokenOutput_Coin{ - Coin: add, - }, - }) + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req } - outputs.Outputs = append(outputs.Outputs, delete...) - } else { + } + case *protobufs.TokenRequest_Merge: + newCoin := &protobufs.Coin{} + newTotal := new(big.Int) + newIntersection := make([]byte, 1024) + payload := []byte("merge") + if t.Merge == nil || t.Merge.Coins == nil || t.Merge.Signature == nil { if !skipFailures { return nil, nil, nil, errors.Wrap( - errors.New("invalid data"), + ErrInvalidStateTransition, "apply transitions", ) } @@ -450,14 +501,36 @@ func (a *TokenApplication) ApplyTransitions( ) break req } - case *protobufs.TokenRequest_Merge: - newCoin := &protobufs.Coin{} - newTotal := new(big.Int) - newIntersection := make([]byte, 1024) - payload := []byte("merge") for _, c := range t.Merge.Coins { + if c.Address == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } payload = append(payload, c.Address...) } + if t.Merge.Signature.PublicKey == nil || + t.Merge.Signature.Signature == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } if err := t.Merge.Signature.Verify(payload); err != nil { if !skipFailures { return nil, nil, nil, errors.Wrap( @@ -596,6 +669,21 @@ func (a *TokenApplication) ApplyTransitions( newCoins := []*protobufs.Coin{} newAmounts := []*big.Int{} payload := []byte{} + if t.Split.Signature.PublicKey == nil || + t.Split.Signature.Signature == nil || + t.Split.OfCoin == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } coin, err := a.CoinStore.GetCoinByAddress(t.Split.OfCoin.Address) if err != nil && !skipFailures { if !skipFailures { @@ -903,6 +991,21 @@ func (a *TokenApplication) ApplyTransitions( transition, ) case *protobufs.TokenRequest_Mint: + if t.Mint.Signature == nil || t.Mint.Signature.PublicKey == nil || + t.Mint.Signature.Signature == nil || + t.Mint.Proofs == nil { + if !skipFailures { + return nil, nil, nil, errors.Wrap( + ErrInvalidStateTransition, + "apply transitions", + ) + } + failedTransitions.Requests = append( + failedTransitions.Requests, + transition, + ) + break req + } payload := []byte("mint") for _, p := range t.Mint.Proofs { payload = append(payload, p...) 
@@ -971,7 +1074,7 @@ func (a *TokenApplication) ApplyTransitions( bytes.Equal( t.Mint.Proofs[0], []byte("pre-dusk"), - ) && (bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) || + ) && (!bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) || currentFrameNumber < 604800) { delete := []*protobufs.TokenOutput{} if !bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) { diff --git a/node/execution/intrinsics/token/token_execution_engine.go b/node/execution/intrinsics/token/token_execution_engine.go index 4c9000d..6399121 100644 --- a/node/execution/intrinsics/token/token_execution_engine.go +++ b/node/execution/intrinsics/token/token_execution_engine.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" + "fmt" "strconv" "strings" "sync" @@ -79,20 +80,7 @@ func NewTokenExecutionEngine( panic(errors.New("logger is nil")) } - var genesis *config.SignedGenesisUnlock - var err error - for { - genesis, err = config.DownloadAndVerifyGenesis() - if err != nil { - gotime.Sleep(10 * gotime.Minute) - } - - if genesis != nil { - break - } - } - - seed, err := hex.DecodeString(genesis.GenesisSeedHex) + seed, err := hex.DecodeString(engineConfig.GenesisSeed) if err != nil { panic(err) } @@ -186,9 +174,9 @@ func NewTokenExecutionEngine( if genesisCreated { go func() { - if len(e.engineConfig.MultisigProverEnrollmentPaths) != 0 && genesisCreated { - keys := [][]byte{} - ksigs := [][]byte{} + keys := [][]byte{} + ksigs := [][]byte{} + if len(e.engineConfig.MultisigProverEnrollmentPaths) != 0 { for _, conf := range e.engineConfig.MultisigProverEnrollmentPaths { extraConf, err := config.LoadConfig(conf, "", false) if err != nil { @@ -218,92 +206,115 @@ func NewTokenExecutionEngine( } ksigs = append(ksigs, sig) } + } - keyjoin := []byte{} - for _, k := range keys { - keyjoin = append(keyjoin, k...) - } + keyjoin := []byte{} + for _, k := range keys { + keyjoin = append(keyjoin, k...) 
+ } - mainsig, err := e.pubSub.SignMessage(keyjoin) - if err != nil { - panic(err) - } + mainsig, err := e.pubSub.SignMessage(keyjoin) + if err != nil { + panic(err) + } - announce := &protobufs.TokenRequest_Announce{ - Announce: &protobufs.AnnounceProverRequest{ - PublicKeySignaturesEd448: []*protobufs.Ed448Signature{}, + announce := &protobufs.TokenRequest_Announce{ + Announce: &protobufs.AnnounceProverRequest{ + PublicKeySignaturesEd448: []*protobufs.Ed448Signature{}, + }, + } + + announce.Announce.PublicKeySignaturesEd448 = append( + announce.Announce.PublicKeySignaturesEd448, + &protobufs.Ed448Signature{ + PublicKey: &protobufs.Ed448PublicKey{ + KeyValue: e.pubSub.GetPublicKey(), }, - } + Signature: mainsig, + }, + ) + for i := range keys { announce.Announce.PublicKeySignaturesEd448 = append( announce.Announce.PublicKeySignaturesEd448, &protobufs.Ed448Signature{ PublicKey: &protobufs.Ed448PublicKey{ - KeyValue: e.pubSub.GetPublicKey(), + KeyValue: keys[i], }, - Signature: mainsig, + Signature: ksigs[i], }, ) + } - for i := range keys { - announce.Announce.PublicKeySignaturesEd448 = append( - announce.Announce.PublicKeySignaturesEd448, - &protobufs.Ed448Signature{ - PublicKey: &protobufs.Ed448PublicKey{ - KeyValue: keys[i], - }, - Signature: ksigs[i], - }, - ) + inc, _, _, err := dataProofStore.GetLatestDataTimeProof( + e.pubSub.GetPeerID(), + ) + _, parallelism, input, output, err := dataProofStore.GetDataTimeProof( + e.pubSub.GetPeerID(), + inc, + ) + if err == nil { + proof := []byte{} + proof = binary.BigEndian.AppendUint32(proof, inc) + proof = binary.BigEndian.AppendUint32(proof, parallelism) + proof = binary.BigEndian.AppendUint64(proof, uint64(len(input))) + proof = append(proof, input...) + proof = binary.BigEndian.AppendUint64(proof, uint64(len(output))) + proof = append(proof, output...) + announce.Announce.InitialProof.Proofs = [][]byte{ + []byte("pre-dusk"), + make([]byte, 32), + proof, } - - inc, _, _, err := dataProofStore.GetLatestDataTimeProof( - e.pubSub.GetPeerID(), - ) - _, parallelism, input, output, err := dataProofStore.GetDataTimeProof( - e.pubSub.GetPeerID(), - inc, - ) - if err == nil { - proof := []byte{} - proof = binary.BigEndian.AppendUint32(proof, inc) - proof = binary.BigEndian.AppendUint32(proof, parallelism) - proof = binary.BigEndian.AppendUint64(proof, uint64(len(input))) - proof = append(proof, input...) - proof = binary.BigEndian.AppendUint64(proof, uint64(len(output))) - proof = append(proof, output...) - announce.Announce.InitialProof.Proofs = [][]byte{ - []byte("pre-dusk"), - make([]byte, 32), - proof, - } - payload := []byte("mint") - for _, p := range announce.Announce.InitialProof.Proofs { - payload = append(payload, p...) - } - sig, err := e.pubSub.SignMessage(payload) - if err != nil { - panic(err) - } - - announce.Announce.InitialProof.Signature = &protobufs.Ed448Signature{ - PublicKey: &protobufs.Ed448PublicKey{ - KeyValue: e.pubSub.GetPublicKey(), - }, - Signature: sig, - } + payload := []byte("mint") + for _, p := range announce.Announce.InitialProof.Proofs { + payload = append(payload, p...) 
} - - req := &protobufs.TokenRequest{ - Request: announce, - } - - err = e.publishMessage(intrinsicFilter, req) + sig, err := e.pubSub.SignMessage(payload) if err != nil { panic(err) } + + announce.Announce.InitialProof.Signature = &protobufs.Ed448Signature{ + PublicKey: &protobufs.Ed448PublicKey{ + KeyValue: e.pubSub.GetPublicKey(), + }, + Signature: sig, + } } + + req := &protobufs.TokenRequest{ + Request: announce, + } + + // need to wait for peering + gotime.Sleep(30 * gotime.Second) + e.publishMessage(intrinsicFilter, req) }() + } else { + f, _, err := e.clockStore.GetLatestDataClockFrame(e.intrinsicFilter) + if err == nil { + msg := []byte("resume") + msg = binary.BigEndian.AppendUint64(msg, f.FrameNumber) + msg = append(msg, e.intrinsicFilter...) + sig, err := e.pubSub.SignMessage(msg) + if err != nil { + panic(err) + } + + // need to wait for peering + gotime.Sleep(30 * gotime.Second) + e.publishMessage(e.intrinsicFilter, &protobufs.AnnounceProverResume{ + Filter: e.intrinsicFilter, + FrameNumber: f.FrameNumber, + PublicKeySignatureEd448: &protobufs.Ed448Signature{ + PublicKey: &protobufs.Ed448PublicKey{ + KeyValue: e.pubSub.GetPublicKey(), + }, + Signature: sig, + }, + }) + } } inc, _, _, err := dataProofStore.GetLatestDataTimeProof(pubSub.GetPeerID()) @@ -459,13 +470,9 @@ func CreateGenesisState( [][]byte, map[string]uint64, ) { - genesis, err := config.DownloadAndVerifyGenesis() - if err != nil { - panic(err) - } - - if err != nil { - panic(errors.New("genesis seed is nil")) + genesis := config.GetGenesis() + if genesis == nil { + panic("genesis is nil") } seed, err := hex.DecodeString(engineConfig.GenesisSeed) @@ -474,13 +481,17 @@ func CreateGenesisState( } logger.Info("creating genesis frame from message:") - for _, l := range strings.Split(string(seed), "\n") { - logger.Info(l) + for i, l := range strings.Split(string(seed), "|") { + if i == 0 { + logger.Info(l) + } else { + logger.Info(fmt.Sprintf("Blockstamp ending in 0x%x", l)) + } } difficulty := engineConfig.Difficulty - if difficulty != 100000 { - difficulty = 100000 + if difficulty != 200000 { + difficulty = 200000 } b := sha3.Sum256(seed) @@ -531,6 +542,7 @@ func CreateGenesisState( bridgedAddrs := map[string]struct{}{} + logger.Info("encoding bridged token state") bridgeTotal := decimal.Zero for _, b := range bridged { amt, err := decimal.NewFromString(b.Amount) @@ -544,6 +556,7 @@ func CreateGenesisState( voucherTotals := map[string]decimal.Decimal{} peerIdTotals := map[string]decimal.Decimal{} peerSeniority := map[string]uint64{} + logger.Info("encoding first retro state") for _, f := range firstRetro { if _, ok := bridgedAddrs[f.PeerId]; !ok { peerIdTotals[f.PeerId], err = decimal.NewFromString(f.Reward) @@ -562,12 +575,14 @@ func CreateGenesisState( peerSeniority[f.PeerId] = uint64(10 * 6 * 60 * 24 * 92 / (max / actual)) } + logger.Info("encoding voucher state") for _, v := range vouchers { if _, ok := bridgedAddrs[v]; !ok { voucherTotals[v] = decimal.NewFromInt(50) } } + logger.Info("encoding second retro state") for _, f := range secondRetro { if _, ok := bridgedAddrs[f.PeerId]; !ok { existing, ok := peerIdTotals[f.PeerId] @@ -609,6 +624,7 @@ func CreateGenesisState( } } + logger.Info("encoding third retro state") for _, f := range thirdRetro { existing, ok := peerIdTotals[f.PeerId] @@ -630,6 +646,7 @@ func CreateGenesisState( peerSeniority[f.PeerId] = peerSeniority[f.PeerId] + (10 * 6 * 60 * 24 * 30) } + logger.Info("encoding fourth retro state") for _, f := range fourthRetro { existing, ok := 
peerIdTotals[f.PeerId] @@ -661,6 +678,14 @@ func CreateGenesisState( panic(err) } + totalExecutions := 0 + logger.Info( + "creating execution state", + zap.Int( + "coin_executions", + totalExecutions, + ), + ) genesisState.Outputs = append(genesisState.Outputs, &protobufs.TokenOutput{ Output: &protobufs.TokenOutput_Coin{ Coin: &protobufs.Coin{ @@ -678,8 +703,18 @@ func CreateGenesisState( }, }, }) + totalExecutions++ for peerId, total := range peerIdTotals { + if totalExecutions%1000 == 0 { + logger.Info( + "creating execution state", + zap.Int( + "coin_executions", + totalExecutions, + ), + ) + } peerBytes, err := base58.Decode(peerId) if err != nil { panic(err) @@ -707,9 +742,19 @@ func CreateGenesisState( }, }, }) + totalExecutions++ } for voucher, total := range voucherTotals { + if totalExecutions%1000 == 0 { + logger.Info( + "creating execution state", + zap.Int( + "coin_executions", + totalExecutions, + ), + ) + } keyBytes, err := hex.DecodeString(voucher[2:]) if err != nil { panic(err) @@ -737,10 +782,18 @@ func CreateGenesisState( }, }, }) + totalExecutions++ } + logger.Info( + "serializing execution state to store, this may take some time...", + zap.Int( + "coin_executions", + totalExecutions, + ), + ) + txn, err := coinStore.NewTransaction() for _, output := range genesisState.Outputs { - txn, err := coinStore.NewTransaction() if err != nil { panic(err) } @@ -758,9 +811,9 @@ func CreateGenesisState( if err != nil { panic(err) } - if err := txn.Commit(); err != nil { - panic(err) - } + } + if err := txn.Commit(); err != nil { + panic(err) } logger.Info("encoded transcript") @@ -826,7 +879,7 @@ func CreateGenesisState( }, AggregateCommitment: commitment, Proof: proof, - }, [][]byte{genesis.Beacon}, peerSeniority + }, [][]byte{genesis.Beacon}, map[string]uint64{} } func GetAddressOfCoin( @@ -1001,14 +1054,15 @@ func (e *TokenExecutionEngine) RunWorker() { ) panic(err) } + + txn, err := e.coinStore.NewTransaction() + if err != nil { + panic(err) + } + for _, output := range app.TokenOutputs.Outputs { switch o := output.Output.(type) { case *protobufs.TokenOutput_Coin: - txn, err := e.coinStore.NewTransaction() - if err != nil { - panic(err) - } - address, err := GetAddressOfCoin(o.Coin, frame.FrameNumber) if err != nil { panic(err) @@ -1022,15 +1076,7 @@ func (e *TokenExecutionEngine) RunWorker() { if err != nil { panic(err) } - if err := txn.Commit(); err != nil { - panic(err) - } case *protobufs.TokenOutput_DeletedCoin: - txn, err := e.coinStore.NewTransaction() - if err != nil { - panic(err) - } - coin, err := e.coinStore.GetCoinByAddress(o.DeletedCoin.Address) if err != nil { panic(err) @@ -1043,15 +1089,7 @@ func (e *TokenExecutionEngine) RunWorker() { if err != nil { panic(err) } - if err := txn.Commit(); err != nil { - panic(err) - } case *protobufs.TokenOutput_Proof: - txn, err := e.coinStore.NewTransaction() - if err != nil { - panic(err) - } - address, err := GetAddressOfPreCoinProof(o.Proof) if err != nil { panic(err) @@ -1065,15 +1103,7 @@ func (e *TokenExecutionEngine) RunWorker() { if err != nil { panic(err) } - if err := txn.Commit(); err != nil { - panic(err) - } case *protobufs.TokenOutput_DeletedProof: - txn, err := e.coinStore.NewTransaction() - if err != nil { - panic(err) - } - address, err := GetAddressOfPreCoinProof(o.DeletedProof) if err != nil { panic(err) @@ -1086,11 +1116,12 @@ func (e *TokenExecutionEngine) RunWorker() { if err != nil { panic(err) } - if err := txn.Commit(); err != nil { - panic(err) - } } } + + if err := txn.Commit(); err != nil { + 
panic(err) + } } } } diff --git a/node/main.go b/node/main.go index 9d440fb..50bfa72 100644 --- a/node/main.go +++ b/node/main.go @@ -195,7 +195,7 @@ func main() { count++ } - if count < len(config.Signatories)/2 { + if count < len(config.Signatories)/2+len(config.Signatories)%2 { fmt.Printf("Quorum on signatures not met") os.Exit(1) } @@ -332,9 +332,6 @@ func main() { } if *core != 0 { - runtime.GOMAXPROCS(1) - rdebug.SetGCPercent(9999) - if nodeConfig.Engine.DataWorkerMemoryLimit == 0 { nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB } @@ -372,6 +369,7 @@ func main() { l, uint32(*core)-1, qcrypto.NewWesolowskiFrameProver(l), + nodeConfig, *parentProcess, ) if err != nil { @@ -395,6 +393,19 @@ func main() { report := RunSelfTestIfNeeded(*configDirectory, nodeConfig) + if *core == 0 { + for { + genesis, err := config.DownloadAndVerifyGenesis(*network) + if err != nil { + time.Sleep(10 * time.Minute) + continue + } + + nodeConfig.Engine.GenesisSeed = genesis.GenesisSeedHex + break + } + } + done := make(chan os.Signal, 1) signal.Notify(done, syscall.SIGINT, syscall.SIGTERM) var node *app.Node diff --git a/node/p2p/bloom_utils.go b/node/p2p/bloom_utils.go index e46bb46..424ac6b 100644 --- a/node/p2p/bloom_utils.go +++ b/node/p2p/bloom_utils.go @@ -1,7 +1,6 @@ package p2p import ( - "fmt" "math/big" "sort" @@ -36,17 +35,9 @@ func GetBloomFilter(data []byte, bitLength int, k int) []byte { } if outputBI.Bit(int(position)) != 1 { outputBI.SetBit(outputBI, int(position), 1) - } else if k*size <= 32 { + } else if k < size { // we need to extend the search k++ - } else { - fmt.Printf( - "digest %+x cannot be used as bloom index, panicking\n", - digest, - ) - panic( - "could not generate bloom filter index, k offset cannot be adjusted", - ) } } outputBI.FillBytes(output) diff --git a/node/p2p/bloom_utils_test.go b/node/p2p/bloom_utils_test.go index 2fa7698..6d7572c 100644 --- a/node/p2p/bloom_utils_test.go +++ b/node/p2p/bloom_utils_test.go @@ -33,6 +33,30 @@ func TestGetBloomFilter(t *testing.T) { 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }) + fourByteSixtyFourKTest := p2p.GetBloomFilter( + []byte{0x00, 0x00, 0x00, 0x00}, + 1024, + 64, + ) + assert.ElementsMatch(t, fourByteSixtyFourKTest, []byte{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x21, 0x00, 0x01, + 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, + }) + fourByteSixteenKTest := p2p.GetBloomFilter( []byte{0x00, 0x00, 0x00, 0x00}, 65536, diff --git a/node/p2p/blossomsub.go b/node/p2p/blossomsub.go index 4d76623..7ca7bfc 100644 --- a/node/p2p/blossomsub.go +++ b/node/p2p/blossomsub.go @@ -391,6 +391,10 @@ func (b *BlossomSub) Subscribe( b.logger.Error("subscription failed", zap.Error(err)) return errors.Wrap(err, "subscribe") } + _, ok := b.bitmaskMap[string(bit.Bitmask())] + if !ok { + b.bitmaskMap[string(bit.Bitmask())] = bit + } subs = 
append(subs, sub) } diff --git a/node/rpc/data_worker_ipc_server.go b/node/rpc/data_worker_ipc_server.go index cd77214..a43147f 100644 --- a/node/rpc/data_worker_ipc_server.go +++ b/node/rpc/data_worker_ipc_server.go @@ -3,15 +3,20 @@ package rpc import ( "context" "encoding/binary" + "encoding/hex" "os" "runtime" "syscall" "time" + "golang.org/x/crypto/sha3" + "source.quilibrium.com/quilibrium/monorepo/node/config" + "source.quilibrium.com/quilibrium/monorepo/node/consensus/data" "source.quilibrium.com/quilibrium/monorepo/node/crypto" "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token" "source.quilibrium.com/quilibrium/monorepo/node/p2p" + pcrypto "github.com/libp2p/go-libp2p/core/crypto" "github.com/multiformats/go-multiaddr" mn "github.com/multiformats/go-multiaddr/net" "github.com/pkg/errors" @@ -147,7 +152,7 @@ func (r *DataWorkerIPCServer) CalculateChallengeProof( if !found { return nil, errors.Wrap( - errors.New("no applicable challenge"), + data.ErrNoApplicableChallenge, "calculate challenge proof", ) } @@ -169,13 +174,42 @@ func NewDataWorkerIPCServer( logger *zap.Logger, coreId uint32, prover crypto.FrameProver, + config *config.Config, parentProcessId int, ) (*DataWorkerIPCServer, error) { + peerPrivKey, err := hex.DecodeString(config.P2P.PeerPrivKey) + if err != nil { + panic(errors.Wrap(err, "error unmarshaling peerkey")) + } + + privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey) + if err != nil { + panic(errors.Wrap(err, "error unmarshaling peerkey")) + } + + pub := privKey.GetPublic() + + pubKey, err := pub.Raw() + if err != nil { + panic(err) + } + + digest := make([]byte, 128) + s := sha3.NewShake256() + s.Write([]byte(pubKey)) + _, err = s.Read(digest) + if err != nil { + panic(err) + } + return &DataWorkerIPCServer{ - listenAddrGRPC: listenAddrGRPC, - logger: logger, - coreId: coreId, - prover: prover, + listenAddrGRPC: listenAddrGRPC, + logger: logger, + coreId: coreId, + prover: prover, + indices: []int{ + p2p.GetOnesIndices(p2p.GetBloomFilter(digest, 1024, 64))[coreId%64], + }, parentProcessId: parentProcessId, }, nil } diff --git a/node/store/clock.go b/node/store/clock.go index 0695a0d..2c6653a 100644 --- a/node/store/clock.go +++ b/node/store/clock.go @@ -764,7 +764,7 @@ func (p *PebbleClockStore) GetLatestDataClockFrame( } frameNumber := binary.BigEndian.Uint64(idxValue) - frame, _, err := p.GetDataClockFrame(filter, frameNumber, false) + frame, tries, err := p.GetDataClockFrame(filter, frameNumber, false) if err != nil { if errors.Is(err, pebble.ErrNotFound) { return nil, nil, ErrNotFound @@ -775,29 +775,7 @@ func (p *PebbleClockStore) GetLatestDataClockFrame( closer.Close() - proverTries := []*tries.RollingFrecencyCritbitTrie{} - i := uint16(0) - for { - proverTrie := &tries.RollingFrecencyCritbitTrie{} - trieData, closer, err := p.db.Get(clockProverTrieKey(filter, i, frameNumber)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - break - } - - return nil, nil, errors.Wrap(err, "get latest data clock frame") - } - - if err := proverTrie.Deserialize(trieData); err != nil { - closer.Close() - return nil, nil, errors.Wrap(err, "get latest data clock frame") - } - closer.Close() - proverTries = append(proverTries, proverTrie) - i++ - } - - return frame, proverTries, nil + return frame, tries, nil } // GetStagedDataClockFrame implements ClockStore.
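
In the prover resume path added to token_execution_engine.go, the signed payload is the ASCII tag "resume", the latest frame number in big-endian, then the intrinsic filter, and the resulting signature is wrapped in an Ed448Signature on AnnounceProverResume. A minimal sketch of that payload construction follows; the signing step (pubSub.SignMessage in the patch) is left to the caller.

package main

import (
	"encoding/binary"
	"fmt"
)

// buildResumePayload mirrors the byte layout of the resume announcement:
// the ASCII tag "resume", the frame number in big-endian, then the
// intrinsic filter bytes. Signing is left to the caller.
func buildResumePayload(frameNumber uint64, intrinsicFilter []byte) []byte {
	msg := []byte("resume")
	msg = binary.BigEndian.AppendUint64(msg, frameNumber)
	msg = append(msg, intrinsicFilter...)
	return msg
}

func main() {
	payload := buildResumePayload(42, []byte{0x01, 0x02})
	fmt.Printf("%x\n", payload) // 726573756d65000000000000002a0102
}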
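
CreateGenesisState and RunWorker both move from one coin-store transaction per output to a single transaction committed once after the loop, with progress logged every 1000 executions while the genesis outputs are built. The batching pattern looks roughly like the sketch below; Txn, Store, and the in-memory demo are stand-ins, not the repo's store.CoinStore API.

package main

import "fmt"

// Txn and Store are illustrative stand-ins for the coin store's
// transaction API; the names and signatures are assumptions.
type Txn interface {
	Put(key, value string) error
	Commit() error
}

type Store interface {
	NewTransaction() (Txn, error)
}

// persistAll opens one transaction, writes every output, and commits once,
// rather than opening and committing a transaction per output.
func persistAll(s Store, outputs map[string]string) error {
	txn, err := s.NewTransaction()
	if err != nil {
		return err
	}
	for k, v := range outputs {
		if err := txn.Put(k, v); err != nil {
			return err
		}
	}
	return txn.Commit()
}

// memStore/memTxn: throwaway in-memory implementation for the demo.
type memStore struct{ data map[string]string }
type memTxn struct {
	parent  *memStore
	pending map[string]string
}

func (s *memStore) NewTransaction() (Txn, error) {
	return &memTxn{parent: s, pending: map[string]string{}}, nil
}
func (t *memTxn) Put(k, v string) error { t.pending[k] = v; return nil }
func (t *memTxn) Commit() error {
	for k, v := range t.pending {
		t.parent.data[k] = v
	}
	return nil
}

func main() {
	s := &memStore{data: map[string]string{}}
	if err := persistAll(s, map[string]string{"coin:1": "50", "coin:2": "157208"}); err != nil {
		panic(err)
	}
	fmt.Println(len(s.data), "outputs committed in one transaction")
}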
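
The signature check in node/main.go now fails only when count < len(config.Signatories)/2 + len(config.Signatories)%2, i.e. it requires a strict majority (the ceiling of n/2) instead of the floor. With 7 signatories the old check passed at 3 signatures; the new one needs 4.

package main

import "fmt"

// quorum returns the minimum number of signatures required: ceil(n/2),
// computed the same way as the patch (n/2 + n%2).
func quorum(n int) int {
	return n/2 + n%2
}

func main() {
	for _, n := range []int{4, 7, 17} {
		fmt.Printf("signatories=%d quorum=%d\n", n, quorum(n))
	}
	// signatories=4 quorum=2
	// signatories=7 quorum=4
	// signatories=17 quorum=9
}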
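
When not running as a data worker (*core == 0), main now loops until config.DownloadAndVerifyGenesis(*network) succeeds, sleeping 10 minutes between attempts, and only then copies GenesisSeedHex into the engine config. The shape of that loop, with the downloader replaced by a hypothetical fetchGenesis, is sketched here.

package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchGenesis is a stand-in for config.DownloadAndVerifyGenesis; it is not
// the real downloader, just something that eventually succeeds.
func fetchGenesis(attempt int) (string, error) {
	if attempt < 2 {
		return "", errors.New("genesis not yet available")
	}
	return "deadbeef", nil
}

func main() {
	retryDelay := 10 * time.Millisecond // the patch uses 10 * time.Minute
	for attempt := 0; ; attempt++ {
		seed, err := fetchGenesis(attempt)
		if err != nil {
			time.Sleep(retryDelay)
			continue
		}
		fmt.Println("genesis seed:", seed)
		break
	}
}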
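
GetBloomFilter no longer panics when a derived position collides with a bit that is already set; it keeps extending the search (k++) while k < size. The underlying idea, drawing candidate positions until the requested number of distinct bits is set, is sketched below in a simplified, self-contained form; it is not the repo's exact index derivation.

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// simpleBloom sets k distinct bits of a bitLength-bit filter, drawing
// candidate positions from a SHAKE256 stream over data and skipping
// collisions instead of giving up. Assumes k < bitLength. Simplified
// illustration only, not node/p2p.GetBloomFilter.
func simpleBloom(data []byte, bitLength, k int) []byte {
	output := make([]byte, bitLength/8)
	outputBI := new(big.Int)
	s := sha3.NewShake256()
	s.Write(data)

	set := 0
	buf := make([]byte, 2)
	for set < k {
		s.Read(buf)
		position := int(binary.BigEndian.Uint16(buf)) % bitLength
		if outputBI.Bit(position) != 1 {
			outputBI.SetBit(outputBI, position, 1)
			set++
		}
	}
	outputBI.FillBytes(output)
	return output
}

func main() {
	fmt.Printf("%x\n", simpleBloom([]byte{0x00, 0x00, 0x00, 0x00}, 256, 3))
}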
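
NewDataWorkerIPCServer now derives the worker's prover index from the configured Ed448 peer key: the raw public key is expanded to a 128-byte SHAKE256 digest, fed through p2p.GetBloomFilter(digest, 1024, 64), and the ones-index at coreId % 64 is kept. The digest-expansion step is sketched below; the bloom and index helpers are assumed to come from the repo's p2p package, and the key bytes in main are purely illustrative.

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// expandKey stretches a public key into a 128-byte SHAKE256 digest, the
// input the patch feeds into p2p.GetBloomFilter(digest, 1024, 64) before
// selecting GetOnesIndices(...)[coreId%64] as this worker's index.
func expandKey(pubKey []byte) ([]byte, error) {
	digest := make([]byte, 128)
	s := sha3.NewShake256()
	s.Write(pubKey)
	if _, err := s.Read(digest); err != nil {
		return nil, err
	}
	return digest, nil
}

func main() {
	// Hypothetical key bytes for illustration only.
	pubKey, _ := hex.DecodeString("0102030405060708")
	digest, err := expandKey(pubKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("digest prefix: %x\n", digest[:16])
}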