mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git
synced 2024-12-26 00:25:17 +00:00

v2.0.0-p5 (#303)

parent bb9512488e
commit 5230ceb413
@@ -11,6 +11,7 @@ import (
   "source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
   "source.quilibrium.com/quilibrium/monorepo/node/crypto"
   "source.quilibrium.com/quilibrium/monorepo/node/execution"
+  "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
   "source.quilibrium.com/quilibrium/monorepo/node/keys"
   "source.quilibrium.com/quilibrium/monorepo/node/p2p"
   "source.quilibrium.com/quilibrium/monorepo/node/store"
@@ -48,7 +49,7 @@ func newNode(
   coinStore store.CoinStore,
   keyManager keys.KeyManager,
   pubSub p2p.PubSub,
-  // execution engines wire in here
+  tokenExecutionEngine *token.TokenExecutionEngine,
   engine consensus.ConsensusEngine,
 ) (*Node, error) {
   if engine == nil {
@@ -56,6 +57,9 @@ func newNode(
   }

   execEngines := make(map[string]execution.ExecutionEngine)
+  if tokenExecutionEngine != nil {
+    execEngines[tokenExecutionEngine.GetName()] = tokenExecutionEngine
+  }

   return &Node{
     logger,
@@ -46,12 +46,14 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes
   p2PConfig := configConfig.P2P
   blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
   engineConfig := configConfig.Engine
-  kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
   wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
+  kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
   masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver)
   inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
+  pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger)
+  tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, engineConfig, fileKeyManager, blossomSub, wesolowskiFrameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
   masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
-  node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, masterClockConsensusEngine)
+  node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine)
   if err != nil {
     return nil, err
   }
@@ -70,12 +72,14 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo
   p2PConfig := configConfig.P2P
   blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
   engineConfig := configConfig.Engine
-  kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
   wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
+  kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
   masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver)
   inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
+  pebbleKeyStore := store.NewPebbleKeyStore(pebbleDB, zapLogger)
+  tokenExecutionEngine := token.NewTokenExecutionEngine(zapLogger, engineConfig, fileKeyManager, blossomSub, wesolowskiFrameProver, kzgInclusionProver, pebbleClockStore, pebbleDataProofStore, pebbleCoinStore, masterTimeReel, inMemoryPeerInfoManager, pebbleKeyStore, selfTestReport)
   masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
-  node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, masterClockConsensusEngine)
+  node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, pebbleCoinStore, fileKeyManager, blossomSub, tokenExecutionEngine, masterClockConsensusEngine)
   if err != nil {
     return nil, err
   }
@@ -132,12 +132,12 @@ var Signatories = []string{

 var unlock *SignedGenesisUnlock

-func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) {
+func DownloadAndVerifyGenesis(network uint) (*SignedGenesisUnlock, error) {
   if unlock != nil {
     return unlock, nil
   }

-  resp, err := http.Get("https://releases.quilibrium.com/stasis")
+  resp, err := http.Get("https://releases.quilibrium.com/genesisunlock")
   if err != nil || resp.StatusCode != 200 {
     fmt.Println("Stasis lock not yet released.")
     return nil, errors.New("stasis lock not yet released")
@@ -185,14 +185,14 @@ func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) {
     opensslMsg := "SHA3-256(genesis)= " + hex.EncodeToString(digest[:])
     if !ed448.Verify(pubkey, append([]byte(opensslMsg), 0x0a), sig, "") {
       fmt.Printf("Failed signature check for signatory #%d\n", i)
-      return nil, err
+      return nil, errors.New("failed signature check")
     }
     count++
   }

-  if count < len(Signatories)/2 {
+  if count < len(Signatories)/2+len(Signatories)%2 {
     fmt.Printf("Quorum on signatures not met")
-    return nil, err
+    return nil, errors.New("quorum on signatures not met")
   }

   fmt.Println("Stasis lock released. Welcome to 2.0.")
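The quorum check above moves from `len(Signatories)/2` to `len(Signatories)/2+len(Signatories)%2`, which is the integer ceiling of half the signatory count, so an odd-sized signatory set now needs a strict majority of valid signatures. A minimal, self-contained sketch of that arithmetic; the `quorum` helper below is illustrative and not part of the repository:

    package main

    import "fmt"

    // quorum mirrors the updated check: ceil(n/2) valid signatures are required,
    // computed as n/2 + n%2 with integer division.
    func quorum(n int) int {
        return n/2 + n%2
    }

    func main() {
        for _, n := range []int{16, 17, 18} {
            fmt.Printf("signatories=%d required=%d\n", n, quorum(n))
        }
    }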
@@ -200,6 +200,10 @@ func DownloadAndVerifyGenesis() (*SignedGenesisUnlock, error) {
   return unlock, err
 }

+func GetGenesis() *SignedGenesisUnlock {
+  return unlock
+}
+
 var StasisSeed = "737461736973"

 func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
@@ -235,7 +239,7 @@ func LoadConfig(configPath string, proverKey string, skipGenesisCheck bool) (
   genesisSeed := StasisSeed

   if !skipGenesisCheck {
-    output, err := DownloadAndVerifyGenesis()
+    output, err := DownloadAndVerifyGenesis(0)
     if err == nil {
       genesisSeed = output.GenesisSeedHex
     }
@@ -36,5 +36,5 @@ func FormatVersion(version []byte) string {
 }

 func GetPatchNumber() byte {
-  return 0x04
+  return 0x05
 }
@@ -283,22 +283,6 @@ func (e *DataClockConsensusEngine) collect(
       e.logger.Info("no peers available for sync, waiting")
       time.Sleep(5 * time.Second)
     } else if maxFrame > latest.FrameNumber {
-      masterHead, err := e.masterTimeReel.Head()
-      if err != nil {
-        panic(err)
-      }
-
-      if masterHead.FrameNumber < maxFrame {
-        e.logger.Info(
-          "master frame synchronization needed to continue, waiting",
-          zap.Uint64("master_frame_head", masterHead.FrameNumber),
-          zap.Uint64("max_data_frame_target", maxFrame),
-        )
-
-        time.Sleep(30 * time.Second)
-        continue
-      }
-
       latest, err = e.sync(latest, maxFrame, peerId)
       if err == nil {
         break
@@ -34,6 +34,8 @@ import (
 const PEER_INFO_TTL = 60 * 60 * 1000
 const UNCOOPERATIVE_PEER_INFO_TTL = 5 * 60 * 1000

+var ErrNoApplicableChallenge = errors.New("no applicable challenge")
+
 type SyncStatusType int

 const (
@@ -214,7 +216,7 @@ func NewDataClockConsensusEngine(

   difficulty := engineConfig.Difficulty
   if difficulty == 0 {
-    difficulty = 100000
+    difficulty = 200000
   }

   e := &DataClockConsensusEngine{
@@ -250,6 +252,7 @@ func NewDataClockConsensusEngine(
     peerInfoManager: peerInfoManager,
     peerSeniority: newFromMap(peerSeniority),
     messageProcessorCh: make(chan *pb.Message),
+    engineConfig: engineConfig,
   }

   logger.Info("constructing consensus engine")
@@ -456,8 +459,35 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
   wg := sync.WaitGroup{}
   wg.Add(len(clients))
   for i, client := range clients {
+    client := client
     go func() {
       for j := 3; j >= 0; j-- {
+        var err error
+        if client == nil {
+          if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
+            e.logger.Error(
+              "client failed, reconnecting after 50ms",
+              zap.Uint32("client", uint32(i)),
+            )
+            time.Sleep(50 * time.Millisecond)
+            client, err = e.createParallelDataClientsFromListAndIndex(uint32(i))
+            if err != nil {
+              e.logger.Error("failed to reconnect", zap.Error(err))
+            }
+          } else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
+            e.logger.Error(
+              "client failed, reconnecting after 50ms",
+            )
+            time.Sleep(50 * time.Millisecond)
+            client, err =
+              e.createParallelDataClientsFromBaseMultiaddrAndIndex(uint32(i))
+            if err != nil {
+              e.logger.Error("failed to reconnect", zap.Error(err))
+            }
+          }
+          clients[i] = client
+          continue
+        }
         resp, err :=
           client.CalculateChallengeProof(
             context.Background(),
@@ -467,8 +497,11 @@ func (e *DataClockConsensusEngine) PerformTimeProof(
           },
         )
         if err != nil {
+          if errors.Is(err, ErrNoApplicableChallenge) {
+            break
+          }
           if j == 0 {
-            e.logger.Error("unable to get a response in time from worker")
+            e.logger.Error("unable to get a response in time from worker", zap.Error(err))
           }
           if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
             e.logger.Error(
@@ -532,7 +565,7 @@ func (
     len(e.frameProverTries),
   )

-  for _, trie := range e.frameProverTries {
+  for i, trie := range e.frameProverTries {
     newTrie := &tries.RollingFrecencyCritbitTrie{}
     b, err := trie.Serialize()
     if err != nil {
@@ -543,7 +576,7 @@ func (
     if err != nil {
       panic(err)
     }
-    frameProverTries = append(frameProverTries, newTrie)
+    frameProverTries[i] = newTrie
   }

   e.frameProverTriesMx.RUnlock()
@@ -553,11 +586,6 @@ func (
 func (e *DataClockConsensusEngine) runLoop() {
   dataFrameCh := e.dataTimeReel.NewFrameCh()

-  e.logger.Info("waiting for peer list mappings")
-  // We need to re-tune this so that libp2p's peerstore activation threshold
-  // considers DHT peers to be correct:
-  time.Sleep(30 * time.Second)
-
   for e.state < consensus.EngineStateStopping {
     peerCount := e.pubSub.GetNetworkPeersCount()
     if peerCount < e.minimumPeersRequired {
@@ -597,7 +625,7 @@ func (e *DataClockConsensusEngine) runLoop() {
         }()
       }

-      for _, trie := range e.GetFrameProverTries() {
+      trie := e.GetFrameProverTries()[0]
       if bytes.Equal(
         trie.FindNearest(e.provingKeyAddress).External.Key,
         e.provingKeyAddress,
@@ -684,7 +712,6 @@ func (e *DataClockConsensusEngine) runLoop() {
           }
           break
         }
-      }
     case <-time.After(20 * time.Second):
       dataFrame, err := e.dataTimeReel.Head()
       if err != nil {
@@ -812,6 +839,25 @@ func (e *DataClockConsensusEngine) Stop(force bool) <-chan error {
   e.state = consensus.EngineStateStopping
   errChan := make(chan error)

+  msg := []byte("pause")
+  msg = binary.BigEndian.AppendUint64(msg, e.GetFrame().FrameNumber)
+  msg = append(msg, e.filter...)
+  sig, err := e.pubSub.SignMessage(msg)
+  if err != nil {
+    panic(err)
+  }
+
+  e.publishMessage(e.filter, &protobufs.AnnounceProverPause{
+    Filter: e.filter,
+    FrameNumber: e.GetFrame().FrameNumber,
+    PublicKeySignatureEd448: &protobufs.Ed448Signature{
+      PublicKey: &protobufs.Ed448PublicKey{
+        KeyValue: e.pubSub.GetPublicKey(),
+      },
+      Signature: sig,
+    },
+  })
+
   wg := sync.WaitGroup{}
   wg.Add(len(e.executionEngines))
   for name := range e.executionEngines {
@@ -1057,7 +1103,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() (

     _, addr, err := mn.DialArgs(ma)
     if err != nil {
-      panic(err)
+      e.logger.Error("could not get dial args", zap.Error(err))
+      continue
     }

     conn, err := grpc.Dial(
@@ -1071,7 +1118,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromList() (
       ),
     )
     if err != nil {
-      panic(err)
+      e.logger.Error("could not dial", zap.Error(err))
+      continue
     }

     clients[i] = protobufs.NewDataIPCServiceClient(conn)
@@ -1115,7 +1163,7 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(

     _, addr, err := mn.DialArgs(ma)
     if err != nil {
-      e.logger.Warn("could not connect to client", zap.String("addr", addr))
+      e.logger.Error("could not get dial args", zap.Error(err))
       continue
     }

@@ -1130,7 +1178,8 @@ func (e *DataClockConsensusEngine) createParallelDataClientsFromBaseMultiaddr(
       ),
     )
     if err != nil {
-      e.logger.Warn("could not dial to client", zap.String("addr", addr))
+      e.logger.Error("could not dial", zap.Error(err))
+      continue
     }

     clients[i] = protobufs.NewDataIPCServiceClient(conn)
@@ -280,6 +280,9 @@ func (e *DataClockConsensusEngine) handleDataPeerListAnnounce(
 func (e *DataClockConsensusEngine) getAddressFromSignature(
   sig *protobufs.Ed448Signature,
 ) ([]byte, error) {
+  if sig.PublicKey == nil || sig.PublicKey.KeyValue == nil {
+    return nil, errors.New("invalid data")
+  }
   addrBI, err := poseidon.HashBytes(sig.PublicKey.KeyValue)
   if err != nil {
     return nil, errors.Wrap(err, "get address from signature")
@@ -298,11 +301,25 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverJoin(
     return errors.Wrap(err, "handle data announce prover join")
   }

+  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+    return errors.Wrap(
+      errors.New("invalid data"),
+      "handle data announce prover join",
+    )
+  }
+
   address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
   if err != nil {
     return errors.Wrap(err, "handle data announce prover join")
   }

+  msg := []byte("join")
+  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+  msg = append(msg, announce.Filter...)
+  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+    return errors.Wrap(err, "handle data announce prover join")
+  }
+
   e.proverTrieRequestsMx.Lock()
   if len(announce.Filter) != len(e.filter) {
     return errors.Wrap(
@@ -325,7 +342,16 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
   if err := any.UnmarshalTo(announce); err != nil {
     return errors.Wrap(err, "handle data announce prover leave")
   }

+  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+    return errors.Wrap(
+      errors.New("invalid data"),
+      "handle data announce prover leave",
+    )
+  }
+
   e.proverTrieRequestsMx.Lock()

   if len(announce.Filter) != len(e.filter) {
     return errors.Wrap(
       errors.New("filter width mismatch"),
@@ -333,6 +359,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverLeave(
     )
   }

+  msg := []byte("leave")
+  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+  msg = append(msg, announce.Filter...)
+  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+    return errors.Wrap(err, "handle data announce prover leave")
+  }
+
   address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
   if err != nil {
     return errors.Wrap(err, "handle data announce prover leave")
@@ -353,6 +386,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
     return errors.Wrap(err, "handle data announce prover pause")
   }

+  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+    return errors.Wrap(
+      errors.New("invalid data"),
+      "handle data announce prover leave",
+    )
+  }
+
   e.proverTrieRequestsMx.Lock()
   if len(announce.Filter) != len(e.filter) {
     return errors.Wrap(
@@ -361,12 +401,19 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverPause(
     )
   }

+  msg := []byte("pause")
+  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+  msg = append(msg, announce.Filter...)
+  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+    return errors.Wrap(err, "handle data announce prover pause")
+  }
+
   address, err := e.getAddressFromSignature(announce.PublicKeySignatureEd448)
   if err != nil {
     return errors.Wrap(err, "handle data announce prover pause")
   }

-  e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
+  e.proverTriePauseRequests[string(address)] = string(announce.Filter)
   e.proverTrieRequestsMx.Unlock()
   return nil
 }
@@ -381,6 +428,13 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
     return errors.Wrap(err, "handle data announce prover resume")
   }

+  if announce.PublicKeySignatureEd448 == nil || announce.Filter == nil {
+    return errors.Wrap(
+      errors.New("invalid data"),
+      "handle data announce prover resume",
+    )
+  }
+
   e.proverTrieRequestsMx.Lock()
   if len(announce.Filter) != len(e.filter) {
     return errors.Wrap(
@@ -394,7 +448,14 @@ func (e *DataClockConsensusEngine) handleDataAnnounceProverResume(
     return errors.Wrap(err, "handle data announce prover resume")
   }

-  e.proverTrieLeaveRequests[string(address)] = string(announce.Filter)
+  msg := []byte("resume")
+  msg = binary.BigEndian.AppendUint64(msg, announce.FrameNumber)
+  msg = append(msg, announce.Filter...)
+  if err := announce.GetPublicKeySignatureEd448().Verify(msg); err != nil {
+    return errors.Wrap(err, "handle data announce prover resume")
+  }
+
+  e.proverTrieResumeRequests[string(address)] = string(announce.Filter)
   e.proverTrieRequestsMx.Unlock()
   return nil
 }
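The join/leave/pause/resume handlers above, and the Stop path that publishes AnnounceProverPause, all sign and verify the same payload layout: the ASCII verb, the frame number encoded big-endian, then the filter bytes. A small self-contained sketch of that layout; `announcePayload` is an illustrative helper and not a function from the repository:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // announcePayload builds verb || big-endian frame number || filter,
    // the byte string covered by the prover announce signatures in this diff.
    func announcePayload(verb string, frameNumber uint64, filter []byte) []byte {
        msg := []byte(verb)
        msg = binary.BigEndian.AppendUint64(msg, frameNumber)
        msg = append(msg, filter...)
        return msg
    }

    func main() {
        fmt.Printf("%x\n", announcePayload("pause", 42, []byte{0x01, 0x02}))
    }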
@@ -159,6 +159,7 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
 func (e *MasterClockConsensusEngine) publishProof(
   frame *protobufs.ClockFrame,
 ) error {
+  if bytes.Equal(e.pubSub.GetPeerID(), []byte(e.beacon)) {
   e.logger.Debug(
     "publishing frame",
     zap.Uint64("frame_number", frame.FrameNumber),
@@ -166,7 +167,6 @@ func (e *MasterClockConsensusEngine) publishProof(

   e.masterTimeReel.Insert(frame, false)

-  if bytes.Equal(e.pubSub.GetPeerID(), []byte(e.beacon)) {
     err := e.publishMessage(e.filter, frame)
     if err != nil {
       return errors.Wrap(err, "publish proof")
@@ -13,8 +13,8 @@ import (
 func (e *MasterClockConsensusEngine) prove(
   previousFrame *protobufs.ClockFrame,
 ) (*protobufs.ClockFrame, error) {
-  e.logger.Debug("proving new frame")
   if bytes.Equal(e.pubSub.GetPeerID(), []byte(e.beacon)) {
+    e.logger.Debug("proving new frame")
     e.collectedProverSlotsMx.Lock()
     collectedProverSlots := e.collectedProverSlots
     e.collectedProverSlots = []*protobufs.InclusionAggregateProof{}
@@ -8,6 +8,7 @@ import (
   "time"

   "github.com/iden3/go-iden3-crypto/poseidon"
+  pcrypto "github.com/libp2p/go-libp2p/core/crypto"
   "github.com/libp2p/go-libp2p/core/peer"
   "github.com/pkg/errors"
   "go.uber.org/zap"
@@ -69,7 +70,7 @@ type MasterClockConsensusEngine struct {

 var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil)

-var MASTER_CLOCK_RATE = uint32(10000000)
+var MASTER_CLOCK_RATE = uint32(1000000)

 func NewMasterClockConsensusEngine(
   engineConfig *config.EngineConfig,
@@ -167,26 +168,19 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
   e.state = consensus.EngineStateLoading
   e.logger.Info("syncing last seen state")

-  var genesis *config.SignedGenesisUnlock
-  var err error
-
-  for {
-    genesis, err := config.DownloadAndVerifyGenesis()
-    if err != nil {
-      time.Sleep(10 * time.Minute)
-      continue
-    }
-
-    e.engineConfig.GenesisSeed = genesis.GenesisSeedHex
-    break
-  }
-
-  err = e.masterTimeReel.Start()
+  err := e.masterTimeReel.Start()
   if err != nil {
     panic(err)
   }

-  e.beacon, err = peer.IDFromBytes(genesis.Beacon)
+  beaconPubKey, err := pcrypto.UnmarshalEd448PublicKey(
+    config.GetGenesis().Beacon,
+  )
+  if err != nil {
+    panic(err)
+  }
+
+  e.beacon, err = peer.IDFromPublicKey(beaconPubKey)
   if err != nil {
     panic(err)
   }
@@ -196,6 +190,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
     panic(err)
   }

+  e.logger.Info("building historic frame cache")
   e.buildHistoricFrameCache(frame)

   go func() {
@@ -17,13 +17,6 @@ import (
   "source.quilibrium.com/quilibrium/monorepo/node/tries"
 )

-var allBitmaskFilter = []byte{
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-}
-
 var unknownDistance = new(big.Int).SetBytes([]byte{
   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -46,7 +39,6 @@ type DataTimeReel struct {
   logger *zap.Logger
   clockStore store.ClockStore
   frameProver crypto.FrameProver
-  parentTimeReel TimeReel

   origin []byte
   initialInclusionProof *crypto.InclusionAggregateProof
@@ -227,7 +219,7 @@ func (d *DataTimeReel) createGenesisFrame() (

   difficulty := d.engineConfig.Difficulty
   if difficulty == 0 || difficulty == 10000 {
-    difficulty = 100000
+    difficulty = 200000
   }

   frame, tries, err := d.frameProver.CreateDataGenesisFrame(
@@ -293,22 +285,6 @@ func (d *DataTimeReel) runLoop() {
         zap.Uint64("frame_number", frame.frameNumber),
       )

-      // tag: equinox – master filter changes
-      _, err := d.clockStore.GetMasterClockFrame(
-        allBitmaskFilter,
-        frame.frameNumber)
-      if err != nil {
-        d.logger.Debug("no master, add pending")
-
-        // If the frame arrived ahead of a master, e.g. the master data is not
-        // synced, we'll go ahead and mark it as pending and process it when
-        // we can, but if we had a general fault, panic:
-        if !errors.Is(err, store.ErrNotFound) {
-          panic(err)
-        }
-        continue
-      }
-
       rawFrame, err := d.clockStore.GetStagedDataClockFrame(
         d.filter,
         frame.frameNumber,
@@ -617,21 +593,26 @@ func (d *DataTimeReel) GetDistance(frame *protobufs.ClockFrame) (
   *big.Int,
   error,
 ) {
-  // tag: equinox – master filter changes
-  master, err := d.clockStore.GetMasterClockFrame(
-    allBitmaskFilter,
-    frame.FrameNumber)
+  if frame.FrameNumber == 0 {
+    return big.NewInt(0), nil
+  }
+  prev, _, err := d.clockStore.GetDataClockFrame(
+    d.filter,
+    frame.FrameNumber-1,
+    false,
+  )
   if err != nil {
     return unknownDistance, errors.Wrap(err, "get distance")
   }

-  masterSelector, err := master.GetSelector()
+  prevSelector, err := prev.GetSelector()
   if err != nil {
     return unknownDistance, errors.Wrap(err, "get distance")
   }

   discriminatorNode :=
-    d.proverTries[0].FindNearest(masterSelector.FillBytes(make([]byte, 32)))
+    d.proverTries[0].FindNearest(prevSelector.FillBytes(make([]byte, 32)))
   discriminator := discriminatorNode.External.Key
   addr, err := frame.GetAddress()
   if err != nil {
@@ -5,11 +5,12 @@ import (
   "errors"
   "math/big"
   "sync"
-  "time"

   "go.uber.org/zap"
   "source.quilibrium.com/quilibrium/monorepo/node/config"
   "source.quilibrium.com/quilibrium/monorepo/node/crypto"
+  "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token/application"
+  "source.quilibrium.com/quilibrium/monorepo/node/p2p"
   "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
   "source.quilibrium.com/quilibrium/monorepo/node/store"
 )
@@ -53,7 +54,9 @@ func NewMasterTimeReel(
     panic("frame prover is nil")
   }

-  filter, err := hex.DecodeString(engineConfig.Filter)
+  filter, err := hex.DecodeString(
+    "0000000000000000000000000000000000000000000000000000000000000000",
+  )
   if err != nil {
     panic(err)
   }
@@ -74,32 +77,26 @@ func NewMasterTimeReel(

 // Start implements TimeReel.
 func (m *MasterTimeReel) Start() error {
+  m.logger.Debug("starting master time reel")
   frame, err := m.clockStore.GetLatestMasterClockFrame(m.filter)
   if err != nil && !errors.Is(err, store.ErrNotFound) {
     panic(err)
   }

+  m.logger.Debug("fetching genesis frame")
   genesis, err := m.clockStore.GetMasterClockFrame(m.filter, 0)
   if err != nil && !errors.Is(err, store.ErrNotFound) {
     panic(err)
   }

-  for {
-    genesis, err := config.DownloadAndVerifyGenesis()
-    if err != nil {
-      time.Sleep(10 * time.Minute)
-      continue
-    }
-
-    m.engineConfig.GenesisSeed = genesis.GenesisSeedHex
-    break
-  }
-
   rebuildGenesisFrame := false
-  if genesis != nil && len(m.engineConfig.GenesisSeed) != 74 {
-    m.logger.Warn("rebuilding genesis frame")
+  if genesis != nil && genesis.Difficulty != 1000000 {
+    m.logger.Info("rewinding time reel to genesis")

     err = m.clockStore.ResetMasterClockFrames(m.filter)
+    err = m.clockStore.ResetDataClockFrames(
+      p2p.GetBloomFilter(application.TOKEN_ADDRESS, 256, 3),
+    )
     if err != nil {
       panic(err)
     }
@@ -107,7 +104,8 @@ func (m *MasterTimeReel) Start() error {
     rebuildGenesisFrame = true
   }

-  if frame == nil || rebuildGenesisFrame {
+  if genesis == nil || rebuildGenesisFrame {
+    m.logger.Info("creating genesis frame")
     m.head = m.createGenesisFrame()
   } else {
     m.head = frame
@@ -158,8 +156,8 @@ func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame {
   }

   difficulty := m.engineConfig.Difficulty
-  if difficulty != 10000000 {
-    difficulty = 10000000
+  if difficulty != 1000000 {
+    difficulty = 1000000
   }

   frame, err := m.frameProver.CreateMasterGenesisFrame(
@@ -308,11 +308,16 @@ func (w *WesolowskiFrameProver) CreateDataGenesisFrame(
     addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
     frameProverTrie.Add(addrBytes, 0)

-    if i%8 == 0 && i != 0 {
+    if i%8 == 0 {
      frameProverTries = append(frameProverTries, frameProverTrie)
      frameProverTrie = &tries.RollingFrecencyCritbitTrie{}
     }
   }
+  if len(frameProverTrie.FindNearestAndApproximateNeighbors(
+    make([]byte, 32),
+  )) != 0 {
+    frameProverTries = append(frameProverTries, frameProverTrie)
+  }

   w.logger.Info("proving genesis frame")
   input := []byte{}
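The genesis-frame hunk above now flushes a prover trie whenever i%8 == 0 (including i == 0) and, after the loop, keeps a trailing trie only if it is non-empty. A minimal sketch of that grouping pattern over plain string slices; `chunkProvers` is illustrative only and does not exist in the repository, which groups addresses into critbit tries instead:

    package main

    import "fmt"

    // chunkProvers mirrors the grouping in the hunk above: each address is added
    // to the current group, the group is flushed whenever i%8 == 0, and a trailing
    // non-empty group is appended after the loop.
    func chunkProvers(addrs []string) [][]string {
        groups := [][]string{}
        current := []string{}
        for i, a := range addrs {
            current = append(current, a)
            if i%8 == 0 {
                groups = append(groups, current)
                current = []string{}
            }
        }
        if len(current) != 0 {
            groups = append(groups, current)
        }
        return groups
    }

    func main() {
        addrs := make([]string, 11)
        for i := range addrs {
            addrs[i] = fmt.Sprintf("addr%02d", i)
        }
        for i, g := range chunkProvers(addrs) {
            fmt.Println(i, g)
        }
    }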
@@ -67,9 +67,11 @@ func GetOutputsFromClockFrame(
   }

   transition := &protobufs.TokenRequests{}
+  if frame.FrameNumber != 0 {
     if err := proto.Unmarshal(associatedProof, transition); err != nil {
       return nil, nil, errors.Wrap(err, "get outputs from clock frame")
     }
+  }

   return transition, tokenOutputs, nil
 }
@@ -114,7 +116,35 @@ func (a *TokenApplication) ApplyTransitions(
     case *protobufs.TokenRequest_Announce:
       var primary *protobufs.Ed448Signature
       payload := []byte{}
+
+      if t.Announce == nil || t.Announce.PublicKeySignaturesEd448 == nil {
+        if !skipFailures {
+          return nil, nil, nil, errors.Wrap(
+            ErrInvalidStateTransition,
+            "apply transitions",
+          )
+        }
+        failedTransitions.Requests = append(
+          failedTransitions.Requests,
+          transition,
+        )
+        break req
+      }
       for i, p := range t.Announce.PublicKeySignaturesEd448 {
+        if p.PublicKey == nil || p.Signature == nil ||
+          p.PublicKey.KeyValue == nil {
+          if !skipFailures {
+            return nil, nil, nil, errors.Wrap(
+              ErrInvalidStateTransition,
+              "apply transitions",
+            )
+          }
+          failedTransitions.Requests = append(
+            failedTransitions.Requests,
+            transition,
+          )
+          break req
+        }
         if i == 0 {
           primary = p
         } else {
@@ -161,6 +191,8 @@ func (a *TokenApplication) ApplyTransitions(
         break req
       }

+      if t.Announce.InitialProof != nil &&
+        t.Announce.InitialProof.Proofs != nil {
       payload = []byte("mint")
       for _, p := range t.Announce.InitialProof.Proofs {
         payload = append(payload, p...)
@@ -450,14 +482,55 @@ func (a *TokenApplication) ApplyTransitions(
         )
         break req
       }
+      }
     case *protobufs.TokenRequest_Merge:
       newCoin := &protobufs.Coin{}
       newTotal := new(big.Int)
       newIntersection := make([]byte, 1024)
       payload := []byte("merge")
+      if t.Merge == nil || t.Merge.Coins == nil || t.Merge.Signature == nil {
+        if !skipFailures {
+          return nil, nil, nil, errors.Wrap(
+            ErrInvalidStateTransition,
+            "apply transitions",
+          )
+        }
+        failedTransitions.Requests = append(
+          failedTransitions.Requests,
+          transition,
+        )
+        break req
+      }
       for _, c := range t.Merge.Coins {
+        if c.Address == nil {
+          if !skipFailures {
+            return nil, nil, nil, errors.Wrap(
+              ErrInvalidStateTransition,
+              "apply transitions",
+            )
+          }
+          failedTransitions.Requests = append(
+            failedTransitions.Requests,
+            transition,
+          )
+          break req
+        }
         payload = append(payload, c.Address...)
       }
+      if t.Merge.Signature.PublicKey == nil ||
+        t.Merge.Signature.Signature == nil {
+        if !skipFailures {
+          return nil, nil, nil, errors.Wrap(
+            ErrInvalidStateTransition,
+            "apply transitions",
+          )
+        }
+        failedTransitions.Requests = append(
+          failedTransitions.Requests,
+          transition,
+        )
+        break req
+      }
       if err := t.Merge.Signature.Verify(payload); err != nil {
         if !skipFailures {
           return nil, nil, nil, errors.Wrap(
@@ -596,6 +669,21 @@ func (a *TokenApplication) ApplyTransitions(
       newCoins := []*protobufs.Coin{}
       newAmounts := []*big.Int{}
       payload := []byte{}
+      if t.Split.Signature.PublicKey == nil ||
+        t.Split.Signature.Signature == nil ||
+        t.Split.OfCoin == nil {
+        if !skipFailures {
+          return nil, nil, nil, errors.Wrap(
+            ErrInvalidStateTransition,
+            "apply transitions",
+          )
+        }
+        failedTransitions.Requests = append(
+          failedTransitions.Requests,
+          transition,
+        )
+        break req
+      }
       coin, err := a.CoinStore.GetCoinByAddress(t.Split.OfCoin.Address)
       if err != nil && !skipFailures {
         if !skipFailures {
@@ -903,6 +991,21 @@ func (a *TokenApplication) ApplyTransitions(
         transition,
       )
     case *protobufs.TokenRequest_Mint:
+      if t.Mint.Signature == nil || t.Mint.Signature.PublicKey == nil ||
+        t.Mint.Signature.Signature == nil ||
+        t.Mint.Proofs == nil {
+        if !skipFailures {
+          return nil, nil, nil, errors.Wrap(
+            ErrInvalidStateTransition,
+            "apply transitions",
+          )
+        }
+        failedTransitions.Requests = append(
+          failedTransitions.Requests,
+          transition,
+        )
+        break req
+      }
       payload := []byte("mint")
       for _, p := range t.Mint.Proofs {
         payload = append(payload, p...)
@@ -971,7 +1074,7 @@ func (a *TokenApplication) ApplyTransitions(
         bytes.Equal(
           t.Mint.Proofs[0],
           []byte("pre-dusk"),
-        ) && (bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) ||
+        ) && (!bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) ||
           currentFrameNumber < 604800) {
         delete := []*protobufs.TokenOutput{}
         if !bytes.Equal(t.Mint.Proofs[1], make([]byte, 32)) {
@@ -7,6 +7,7 @@ import (
   "encoding/binary"
   "encoding/hex"
   "encoding/json"
+  "fmt"
   "strconv"
   "strings"
   "sync"
@@ -79,20 +80,7 @@ func NewTokenExecutionEngine(
     panic(errors.New("logger is nil"))
   }

-  var genesis *config.SignedGenesisUnlock
-  var err error
-  for {
-    genesis, err = config.DownloadAndVerifyGenesis()
-    if err != nil {
-      gotime.Sleep(10 * gotime.Minute)
-    }
-
-    if genesis != nil {
-      break
-    }
-  }
-
-  seed, err := hex.DecodeString(genesis.GenesisSeedHex)
+  seed, err := hex.DecodeString(engineConfig.GenesisSeed)
   if err != nil {
     panic(err)
   }
@@ -186,9 +174,9 @@ func NewTokenExecutionEngine(

   if genesisCreated {
     go func() {
-      if len(e.engineConfig.MultisigProverEnrollmentPaths) != 0 && genesisCreated {
       keys := [][]byte{}
       ksigs := [][]byte{}
+      if len(e.engineConfig.MultisigProverEnrollmentPaths) != 0 {
        for _, conf := range e.engineConfig.MultisigProverEnrollmentPaths {
          extraConf, err := config.LoadConfig(conf, "", false)
          if err != nil {
@@ -218,6 +206,7 @@ func NewTokenExecutionEngine(
          }
          ksigs = append(ksigs, sig)
        }
+      }

       keyjoin := []byte{}
       for _, k := range keys {
@@ -298,12 +287,34 @@ func NewTokenExecutionEngine(
        Request: announce,
       }

-      err = e.publishMessage(intrinsicFilter, req)
+      // need to wait for peering
+      gotime.Sleep(30 * gotime.Second)
+      e.publishMessage(intrinsicFilter, req)
+    }()
+  } else {
+    f, _, err := e.clockStore.GetLatestDataClockFrame(e.intrinsicFilter)
+    if err == nil {
+      msg := []byte("resume")
+      msg = binary.BigEndian.AppendUint64(msg, f.FrameNumber)
+      msg = append(msg, e.intrinsicFilter...)
+      sig, err := e.pubSub.SignMessage(msg)
       if err != nil {
        panic(err)
       }

+      // need to wait for peering
+      gotime.Sleep(30 * gotime.Second)
+      e.publishMessage(e.intrinsicFilter, &protobufs.AnnounceProverResume{
+        Filter: e.intrinsicFilter,
+        FrameNumber: f.FrameNumber,
+        PublicKeySignatureEd448: &protobufs.Ed448Signature{
+          PublicKey: &protobufs.Ed448PublicKey{
+            KeyValue: e.pubSub.GetPublicKey(),
+          },
+          Signature: sig,
+        },
+      })
     }
-    }()
   }

   inc, _, _, err := dataProofStore.GetLatestDataTimeProof(pubSub.GetPeerID())
@@ -459,13 +470,9 @@ func CreateGenesisState(
   [][]byte,
   map[string]uint64,
 ) {
-  genesis, err := config.DownloadAndVerifyGenesis()
-  if err != nil {
-    panic(err)
-  }
-
-  if err != nil {
-    panic(errors.New("genesis seed is nil"))
+  genesis := config.GetGenesis()
+  if genesis == nil {
+    panic("genesis is nil")
   }

   seed, err := hex.DecodeString(engineConfig.GenesisSeed)
@@ -474,13 +481,17 @@ func CreateGenesisState(
   }

   logger.Info("creating genesis frame from message:")
-  for _, l := range strings.Split(string(seed), "\n") {
+  for i, l := range strings.Split(string(seed), "|") {
+    if i == 0 {
       logger.Info(l)
+    } else {
+      logger.Info(fmt.Sprintf("Blockstamp ending in 0x%x", l))
+    }
   }

   difficulty := engineConfig.Difficulty
-  if difficulty != 100000 {
-    difficulty = 100000
+  if difficulty != 200000 {
+    difficulty = 200000
   }

   b := sha3.Sum256(seed)
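The CreateGenesisState hunk above now splits the decoded genesis seed on "|" instead of newlines, logging the first segment as the genesis message and each later segment as a blockstamp. A small sketch of that parsing step, using a made-up seed value purely for illustration:

    package main

    import (
        "encoding/hex"
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical seed: "message|stamp1|stamp2", hex-encoded the way
        // engineConfig.GenesisSeed would be.
        genesisSeedHex := hex.EncodeToString([]byte("example genesis message|deadbeef|feedface"))

        seed, err := hex.DecodeString(genesisSeedHex)
        if err != nil {
            panic(err)
        }
        for i, l := range strings.Split(string(seed), "|") {
            if i == 0 {
                fmt.Println(l)
            } else {
                fmt.Printf("Blockstamp ending in 0x%x\n", l)
            }
        }
    }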
@ -531,6 +542,7 @@ func CreateGenesisState(
|
|||||||
|
|
||||||
bridgedAddrs := map[string]struct{}{}
|
bridgedAddrs := map[string]struct{}{}
|
||||||
|
|
||||||
|
logger.Info("encoding bridged token state")
|
||||||
bridgeTotal := decimal.Zero
|
bridgeTotal := decimal.Zero
|
||||||
for _, b := range bridged {
|
for _, b := range bridged {
|
||||||
amt, err := decimal.NewFromString(b.Amount)
|
amt, err := decimal.NewFromString(b.Amount)
|
||||||
@ -544,6 +556,7 @@ func CreateGenesisState(
|
|||||||
voucherTotals := map[string]decimal.Decimal{}
|
voucherTotals := map[string]decimal.Decimal{}
|
||||||
peerIdTotals := map[string]decimal.Decimal{}
|
peerIdTotals := map[string]decimal.Decimal{}
|
||||||
peerSeniority := map[string]uint64{}
|
peerSeniority := map[string]uint64{}
|
||||||
|
logger.Info("encoding first retro state")
|
||||||
for _, f := range firstRetro {
|
for _, f := range firstRetro {
|
||||||
if _, ok := bridgedAddrs[f.PeerId]; !ok {
|
if _, ok := bridgedAddrs[f.PeerId]; !ok {
|
||||||
peerIdTotals[f.PeerId], err = decimal.NewFromString(f.Reward)
|
peerIdTotals[f.PeerId], err = decimal.NewFromString(f.Reward)
|
||||||
@ -562,12 +575,14 @@ func CreateGenesisState(
|
|||||||
peerSeniority[f.PeerId] = uint64(10 * 6 * 60 * 24 * 92 / (max / actual))
|
peerSeniority[f.PeerId] = uint64(10 * 6 * 60 * 24 * 92 / (max / actual))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Info("encoding voucher state")
|
||||||
for _, v := range vouchers {
|
for _, v := range vouchers {
|
||||||
if _, ok := bridgedAddrs[v]; !ok {
|
if _, ok := bridgedAddrs[v]; !ok {
|
||||||
voucherTotals[v] = decimal.NewFromInt(50)
|
voucherTotals[v] = decimal.NewFromInt(50)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Info("encoding second retro state")
|
||||||
for _, f := range secondRetro {
|
for _, f := range secondRetro {
|
||||||
if _, ok := bridgedAddrs[f.PeerId]; !ok {
|
if _, ok := bridgedAddrs[f.PeerId]; !ok {
|
||||||
existing, ok := peerIdTotals[f.PeerId]
|
existing, ok := peerIdTotals[f.PeerId]
|
||||||
@ -609,6 +624,7 @@ func CreateGenesisState(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Info("encoding third retro state")
|
||||||
for _, f := range thirdRetro {
|
for _, f := range thirdRetro {
|
||||||
existing, ok := peerIdTotals[f.PeerId]
|
existing, ok := peerIdTotals[f.PeerId]
|
||||||
|
|
||||||
@ -630,6 +646,7 @@ func CreateGenesisState(
|
|||||||
peerSeniority[f.PeerId] = peerSeniority[f.PeerId] + (10 * 6 * 60 * 24 * 30)
|
peerSeniority[f.PeerId] = peerSeniority[f.PeerId] + (10 * 6 * 60 * 24 * 30)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logger.Info("encoding fourth retro state")
|
||||||
for _, f := range fourthRetro {
|
for _, f := range fourthRetro {
|
||||||
existing, ok := peerIdTotals[f.PeerId]
|
existing, ok := peerIdTotals[f.PeerId]
|
||||||
|
|
||||||
@@ -661,6 +678,14 @@ func CreateGenesisState(
 panic(err)
 }

+totalExecutions := 0
+logger.Info(
+"creating execution state",
+zap.Int(
+"coin_executions",
+totalExecutions,
+),
+)
 genesisState.Outputs = append(genesisState.Outputs, &protobufs.TokenOutput{
 Output: &protobufs.TokenOutput_Coin{
 Coin: &protobufs.Coin{
@@ -678,8 +703,18 @@ func CreateGenesisState(
 },
 },
 })
+totalExecutions++

 for peerId, total := range peerIdTotals {
+if totalExecutions%1000 == 0 {
+logger.Info(
+"creating execution state",
+zap.Int(
+"coin_executions",
+totalExecutions,
+),
+)
+}
 peerBytes, err := base58.Decode(peerId)
 if err != nil {
 panic(err)
@@ -707,9 +742,19 @@ func CreateGenesisState(
 },
 },
 })
+totalExecutions++
 }

 for voucher, total := range voucherTotals {
+if totalExecutions%1000 == 0 {
+logger.Info(
+"creating execution state",
+zap.Int(
+"coin_executions",
+totalExecutions,
+),
+)
+}
 keyBytes, err := hex.DecodeString(voucher[2:])
 if err != nil {
 panic(err)
@@ -737,10 +782,18 @@ func CreateGenesisState(
 },
 },
 })
+totalExecutions++
 }

-for _, output := range genesisState.Outputs {
+logger.Info(
+"serializing execution state to store, this may take some time...",
+zap.Int(
+"coin_executions",
+totalExecutions,
+),
+)
 txn, err := coinStore.NewTransaction()
+for _, output := range genesisState.Outputs {
 if err != nil {
 panic(err)
 }
@@ -758,10 +811,10 @@ func CreateGenesisState(
 if err != nil {
 panic(err)
 }
+}
 if err := txn.Commit(); err != nil {
 panic(err)
 }
-}

 logger.Info("encoded transcript")

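Note: the hunks above also lift coinStore.NewTransaction() and txn.Commit() out of the output loop, so all genesis outputs are staged in one transaction and committed once. A minimal sketch of that pattern, with simplified stand-ins for the store types (not the real store.CoinStore API):

    // Txn and Store are illustrative stand-ins; each element of `stage`
    // represents whatever per-output write the real code performs on txn.
    type Txn interface {
        Commit() error
        Abort() error
    }

    type Store interface {
        NewTransaction() (Txn, error)
    }

    func writeAll(s Store, stage []func(Txn) error) error {
        txn, err := s.NewTransaction()
        if err != nil {
            return err
        }
        for _, put := range stage {
            if err := put(txn); err != nil {
                txn.Abort() // best-effort rollback in this sketch
                return err
            }
        }
        return txn.Commit() // one commit for the whole batch
    }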
@@ -826,7 +879,7 @@ func CreateGenesisState(
 },
 AggregateCommitment: commitment,
 Proof: proof,
-}, [][]byte{genesis.Beacon}, peerSeniority
+}, [][]byte{genesis.Beacon}, map[string]uint64{}
 }

 func GetAddressOfCoin(
@@ -1001,14 +1054,15 @@ func (e *TokenExecutionEngine) RunWorker() {
 )
 panic(err)
 }
-for _, output := range app.TokenOutputs.Outputs {
-switch o := output.Output.(type) {
-case *protobufs.TokenOutput_Coin:
 txn, err := e.coinStore.NewTransaction()
 if err != nil {
 panic(err)
 }

+for _, output := range app.TokenOutputs.Outputs {
+switch o := output.Output.(type) {
+case *protobufs.TokenOutput_Coin:
 address, err := GetAddressOfCoin(o.Coin, frame.FrameNumber)
 if err != nil {
 panic(err)
@@ -1022,15 +1076,7 @@ func (e *TokenExecutionEngine) RunWorker() {
 if err != nil {
 panic(err)
 }
-if err := txn.Commit(); err != nil {
-panic(err)
-}
 case *protobufs.TokenOutput_DeletedCoin:
-txn, err := e.coinStore.NewTransaction()
-if err != nil {
-panic(err)
-}
-
 coin, err := e.coinStore.GetCoinByAddress(o.DeletedCoin.Address)
 if err != nil {
 panic(err)
@@ -1043,15 +1089,7 @@ func (e *TokenExecutionEngine) RunWorker() {
 if err != nil {
 panic(err)
 }
-if err := txn.Commit(); err != nil {
-panic(err)
-}
 case *protobufs.TokenOutput_Proof:
-txn, err := e.coinStore.NewTransaction()
-if err != nil {
-panic(err)
-}
-
 address, err := GetAddressOfPreCoinProof(o.Proof)
 if err != nil {
 panic(err)
@@ -1065,15 +1103,7 @@ func (e *TokenExecutionEngine) RunWorker() {
 if err != nil {
 panic(err)
 }
-if err := txn.Commit(); err != nil {
-panic(err)
-}
 case *protobufs.TokenOutput_DeletedProof:
-txn, err := e.coinStore.NewTransaction()
-if err != nil {
-panic(err)
-}
-
 address, err := GetAddressOfPreCoinProof(o.DeletedProof)
 if err != nil {
 panic(err)
@@ -1086,13 +1116,14 @@ func (e *TokenExecutionEngine) RunWorker() {
 if err != nil {
 panic(err)
 }
+}
+}
+
 if err := txn.Commit(); err != nil {
 panic(err)
 }
 }
 }
-}
-}
 }

 func (e *TokenExecutionEngine) publishMessage(
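Note: read together, the RunWorker hunks above turn per-output transactions into one transaction per frame — open it before the loop, apply every TokenOutput inside the type switch, commit once afterwards. A sketch of the resulting control flow (the per-case store calls are elided; this is a paraphrase, not verbatim code from the commit):

    // One transaction per processed frame.
    txn, err := e.coinStore.NewTransaction()
    if err != nil {
        panic(err)
    }
    for _, output := range app.TokenOutputs.Outputs {
        switch output.Output.(type) {
        case *protobufs.TokenOutput_Coin:
            // derive the coin address and stage the new coin on txn
        case *protobufs.TokenOutput_DeletedCoin:
            // look up the coin by address and stage its deletion on txn
        case *protobufs.TokenOutput_Proof:
            // derive the pre-coin proof address and stage the proof on txn
        case *protobufs.TokenOutput_DeletedProof:
            // derive the proof address and stage its deletion on txn
        }
    }
    if err := txn.Commit(); err != nil {
        panic(err)
    }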
 node/main.go | 19
@@ -195,7 +195,7 @@ func main() {
 count++
 }

-if count < len(config.Signatories)/2 {
+if count < len(config.Signatories)/2+len(config.Signatories)%2 {
 fmt.Printf("Quorum on signatures not met")
 os.Exit(1)
 }
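Note: the new condition rejects anything short of a strict majority. For n signatories, n/2 + n%2 is integer ceiling division, so ceil(n/2) valid signatures are now required where the old check accepted floor(n/2):

    // For n = 7: old threshold 7/2 = 3, new threshold 7/2 + 7%2 = 4.
    // For n = 8: both evaluate to 4.
    func quorumThreshold(n int) int {
        return n/2 + n%2 // == ceil(n/2) for n >= 0
    }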
@@ -332,9 +332,6 @@ func main() {
 }

 if *core != 0 {
-runtime.GOMAXPROCS(1)
-rdebug.SetGCPercent(9999)
-
 if nodeConfig.Engine.DataWorkerMemoryLimit == 0 {
 nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB
 }
@@ -372,6 +369,7 @@ func main() {
 l,
 uint32(*core)-1,
 qcrypto.NewWesolowskiFrameProver(l),
+nodeConfig,
 *parentProcess,
 )
 if err != nil {
@@ -395,6 +393,19 @@ func main() {

 report := RunSelfTestIfNeeded(*configDirectory, nodeConfig)

+if *core == 0 {
+for {
+genesis, err := config.DownloadAndVerifyGenesis(*network)
+if err != nil {
+time.Sleep(10 * time.Minute)
+continue
+}
+
+nodeConfig.Engine.GenesisSeed = genesis.GenesisSeedHex
+break
+}
+}

 done := make(chan os.Signal, 1)
 signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
 var node *app.Node
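Note: the added block makes the main process (*core == 0) wait until a verified genesis seed has been downloaded, retrying every ten minutes. The same shape as a generic helper, written only to make the retry pattern explicit — the helper name and signature are this note's invention, not part of the commit (it assumes the "time" package already imported in main.go):

    // retryForever keeps calling fetch until it succeeds, sleeping `wait`
    // between failed attempts, then returns the fetched value.
    func retryForever[T any](fetch func() (T, error), wait time.Duration) T {
        for {
            v, err := fetch()
            if err != nil {
                time.Sleep(wait)
                continue
            }
            return v
        }
    }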
@@ -1,7 +1,6 @@
 package p2p

 import (
-"fmt"
 "math/big"
 "sort"

@@ -36,17 +35,9 @@ func GetBloomFilter(data []byte, bitLength int, k int) []byte {
 }
 if outputBI.Bit(int(position)) != 1 {
 outputBI.SetBit(outputBI, int(position), 1)
-} else if k*size <= 32 {
+} else if k < size {
 // we need to extend the search
 k++
-} else {
-fmt.Printf(
-"digest %+x cannot be used as bloom index, panicking\n",
-digest,
-)
-panic(
-"could not generate bloom filter index, k offset cannot be adjusted",
-)
 }
 }
 outputBI.FillBytes(output)
@@ -33,6 +33,30 @@ func TestGetBloomFilter(t *testing.T) {
 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 })

+fourByteSixtyFourKTest := p2p.GetBloomFilter(
+[]byte{0x00, 0x00, 0x00, 0x00},
+1024,
+64,
+)
+assert.ElementsMatch(t, fourByteSixtyFourKTest, []byte{
+0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+0x00, 0x00, 0x02, 0x00, 0x10, 0x00, 0x00, 0x10,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x08, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x40, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x10,
+0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+0x00, 0x00, 0x00, 0x00, 0x80, 0x21, 0x00, 0x01,
+0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01,
+})
+
 fourByteSixteenKTest := p2p.GetBloomFilter(
 []byte{0x00, 0x00, 0x00, 0x00},
 65536,
@@ -391,6 +391,10 @@ func (b *BlossomSub) Subscribe(
 b.logger.Error("subscription failed", zap.Error(err))
 return errors.Wrap(err, "subscribe")
 }
+_, ok := b.bitmaskMap[string(bit.Bitmask())]
+if !ok {
+b.bitmaskMap[string(bit.Bitmask())] = bit
+}
 subs = append(subs, sub)
 }

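Note: the Subscribe hunk records each bitmask under its byte string the first time it is seen, so later operations can resolve the bitmask object from the map. The same register-if-absent idiom in isolation (the Registry type is illustrative, not the BlossomSub struct):

    // Registry keeps the first value stored under each key and ignores repeats.
    type Registry[V any] struct {
        m map[string]V
    }

    func (r *Registry[V]) Register(key []byte, v V) {
        if r.m == nil {
            r.m = make(map[string]V)
        }
        if _, ok := r.m[string(key)]; !ok {
            r.m[string(key)] = v
        }
    }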
@@ -3,15 +3,20 @@ package rpc
 import (
 "context"
 "encoding/binary"
+"encoding/hex"
 "os"
 "runtime"
 "syscall"
 "time"

+"golang.org/x/crypto/sha3"
+"source.quilibrium.com/quilibrium/monorepo/node/config"
+"source.quilibrium.com/quilibrium/monorepo/node/consensus/data"
 "source.quilibrium.com/quilibrium/monorepo/node/crypto"
 "source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/token"
 "source.quilibrium.com/quilibrium/monorepo/node/p2p"

+pcrypto "github.com/libp2p/go-libp2p/core/crypto"
 "github.com/multiformats/go-multiaddr"
 mn "github.com/multiformats/go-multiaddr/net"
 "github.com/pkg/errors"
@@ -147,7 +152,7 @@ func (r *DataWorkerIPCServer) CalculateChallengeProof(

 if !found {
 return nil, errors.Wrap(
-errors.New("no applicable challenge"),
+data.ErrNoApplicableChallenge,
 "calculate challenge proof",
 )
 }
@@ -169,13 +174,42 @@ func NewDataWorkerIPCServer(
 logger *zap.Logger,
 coreId uint32,
 prover crypto.FrameProver,
+config *config.Config,
 parentProcessId int,
 ) (*DataWorkerIPCServer, error) {
+peerPrivKey, err := hex.DecodeString(config.P2P.PeerPrivKey)
+if err != nil {
+panic(errors.Wrap(err, "error unmarshaling peerkey"))
+}
+
+privKey, err := pcrypto.UnmarshalEd448PrivateKey(peerPrivKey)
+if err != nil {
+panic(errors.Wrap(err, "error unmarshaling peerkey"))
+}
+
+pub := privKey.GetPublic()
+
+pubKey, err := pub.Raw()
+if err != nil {
+panic(err)
+}
+
+digest := make([]byte, 128)
+s := sha3.NewShake256()
+s.Write([]byte(pubKey))
+_, err = s.Read(digest)
+if err != nil {
+panic(err)
+}
+
 return &DataWorkerIPCServer{
 listenAddrGRPC: listenAddrGRPC,
 logger: logger,
 coreId: coreId,
 prover: prover,
+indices: []int{
+p2p.GetOnesIndices(p2p.GetBloomFilter(digest, 1024, 64))[coreId%64],
+},
 parentProcessId: parentProcessId,
 }, nil
 }
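Note: NewDataWorkerIPCServer now derives a stable per-core proof index from the worker's own key — the Ed448 public key is expanded with SHAKE256 into a 128-byte digest, the digest is spread over a 1024-bit bloom filter with k = 64, and the worker keeps the (coreId mod 64)-th set bit. A standalone sketch of the digest step, using only x/crypto/sha3 (the package and helper names are illustrative, not from the commit):

    package sketch

    import "golang.org/x/crypto/sha3"

    // coreDigest expands a raw public key into the 128-byte SHAKE256 digest
    // that the constructor feeds to p2p.GetBloomFilter(digest, 1024, 64)
    // before picking an index with p2p.GetOnesIndices.
    func coreDigest(pubKey []byte) ([]byte, error) {
        digest := make([]byte, 128)
        s := sha3.NewShake256()
        s.Write(pubKey) // absorb the key bytes
        if _, err := s.Read(digest); err != nil { // squeeze 128 bytes
            return nil, err
        }
        return digest, nil
    }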
@@ -764,7 +764,7 @@ func (p *PebbleClockStore) GetLatestDataClockFrame(
 }

 frameNumber := binary.BigEndian.Uint64(idxValue)
-frame, _, err := p.GetDataClockFrame(filter, frameNumber, false)
+frame, tries, err := p.GetDataClockFrame(filter, frameNumber, false)
 if err != nil {
 if errors.Is(err, pebble.ErrNotFound) {
 return nil, nil, ErrNotFound
@@ -775,29 +775,7 @@ func (p *PebbleClockStore) GetLatestDataClockFrame(

 closer.Close()

-proverTries := []*tries.RollingFrecencyCritbitTrie{}
-i := uint16(0)
-for {
-proverTrie := &tries.RollingFrecencyCritbitTrie{}
-trieData, closer, err := p.db.Get(clockProverTrieKey(filter, i, frameNumber))
-if err != nil {
-if errors.Is(err, pebble.ErrNotFound) {
-break
-}
-
-return nil, nil, errors.Wrap(err, "get latest data clock frame")
-}
-
-if err := proverTrie.Deserialize(trieData); err != nil {
-closer.Close()
-return nil, nil, errors.Wrap(err, "get latest data clock frame")
-}
-closer.Close()
-proverTries = append(proverTries, proverTrie)
-i++
-}
-
-return frame, proverTries, nil
+return frame, tries, nil
 }

 // GetStagedDataClockFrame implements ClockStore.