ceremonyclient/node/execution/intrinsics/ceremony/ceremony_execution_engine.go

package ceremony
import (
"bytes"
"crypto"
"crypto/rand"
_ "embed"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"sync"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/vdf"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/ceremony"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/execution/intrinsics/ceremony/application"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
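// CeremonyExecutionEngine runs the ceremony intrinsic: it follows the
// ceremony data clock, materializes the lobby application state from each
// frame, and participates in contribution rounds on behalf of this node's
// prover key, publishing joins, attestations, transcript shares and updated
// transcripts back to the network.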
type CeremonyExecutionEngine struct {
logger *zap.Logger
clock *ceremony.CeremonyDataClockConsensusEngine
clockStore store.ClockStore
keyStore store.KeyStore
keyManager keys.KeyManager
engineConfig *config.EngineConfig
pubSub p2p.PubSub
peerIdHash []byte
provingKey crypto.Signer
proverPublicKey []byte
provingKeyAddress []byte
inclusionProver qcrypto.InclusionProver
participantMx sync.Mutex
peerChannels map[string]*p2p.PublicP2PChannel
activeSecrets []curves.Scalar
activeClockFrame *protobufs.ClockFrame
alreadyPublishedShare bool
alreadyPublishedTranscript bool
seenMessageMap map[string]bool
seenMessageMx sync.Mutex
intrinsicFilter []byte
frameProver qcrypto.FrameProver
}
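// validCeremonySelector is the expected selector (hex) of the genesis data
// clock frame; a stored genesis frame with any other selector is treated as
// corrupted and rebuilt.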
const validCeremonySelector = "253f3a6383dcfe91cf49abd20204b3e6ef5afd4c70c1968bb1f0b827a72af53b"
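// NewCeremonyExecutionEngine constructs the engine: it derives the intrinsic
// bloom filter for the ceremony address, recreates the genesis state if the
// stored genesis frame is missing or has an unexpected selector, wires up the
// data time reel and ceremony data clock consensus engine, and loads this
// node's proving key and peer id hash.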
func NewCeremonyExecutionEngine(
logger *zap.Logger,
engineConfig *config.EngineConfig,
keyManager keys.KeyManager,
pubSub p2p.PubSub,
frameProver qcrypto.FrameProver,
inclusionProver qcrypto.InclusionProver,
clockStore store.ClockStore,
masterTimeReel *time.MasterTimeReel,
peerInfoManager p2p.PeerInfoManager,
keyStore store.KeyStore,
) *CeremonyExecutionEngine {
if logger == nil {
panic(errors.New("logger is nil"))
}
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
if err != nil {
panic(err)
}
intrinsicFilter := append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
)
frame, _, err := clockStore.GetDataClockFrame(intrinsicFilter, 0, false)
var origin []byte
var inclusionProof *qcrypto.InclusionAggregateProof
var proverKeys [][]byte
rebuildGenesisFrame := false
if frame != nil {
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
if selector.Text(16) != validCeremonySelector {
logger.Warn("corrupted genesis frame detected, rebuilding")
err = clockStore.ResetDataClockFrames(intrinsicFilter)
if err != nil {
panic(err)
}
rebuildGenesisFrame = true
}
}
if (err != nil && errors.Is(err, store.ErrNotFound)) || rebuildGenesisFrame {
origin, inclusionProof, proverKeys = CreateGenesisState(
logger,
engineConfig,
nil,
inclusionProver,
)
}
dataTimeReel := time.NewDataTimeReel(
intrinsicFilter,
logger,
clockStore,
engineConfig,
frameProver,
origin,
inclusionProof,
proverKeys,
)
clock := ceremony.NewCeremonyDataClockConsensusEngine(
engineConfig,
logger,
keyManager,
clockStore,
keyStore,
pubSub,
frameProver,
inclusionProver,
masterTimeReel,
dataTimeReel,
peerInfoManager,
intrinsicFilter,
seed,
)
e := &CeremonyExecutionEngine{
logger: logger,
clock: clock,
engineConfig: engineConfig,
keyManager: keyManager,
clockStore: clockStore,
keyStore: keyStore,
pubSub: pubSub,
inclusionProver: inclusionProver,
frameProver: frameProver,
participantMx: sync.Mutex{},
peerChannels: map[string]*p2p.PublicP2PChannel{},
alreadyPublishedShare: false,
seenMessageMx: sync.Mutex{},
seenMessageMap: map[string]bool{},
intrinsicFilter: intrinsicFilter,
}
peerId := e.pubSub.GetPeerID()
addr, err := poseidon.HashBytes(peerId)
if err != nil {
panic(err)
}
addrBytes := addr.Bytes()
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
e.peerIdHash = addrBytes
provingKey, _, publicKeyBytes, provingKeyAddress := e.clock.GetProvingKey(
engineConfig,
)
e.provingKey = provingKey
e.proverPublicKey = publicKeyBytes
e.provingKeyAddress = provingKeyAddress
return e
}
var _ execution.ExecutionEngine = (*CeremonyExecutionEngine)(nil)
// GetName implements ExecutionEngine
func (*CeremonyExecutionEngine) GetName() string {
return "ceremony"
}
// GetSupportedApplications implements ExecutionEngine
func (
*CeremonyExecutionEngine,
) GetSupportedApplications() []*protobufs.Application {
return []*protobufs.Application{
{
Address: application.CEREMONY_ADDRESS,
ExecutionContext: protobufs.ExecutionContext_EXECUTION_CONTEXT_INTRINSIC,
},
}
}
// 2024-01-03: 1.2.0
//
//go:embed retroactive_peers.json
var retroactivePeersJsonBinary []byte
// CreateGenesisState creates the genesis state for the ceremony intrinsic,
// returning the VDF-derived origin bytes, the inclusion proof over the
// initial application state, and the initial prover key set.
func CreateGenesisState(
logger *zap.Logger,
engineConfig *config.EngineConfig,
testProverKeys [][]byte,
inclusionProver qcrypto.InclusionProver,
) (
[]byte,
*qcrypto.InclusionAggregateProof,
[][]byte,
) {
seed, err := hex.DecodeString(engineConfig.GenesisSeed)
if err != nil {
panic(errors.Wrap(err, "failed to decode genesis seed"))
}
logger.Info("creating genesis frame")
for _, l := range strings.Split(string(seed), "\n") {
logger.Info(l)
}
difficulty := engineConfig.Difficulty
if difficulty == 0 || difficulty == 10000 {
difficulty = 100000
}
b := sha3.Sum256(seed)
v := vdf.New(difficulty, b)
v.Execute()
o := v.GetOutput()
inputMessage := o[:]
// Signatories are special, they don't have an inclusion proof because they
// have not broadcasted communication keys, but they still get contribution
// rights prior to PoMW, because they did produce meaningful work in the
// first phase:
logger.Info("encoding signatories to prover trie")
proverKeys := [][]byte{}
if len(testProverKeys) != 0 {
logger.Warn(
"TEST PROVER ENTRIES BEING ADDED, YOUR NODE WILL BE KICKED IF IN" +
" PRODUCTION",
)
proverKeys = testProverKeys
} else {
for _, s := range kzg.CeremonySignatories {
pubkey := s.ToAffineCompressed()
logger.Info("0x" + hex.EncodeToString(pubkey))
proverKeys = append(proverKeys, pubkey)
}
}
logger.Info("encoding ceremony and phase one signatories")
transcript := &protobufs.CeremonyTranscript{}
for p, s := range kzg.CeremonyBLS48581G1 {
transcript.G1Powers = append(
transcript.G1Powers,
&protobufs.BLS48581G1PublicKey{
KeyValue: s.ToAffineCompressed(),
},
)
logger.Info(fmt.Sprintf("encoded G1 power %d", p))
}
for p, s := range kzg.CeremonyBLS48581G2 {
transcript.G2Powers = append(
transcript.G2Powers,
&protobufs.BLS48581G2PublicKey{
KeyValue: s.ToAffineCompressed(),
},
)
logger.Info(fmt.Sprintf("encoded G2 power %d", p))
}
transcript.RunningG1_256Witnesses = append(
transcript.RunningG1_256Witnesses,
&protobufs.BLS48581G1PublicKey{
KeyValue: kzg.CeremonyRunningProducts[0].ToAffineCompressed(),
},
)
transcript.RunningG2_256Powers = append(
transcript.RunningG2_256Powers,
&protobufs.BLS48581G2PublicKey{
KeyValue: kzg.CeremonyBLS48581G2[len(kzg.CeremonyBLS48581G2)-1].
ToAffineCompressed(),
},
)
outputProof := &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{},
TransitionInputs: [][]byte{},
}
proofBytes, err := proto.Marshal(outputProof)
if err != nil {
panic(err)
}
logger.Info("encoded transcript")
logger.Info("encoding ceremony signatories into application state")
rewardTrie := &tries.RewardCritbitTrie{}
for _, s := range kzg.CeremonySignatories {
pubkey := s.ToAffineCompressed()
addr, err := poseidon.HashBytes(pubkey)
if err != nil {
panic(err)
}
addrBytes := addr.Bytes()
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
rewardTrie.Add(addrBytes, 0, 50)
}
type peerData struct {
PeerId string `json:"peer_id"`
TokenBalance uint64 `json:"token_balance"`
}
type rewards struct {
Rewards []peerData `json:"rewards"`
}
retroEntries := &rewards{}
err = json.Unmarshal(retroactivePeersJsonBinary, retroEntries)
if err != nil {
panic(err)
}
logger.Info("adding retroactive peer reward info")
for _, s := range retroEntries.Rewards {
peerId := s.PeerId
peerBytes, err := base64.StdEncoding.DecodeString(peerId)
if err != nil {
panic(err)
}
addr, err := poseidon.HashBytes(peerBytes)
if err != nil {
panic(err)
}
addrBytes := addr.Bytes()
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
rewardTrie.Add(addrBytes, 0, s.TokenBalance)
}
trieBytes, err := rewardTrie.Serialize()
if err != nil {
panic(err)
}
ceremonyLobbyState := &protobufs.CeremonyLobbyState{
LobbyState: 0,
CeremonyState: &protobufs.CeremonyLobbyState_CeremonyOpenState{
CeremonyOpenState: &protobufs.CeremonyOpenState{
JoinedParticipants: []*protobufs.CeremonyLobbyJoin{},
PreferredParticipants: []*protobufs.Ed448PublicKey{},
},
},
LatestTranscript: transcript,
RewardTrie: trieBytes,
}
outputBytes, err := proto.Marshal(ceremonyLobbyState)
if err != nil {
panic(err)
}
intrinsicFilter := append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
)
// Compat: a bug went unnoticed in prior versions: the raw filter was used
// here instead of the application address. It didn't affect execution because
// the value was forcibly stashed. Preserve it so that frame history does not
// need to be rebuilt.
executionOutput := &protobufs.IntrinsicExecutionOutput{
Address: intrinsicFilter,
Output: outputBytes,
Proof: proofBytes,
}
data, err := proto.Marshal(executionOutput)
if err != nil {
panic(err)
}
logger.Info("proving execution output for inclusion")
commitment, err := inclusionProver.Commit(
data,
protobufs.IntrinsicExecutionOutputType,
)
if err != nil {
panic(err)
}
logger.Info("creating kzg proof")
proof, err := inclusionProver.ProveAggregate(
[]*qcrypto.InclusionCommitment{
commitment,
},
)
if err != nil {
panic(err)
}
logger.Info("finalizing execution proof")
return inputMessage, proof, proverKeys
}
// Start implements ExecutionEngine
func (e *CeremonyExecutionEngine) Start() <-chan error {
errChan := make(chan error)
e.logger.Info("ceremony data loaded", zap.Binary(
"g2_power",
kzg.CeremonyBLS48581G2[1].ToAffineCompressed(),
))
go func() {
err := <-e.clock.Start()
if err != nil {
panic(err)
}
err = <-e.clock.RegisterExecutor(e, 0)
if err != nil {
panic(err)
}
go e.RunWorker()
errChan <- nil
}()
return errChan
}
// Stop implements ExecutionEngine
func (e *CeremonyExecutionEngine) Stop(force bool) <-chan error {
errChan := make(chan error)
go func() {
errChan <- <-e.clock.Stop(force)
}()
return errChan
}
// ProcessMessage implements ExecutionEngine
func (e *CeremonyExecutionEngine) ProcessMessage(
address []byte,
message *protobufs.Message,
) ([]*protobufs.Message, error) {
if bytes.Equal(address, e.GetSupportedApplications()[0].Address) {
e.logger.Debug("processing execution message")
any := &anypb.Any{}
if err := proto.Unmarshal(message.Payload, any); err != nil {
return nil, errors.Wrap(err, "process message")
}
switch any.TypeUrl {
case protobufs.ClockFrameType:
frame := &protobufs.ClockFrame{}
if err := any.UnmarshalTo(frame); err != nil {
return nil, errors.Wrap(err, "process message")
}
if frame.FrameNumber < e.clock.GetFrame().FrameNumber {
return nil, nil
}
if err := e.frameProver.VerifyDataClockFrame(frame); err != nil {
return nil, errors.Wrap(err, "process message")
}
if err := e.VerifyExecution(frame); err != nil {
return nil, errors.Wrap(err, "process message")
}
case protobufs.CeremonyLobbyJoinType:
fallthrough
case protobufs.CeremonySeenProverAttestationType:
fallthrough
case protobufs.CeremonyDroppedProverAttestationType:
fallthrough
case protobufs.CeremonyTranscriptCommitType:
fallthrough
case protobufs.CeremonyTranscriptShareType:
fallthrough
case protobufs.CeremonyTranscriptType:
hash := sha3.Sum256(any.Value)
if any.TypeUrl == protobufs.CeremonyTranscriptType {
e.seenMessageMx.Lock()
ref := string(hash[:])
if _, ok := e.seenMessageMap[ref]; ok {
e.seenMessageMx.Unlock()
return nil, errors.Wrap(
errors.New("message already received"),
"process message",
)
}
e.seenMessageMap[ref] = true
e.seenMessageMx.Unlock()
}
if e.clock.IsInProverTrie(e.proverPublicKey) {
proposedTransition := &protobufs.CeremonyLobbyStateTransition{
TypeUrls: []string{any.TypeUrl},
TransitionInputs: [][]byte{
any.Value,
},
}
any := &anypb.Any{}
if err := any.MarshalFrom(proposedTransition); err != nil {
return nil, errors.Wrap(err, "process message")
}
any.TypeUrl = strings.Replace(
any.TypeUrl,
"type.googleapis.com",
"types.quilibrium.com",
1,
)
payload, err := proto.Marshal(any)
if err != nil {
return nil, errors.Wrap(err, "process message")
}
h, err := poseidon.HashBytes(payload)
if err != nil {
return nil, errors.Wrap(err, "process message")
}
msg := &protobufs.Message{
Hash: h.Bytes(),
Address: e.provingKeyAddress,
Payload: payload,
}
return []*protobufs.Message{
msg,
}, nil
}
}
}
return nil, nil
}
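// RunWorker consumes frames from the clock's frame channel, materializes the
// ceremony application from each frame, and acts on the lobby state: joining
// when the lobby is open, exchanging round messages while in progress,
// publishing a transcript share while finalizing, and publishing the updated
// transcript while validating.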
func (e *CeremonyExecutionEngine) RunWorker() {
frameChan := e.clock.GetFrameChannel()
for {
select {
case frame := <-frameChan:
e.activeClockFrame = frame
e.logger.Info(
"evaluating next frame",
zap.Uint64(
"frame_number",
frame.FrameNumber,
),
)
app, err := application.MaterializeApplicationFromFrame(frame)
if err != nil {
e.logger.Error(
"error while materializing application from frame",
zap.Error(err),
)
panic(err)
}
_, _, reward := app.RewardTrie.Get(e.provingKeyAddress)
_, _, retro := app.RewardTrie.Get(e.peerIdHash)
e.logger.Info(
"current application state",
zap.Uint64("my_balance", reward+retro),
zap.String("lobby_state", app.LobbyState.String()),
)
switch app.LobbyState {
case application.CEREMONY_APPLICATION_STATE_OPEN:
e.alreadyPublishedShare = false
e.alreadyPublishedTranscript = false
alreadyJoined := false
for _, join := range app.LobbyJoins {
if bytes.Equal(
join.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
alreadyJoined = true
break
}
}
e.logger.Info(
"lobby open for joins",
zap.Int("joined_participants", len(app.LobbyJoins)),
zap.Int(
"preferred_participants",
len(app.NextRoundPreferredParticipants),
),
zap.Bool("in_lobby", alreadyJoined),
zap.Uint64("state_count", app.StateCount),
)
if !alreadyJoined {
e.logger.Info(
"joining lobby",
zap.Binary("proving_key", e.proverPublicKey),
)
if err := e.announceJoin(frame); err != nil {
e.logger.Error(
"failed to announce join",
zap.Error(err),
)
}
e.logger.Info("preparing contribution")
// Calculate this now, after announcing; this gives 10 frames of buffer
e.ensureSecrets(app)
}
case application.CEREMONY_APPLICATION_STATE_IN_PROGRESS:
inRound := false
for _, p := range app.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
inRound = true
break
}
}
if len(e.activeSecrets) == 0 && inRound {
// If we ended up in the scenario where we do not have any secrets
// available but we're in the round, we should politely leave.
e.publishDroppedParticipant(e.proverPublicKey)
continue
}
e.logger.Info(
"round in progress",
zap.Any("participants", app.ActiveParticipants),
zap.Any(
"current_seen_attestations",
len(app.LatestSeenProverAttestations),
),
zap.Any(
"current_dropped_attestations",
len(app.DroppedParticipantAttestations),
),
zap.Any(
"preferred_participants_for_next_round",
len(app.NextRoundPreferredParticipants),
),
zap.Bool("in_round", inRound),
zap.Uint64("current_sub_round", app.RoundCount),
zap.Uint64("stale_state_count", app.StateCount),
)
shouldConnect := false
position := 0
if len(e.peerChannels) == 0 && app.RoundCount == 1 &&
len(app.ActiveParticipants) > 1 {
for i, p := range app.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
shouldConnect = true
position = i
break
}
}
}
if shouldConnect {
e.logger.Info(
"connecting to peers",
zap.Any("participants", app.ActiveParticipants),
)
err := e.connectToActivePeers(app, position)
if err != nil {
e.logger.Error("error while connecting to peers", zap.Error(err))
e.publishDroppedParticipant(e.proverPublicKey)
continue
}
}
if len(e.peerChannels) != 0 {
done := false
rounds := app.TranscriptRoundAdvanceCommits
if len(rounds) != 0 {
for _, c := range rounds[app.RoundCount-1].Commits {
if bytes.Equal(
c.ProverSignature.PublicKey.KeyValue,
e.proverPublicKey,
) {
done = true
}
}
}
if !done {
e.logger.Info(
"participating in round",
zap.Any("participants", app.ActiveParticipants),
zap.Uint64("current_round", app.RoundCount),
)
err := e.participateRound(app)
if err != nil {
e.logger.Error("error while participating in round", zap.Error(err))
e.publishDroppedParticipant(e.proverPublicKey)
}
}
} else if len(app.ActiveParticipants) == 1 &&
bytes.Equal(
app.ActiveParticipants[0].PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
if err = e.commitRound(e.activeSecrets); err != nil {
e.logger.Error("error while participating in round", zap.Error(err))
}
}
case application.CEREMONY_APPLICATION_STATE_FINALIZING:
e.logger.Info(
"round contribution finalizing",
zap.Any("participants", len(app.ActiveParticipants)),
zap.Any(
"current_seen_attestations",
len(app.LatestSeenProverAttestations),
),
zap.Any(
"current_dropped_attestations",
len(app.DroppedParticipantAttestations),
),
zap.Any(
"preferred_participants_for_next_round",
len(app.NextRoundPreferredParticipants),
),
zap.Int("finalized_shares", len(app.TranscriptShares)),
)
for _, s := range app.TranscriptShares {
if bytes.Equal(
s.ProverSignature.PublicKey.KeyValue,
e.proverPublicKey,
) {
e.alreadyPublishedShare = true
}
}
shouldPublish := false
for _, p := range app.ActiveParticipants {
if bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
shouldPublish = true
break
}
}
if !e.alreadyPublishedShare && shouldPublish {
if len(e.activeSecrets) == 0 {
e.publishDroppedParticipant(e.proverPublicKey)
continue
}
err := e.publishTranscriptShare(app)
if err != nil {
e.logger.Error(
"error while publishing transcript share",
zap.Error(err),
)
}
}
case application.CEREMONY_APPLICATION_STATE_VALIDATING:
e.logger.Info("round contribution validating")
e.alreadyPublishedShare = false
for _, c := range e.peerChannels {
c.Close()
}
e.peerChannels = map[string]*p2p.PublicP2PChannel{}
if app.UpdatedTranscript != nil && !e.alreadyPublishedTranscript {
if err := e.publishTranscript(app); err != nil {
e.logger.Error(
"error while publishing transcript",
zap.Error(err),
)
}
}
}
}
}
}
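// publishMessage wraps the message in an Any with a types.quilibrium.com type
// URL, hashes the payload with Poseidon, and publishes the resulting message
// to the given bitmask filter.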
func (e *CeremonyExecutionEngine) publishMessage(
filter []byte,
message proto.Message,
) error {
any := &anypb.Any{}
if err := any.MarshalFrom(message); err != nil {
return errors.Wrap(err, "publish message")
}
any.TypeUrl = strings.Replace(
any.TypeUrl,
"type.googleapis.com",
"types.quilibrium.com",
1,
)
payload, err := proto.Marshal(any)
if err != nil {
return errors.Wrap(err, "publish message")
}
h, err := poseidon.HashBytes(payload)
if err != nil {
return errors.Wrap(err, "publish message")
}
msg := &protobufs.Message{
Hash: h.Bytes(),
Address: application.CEREMONY_ADDRESS,
Payload: payload,
}
data, err := proto.Marshal(msg)
if err != nil {
return errors.Wrap(err, "publish message")
}
return e.pubSub.PublishToBitmask(filter, data)
}
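// announceJoin creates (or reuses) the node's X448 identity and signed
// pre-keys, signs a CeremonyLobbyJoin for the current frame with the prover
// key, and publishes it to the intrinsic filter.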
func (e *CeremonyExecutionEngine) announceJoin(
frame *protobufs.ClockFrame,
) error {
idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
if err != nil {
if errors.Is(err, keys.KeyNotFoundErr) {
idk, err = e.keyManager.CreateAgreementKey(
"q-ratchet-idk",
keys.KeyTypeX448,
)
if err != nil {
return errors.Wrap(err, "announce join")
}
} else {
return errors.Wrap(err, "announce join")
}
}
spk, err := e.keyManager.GetAgreementKey("q-ratchet-spk")
if err != nil {
if errors.Is(err, keys.KeyNotFoundErr) {
spk, err = e.keyManager.CreateAgreementKey(
"q-ratchet-spk",
keys.KeyTypeX448,
)
if err != nil {
return errors.Wrap(err, "announce join")
}
} else {
return errors.Wrap(err, "announce join")
}
}
g := curves.ED448().Point.Generator()
join := &protobufs.CeremonyLobbyJoin{
FrameNumber: frame.FrameNumber,
IdentityKey: &protobufs.X448PublicKey{
KeyValue: g.Mul(idk).ToAffineCompressed(),
},
SignedPreKey: &protobufs.X448PublicKey{
KeyValue: g.Mul(spk).ToAffineCompressed(),
},
PeerId: e.pubSub.GetPeerID(),
}
sig, err := join.SignWithProverKey(e.provingKey)
if err != nil {
return errors.Wrap(err, "announce join")
}
join.PublicKeySignatureEd448 = &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.proverPublicKey,
},
}
return errors.Wrap(
e.publishMessage(
e.intrinsicFilter,
join,
),
"announce join",
)
}
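// connectToActivePeers establishes encrypted public P2P channels with every
// other lobby participant, using this node's position in the join list to
// decide which side initiates each channel.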
func (e *CeremonyExecutionEngine) connectToActivePeers(
app *application.CeremonyApplication,
position int,
) error {
idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
if err != nil {
return errors.Wrap(err, "connect to active peers")
}
spk, err := e.keyManager.GetAgreementKey("q-ratchet-spk")
if err != nil {
return errors.Wrap(err, "connect to active peers")
}
for i, p := range app.LobbyJoins {
if !bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
receiverIdk, err := curves.ED448().Point.FromAffineCompressed(
p.IdentityKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "connect to active peers")
}
receiverSpk, err := curves.ED448().Point.FromAffineCompressed(
p.SignedPreKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "connect to active peers")
}
client, err := e.clock.GetPublicChannelForProvingKey(
i > position,
p.PeerId,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)
if err != nil {
e.logger.Error(
"peer does not support direct public channels",
zap.Binary(
"proving_key",
p.PublicKeySignatureEd448.PublicKey.KeyValue,
),
zap.Error(err),
)
}
e.peerChannels[string(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)], err = p2p.NewPublicP2PChannel(
client,
e.proverPublicKey,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
i > position,
idk,
spk,
receiverIdk,
receiverSpk,
curves.ED448(),
e.keyManager,
e.pubSub,
)
if err != nil {
e.logger.Error(
"could not establish p2p channel",
zap.Binary(
"proving_key",
p.PublicKeySignatureEd448.PublicKey.KeyValue,
),
zap.Error(err),
)
return errors.Wrap(err, "connect to active peers")
}
}
}
return nil
}
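// participateRound ensures channels exist to every other active participant
// (resolving their announced key bundles from the key store), runs
// application.ProcessRound over those channels, and commits the resulting
// secrets.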
func (e *CeremonyExecutionEngine) participateRound(
app *application.CeremonyApplication,
) error {
idk, err := e.keyManager.GetAgreementKey("q-ratchet-idk")
if err != nil {
return errors.Wrap(err, "participate round")
}
spk, err := e.keyManager.GetAgreementKey("q-ratchet-spk")
if err != nil {
return errors.Wrap(err, "participate round")
}
idkPoint := curves.ED448().Point.Generator().Mul(idk)
idks := []curves.Point{}
initiator := false
for _, p := range app.ActiveParticipants {
if !bytes.Equal(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
e.proverPublicKey,
) {
ic, err := e.keyStore.GetLatestKeyBundle(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "participate round")
}
var kba *protobufs.KeyBundleAnnouncement
switch ic.TypeUrl {
case protobufs.KeyBundleAnnouncementType:
kba = &protobufs.KeyBundleAnnouncement{}
if err := proto.Unmarshal(
ic.Data,
kba,
); err != nil {
return errors.Wrap(err, "participate round")
}
}
receiverIdk, err := curves.ED448().Point.FromAffineCompressed(
kba.IdentityKey.GetPublicKeySignatureEd448().PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "participate round")
}
receiverSpk, err := curves.ED448().Point.FromAffineCompressed(
kba.SignedPreKey.GetPublicKeySignatureEd448().PublicKey.KeyValue,
)
if err != nil {
return errors.Wrap(err, "participate round")
}
if _, ok := e.peerChannels[string(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)]; !ok {
client, err := e.clock.GetPublicChannelForProvingKey(
initiator,
p.PeerId,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)
if err != nil {
e.logger.Error(
"peer does not support direct public channels",
zap.Binary(
"proving_key",
p.PublicKeySignatureEd448.PublicKey.KeyValue,
),
zap.Error(err),
)
}
e.peerChannels[string(
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)], err = p2p.NewPublicP2PChannel(
client,
e.proverPublicKey,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
initiator,
idk,
spk,
receiverIdk,
receiverSpk,
curves.ED448(),
e.keyManager,
e.pubSub,
)
if err != nil {
return errors.Wrap(err, "participate round")
}
}
idks = append(idks, receiverIdk)
} else {
initiator = true
idks = append(idks, idkPoint)
}
}
pubKeys := [][]byte{}
for _, p := range app.ActiveParticipants {
pubKeys = append(
pubKeys,
p.PublicKeySignatureEd448.PublicKey.KeyValue,
)
}
newSecrets, err := application.ProcessRound(
e.proverPublicKey,
idk,
int(app.RoundCount),
pubKeys,
idks,
e.activeSecrets,
curves.BLS48581G1(),
func(i int, receiver []byte, msg []byte) error {
return e.peerChannels[string(receiver)].Send(msg)
},
func(i int, sender []byte) ([]byte, error) {
msg, err := e.peerChannels[string(
sender,
)].Receive()
if err != nil {
e.publishDroppedParticipant(sender)
return nil, err
} else {
if i == 0 {
e.publishLastSeenParticipant(sender)
}
return msg, nil
}
},
app.LatestTranscript.G1Powers[1].KeyValue,
)
if err != nil {
return errors.Wrap(err, "participate round")
}
return errors.Wrap(e.commitRound(newSecrets), "participate round")
}
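// commitRound publishes a CeremonyTranscriptCommit containing the G2 public
// point of the first secret, signed with both the contribution secret and the
// prover key, and records the secrets as the active set.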
func (e *CeremonyExecutionEngine) commitRound(secrets []curves.Scalar) error {
g2Pub := curves.BLS48581G2().Point.Generator().Mul(secrets[0])
sig, err := application.SignProverKeyForCommit(
e.proverPublicKey,
secrets[0],
)
if err != nil {
return errors.Wrap(err, "commit round")
}
proverSig, err := e.provingKey.Sign(
rand.Reader,
g2Pub.ToAffineCompressed(),
crypto.Hash(0),
)
if err != nil {
return errors.Wrap(err, "commit round")
}
advance := &protobufs.CeremonyTranscriptCommit{
ProverSignature: &protobufs.Ed448Signature{
Signature: proverSig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.proverPublicKey,
},
},
ContributionSignature: &protobufs.BLS48581Signature{
Signature: sig,
PublicKey: &protobufs.BLS48581G2PublicKey{
KeyValue: g2Pub.ToAffineCompressed(),
},
},
}
if err := e.publishMessage(
e.intrinsicFilter,
advance,
); err != nil {
return errors.Wrap(err, "commit round")
}
e.activeSecrets = secrets
return nil
}
// Publishes a dropped participant attestation, logs any errors but does not
// forward them on.
func (e *CeremonyExecutionEngine) publishDroppedParticipant(
participant []byte,
) {
frameNumber := e.clock.GetFrame().FrameNumber
b := binary.BigEndian.AppendUint64([]byte("dropped"), frameNumber)
b = append(b, participant...)
sig, err := e.provingKey.Sign(rand.Reader, b, crypto.Hash(0))
if err != nil {
e.logger.Error(
"error while signing dropped participant attestation",
zap.Error(err),
)
return
}
dropped := &protobufs.CeremonyDroppedProverAttestation{
DroppedProverKey: &protobufs.Ed448PublicKey{
KeyValue: participant,
},
LastSeenFrame: frameNumber,
ProverSignature: &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.proverPublicKey,
},
},
}
err = e.publishMessage(
e.intrinsicFilter,
dropped,
)
if err != nil {
e.logger.Error(
"error while publishing dropped participant attestation",
zap.Error(err),
)
return
}
}
// Publishes a last seen participant attestation, logs any errors but does not
// forward them on.
func (e *CeremonyExecutionEngine) publishLastSeenParticipant(
participant []byte,
) {
frameNumber := e.clock.GetFrame().FrameNumber
b := binary.BigEndian.AppendUint64([]byte("lastseen"), frameNumber)
b = append(b, participant...)
sig, err := e.provingKey.Sign(rand.Reader, b, crypto.Hash(0))
if err != nil {
e.logger.Error(
"error while signing last seen participant attestation",
zap.Error(err),
)
return
}
seen := &protobufs.CeremonySeenProverAttestation{
SeenProverKey: &protobufs.Ed448PublicKey{
KeyValue: participant,
},
LastSeenFrame: frameNumber,
ProverSignature: &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.proverPublicKey,
},
},
}
err = e.publishMessage(
e.intrinsicFilter,
seen,
)
if err != nil {
e.logger.Error(
"error while publishing last seen participant attestation",
zap.Error(err),
)
return
}
}
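// publishTranscriptShare builds additive G1 and G2 shares by multiplying the
// latest transcript powers by this prover's secrets, attaches the 256-power
// witness points, signs the share with the prover key, and publishes it.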
func (e *CeremonyExecutionEngine) publishTranscriptShare(
app *application.CeremonyApplication,
) error {
transcriptShare := &protobufs.CeremonyTranscriptShare{}
transcriptShare.AdditiveG1Powers = make(
[]*protobufs.BLS48581G1PublicKey,
len(e.activeSecrets),
)
transcriptShare.AdditiveG2Powers = make(
[]*protobufs.BLS48581G2PublicKey,
len(app.LatestTranscript.G2Powers)-1,
)
eg := errgroup.Group{}
eg.SetLimit(100)
e.logger.Info("creating transcript share")
for i, s := range e.activeSecrets {
i := i
s := s
eg.Go(func() error {
if i%100 == 0 {
e.logger.Info(
"writing transcript share chunk",
zap.Int("chunk_start", i),
)
}
basisG1, err := curves.BLS48581G1().Point.FromAffineCompressed(
app.LatestTranscript.G1Powers[i+1].KeyValue,
)
if err != nil {
return errors.Wrap(err, "publish transcript share")
}
transcriptShare.AdditiveG1Powers[i] = &protobufs.BLS48581G1PublicKey{
KeyValue: basisG1.Mul(s).ToAffineCompressed(),
}
if i+1 < len(app.LatestTranscript.G2Powers) {
basisG2, err := curves.BLS48581G2().Point.FromAffineCompressed(
app.LatestTranscript.G2Powers[i+1].KeyValue,
)
if err != nil {
return errors.Wrap(err, "publish transcript share")
}
transcriptShare.AdditiveG2Powers[i] = &protobufs.BLS48581G2PublicKey{
KeyValue: basisG2.Mul(s).ToAffineCompressed(),
}
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
e.logger.Info(
"done writing transcript chunks, adding witnesses and signing",
)
transcriptShare.AdditiveG1_256Witness = &protobufs.BLS48581G1PublicKey{
KeyValue: curves.BLS48581G1().Point.Generator().Mul(
e.activeSecrets[len(app.LatestTranscript.G2Powers)-2],
).ToAffineCompressed(),
}
transcriptShare.AdditiveG2_256Witness = &protobufs.BLS48581G2PublicKey{
KeyValue: curves.BLS48581G2().Point.Generator().Mul(
e.activeSecrets[len(app.LatestTranscript.G2Powers)-2],
).ToAffineCompressed(),
}
sig, err := transcriptShare.SignWithProverKey(e.provingKey)
if err != nil {
return errors.Wrap(err, "publish transcript share")
}
transcriptShare.ProverSignature = &protobufs.Ed448Signature{
Signature: sig,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: e.proverPublicKey,
},
}
err = errors.Wrap(
e.publishMessage(
e.intrinsicFilter,
transcriptShare,
),
"publish transcript share",
)
if err != nil {
return err
} else {
e.alreadyPublishedShare = true
return nil
}
}
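// VerifyExecution checks the intrinsic execution output of a frame that
// directly succeeds the current head: it re-applies the included state
// transition to the parent frame's application state and verifies the result
// matches the state materialized from the new frame.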
func (e *CeremonyExecutionEngine) VerifyExecution(
frame *protobufs.ClockFrame,
) error {
if e.clock.GetFrame().FrameNumber != frame.FrameNumber-1 {
return nil
}
if len(frame.AggregateProofs) > 0 {
for _, proofs := range frame.AggregateProofs {
for _, inclusion := range proofs.InclusionCommitments {
if inclusion.TypeUrl == protobufs.IntrinsicExecutionOutputType {
transition, _, err := application.GetOutputsFromClockFrame(frame)
if err != nil {
return errors.Wrap(err, "verify execution")
}
parent, err := e.clockStore.GetStagedDataClockFrame(
append(
p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
p2p.GetBloomFilterIndices(
application.CEREMONY_ADDRESS,
65536,
24,
)...,
),
frame.FrameNumber-1,
frame.ParentSelector,
false,
)
if err != nil && !errors.Is(err, store.ErrNotFound) {
return errors.Wrap(err, "verify execution")
}
if parent == nil {
return errors.Wrap(
errors.New("missing parent frame"),
"verify execution",
)
}
a, err := application.MaterializeApplicationFromFrame(parent)
if err != nil {
return errors.Wrap(err, "verify execution")
}
a, _, _, err = a.ApplyTransition(frame.FrameNumber, transition, false)
if err != nil {
return errors.Wrap(err, "verify execution")
}
a2, err := application.MaterializeApplicationFromFrame(frame)
if err != nil {
return errors.Wrap(err, "verify execution")
}
if !a.Equals(a2) {
return errors.Wrap(
application.ErrInvalidStateTransition,
"verify execution",
)
}
return nil
}
}
}
}
return nil
}
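// publishTranscript publishes the updated transcript produced during
// validation, clearing the already-published flag again if publishing fails
// so that it can be retried.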
func (e *CeremonyExecutionEngine) publishTranscript(
app *application.CeremonyApplication,
) error {
e.logger.Info("publishing updated transcript")
e.alreadyPublishedTranscript = true
err := errors.Wrap(
e.publishMessage(
e.intrinsicFilter,
app.UpdatedTranscript,
),
"publish transcript",
)
if err != nil {
e.alreadyPublishedTranscript = false
return err
} else {
return nil
}
}
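// ensureSecrets lazily derives the contribution secrets as successive powers
// t, t^2, t^3, ... of a freshly sampled random scalar, one per transcript G1
// power beyond the first.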
func (e *CeremonyExecutionEngine) ensureSecrets(
app *application.CeremonyApplication,
) {
if len(e.activeSecrets) == 0 {
e.activeSecrets = []curves.Scalar{}
t := curves.BLS48581G1().Scalar.Random(rand.Reader)
x := t.Clone()
for i := 0; i < len(app.LatestTranscript.G1Powers)-1; i++ {
if i%1000 == 0 {
e.logger.Info(
"calculating secrets for contribution",
zap.Int("secrets_calculated", i),
zap.Int("total_secrets", len(app.LatestTranscript.G1Powers)-1),
)
}
e.activeSecrets = append(e.activeSecrets, x)
x = x.Mul(t)
}
e.logger.Info(
"done preparing contribution",
zap.Int("secrets_calculated", len(e.activeSecrets)),
)
}
}
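// GetPeerInfo returns peer info from the underlying consensus engine.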
func (e *CeremonyExecutionEngine) GetPeerInfo() *protobufs.PeerInfoResponse {
return e.clock.GetPeerInfo()
}
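// GetFrame returns the current head data clock frame from the underlying
// consensus engine.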
func (e *CeremonyExecutionEngine) GetFrame() *protobufs.ClockFrame {
return e.clock.GetFrame()
}