ceremonyclient/node/consensus/master/broadcast_messaging.go
Cassandra Heart 2bbd1e0690
v1.4.18 (#193)
* Remove bootstrap peer (#189)

* Change bootstrap servers to DHT-only peers (#187)

* support voucher file-based claims (#183)

* Change bootstrap servers to DHT-only peers

Changing my bootstrap servers to DHT-only peers with somewhat lower
specs. One of the new ones is in the US and the other one is in
Switzerland. Both use reliable providers and have 10Gbps network
interfaces.

---------

Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

* Don't run self-test in DHT-only mode (#186)

* support voucher file-based claims (#183)

* Don't run self-test in DHT-only mode

The node tries to create a self-test when run with the `-dht-only`
flag, but it doesn't load the KZG ceremony data in DHT-only mode,
which leads to a crash.

Don't run self-test when the `-dht-only` flag is set.

I tested by starting a node locally, with and without an existing
self-test, and with the `-dht-only` flag.
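
A minimal sketch of the shape of the fix, with illustrative names
throughout (loadCeremonyData and createSelfTest stand in for the
node's real initialization paths, which this commit does not name):

    package main

    import (
        "flag"
        "log"
    )

    // Illustrative stand-ins for the node's real initialization paths.
    func loadCeremonyData() {}
    func createSelfTest()   {}

    func main() {
        dhtOnly := flag.Bool("dht-only", false, "run as a DHT-only peer")
        flag.Parse()

        if *dhtOnly {
            // DHT-only mode never loads the KZG ceremony data, so creating
            // a self-test here would crash; skip it entirely.
            log.Println("dht-only mode: skipping self-test")
            return
        }

        loadCeremonyData()
        createSelfTest()
    }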

---------

Co-authored-by: Cassandra Heart <7929478+CassOnMars@users.noreply.github.com>

* Embed json files in binary (#182)

* Embed ceremony.json in binary

* Embed retroactive_peers.json in binary
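
A minimal sketch of the technique using Go's embed directive; only the
file names come from the commit titles, the package and variable names
are illustrative:

    package config

    import _ "embed"

    // Compiling the JSON into the binary means the node no longer has
    // to ship these files alongside the executable.
    //
    //go:embed ceremony.json
    var CeremonyJSON []byte

    //go:embed retroactive_peers.json
    var RetroactivePeersJSON []byte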

* Signers build and verification tasks (#181)

* add signers specific Taskfile

* add verify tasks

* move signer task under signer folder

* create docker image specific for signers

* map current user into docker image and container

* ignore node-tmp-*

* add verify:build:internal

* prevent tasks with docker commands from being run inside a container

* rename *:internal to *:container

* add README.md

* add pem files to git

* Updating Q Guide link (#173)

* Update README.md

Updated link to Quilibrium guide to new website

* Update README.md

* feat: network switching and namespaced announce strings/bitmasks (#190)

* feat: network switching and namespaced announce strings/bitmasks

* bump version name and logo

* feat: mini pomw proofs as part of peer manifest (#191)

* shift default config directory under current folder (#176)

* feat: signature check (#192)

* feat: signature check

* adjust docker command so it doesn't invoke sigcheck

* remove old version

* add binaries and digests

* fix bug, revert build

* shasum has weird byte at end

* proper binaries and digests

* Signatory #13 added

* Signatory #3 added

* Signer 4 (#194)

* Signatory #5 added

* Signatory #9 added (#195)

* Signatory #1 added

* added sig.6 files (#196)

* Signatories #8 and #16 added

* Signatory #12 added

* Add signature (#197)

* reset build for v1.4.18 after testnet bug

* updated build, resigned by #13

* Signatory #16 added

* added sig.6 files (#198)

* Signatory #8 added

* Signatory #17 added

* Signatory #1 added

* Signatory #7 added

* Signatory #4 added

* Signatory #14 added

* remove binaries, ready to ship

---------

Co-authored-by: littleblackcloud <163544315+littleblackcloud@users.noreply.github.com>
Co-authored-by: Agost Biro <5764438+agostbiro@users.noreply.github.com>
Co-authored-by: Marius Scurtescu <marius.scurtescu@gmail.com>
Co-authored-by: Demipoet <161999657+demipoet@users.noreply.github.com>
Co-authored-by: 0xOzgur <29779769+0xOzgur@users.noreply.github.com>
Co-authored-by: Freekers <1370857+Freekers@users.noreply.github.com>
2024-05-25 00:22:50 -05:00


package master

import (
    "bytes"
    "context"
    "encoding/binary"
    "strings"
    "time"

    "github.com/iden3/go-iden3-crypto/poseidon"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/mr-tron/base58"
    "github.com/pkg/errors"
    "go.uber.org/zap"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/anypb"

    "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
    "source.quilibrium.com/quilibrium/monorepo/node/consensus"
    "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
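
// handleMessage unwraps an inbound blossomsub message and dispatches its
// Any payload to the matching handler based on the payload's type URL.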
func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
    e.logger.Debug(
        "received message",
        zap.Binary("data", message.Data),
        zap.Binary("from", message.From),
        zap.Binary("signature", message.Signature),
    )

    msg := &protobufs.Message{}
    if err := proto.Unmarshal(message.Data, msg); err != nil {
        return errors.Wrap(err, "handle message")
    }

    any := &anypb.Any{}
    if err := proto.Unmarshal(msg.Payload, any); err != nil {
        return errors.Wrap(err, "handle message")
    }

    switch any.TypeUrl {
    case protobufs.ClockFrameType:
        if err := e.handleClockFrameData(
            message.From,
            any,
        ); err != nil {
            return errors.Wrap(err, "handle message")
        }

        return nil
    case protobufs.SelfTestReportType:
        if err := e.handleSelfTestReport(
            message.From,
            any,
        ); err != nil {
            return errors.Wrap(err, "handle message")
        }

        return nil
    }

    return errors.Wrap(errors.New("invalid message"), "handle message")
}
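
// handleClockFrameData drops stale or difficulty-mismatched clock frames
// and enqueues the rest for asynchronous validation.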
func (e *MasterClockConsensusEngine) handleClockFrameData(
    peerID []byte,
    any *anypb.Any,
) error {
    frame := &protobufs.ClockFrame{}
    if err := any.UnmarshalTo(frame); err != nil {
        return errors.Wrap(err, "handle clock frame data")
    }

    head, err := e.masterTimeReel.Head()
    if err != nil {
        panic(err)
    }

    if frame.FrameNumber < head.FrameNumber {
        return nil
    }

    if e.difficulty != frame.Difficulty {
        e.logger.Debug(
            "frame difficulty mismatched",
            zap.Uint32("difficulty", frame.Difficulty),
        )
        return errors.Wrap(
            errors.New("frame difficulty"),
            "handle clock frame data",
        )
    }

    e.logger.Debug(
        "got clock frame",
        zap.Binary("sender", peerID),
        zap.Binary("filter", frame.Filter),
        zap.Uint64("frame_number", frame.FrameNumber),
        zap.Int("proof_count", len(frame.AggregateProofs)),
    )

    // Hand the frame to the validation queue without blocking; drop it if
    // the queue is already full.
    go func() {
        select {
        case e.frameValidationCh <- frame:
        default:
            e.logger.Debug(
                "dropped frame due to overwhelmed queue",
                zap.Binary("sender", peerID),
                zap.Binary("filter", frame.Filter),
                zap.Uint64("frame_number", frame.FrameNumber),
                zap.Int("proof_count", len(frame.AggregateProofs)),
            )
        }
    }()

    return nil
}
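
// handleSelfTestReport validates a peer's self-test proof, updates or
// records its peer manifest, and penalizes peers that send malformed or
// invalid reports.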
func (e *MasterClockConsensusEngine) handleSelfTestReport(
    peerID []byte,
    any *anypb.Any,
) error {
    // Ignore our own report.
    if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
        return nil
    }

    report := &protobufs.SelfTestReport{}
    if err := any.UnmarshalTo(report); err != nil {
        return errors.Wrap(err, "handle self test report")
    }

    // The minimum proof size is one 8-byte timestamp plus one 516-byte VDF
    // proof, and the number of VDF proofs must be one fewer than the
    // reported core count.
    if len(report.Proof) < 516+8 ||
        ((len(report.Proof)-8)/516) != int(report.Cores-1) {
        e.logger.Warn(
            "received invalid proof from peer",
            zap.String("peer_id", peer.ID(peerID).String()),
        )
        e.pubSub.SetPeerScore(peerID, -1000)
        return errors.Wrap(errors.New("invalid report"), "handle self test report")
    }

    e.logger.Debug(
        "received proof from peer",
        zap.String("peer_id", peer.ID(peerID).String()),
    )

    info := e.peerInfoManager.GetPeerInfo(peerID)
    if info != nil {
        if (time.Now().UnixMilli() - info.LastSeen) < (270 * 1000) {
            return nil
        }

        info.MasterHeadFrame = report.MasterHeadFrame

        if info.Bandwidth <= 1048576 {
            go func() {
                ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
                defer cancel()

                ch := e.pubSub.GetMultiaddrOfPeerStream(ctx, peerID)
                select {
                case <-ch:
                    go func() {
                        e.bandwidthTestCh <- peerID
                    }()
                case <-ctx.Done():
                }
            }()
        }

        proof := report.Proof
        timestamp := binary.BigEndian.Uint64(proof[:8])
        proof = proof[8:]

        // Ignore outdated reports, allowing eight minutes (480s) for
        // propagation delay.
        if int64(timestamp) < (time.Now().UnixMilli() - (480 * 1000)) {
            return nil
        }

        challenge := binary.BigEndian.AppendUint64([]byte{}, report.MasterHeadFrame)
        challenge = append(challenge, peerID...)

        proofs := make([][]byte, (len(report.Proof)-8)/516)
        for i := 0; i < len(proofs); i++ {
            proofs[i] = proof[i*516 : (i+1)*516]
        }

        if !e.frameProver.VerifyChallengeProof(
            challenge,
            int64(timestamp),
            report.DifficultyMetric,
            proofs,
        ) {
            e.logger.Warn(
                "received invalid proof from peer",
                zap.String("peer_id", peer.ID(peerID).String()),
            )
            e.pubSub.SetPeerScore(peerID, -1000)
            return errors.Wrap(
                errors.New("invalid report"),
                "handle self test report",
            )
        }

        info.LastSeen = time.Now().UnixMilli()

        return nil
    }

    e.addPeerManifestReport(peerID, report)

    memory := binary.BigEndian.Uint64(report.Memory)
    e.logger.Debug(
        "received self test report",
        zap.String("peer_id", base58.Encode(peerID)),
        zap.Uint32("difficulty", report.Difficulty),
        zap.Int64("difficulty_metric", report.DifficultyMetric),
        zap.Int64("commit_16_metric", report.Commit_16Metric),
        zap.Int64("commit_128_metric", report.Commit_128Metric),
        zap.Int64("commit_1024_metric", report.Commit_1024Metric),
        zap.Int64("commit_65536_metric", report.Commit_65536Metric),
        zap.Int64("proof_16_metric", report.Proof_16Metric),
        zap.Int64("proof_128_metric", report.Proof_128Metric),
        zap.Int64("proof_1024_metric", report.Proof_1024Metric),
        zap.Int64("proof_65536_metric", report.Proof_65536Metric),
        zap.Uint32("cores", report.Cores),
        zap.Uint64("memory", memory),
        zap.Uint64("storage", binary.BigEndian.Uint64(report.Storage)),
    )

    if report.Cores < 3 || memory < 16000000000 {
        e.logger.Debug(
            "peer reported invalid configuration",
            zap.String("peer_id", base58.Encode(peerID)),
            zap.Uint32("difficulty", report.Difficulty),
            zap.Int64("difficulty_metric", report.DifficultyMetric),
            zap.Int64("commit_16_metric", report.Commit_16Metric),
            zap.Int64("commit_128_metric", report.Commit_128Metric),
            zap.Int64("commit_1024_metric", report.Commit_1024Metric),
            zap.Int64("commit_65536_metric", report.Commit_65536Metric),
            zap.Int64("proof_16_metric", report.Proof_16Metric),
            zap.Int64("proof_128_metric", report.Proof_128Metric),
            zap.Int64("proof_1024_metric", report.Proof_1024Metric),
            zap.Int64("proof_65536_metric", report.Proof_65536Metric),
            zap.Uint32("cores", report.Cores),
            zap.Uint64("memory", memory),
            zap.Uint64("storage", binary.BigEndian.Uint64(report.Storage)),
        )
        // tag: dusk nuke this peer for now
        e.pubSub.SetPeerScore(peerID, -1000)
        return nil
    }

    go func() {
        ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
        defer cancel()

        ch := e.pubSub.GetMultiaddrOfPeerStream(ctx, peerID)
        select {
        case <-ch:
            go func() {
                e.bandwidthTestCh <- peerID
            }()
        case <-ctx.Done():
        }
    }()

    return nil
}
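
// publishProof inserts a freshly proven frame into the master time reel
// and, when no peer is ahead of us (or the peer lookup fails), broadcasts
// our updated self report.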
func (e *MasterClockConsensusEngine) publishProof(
    frame *protobufs.ClockFrame,
) error {
    e.logger.Debug(
        "publishing frame",
        zap.Uint64("frame_number", frame.FrameNumber),
    )

    e.masterTimeReel.Insert(frame, false)

    peers, err := e.GetMostAheadPeers()
    if err != nil || len(peers) == 0 {
        // publish if we don't see anyone (empty peer list) or if we're the
        // most ahead:
        e.report.MasterHeadFrame = frame.FrameNumber
        if err := e.publishMessage(e.filter, e.report); err != nil {
            e.logger.Debug("error publishing message", zap.Error(err))
        }
    }

    e.state = consensus.EngineStateCollecting

    return nil
}
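
// publishMessage wraps a message in an Any with a Quilibrium-namespaced
// type URL, hashes the payload with Poseidon, and publishes the result to
// the given bitmask.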
func (e *MasterClockConsensusEngine) publishMessage(
    filter []byte,
    message proto.Message,
) error {
    any := &anypb.Any{}
    if err := any.MarshalFrom(message); err != nil {
        return errors.Wrap(err, "publish message")
    }

    // annoying protobuf any hack: rewrite the default type URL prefix to
    // the Quilibrium namespace
    any.TypeUrl = strings.Replace(
        any.TypeUrl,
        "type.googleapis.com",
        "types.quilibrium.com",
        1,
    )

    payload, err := proto.Marshal(any)
    if err != nil {
        return errors.Wrap(err, "publish message")
    }

    h, err := poseidon.HashBytes(payload)
    if err != nil {
        return errors.Wrap(err, "publish message")
    }

    msg := &protobufs.Message{
        Hash:    h.Bytes(),
        Address: e.filter,
        Payload: payload,
    }

    data, err := proto.Marshal(msg)
    if err != nil {
        return errors.Wrap(err, "publish message")
    }

    return e.pubSub.PublishToBitmask(filter, data)
}