v1.4.18-patch-2

Cassandra Heart 2024-05-29 12:46:44 -05:00
parent 5252a9a5ec
commit 1a979c1512
No known key found for this signature in database
GPG Key ID: 6352152859385958
63 changed files with 966 additions and 53 deletions


@@ -89,7 +89,10 @@ func GenerateVDFWithStopChan(seed []byte, iterations, int_size_bits uint32, stop
func GenerateVDFIteration(seed, x_blob []byte, iterations, int_size_bits uint32) ([]byte, []byte) {
int_size := (int_size_bits + 16) >> 4
D := iqc.CreateDiscriminant(seed, int_size_bits)
x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)
x, ok := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)
if !ok {
return nil, nil
}
y, proof := calculateVDF(D, x, iterations, int_size_bits, nil)
@@ -105,8 +108,15 @@ func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool {
D := iqc.CreateDiscriminant(seed, int_size_bits)
x := iqc.NewClassGroupFromAbDiscriminant(big.NewInt(2), big.NewInt(1), D)
y, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D)
proof, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D)
y, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D)
if !ok {
return false
}
proof, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D)
if !ok {
return false
}
return verifyProof(x, y, proof, iterations)
}
@@ -114,9 +124,20 @@ func VerifyVDF(seed, proof_blob []byte, iterations, int_size_bits uint32) bool {
func VerifyVDFIteration(seed, x_blob, proof_blob []byte, iterations, int_size_bits uint32) bool {
int_size := (int_size_bits + 16) >> 4
D := iqc.CreateDiscriminant(seed, int_size_bits)
x, _ := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)
y, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D)
proof, _ := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D)
x, ok := iqc.NewClassGroupFromBytesDiscriminant(x_blob[:(2*int_size)], D)
if !ok {
return false
}
y, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[:(2*int_size)], D)
if !ok {
return false
}
proof, ok := iqc.NewClassGroupFromBytesDiscriminant(proof_blob[2*int_size:], D)
if !ok {
return false
}
return verifyProof(x, y, proof, iterations)
}


@@ -8,6 +8,14 @@ type EngineConfig struct {
PendingCommitWorkers int64 `yaml:"pendingCommitWorkers"`
MinimumPeersRequired int `yaml:"minimumPeersRequired"`
StatsMultiaddr string `yaml:"statsMultiaddr"`
// Sets the fmt.Sprintf format string to use as the listen multiaddrs for
// data worker processes
DataWorkerBaseListenMultiaddr string `yaml:"dataWorkerBaseListenMultiaddr"`
// Sets the starting port number to use as the listen port for data worker
// processes, incrementing by 1 until n-1, n = cores. (Example: a 4 core
// system, base listen port of 40000 will listen on 40000, 40001, 40002)
DataWorkerBaseListenPort uint16 `yaml:"dataWorkerBaseListenPort"`
DataWorkerMemoryLimit int64 `yaml:"dataWorkerMemoryLimit"`
// Values used only for testing; do not override these in production, or your
// node will get kicked out
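For reference, a minimal sketch of how the new keys could be set in the node's config file, assuming the engine settings are nested under an engine: block (that nesting is not shown in this diff) and using the defaults this commit falls back to when the fields are empty; the format string must contain a single %d placeholder for the port:

engine:
  dataWorkerBaseListenMultiaddr: "/ip4/127.0.0.1/tcp/%d"
  dataWorkerBaseListenPort: 40000
  dataWorkerMemoryLimit: 1879048192  # bytes; 1.75GiB, the default applied to each worker process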


@@ -29,5 +29,5 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x01
return 0x02
}


@@ -116,15 +116,19 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
peerID []byte,
any *anypb.Any,
) error {
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
return nil
}
report := &protobufs.SelfTestReport{}
if err := any.UnmarshalTo(report); err != nil {
return errors.Wrap(err, "handle self test report")
}
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
info := e.peerInfoManager.GetPeerInfo(peerID)
info.LastSeen = time.Now().UnixMilli()
info.DifficultyMetric = report.DifficultyMetric
info.MasterHeadFrame = report.MasterHeadFrame
return nil
}
// minimum proof size is one timestamp, one vdf proof, must match one fewer
// than core count
if len(report.Proof) < 516+8 ||
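A quick sketch of the size arithmetic behind the check above (the remainder of the condition is truncated in this view): each VDF proof is 516 bytes and the timestamp is 8 bytes, so the absolute minimum report is 8 + 516 = 524 bytes, while a peer with four cores runs three data workers and would be expected to report 8 + 3 * 516 = 1556 bytes of proof.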


@@ -7,6 +7,7 @@ import (
"crypto/rand"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"math/big"
"sync"
@@ -15,9 +16,12 @@ import (
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/mr-tron/base58"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
qtime "source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
@@ -68,6 +72,7 @@ type MasterClockConsensusEngine struct {
verifyTestCh chan verifyChallenge
currentReceivingSyncPeers int
currentReceivingSyncPeersMx sync.Mutex
engineConfig *config.EngineConfig
}
var _ consensus.ConsensusEngine = (*MasterClockConsensusEngine)(nil)
@@ -131,6 +136,7 @@ func NewMasterClockConsensusEngine(
frameValidationCh: make(chan *protobufs.ClockFrame),
bandwidthTestCh: make(chan []byte),
verifyTestCh: make(chan verifyChallenge),
engineConfig: engineConfig,
}
e.addPeerManifestReport(e.pubSub.GetPeerID(), report)
@@ -241,6 +247,16 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
time.Sleep(30 * time.Second)
difficultyMetric := int64(100000)
skew := (difficultyMetric * 12) / 10
parallelism := e.report.Cores - 1
if parallelism < 3 {
panic("invalid system configuration, minimum system configuration must be four cores")
}
clients, err := e.createParallelDataClients(int(parallelism))
if err != nil {
panic(err)
}
for {
head, err := e.masterTimeReel.Head()
@@ -250,7 +266,6 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
e.report.MasterHeadFrame = head.FrameNumber
e.report.DifficultyMetric = difficultyMetric
parallelism := e.report.Cores - 1
challenge := binary.BigEndian.AppendUint64(
[]byte{},
@@ -258,15 +273,42 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
)
challenge = append(challenge, e.pubSub.GetPeerID()...)
ts, proofs, nextDifficultyMetric, err :=
e.frameProver.CalculateChallengeProof(
challenge,
parallelism,
skew,
proofs := make([][]byte, parallelism)
nextMetrics := make([]int64, parallelism)
wg := sync.WaitGroup{}
wg.Add(int(parallelism))
ts := time.Now().UnixMilli()
for i := uint32(0); i < parallelism; i++ {
i := i
go func() {
resp, err :=
clients[i].CalculateChallengeProof(
context.Background(),
&protobufs.ChallengeProofRequest{
Challenge: challenge,
Core: i,
Skew: skew,
NowMs: ts,
},
)
if err != nil {
panic(err)
}
proofs[i], nextMetrics[i] = resp.Output, resp.NextSkew
wg.Done()
}()
}
wg.Wait()
nextDifficultySum := uint64(0)
for i := 0; i < int(parallelism); i++ {
nextDifficultySum += uint64(nextMetrics[i])
}
nextDifficultyMetric := int64(nextDifficultySum / uint64(parallelism))
e.logger.Info(
"recalibrating difficulty metric",
zap.Int64("previous_difficulty_metric", difficultyMetric),
@@ -336,6 +378,64 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
return errChan
}
func (e *MasterClockConsensusEngine) createParallelDataClients(
parallelism int,
) ([]protobufs.DataIPCServiceClient, error) {
e.logger.Info(
"connecting to data worker processes",
zap.Int("parallelism", parallelism),
)
if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if e.engineConfig.DataWorkerBaseListenPort == 0 {
e.engineConfig.DataWorkerBaseListenPort = 40000
}
clients := make([]protobufs.DataIPCServiceClient, parallelism)
for i := 0; i < parallelism; i++ {
ma, err := multiaddr.NewMultiaddr(
fmt.Sprintf(
e.engineConfig.DataWorkerBaseListenMultiaddr,
int(e.engineConfig.DataWorkerBaseListenPort)+i,
),
)
if err != nil {
panic(err)
}
_, addr, err := mn.DialArgs(ma)
if err != nil {
panic(err)
}
conn, err := grpc.Dial(
addr,
grpc.WithTransportCredentials(
insecure.NewCredentials(),
),
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(10*1024*1024),
grpc.MaxCallRecvMsgSize(10*1024*1024),
),
)
if err != nil {
panic(err)
}
clients[i] = protobufs.NewDataIPCServiceClient(conn)
}
e.logger.Info(
"connected to data worker processes",
zap.Int("parallelism", parallelism),
)
return clients, nil
}
func (e *MasterClockConsensusEngine) PerformValidation(
ctx context.Context,
msg *protobufs.ValidationMessage,


@@ -53,9 +53,10 @@ type FrameProver interface {
) bool
CalculateChallengeProof(
challenge []byte,
parallelism uint32,
core uint32,
skew int64,
) (int64, [][]byte, int64, error)
nowMs int64,
) ([]byte, int64, error)
VerifyChallengeProof(
challenge []byte,
timestamp int64,


@@ -6,7 +6,6 @@ import (
"crypto/rand"
"encoding/binary"
"math/big"
"sync"
"time"
"github.com/cloudflare/circl/sign/ed448"
@@ -601,25 +600,17 @@ func (w *WesolowskiFrameProver) VerifyWeakRecursiveProof(
func (w *WesolowskiFrameProver) CalculateChallengeProof(
challenge []byte,
parallelism uint32,
core uint32,
skew int64,
) (int64, [][]byte, int64, error) {
now := time.Now()
nowMs := now.UnixMilli()
nowMs int64,
) ([]byte, int64, error) {
input := binary.BigEndian.AppendUint64([]byte{}, uint64(nowMs))
input = append(input, challenge...)
outputs := make([][]byte, parallelism)
wg := sync.WaitGroup{}
wg.Add(int(parallelism))
// 4.5 minutes = 270 seconds, one increment should be ten seconds
proofDuration := 270 * 1000
calibratedDifficulty := (int64(proofDuration) * 10000) / skew
for i := uint32(0); i < parallelism; i++ {
i := i
go func() {
instanceInput := binary.BigEndian.AppendUint32([]byte{}, i)
instanceInput := binary.BigEndian.AppendUint32([]byte{}, core)
instanceInput = append(instanceInput, input...)
b := sha3.Sum256(instanceInput)
v := vdf.New(uint32(calibratedDifficulty), b)
@@ -627,17 +618,13 @@ func (w *WesolowskiFrameProver) CalculateChallengeProof(
v.Execute()
o := v.GetOutput()
outputs[i] = make([]byte, 516)
copy(outputs[i][:], o[:])
wg.Done()
}()
}
wg.Wait()
output := make([]byte, 516)
copy(output[:], o[:])
now := time.UnixMilli(nowMs)
after := time.Since(now)
nextSkew := (skew * after.Milliseconds()) / int64(proofDuration)
return nowMs, outputs, nextSkew, nil
return output, nextSkew, nil
}
func (w *WesolowskiFrameProver) VerifyChallengeProof(
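A worked example of the calibration in CalculateChallengeProof above (illustrative numbers): with skew = 120000, calibratedDifficulty = 270000 * 10000 / 120000 = 22500 iterations. If the resulting proof then takes 283500 ms measured from nowMs, nextSkew = 120000 * 283500 / 270000 = 126000, i.e. overshooting the 270-second target by 5% raises the skew by 5% for the next round, while landing exactly on target leaves it unchanged.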


@@ -30,10 +30,12 @@ func TestMasterProve(t *testing.T) {
func TestChallengeProof(t *testing.T) {
l, _ := zap.NewProduction()
w := crypto.NewWesolowskiFrameProver(l)
now, proofs, nextSkew, err := w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 3, 120000)
now := time.Now().UnixMilli()
proofs, nextSkew, err := w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 0, 120000, now)
assert.NoError(t, err)
assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, 100000, proofs))
now, proofs, _, err = w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 3, nextSkew*12/10)
assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, 100000, [][]byte{proofs}))
now = time.Now().UnixMilli()
proofs, _, err = w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 0, nextSkew*12/10, now)
assert.NoError(t, err)
assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, nextSkew, proofs))
assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, now, nextSkew, [][]byte{proofs}))
}


@@ -12,9 +12,11 @@ import (
"io/fs"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
rdebug "runtime/debug"
"runtime/pprof"
"strconv"
"strings"
@@ -103,6 +105,16 @@ var (
true,
"enables or disables signature validation (default true)",
)
core = flag.Int(
"core",
0,
"specifies the core of the process (defaults to zero, the initial launcher)",
)
parentProcess = flag.Int(
"parent-process",
0,
"specifies the parent process pid for a data worker",
)
)
var signatories = []string{
@@ -264,7 +276,7 @@ func main() {
done := make(chan os.Signal, 1)
signal.Notify(done, syscall.SIGINT, syscall.SIGTERM)
if !*dbConsole {
if !*dbConsole && *core == 0 {
printLogo()
printVersion()
fmt.Println(" ")
@@ -322,7 +334,58 @@ func main() {
return
}
if *core != 0 {
runtime.GOMAXPROCS(1)
rdebug.SetGCPercent(9999)
if nodeConfig.Engine.DataWorkerMemoryLimit == 0 {
nodeConfig.Engine.DataWorkerMemoryLimit = 1792 * 1024 * 1024 // 1.75GiB
}
rdebug.SetMemoryLimit(nodeConfig.Engine.DataWorkerMemoryLimit)
if nodeConfig.Engine.DataWorkerBaseListenMultiaddr == "" {
nodeConfig.Engine.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
}
if nodeConfig.Engine.DataWorkerBaseListenPort == 0 {
nodeConfig.Engine.DataWorkerBaseListenPort = 40000
}
if *parentProcess == 0 {
panic("parent process pid not specified")
}
l, err := zap.NewProduction()
if err != nil {
panic(err)
}
rpcMultiaddr := fmt.Sprintf(
nodeConfig.Engine.DataWorkerBaseListenMultiaddr,
int(nodeConfig.Engine.DataWorkerBaseListenPort)+*core-1,
)
srv, err := rpc.NewDataWorkerIPCServer(
rpcMultiaddr,
l,
uint32(*core)-1,
qcrypto.NewWesolowskiFrameProver(l),
*parentProcess,
)
if err != nil {
panic(err)
}
err = srv.Start()
if err != nil {
panic(err)
}
return
}
fmt.Println("Loading ceremony state and starting node...")
go spawnDataWorkers()
kzg.Init()
report := RunSelfTestIfNeeded(*configDirectory, nodeConfig)
@@ -365,9 +428,55 @@ func main() {
node.Start()
<-done
stopDataWorkers()
node.Stop()
}
var dataWorkers []*exec.Cmd
func spawnDataWorkers() {
process, err := os.Executable()
if err != nil {
panic(err)
}
cores := runtime.GOMAXPROCS(0)
dataWorkers = make([]*exec.Cmd, cores-1)
fmt.Printf("Spawning %d data workers...\n", cores-1)
for i := 1; i <= cores-1; i++ {
i := i
go func() {
args := []string{
fmt.Sprintf("--core=%d", i),
fmt.Sprintf("--parent-process=%d", os.Getpid()),
}
args = append(args, os.Args[1:]...)
cmd := exec.Command(process, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Start()
if err != nil {
panic(err)
}
dataWorkers[i-1] = cmd
}()
}
}
func stopDataWorkers() {
for i := 0; i < len(dataWorkers); i++ {
err := dataWorkers[i].Process.Signal(os.Kill)
if err != nil {
fmt.Printf(
"fatal: unable to kill worker with pid %d, please kill this process!\n",
dataWorkers[i].Process.Pid,
)
}
}
}
// Reintroduce at a later date
func RunCompaction(clockStore *store.PebbleClockStore) {
intrinsicFilter := append(

BIN
node/node-1.4.18-darwin-arm64 Executable file

Binary file not shown.


@@ -0,0 +1 @@
SHA3-256(node-1.4.18-darwin-arm64)= 0f666d6c3814f96bebce455034fe6a44b64f223b4e313ac506f42fa2371ac30d

14 binary files not shown.

BIN
node/node-1.4.18-linux-amd64 Executable file

Binary file not shown.


@@ -0,0 +1 @@
SHA3-256(node-1.4.18-linux-amd64)= f2da38a5c8d4257767f4a9659ca9c05d4cf312fd7294e6a18847c8923f6815a0

14 binary files not shown.

BIN
node/node-1.4.18-linux-arm64 Executable file

Binary file not shown.


@@ -0,0 +1 @@
SHA3-256(node-1.4.18-linux-arm64)= e12e14a18a491921fcdd4ffe6e9792f1e195abf4950fe56316219f920867c45a

14 binary files not shown.

258
node/protobufs/data.pb.go Normal file

@@ -0,0 +1,258 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v3.21.12
// source: data.proto
package protobufs
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ChallengeProofRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Challenge []byte `protobuf:"bytes,1,opt,name=challenge,proto3" json:"challenge,omitempty"`
Core uint32 `protobuf:"varint,2,opt,name=core,proto3" json:"core,omitempty"`
Skew int64 `protobuf:"varint,3,opt,name=skew,proto3" json:"skew,omitempty"`
NowMs int64 `protobuf:"varint,4,opt,name=now_ms,json=nowMs,proto3" json:"now_ms,omitempty"`
}
func (x *ChallengeProofRequest) Reset() {
*x = ChallengeProofRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_data_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ChallengeProofRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChallengeProofRequest) ProtoMessage() {}
func (x *ChallengeProofRequest) ProtoReflect() protoreflect.Message {
mi := &file_data_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChallengeProofRequest.ProtoReflect.Descriptor instead.
func (*ChallengeProofRequest) Descriptor() ([]byte, []int) {
return file_data_proto_rawDescGZIP(), []int{0}
}
func (x *ChallengeProofRequest) GetChallenge() []byte {
if x != nil {
return x.Challenge
}
return nil
}
func (x *ChallengeProofRequest) GetCore() uint32 {
if x != nil {
return x.Core
}
return 0
}
func (x *ChallengeProofRequest) GetSkew() int64 {
if x != nil {
return x.Skew
}
return 0
}
func (x *ChallengeProofRequest) GetNowMs() int64 {
if x != nil {
return x.NowMs
}
return 0
}
type ChallengeProofResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
NextSkew int64 `protobuf:"varint,2,opt,name=next_skew,json=nextSkew,proto3" json:"next_skew,omitempty"`
}
func (x *ChallengeProofResponse) Reset() {
*x = ChallengeProofResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_data_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ChallengeProofResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChallengeProofResponse) ProtoMessage() {}
func (x *ChallengeProofResponse) ProtoReflect() protoreflect.Message {
mi := &file_data_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChallengeProofResponse.ProtoReflect.Descriptor instead.
func (*ChallengeProofResponse) Descriptor() ([]byte, []int) {
return file_data_proto_rawDescGZIP(), []int{1}
}
func (x *ChallengeProofResponse) GetOutput() []byte {
if x != nil {
return x.Output
}
return nil
}
func (x *ChallengeProofResponse) GetNextSkew() int64 {
if x != nil {
return x.NextSkew
}
return 0
}
var File_data_proto protoreflect.FileDescriptor
var file_data_proto_rawDesc = []byte{
0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x71, 0x75,
0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61,
0x74, 0x61, 0x2e, 0x70, 0x62, 0x22, 0x74, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e,
0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65,
0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04,
0x73, 0x6b, 0x65, 0x77, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x77, 0x5f, 0x6d, 0x73, 0x18, 0x04,
0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x77, 0x4d, 0x73, 0x22, 0x4d, 0x0a, 0x16, 0x43,
0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1b, 0x0a,
0x09, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
0x52, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x6b, 0x65, 0x77, 0x32, 0x8c, 0x01, 0x0a, 0x0e, 0x44,
0x61, 0x74, 0x61, 0x49, 0x50, 0x43, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a,
0x17, 0x43, 0x61, 0x6c, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65,
0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2e, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f,
0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f,
0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_data_proto_rawDescOnce sync.Once
file_data_proto_rawDescData = file_data_proto_rawDesc
)
func file_data_proto_rawDescGZIP() []byte {
file_data_proto_rawDescOnce.Do(func() {
file_data_proto_rawDescData = protoimpl.X.CompressGZIP(file_data_proto_rawDescData)
})
return file_data_proto_rawDescData
}
var file_data_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_data_proto_goTypes = []interface{}{
(*ChallengeProofRequest)(nil), // 0: quilibrium.node.data.pb.ChallengeProofRequest
(*ChallengeProofResponse)(nil), // 1: quilibrium.node.data.pb.ChallengeProofResponse
}
var file_data_proto_depIdxs = []int32{
0, // 0: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:input_type -> quilibrium.node.data.pb.ChallengeProofRequest
1, // 1: quilibrium.node.data.pb.DataIPCService.CalculateChallengeProof:output_type -> quilibrium.node.data.pb.ChallengeProofResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_data_proto_init() }
func file_data_proto_init() {
if File_data_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_data_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChallengeProofRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_data_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ChallengeProofResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_data_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_data_proto_goTypes,
DependencyIndexes: file_data_proto_depIdxs,
MessageInfos: file_data_proto_msgTypes,
}.Build()
File_data_proto = out.File
file_data_proto_rawDesc = nil
file_data_proto_goTypes = nil
file_data_proto_depIdxs = nil
}


@@ -0,0 +1,171 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: data.proto
/*
Package protobufs is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package protobufs
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_DataIPCService_CalculateChallengeProof_0(ctx context.Context, marshaler runtime.Marshaler, client DataIPCServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ChallengeProofRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CalculateChallengeProof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DataIPCService_CalculateChallengeProof_0(ctx context.Context, marshaler runtime.Marshaler, server DataIPCServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ChallengeProofRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CalculateChallengeProof(ctx, &protoReq)
return msg, metadata, err
}
// RegisterDataIPCServiceHandlerServer registers the http handlers for service DataIPCService to "mux".
// UnaryRPC :call DataIPCServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDataIPCServiceHandlerFromEndpoint instead.
func RegisterDataIPCServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DataIPCServiceServer) error {
mux.Handle("POST", pattern_DataIPCService_CalculateChallengeProof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof", runtime.WithHTTPPathPattern("/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_DataIPCService_CalculateChallengeProof_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_DataIPCService_CalculateChallengeProof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterDataIPCServiceHandlerFromEndpoint is same as RegisterDataIPCServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterDataIPCServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.DialContext(ctx, endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterDataIPCServiceHandler(ctx, mux, conn)
}
// RegisterDataIPCServiceHandler registers the http handlers for service DataIPCService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterDataIPCServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterDataIPCServiceHandlerClient(ctx, mux, NewDataIPCServiceClient(conn))
}
// RegisterDataIPCServiceHandlerClient registers the http handlers for service DataIPCService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DataIPCServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DataIPCServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "DataIPCServiceClient" to call the correct interceptors.
func RegisterDataIPCServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DataIPCServiceClient) error {
mux.Handle("POST", pattern_DataIPCService_CalculateChallengeProof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof", runtime.WithHTTPPathPattern("/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_DataIPCService_CalculateChallengeProof_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_DataIPCService_CalculateChallengeProof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_DataIPCService_CalculateChallengeProof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"quilibrium.node.data.pb.DataIPCService", "CalculateChallengeProof"}, ""))
)
var (
forward_DataIPCService_CalculateChallengeProof_0 = runtime.ForwardResponseMessage
)

21
node/protobufs/data.proto Normal file

@@ -0,0 +1,21 @@
syntax = "proto3";
package quilibrium.node.data.pb;
option go_package = "source.quilibrium.com/quilibrium/monorepo/node/protobufs";
message ChallengeProofRequest {
bytes challenge = 1;
uint32 core = 2;
int64 skew = 3;
int64 now_ms = 4;
}
message ChallengeProofResponse {
bytes output = 1;
int64 next_skew = 2;
}
service DataIPCService {
rpc CalculateChallengeProof(ChallengeProofRequest) returns (ChallengeProofResponse);
}
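As an illustration of the wire contract above, here is a minimal sketch of a Go client inside the monorepo's node module, mirroring what createParallelDataClients does in the master clock engine. It assumes a data worker spawned with --core=1 (serving core id 0) is already listening on the default /ip4/127.0.0.1/tcp/40000; the challenge, skew, and timestamp values are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)

func main() {
	// Dial the worker for core id 0, assumed to be listening on the default
	// 127.0.0.1:40000 (the engine resolves the multiaddr to this host:port).
	conn, err := grpc.Dial(
		"127.0.0.1:40000",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := protobufs.NewDataIPCServiceClient(conn)
	resp, err := client.CalculateChallengeProof(
		context.Background(),
		&protobufs.ChallengeProofRequest{
			Challenge: []byte{0x01, 0x02, 0x03}, // placeholder challenge bytes
			Core:      0,                        // must match the worker's core id
			Skew:      120000,                   // placeholder starting skew
			NowMs:     time.Now().UnixMilli(),
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("proof: %d bytes, next skew: %d\n", len(resp.Output), resp.NextSkew)
}

Dialing the plain host:port here is equivalent to what the engine does after resolving the configured multiaddr with mn.DialArgs.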


@@ -0,0 +1,109 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.21.12
// source: data.proto
package protobufs
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
DataIPCService_CalculateChallengeProof_FullMethodName = "/quilibrium.node.data.pb.DataIPCService/CalculateChallengeProof"
)
// DataIPCServiceClient is the client API for DataIPCService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DataIPCServiceClient interface {
CalculateChallengeProof(ctx context.Context, in *ChallengeProofRequest, opts ...grpc.CallOption) (*ChallengeProofResponse, error)
}
type dataIPCServiceClient struct {
cc grpc.ClientConnInterface
}
func NewDataIPCServiceClient(cc grpc.ClientConnInterface) DataIPCServiceClient {
return &dataIPCServiceClient{cc}
}
func (c *dataIPCServiceClient) CalculateChallengeProof(ctx context.Context, in *ChallengeProofRequest, opts ...grpc.CallOption) (*ChallengeProofResponse, error) {
out := new(ChallengeProofResponse)
err := c.cc.Invoke(ctx, DataIPCService_CalculateChallengeProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DataIPCServiceServer is the server API for DataIPCService service.
// All implementations must embed UnimplementedDataIPCServiceServer
// for forward compatibility
type DataIPCServiceServer interface {
CalculateChallengeProof(context.Context, *ChallengeProofRequest) (*ChallengeProofResponse, error)
mustEmbedUnimplementedDataIPCServiceServer()
}
// UnimplementedDataIPCServiceServer must be embedded to have forward compatible implementations.
type UnimplementedDataIPCServiceServer struct {
}
func (UnimplementedDataIPCServiceServer) CalculateChallengeProof(context.Context, *ChallengeProofRequest) (*ChallengeProofResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CalculateChallengeProof not implemented")
}
func (UnimplementedDataIPCServiceServer) mustEmbedUnimplementedDataIPCServiceServer() {}
// UnsafeDataIPCServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to DataIPCServiceServer will
// result in compilation errors.
type UnsafeDataIPCServiceServer interface {
mustEmbedUnimplementedDataIPCServiceServer()
}
func RegisterDataIPCServiceServer(s grpc.ServiceRegistrar, srv DataIPCServiceServer) {
s.RegisterService(&DataIPCService_ServiceDesc, srv)
}
func _DataIPCService_CalculateChallengeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ChallengeProofRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DataIPCServiceServer).CalculateChallengeProof(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: DataIPCService_CalculateChallengeProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DataIPCServiceServer).CalculateChallengeProof(ctx, req.(*ChallengeProofRequest))
}
return interceptor(ctx, in, info, handler)
}
// DataIPCService_ServiceDesc is the grpc.ServiceDesc for DataIPCService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var DataIPCService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "quilibrium.node.data.pb.DataIPCService",
HandlerType: (*DataIPCServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CalculateChallengeProof",
Handler: _DataIPCService_CalculateChallengeProof_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "data.proto",
}


@@ -0,0 +1,119 @@
package rpc
import (
"context"
"os"
"runtime"
"syscall"
"time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type DataWorkerIPCServer struct {
protobufs.UnimplementedDataIPCServiceServer
listenAddrGRPC string
logger *zap.Logger
coreId uint32
prover crypto.FrameProver
parentProcessId int
}
// CalculateChallengeProof implements protobufs.DataIPCServiceServer.
func (r *DataWorkerIPCServer) CalculateChallengeProof(
ctx context.Context,
req *protobufs.ChallengeProofRequest,
) (*protobufs.ChallengeProofResponse, error) {
if r.coreId != req.Core {
return nil, errors.Wrap(
errors.New("invalid core id"),
"calculate challenge proof",
)
}
proof, nextSkew, err := r.prover.CalculateChallengeProof(
req.Challenge,
uint32(r.coreId),
req.Skew,
req.NowMs,
)
if err != nil {
return nil, errors.Wrap(err, "calculate challenge proof")
}
return &protobufs.ChallengeProofResponse{
Output: proof,
NextSkew: nextSkew,
}, nil
}
func NewDataWorkerIPCServer(
listenAddrGRPC string,
logger *zap.Logger,
coreId uint32,
prover crypto.FrameProver,
parentProcessId int,
) (*DataWorkerIPCServer, error) {
return &DataWorkerIPCServer{
listenAddrGRPC: listenAddrGRPC,
logger: logger,
coreId: coreId,
prover: prover,
parentProcessId: parentProcessId,
}, nil
}
func (r *DataWorkerIPCServer) Start() error {
s := grpc.NewServer(
grpc.MaxRecvMsgSize(10*1024*1024),
grpc.MaxSendMsgSize(10*1024*1024),
)
protobufs.RegisterDataIPCServiceServer(s, r)
reflection.Register(s)
mg, err := multiaddr.NewMultiaddr(r.listenAddrGRPC)
if err != nil {
return errors.Wrap(err, "start")
}
lis, err := mn.Listen(mg)
if err != nil {
return errors.Wrap(err, "start")
}
go r.monitorParent()
if err := s.Serve(mn.NetListener(lis)); err != nil {
panic(err)
}
return nil
}
func (r *DataWorkerIPCServer) monitorParent() {
for {
time.Sleep(1 * time.Second)
proc, err := os.FindProcess(r.parentProcessId)
if err != nil {
r.logger.Error("parent process not found, terminating")
os.Exit(1)
}
// Windows returns an error if the process is dead, nobody else does
if runtime.GOOS != "windows" {
err := proc.Signal(syscall.Signal(0))
if err != nil {
r.logger.Error("parent process not found, terminating")
os.Exit(1)
}
}
}
}