v1.4.21-p1

Cassandra Heart 2024-07-28 16:40:16 -05:00
parent 819d49d659
commit cf81c167f9
No known key found for this signature in database
GPG Key ID: 6352152859385958
5 changed files with 113 additions and 13 deletions

View File

@@ -70,6 +70,7 @@ func (n *Node) VerifyProofIntegrity() {
if e != nil {
panic(e)
}
dataProver := crypto.NewKZGInclusionProver(n.logger)
wesoProver := crypto.NewWesolowskiFrameProver(n.logger)
@@ -79,6 +80,7 @@ func (n *Node) VerifyProofIntegrity() {
if err != nil {
panic(err)
}
idx, idxProof, idxCommit, idxKP := master.GetOutputs(o)
ip := sha3.Sum512(idxProof)
@@ -89,7 +91,7 @@
}
if !v {
panic(fmt.Sprintf("bad kzg proof at increment %d", i))
panic(fmt.Sprintf("bad kzg proof at increment %d", j))
}
wp := []byte{}
wp = append(wp, n.pubSub.GetPeerID()...)
@@ -97,7 +99,7 @@
fmt.Printf("%x\n", wp)
v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
if !v {
panic(fmt.Sprintf("bad weso proof at increment %d", i))
panic(fmt.Sprintf("bad weso proof at increment %d", j))
}
}
}

View File

@@ -36,5 +36,5 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x00
return 0x01
}

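Context for the one-byte change above: bumping GetPatchNumber from 0x00 to 0x01 is what marks this build as the -p1 hotfix named in the commit title. A minimal standalone sketch of rendering a patch byte next to the version bytes (the version slice and the -p suffix format are illustrative assumptions, not the repo's FormatVersion):

package main

import "fmt"

func main() {
	version := []byte{0x01, 0x04, 0x15} // assumed 1.4.21 version bytes
	patch := byte(0x01)                 // the value GetPatchNumber now returns

	tag := fmt.Sprintf("v%d.%d.%d", version[0], version[1], version[2])
	if patch != 0x00 {
		tag += fmt.Sprintf("-p%d", patch)
	}
	fmt.Println(tag) // v1.4.21-p1
}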
View File

@@ -585,6 +585,9 @@ func (w *WesolowskiFrameProver) CalculateChallengeProof(
increment uint32,
) ([]byte, error) {
difficulty := 200000 - (increment / 4)
if difficulty < 25000 || increment > 800000 {
difficulty = 25000
}
instanceInput := binary.BigEndian.AppendUint32([]byte{}, core)
instanceInput = append(instanceInput, challenge...)
@@ -604,6 +607,9 @@ func (w *WesolowskiFrameProver) VerifyChallengeProof(
proof []byte,
) bool {
difficulty := 200000 - (increment / 4)
if difficulty < 25000 || increment > 800000 {
difficulty = 25000
}
if len(proof) != 516 {
return false

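A note on the clamp added to both CalculateChallengeProof and VerifyChallengeProof: with increment a uint32, 200000 - (increment / 4) reaches the intended floor of 25,000 at increment 700,000 and wraps around to a huge value (rather than going negative) once the increment passes 800,000, which is why the guard checks increment > 800000 in addition to difficulty < 25000. A minimal standalone sketch of the arithmetic (not the repo's API, just the formula):

package main

import "fmt"

// clampedDifficulty mirrors the clamp introduced in this commit: difficulty
// decays by one for every four increments and is floored at 25,000. The
// increment > 800000 term matters because the uint32 subtraction wraps to a
// large value there instead of dropping below 25,000.
func clampedDifficulty(increment uint32) uint32 {
	difficulty := 200000 - (increment / 4)
	if difficulty < 25000 || increment > 800000 {
		difficulty = 25000
	}
	return difficulty
}

func main() {
	for _, inc := range []uint32{0, 400000, 700000, 800000, 900000} {
		fmt.Printf("increment %7d -> difficulty %d\n", inc, clampedDifficulty(inc))
	}
}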
View File

@@ -28,6 +28,7 @@ import (
"golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils"
"github.com/cloudflare/circl/sign/ed448"
@@ -532,32 +533,48 @@ func RunMigrationIfNeeded(
configDir string,
nodeConfig *config.Config,
) {
shouldMigrate := false
shouldMigrate13 := false
shouldMigrate15 := false
migrationInfo := []byte{0x00, 0x00, 0x00}
_, err := os.Stat(filepath.Join(configDir, "MIGRATIONS"))
if err != nil && os.IsNotExist(err) {
fmt.Println("Migrations file not found, will perform migration...")
shouldMigrate = true
shouldMigrate13 = true
shouldMigrate15 = true
}
if !shouldMigrate {
if !shouldMigrate13 {
migrationInfo, err = os.ReadFile(filepath.Join(configDir, "MIGRATIONS"))
if err != nil {
panic(err)
}
if len(migrationInfo) < 3 ||
!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x013}) {
(!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x013}) &&
!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x15})) {
fmt.Println("Migrations file outdated, will perform migration...")
shouldMigrate = true
shouldMigrate13 = true
shouldMigrate15 = true
}
}
// If subsequent migrations arise, we will need to distinguish by version
if shouldMigrate {
fmt.Println("Running migration...")
if !shouldMigrate15 {
migrationInfo, err = os.ReadFile(filepath.Join(configDir, "MIGRATIONS"))
if err != nil {
panic(err)
}
if len(migrationInfo) < 3 ||
!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x15}) {
fmt.Println("Migrations file outdated, will perform migration...")
shouldMigrate13 = false
shouldMigrate15 = true
}
}
if shouldMigrate13 {
fmt.Println("Running 1.4.19 migration...")
// Easiest migration in the world.
err := os.RemoveAll(filepath.Join(configDir, "store"))
if err != nil {
fmt.Println("ERROR: Could not remove store, please be sure to do this before restarting the node.")
@@ -576,6 +593,40 @@ func RunMigrationIfNeeded(
fmt.Println("Migration completed.")
}
if shouldMigrate15 {
fmt.Println("Running 1.4.21.1 migration...")
db := store.NewPebbleDB(nodeConfig.DB)
logger, _ := zap.NewProduction()
proofStore := store.NewPebbleDataProofStore(db, logger)
peerId := getPeerID(nodeConfig.P2P)
increment, _, _, err := proofStore.GetLatestDataTimeProof([]byte(peerId))
if err != nil && (!errors.Is(err, store.ErrNotFound) || increment != 0) {
panic(err)
}
if increment > 699999 {
err := proofStore.RewindToIncrement([]byte(peerId), 699999)
if err != nil {
panic(err)
}
}
db.Close()
err = os.WriteFile(
filepath.Join(configDir, "MIGRATIONS"),
[]byte{0x01, 0x04, 0x15},
fs.FileMode(0600),
)
if err != nil {
fmt.Println("ERROR: Could not save migration file.")
panic(err)
}
fmt.Println("Migration completed.")
}
}
func RunSelfTestIfNeeded(
@@ -843,7 +894,7 @@ func printBalance(config *config.Config) {
fmt.Println("Unclaimed balance:", r.FloatString(12), "QUIL")
}
func printPeerID(p2pConfig *config.P2PConfig) {
func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
@@ -860,6 +911,12 @@ func printPeerID(p2pConfig *config.P2PConfig) {
panic(errors.Wrap(err, "error getting peer id"))
}
return id
}
func printPeerID(p2pConfig *config.P2PConfig) {
id := getPeerID(p2pConfig)
fmt.Println("Peer ID: " + id.String())
}

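For readers tracing the migration flow: the MIGRATIONS file is a three-byte stamp recording the last migration that ran, and the two values the code compares against decode to 1.4.19 ({0x01, 0x04, 0x13}) and 1.4.21 ({0x01, 0x04, 0x15}). A node missing the file or holding an older stamp reruns the corresponding step, and the new 1.4.21.1 step rewinds any proof store whose latest increment exceeds 699,999 before writing the updated stamp. A minimal decoding sketch, assuming the bytes are read directly as version components:

package main

import "fmt"

func main() {
	// Stamps the migration code compares against and writes out.
	markers := [][]byte{
		{0x01, 0x04, 0x13}, // present after the 1.4.19 store wipe
		{0x01, 0x04, 0x15}, // written after the 1.4.21.1 rewind
	}
	for _, m := range markers {
		fmt.Printf("%d.%d.%d\n", m[0], m[1], m[2]) // 1.4.19, then 1.4.21
	}
}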
View File

@@ -49,6 +49,7 @@ type DataProofStore interface {
output []byte,
err error,
)
RewindToIncrement(peerId []byte, increment uint32) error
}
var _ DataProofStore = (*PebbleDataProofStore)(nil)
@@ -487,6 +488,9 @@ func (p *PebbleDataProofStore) PutDataTimeProof(
// upgrading on time, akin to a "difficulty bomb" in reverse, but locally
// calculated.
difficulty := 200000 - (increment / 4)
if difficulty < 25000 || increment > 800000 {
difficulty = 25000
}
// Basis split on the estimated shard level for growth rate (in terms of
// units): 240 (QUIL) * 8000000000 (conversion factor) / 1600000 (shards)
@@ -567,3 +571,34 @@ func (p *PebbleDataProofStore) GetLatestDataTimeProof(peerId []byte) (
return increment, parallelism, output, err
}
func (p *PebbleDataProofStore) RewindToIncrement(
peerId []byte,
increment uint32,
) error {
reward := new(big.Int)
for j := uint32(0); j <= increment; j++ {
_, parallelism, _, _, err := p.GetDataTimeProof(peerId, uint32(j))
if err != nil {
panic(err)
}
pomwBasis := big.NewInt(1200000)
reward = reward.Add(
reward,
new(big.Int).Mul(pomwBasis, big.NewInt(int64(parallelism))),
)
}
latest := []byte{}
latest = binary.BigEndian.AppendUint32(latest, increment)
latest = append(latest, reward.FillBytes(make([]byte, 32))...)
if err := p.db.Set(dataTimeProofLatestKey(peerId), latest); err != nil {
return errors.Wrap(err, "put data time proof")
}
return nil
}
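A sanity check on the new RewindToIncrement: the 1,200,000 pomwBasis is the same figure as the existing comment in PutDataTimeProof, 240 (QUIL) * 8000000000 (conversion factor) / 1600000 (shards) = 1,200,000 units, so the rewound balance is just that basis times each increment's parallelism, summed from increment 0 through the rewind target. A minimal sketch of the accumulation (the parallelism values are made up for illustration, not store data):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 240 QUIL * 8000000000 (conversion factor) / 1600000 (shards) = 1,200,000 units.
	pomwBasis := big.NewInt(240 * 8000000000 / 1600000)
	fmt.Println(pomwBasis) // 1200000

	// Rebuild the reward the way RewindToIncrement does: basis * parallelism,
	// accumulated over every retained increment.
	parallelism := []int64{16, 16, 32}
	reward := new(big.Int)
	for _, p := range parallelism {
		reward.Add(reward, new(big.Int).Mul(pomwBasis, big.NewInt(p)))
	}
	fmt.Println(reward) // 76800000
}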