v1.4.21-p1 (#279)

Cassandra Heart 2024-07-29 12:46:36 -05:00 committed by GitHub
parent 819d49d659
commit f640c09008
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 113 additions and 13 deletions

View File

@@ -70,6 +70,7 @@ func (n *Node) VerifyProofIntegrity() {
     if e != nil {
         panic(e)
     }
     dataProver := crypto.NewKZGInclusionProver(n.logger)
     wesoProver := crypto.NewWesolowskiFrameProver(n.logger)
@@ -79,6 +80,7 @@ func (n *Node) VerifyProofIntegrity() {
         if err != nil {
             panic(err)
         }
         idx, idxProof, idxCommit, idxKP := master.GetOutputs(o)
         ip := sha3.Sum512(idxProof)
@@ -89,7 +91,7 @@ func (n *Node) VerifyProofIntegrity() {
         }
         if !v {
-            panic(fmt.Sprintf("bad kzg proof at increment %d", i))
+            panic(fmt.Sprintf("bad kzg proof at increment %d", j))
         }
         wp := []byte{}
         wp = append(wp, n.pubSub.GetPeerID()...)
@@ -97,7 +99,7 @@ func (n *Node) VerifyProofIntegrity() {
         fmt.Printf("%x\n", wp)
         v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
         if !v {
-            panic(fmt.Sprintf("bad weso proof at increment %d", i))
+            panic(fmt.Sprintf("bad weso proof at increment %d", j))
         }
     }
 }
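The only functional change in this file is in the panic messages: the replay loop indexes each stored increment with j, the same value passed to VerifyChallengeProof, but the old messages interpolated an unrelated counter i. A minimal sketch of the loop shape; the loop bound and the outputs slice are assumptions, while the calls inside come from the hunks above:

// Sketch only; latestIncrement and outputs are assumed names, not node code.
for j := uint32(0); j <= latestIncrement; j++ {
    idx, idxProof, _, _ := master.GetOutputs(outputs[j])
    wp := []byte{}
    wp = append(wp, n.pubSub.GetPeerID()...)
    // ... remaining challenge bytes elided ...
    if !wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof) {
        // Reports the increment that actually failed, not an outer counter.
        panic(fmt.Sprintf("bad weso proof at increment %d", j))
    }
}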

View File

@@ -36,5 +36,5 @@ func FormatVersion(version []byte) string {
 }

 func GetPatchNumber() byte {
-    return 0x00
+    return 0x01
 }
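GetPatchNumber is what distinguishes this release from plain 1.4.21: the version bytes 0x01, 0x04, 0x15 decode to 1.4.21, and the non-zero patch byte is presumably where the "-p1" suffix in the tag above comes from. A rough illustration of that mapping; the real string assembly lives in FormatVersion, which this hunk does not show:

// Illustration only; FormatVersion's actual formatting is not in this diff.
version := []byte{0x01, 0x04, 0x15} // 1, 4, 21
patch := byte(0x01)
s := fmt.Sprintf("%d.%d.%d", version[0], version[1], version[2]) // "1.4.21"
if patch != 0x00 {
    s += fmt.Sprintf("-p%d", patch) // "1.4.21-p1"
}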

View File

@@ -585,6 +585,9 @@ func (w *WesolowskiFrameProver) CalculateChallengeProof(
     increment uint32,
 ) ([]byte, error) {
     difficulty := 200000 - (increment / 4)
+    if difficulty < 25000 || increment > 800000 {
+        difficulty = 25000
+    }

     instanceInput := binary.BigEndian.AppendUint32([]byte{}, core)
     instanceInput = append(instanceInput, challenge...)
@@ -604,6 +607,9 @@ func (w *WesolowskiFrameProver) VerifyChallengeProof(
     proof []byte,
 ) bool {
     difficulty := 200000 - (increment / 4)
+    if difficulty < 25000 || increment > 800000 {
+        difficulty = 25000
+    }

     if len(proof) != 516 {
         return false
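Both CalculateChallengeProof and VerifyChallengeProof now clamp the difficulty. Without the clamp, 200000 - (increment / 4) reaches 25000 at increment 700000, and past increment 800000 the unsigned subtraction would wrap around to a huge value instead of going negative. A standalone sketch of the rule; the helper name is assumed, the constants come from the hunks above:

// challengeDifficulty is a hypothetical helper mirroring the clamp above.
func challengeDifficulty(increment uint32) uint32 {
    difficulty := 200000 - (increment / 4)
    // Floor at 25000; the increment > 800000 case guards the region where
    // the uint32 subtraction would otherwise wrap.
    if difficulty < 25000 || increment > 800000 {
        difficulty = 25000
    }
    return difficulty
}

// challengeDifficulty(0)      == 200000
// challengeDifficulty(700000) == 25000
// challengeDifficulty(900000) == 25000 (would wrap to ~4.29e9 without the guard)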

View File

@ -28,6 +28,7 @@ import (
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs" "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/utils" "source.quilibrium.com/quilibrium/monorepo/node/utils"
"github.com/cloudflare/circl/sign/ed448" "github.com/cloudflare/circl/sign/ed448"
@@ -532,32 +533,48 @@ func RunMigrationIfNeeded(
     configDir string,
     nodeConfig *config.Config,
 ) {
-    shouldMigrate := false
+    shouldMigrate13 := false
+    shouldMigrate15 := false
     migrationInfo := []byte{0x00, 0x00, 0x00}
     _, err := os.Stat(filepath.Join(configDir, "MIGRATIONS"))
     if err != nil && os.IsNotExist(err) {
         fmt.Println("Migrations file not found, will perform migration...")
-        shouldMigrate = true
+        shouldMigrate13 = true
+        shouldMigrate15 = true
     }

-    if !shouldMigrate {
+    if !shouldMigrate13 {
         migrationInfo, err = os.ReadFile(filepath.Join(configDir, "MIGRATIONS"))
         if err != nil {
             panic(err)
         }

         if len(migrationInfo) < 3 ||
-            !bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x013}) {
+            (!bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x013}) &&
+                !bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x15})) {
             fmt.Println("Migrations file outdated, will perform migration...")
-            shouldMigrate = true
+            shouldMigrate13 = true
+            shouldMigrate15 = true
         }
     }

-    // If subsequent migrations arise, we will need to distinguish by version
-    if shouldMigrate {
-        fmt.Println("Running migration...")
+    if !shouldMigrate15 {
+        migrationInfo, err = os.ReadFile(filepath.Join(configDir, "MIGRATIONS"))
+        if err != nil {
+            panic(err)
+        }
+
+        if len(migrationInfo) < 3 ||
+            !bytes.Equal(migrationInfo, []byte{0x01, 0x04, 0x15}) {
+            fmt.Println("Migrations file outdated, will perform migration...")
+            shouldMigrate13 = false
+            shouldMigrate15 = true
+        }
+    }
+
+    if shouldMigrate13 {
+        fmt.Println("Running 1.4.19 migration...")
+        // Easiest migration in the world.
         err := os.RemoveAll(filepath.Join(configDir, "store"))
         if err != nil {
             fmt.Println("ERROR: Could not remove store, please be sure to do this before restarting the node.")
@@ -576,6 +593,40 @@ func RunMigrationIfNeeded(
         fmt.Println("Migration completed.")
     }

+    if shouldMigrate15 {
+        fmt.Println("Running 1.4.21.1 migration...")
+        db := store.NewPebbleDB(nodeConfig.DB)
+        logger, _ := zap.NewProduction()
+        proofStore := store.NewPebbleDataProofStore(db, logger)
+        peerId := getPeerID(nodeConfig.P2P)
+
+        increment, _, _, err := proofStore.GetLatestDataTimeProof([]byte(peerId))
+        if err != nil && (!errors.Is(err, store.ErrNotFound) || increment != 0) {
+            panic(err)
+        }
+
+        if increment > 699999 {
+            err := proofStore.RewindToIncrement([]byte(peerId), 699999)
+            if err != nil {
+                panic(err)
+            }
+        }
+
+        db.Close()
+
+        err = os.WriteFile(
+            filepath.Join(configDir, "MIGRATIONS"),
+            []byte{0x01, 0x04, 0x15},
+            fs.FileMode(0600),
+        )
+        if err != nil {
+            fmt.Println("ERROR: Could not save migration file.")
+            panic(err)
+        }
+
+        fmt.Println("Migration completed.")
+    }
 }

 func RunSelfTestIfNeeded(
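The MIGRATIONS marker file holds three version bytes, so {0x01, 0x04, 0x013} is 1.4.19 and {0x01, 0x04, 0x15} is 1.4.21. shouldMigrate13 gates the destructive store wipe from 1.4.19, shouldMigrate15 gates the new proof rewind, and a node whose marker already says 1.4.19 runs only the lighter 1.4.21.1 step. A compressed sketch of that decision; the helper is hypothetical, not code from the repository, and relies on the standard bytes package:

// migrationsToRun is a hypothetical condensation of the flag logic above.
func migrationsToRun(marker []byte, exists bool) (run1419, run14211 bool) {
    switch {
    case !exists:
        return true, true // no MIGRATIONS file: wipe the store, then rewind
    case bytes.Equal(marker, []byte{0x01, 0x04, 0x15}):
        return false, false // already on 1.4.21: nothing to do
    case bytes.Equal(marker, []byte{0x01, 0x04, 0x13}):
        return false, true // 1.4.19 wipe already done: only the rewind remains
    default:
        return true, true // anything older or malformed: run both
    }
}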
@@ -843,7 +894,7 @@ func printBalance(config *config.Config) {
     fmt.Println("Unclaimed balance:", r.FloatString(12), "QUIL")
 }

-func printPeerID(p2pConfig *config.P2PConfig) {
+func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
     peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
     if err != nil {
         panic(errors.Wrap(err, "error unmarshaling peerkey"))
@@ -860,6 +911,12 @@ func printPeerID(p2pConfig *config.P2PConfig) {
         panic(errors.Wrap(err, "error getting peer id"))
     }

+    return id
+}
+
+func printPeerID(p2pConfig *config.P2PConfig) {
+    id := getPeerID(p2pConfig)
     fmt.Println("Peer ID: " + id.String())
 }
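Extracting getPeerID from printPeerID is what lets the 1.4.21.1 migration key the proof store by the node's own peer ID without duplicating the key-decoding logic. The two call sites, as they appear in the hunks above:

// From the migration block:
peerId := getPeerID(nodeConfig.P2P)
increment, _, _, err := proofStore.GetLatestDataTimeProof([]byte(peerId))

// From printPeerID:
id := getPeerID(p2pConfig)
fmt.Println("Peer ID: " + id.String())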

View File

@@ -49,6 +49,7 @@ type DataProofStore interface {
         output []byte,
         err error,
     )
+    RewindToIncrement(peerId []byte, increment uint32) error
 }

 var _ DataProofStore = (*PebbleDataProofStore)(nil)
@@ -487,6 +488,9 @@ func (p *PebbleDataProofStore) PutDataTimeProof(
     // upgrading on time, akin to a "difficulty bomb" in reverse, but locally
     // calculated.
     difficulty := 200000 - (increment / 4)
+    if difficulty < 25000 || increment > 800000 {
+        difficulty = 25000
+    }

     // Basis split on the estimated shard level for growth rate (in terms of
     // units): 240 (QUIL) * 8000000000 (conversion factor) / 1600000 (shards)
@@ -567,3 +571,34 @@ func (p *PebbleDataProofStore) GetLatestDataTimeProof(peerId []byte) (
     return increment, parallelism, output, err
 }

+func (p *PebbleDataProofStore) RewindToIncrement(
+    peerId []byte,
+    increment uint32,
+) error {
+    reward := new(big.Int)
+    for j := uint32(0); j <= increment; j++ {
+        _, parallelism, _, _, err := p.GetDataTimeProof(peerId, uint32(j))
+        if err != nil {
+            panic(err)
+        }
+
+        pomwBasis := big.NewInt(1200000)
+
+        reward = reward.Add(
+            reward,
+            new(big.Int).Mul(pomwBasis, big.NewInt(int64(parallelism))),
+        )
+    }
+
+    latest := []byte{}
+    latest = binary.BigEndian.AppendUint32(latest, increment)
+    latest = append(latest, reward.FillBytes(make([]byte, 32))...)
+
+    if err := p.db.Set(dataTimeProofLatestKey(peerId), latest); err != nil {
+        return errors.Wrap(err, "put data time proof")
+    }
+
+    return nil
+}
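RewindToIncrement replays every stored proof up to the target increment and rebuilds the latest-proof record: a 4-byte big-endian increment followed by the 32-byte big-endian cumulative reward, accrued at 1,200,000 units per unit of parallelism per increment (the basis derived in PutDataTimeProof's comment). A worked example, assuming a constant parallelism that real proof histories need not have (fragment; uses math/big):

// Illustration only: constant parallelism of 1 across increments 0..699999.
parallelism := int64(1)
perIncrement := new(big.Int).Mul(big.NewInt(1200000), big.NewInt(parallelism))
total := new(big.Int).Mul(perIncrement, big.NewInt(700000)) // 700,000 increments
fmt.Println(total) // 840000000000 units, i.e. 105 QUIL at the 8,000,000,000
                   // units-per-QUIL conversion factor cited above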