mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git
synced 2024-12-26 16:45:18 +00:00

commit 26195cef5f (parent 35561a9e41): v1.4.20 base
@@ -21,8 +21,8 @@ import (
 )
 
 const (
-	// BlossomSubID_v12 is the protocol ID for version 1.2.0 of the BlossomSub protocol.
-	BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.0")
+	// BlossomSubID_v12 is the protocol ID for version 1.2.1 of the BlossomSub protocol.
+	BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.1")
 )
 
 // Defines the default BlossomSub parameters.
@@ -52,7 +52,7 @@ var (
 	BlossomSubOpportunisticGraftPeers = 2
 	BlossomSubGraftFloodThreshold     = 10 * time.Second
 	BlossomSubMaxIHaveLength          = 5000
-	BlossomSubMaxIHaveMessages        = 100
+	BlossomSubMaxIHaveMessages        = 10
 	BlossomSubIWantFollowupTime       = 3 * time.Second
 )
 
@@ -1,5 +1,2 @@
-// Deprecated: The database-backed peerstore will be removed from go-libp2p in the future.
-// Use the memory peerstore (pstoremem) instead.
-// For more details see https://github.com/libp2p/go-libp2p/issues/2329
-// and https://github.com/libp2p/go-libp2p/issues/2355.
+// libp2p deprecated this, we disagree
 package pstoreds
@@ -725,11 +725,11 @@ func logoVersion(width int) string {
 		out += " ''---.. ...---'' ##\n"
 		out += " ''----------''\n"
 		out += " \n"
-		out += " Quilibrium Node - v" + config.GetVersionString() + " – Betelgeuse\n"
+		out += " Quilibrium Node - v" + config.GetVersionString() + " – Solstice\n"
 		out += " \n"
 		out += " DB Console\n"
 	} else {
-		out = "Quilibrium Node - v" + config.GetVersionString() + " – Betelgeuse - DB Console\n"
+		out = "Quilibrium Node - v" + config.GetVersionString() + " – Solstice - DB Console\n"
 	}
 	return out
 }
@@ -89,7 +89,7 @@ func (n *Node) VerifyProofIntegrity() {
 		}
 
 		if !v {
-			panic("bad kzg proof")
+			panic(fmt.Sprintf("bad kzg proof at increment %d", i))
 		}
 		wp := []byte{}
 		wp = append(wp, n.pubSub.GetPeerID()...)
@@ -97,7 +97,7 @@ func (n *Node) VerifyProofIntegrity() {
 		fmt.Printf("%x\n", wp)
 		v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
 		if !v {
-			panic("bad weso proof")
+			panic(fmt.Sprintf("bad weso proof at increment %d", i))
 		}
 	}
 }
@@ -56,9 +56,11 @@ var storeSet = wire.NewSet(
 	store.NewPebbleClockStore,
 	store.NewPebbleKeyStore,
 	store.NewPebbleDataProofStore,
+	store.NewPeerstoreDatastore,
 	wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
 	wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
 	wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)),
+	wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)),
 )
 
 var pubSubSet = wire.NewSet(
@@ -89,6 +91,7 @@ var consensusSet = wire.NewSet(
 func NewDHTNode(*config.Config) (*DHTNode, error) {
 	panic(wire.Build(
 		debugLoggerSet,
+		storeSet,
 		pubSubSet,
 		newDHTNode,
 	))
@@ -24,8 +24,14 @@ import (
 
 func NewDHTNode(configConfig *config.Config) (*DHTNode, error) {
 	p2PConfig := configConfig.P2P
+	dbConfig := configConfig.DB
+	pebbleDB := store.NewPebbleDB(dbConfig)
+	peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+	if err != nil {
+		return nil, err
+	}
 	zapLogger := debugLogger()
-	blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+	blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
 	dhtNode, err := newDHTNode(blossomSub)
 	if err != nil {
 		return nil, err
@@ -42,7 +48,11 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes
 	keyConfig := configConfig.Key
 	fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
 	p2PConfig := configConfig.P2P
-	blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+	peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+	if err != nil {
+		return nil, err
+	}
+	blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
 	engineConfig := configConfig.Engine
 	kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
 	wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@@ -65,7 +75,11 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo
 	keyConfig := configConfig.Key
 	fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
 	p2PConfig := configConfig.P2P
-	blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+	peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+	if err != nil {
+		return nil, err
+	}
+	blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
 	engineConfig := configConfig.Engine
 	kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
 	wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@@ -125,7 +139,7 @@ var debugLoggerSet = wire.NewSet(
 
 var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))
 
-var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)))
+var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))
 
 var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))
 
@@ -14,7 +14,7 @@ func GetMinimumVersion() []byte {
 }
 
 func GetVersion() []byte {
-	return []byte{0x01, 0x04, 0x13}
+	return []byte{0x01, 0x04, 0x14}
 }
 
 func GetVersionString() string {
@@ -22,12 +22,19 @@ func GetVersionString() string {
 }
 
 func FormatVersion(version []byte) string {
+	if len(version) == 3 {
 		return fmt.Sprintf(
 			"%d.%d.%d",
 			version[0], version[1], version[2],
 		)
+	} else {
+		return fmt.Sprintf(
+			"%d.%d.%d-p%d",
+			version[0], version[1], version[2], version[3],
+		)
+	}
 }
 
 func GetPatchNumber() byte {
-	return 0x01
+	return 0x00
 }
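For illustration, a minimal standalone sketch of the FormatVersion behavior added above (hypothetical demo program, not a file in this commit): three version bytes render as major.minor.patch, and a fourth byte becomes a -pN patch suffix.

package main

import "fmt"

// formatVersion mirrors the branch added in the hunk above.
func formatVersion(version []byte) string {
	if len(version) == 3 {
		return fmt.Sprintf("%d.%d.%d", version[0], version[1], version[2])
	}
	return fmt.Sprintf(
		"%d.%d.%d-p%d",
		version[0], version[1], version[2], version[3],
	)
}

func main() {
	fmt.Println(formatVersion([]byte{0x01, 0x04, 0x14}))       // 1.4.20
	fmt.Println(formatVersion([]byte{0x01, 0x04, 0x14, 0x01})) // 1.4.20-p1
}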
@@ -37,14 +37,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
 	}
 
 	switch any.TypeUrl {
-	case protobufs.ClockFrameType:
-		if err := e.handleClockFrameData(
-			message.From,
-			any,
-		); err != nil {
-			return errors.Wrap(err, "handle message")
-		}
-		return nil
 	case protobufs.SelfTestReportType:
 		if err := e.handleSelfTestReport(
 			message.From,
@@ -58,60 +50,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
 	return errors.Wrap(errors.New("invalid message"), "handle message")
 }
 
-func (e *MasterClockConsensusEngine) handleClockFrameData(
-	peerID []byte,
-	any *anypb.Any,
-) error {
-	frame := &protobufs.ClockFrame{}
-	if err := any.UnmarshalTo(frame); err != nil {
-		return errors.Wrap(err, "handle clock frame data")
-	}
-
-	head, err := e.masterTimeReel.Head()
-	if err != nil {
-		panic(err)
-	}
-
-	if frame.FrameNumber < head.FrameNumber {
-		return nil
-	}
-
-	if e.difficulty != frame.Difficulty {
-		e.logger.Debug(
-			"frame difficulty mismatched",
-			zap.Uint32("difficulty", frame.Difficulty),
-		)
-		return errors.Wrap(
-			errors.New("frame difficulty"),
-			"handle clock frame data",
-		)
-	}
-
-	e.logger.Debug(
-		"got clock frame",
-		zap.Binary("sender", peerID),
-		zap.Binary("filter", frame.Filter),
-		zap.Uint64("frame_number", frame.FrameNumber),
-		zap.Int("proof_count", len(frame.AggregateProofs)),
-	)
-
-	go func() {
-		select {
-		case e.frameValidationCh <- frame:
-		default:
-			e.logger.Debug(
-				"dropped frame due to overwhelmed queue",
-				zap.Binary("sender", peerID),
-				zap.Binary("filter", frame.Filter),
-				zap.Uint64("frame_number", frame.FrameNumber),
-				zap.Int("proof_count", len(frame.AggregateProofs)),
-			)
-		}
-	}()
-
-	return nil
-}
-
 func (e *MasterClockConsensusEngine) handleSelfTestReport(
 	peerID []byte,
 	any *anypb.Any,
@@ -252,7 +190,6 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
 	return nil
 }
 
-// This does not publish any longer, frames strictly are picked up from sync
 func (e *MasterClockConsensusEngine) publishProof(
 	frame *protobufs.ClockFrame,
 ) error {
@@ -1,12 +1,9 @@
 package master
 
 import (
-	"context"
 	"time"
 
-	"github.com/mr-tron/base58"
 	"github.com/pkg/errors"
-	"go.uber.org/zap"
 	"source.quilibrium.com/quilibrium/monorepo/node/consensus"
 	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
 	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@@ -65,87 +62,10 @@ func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
 func (e *MasterClockConsensusEngine) collect(
 	currentFramePublished *protobufs.ClockFrame,
 ) (*protobufs.ClockFrame, error) {
-	e.logger.Debug("collecting vdf proofs")
-
 	latest, err := e.masterTimeReel.Head()
 	if err != nil {
 		panic(err)
 	}
 
-	// With the increase of network size, constrain down to top thirty
-	peers, err := e.GetMostAheadPeers()
-	if err != nil {
-		return latest, nil
-	}
-
-	for i := 0; i < len(peers); i++ {
-		peer := peers[i]
-		e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
-
-		cc, err := e.pubSub.GetDirectChannel(peer, "validation")
-		if err != nil {
-			e.logger.Error(
-				"could not connect for sync",
-				zap.String("peer_id", base58.Encode(peer)),
-			)
-			continue
-		}
-		client := protobufs.NewValidationServiceClient(cc)
-		syncClient, err := client.Sync(
-			context.Background(),
-			&protobufs.SyncRequest{
-				FramesRequest: &protobufs.ClockFramesRequest{
-					Filter:          e.filter,
-					FromFrameNumber: latest.FrameNumber,
-					ToFrameNumber:   0,
-				},
-			},
-		)
-		if err != nil {
-			cc.Close()
-			continue
-		}
-
-		for msg, err := syncClient.Recv(); msg != nil &&
-			err == nil; msg, err = syncClient.Recv() {
-			if msg.FramesResponse == nil {
-				break
-			}
-
-			for _, frame := range msg.FramesResponse.ClockFrames {
-				frame := frame
-
-				if frame.FrameNumber < latest.FrameNumber {
-					continue
-				}
-
-				if e.difficulty != frame.Difficulty {
-					e.logger.Debug(
-						"frame difficulty mismatched",
-						zap.Uint32("difficulty", frame.Difficulty),
-					)
-					break
-				}
-
-				if err := e.frameProver.VerifyMasterClockFrame(frame); err != nil {
-					e.logger.Error(
-						"peer returned invalid frame",
-						zap.String("peer_id", base58.Encode(peer)))
-					e.pubSub.SetPeerScore(peer, -1000)
-					break
-				}
-
-				e.masterTimeReel.Insert(frame, false)
-				latest = frame
-			}
-		}
-		if err != nil {
-			cc.Close()
-			break
-		}
-		cc.Close()
-		break
-	}
-
 	return latest, nil
 }
@@ -421,27 +421,7 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
 	}()
 
 	go func() {
-		newFrameCh := e.masterTimeReel.NewFrameCh()
-
 		for e.state < consensus.EngineStateStopping {
-			var err error
-			select {
-			case frame := <-newFrameCh:
-				currentFrame := frame
-				latestFrame := frame
-				if latestFrame, err = e.collect(currentFrame); err != nil {
-					e.logger.Error("could not collect", zap.Error(err))
-					latestFrame = currentFrame
-					continue
-				}
-				if latestFrame, err = e.prove(latestFrame); err != nil {
-					e.logger.Error("could not prove", zap.Error(err))
-					latestFrame = currentFrame
-				}
-				if err = e.publishProof(latestFrame); err != nil {
-					e.logger.Error("could not publish", zap.Error(err))
-				}
-			case <-time.After(20 * time.Second):
 			frame, err := e.masterTimeReel.Head()
 			if err != nil {
 				panic(err)
@@ -455,7 +435,6 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
 				e.logger.Error("could not publish", zap.Error(err))
 			}
 		}
-		}
 	}()
 
 	go func() {
@@ -511,7 +490,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
 	for i := uint32(0); i < parallelism; i++ {
 		i := i
 		go func() {
-			for j := 3; j > 0; j-- {
+			for j := 3; j >= 0; j-- {
 				resp, err :=
 					clients[i].CalculateChallengeProof(
 						context.Background(),
@@ -522,7 +501,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
 					},
 				)
 				if err != nil {
-					if j == 1 || len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
+					if j == 0 {
 						panic(err)
 					}
 					if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
@@ -533,7 +512,18 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
 						time.Sleep(50 * time.Millisecond)
 						clients[i], err = e.createParallelDataClientsFromListAndIndex(i)
 						if err != nil {
-							panic(err)
+							e.logger.Error("failed to reconnect", zap.Error(err))
+						}
+					} else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
+						e.logger.Error(
+							"client failed, reconnecting after 50ms",
+							zap.Uint32("client", i),
+						)
+						time.Sleep(50 * time.Millisecond)
+						clients[i], err =
+							e.createParallelDataClientsFromBaseMultiaddrAndIndex(i)
+						if err != nil {
+							e.logger.Error("failed to reconnect", zap.Error(err))
 						}
 					}
 					continue
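The retry loop above now runs four attempts (j = 3 down to 0), logs reconnect failures instead of panicking, and only panics once every attempt is exhausted. A minimal standalone sketch of that shape (hypothetical helper, not part of this commit):

package main

import (
	"errors"
	"fmt"
)

// callWithReconnect mirrors the retry shape above: four attempts,
// reconnect failures logged rather than fatal, and a panic only once
// every attempt has been exhausted.
func callWithReconnect(call func() error, reconnect func() error) {
	for j := 3; j >= 0; j-- {
		err := call()
		if err == nil {
			return
		}
		if j == 0 {
			panic(err) // all retries exhausted
		}
		if rerr := reconnect(); rerr != nil {
			fmt.Println("failed to reconnect:", rerr) // no longer fatal
		}
	}
}

func main() {
	attempts := 0
	callWithReconnect(
		func() error {
			attempts++
			if attempts < 3 {
				return errors.New("transient worker failure")
			}
			return nil
		},
		func() error { return nil },
	)
	fmt.Println("succeeded after", attempts, "attempts")
}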
@@ -593,12 +583,12 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
 ) {
 	ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
 	if err != nil {
-		panic(err)
+		return nil, errors.Wrap(err, "create parallel data client")
 	}
 
 	_, addr, err := mn.DialArgs(ma)
 	if err != nil {
-		panic(err)
+		return nil, errors.Wrap(err, "create parallel data client")
 	}
 
 	conn, err := grpc.Dial(
@@ -612,7 +602,66 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
 		),
 	)
 	if err != nil {
-		panic(err)
+		return nil, errors.Wrap(err, "create parallel data client")
+	}
+
+	client := protobufs.NewDataIPCServiceClient(conn)
+
+	e.logger.Info(
+		"connected to data worker process",
+		zap.Uint32("client", index),
+	)
+	return client, nil
+}
+
+func (
+	e *MasterClockConsensusEngine,
+) createParallelDataClientsFromBaseMultiaddrAndIndex(
+	index uint32,
+) (
+	protobufs.DataIPCServiceClient,
+	error,
+) {
+	e.logger.Info(
+		"re-connecting to data worker process",
+		zap.Uint32("client", index),
+	)
+
+	if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
+		e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
+	}
+
+	if e.engineConfig.DataWorkerBaseListenPort == 0 {
+		e.engineConfig.DataWorkerBaseListenPort = 40000
+	}
+
+	ma, err := multiaddr.NewMultiaddr(
+		fmt.Sprintf(
+			e.engineConfig.DataWorkerBaseListenMultiaddr,
+			int(e.engineConfig.DataWorkerBaseListenPort)+int(index),
+		),
+	)
+	if err != nil {
+		return nil, errors.Wrap(err, "create parallel data client")
+	}
+
+	_, addr, err := mn.DialArgs(ma)
+	if err != nil {
+		return nil, errors.Wrap(err, "create parallel data client")
+	}
+
+	conn, err := grpc.Dial(
+		addr,
+		grpc.WithTransportCredentials(
+			insecure.NewCredentials(),
+		),
+		grpc.WithDefaultCallOptions(
+			grpc.MaxCallSendMsgSize(10*1024*1024),
+			grpc.MaxCallRecvMsgSize(10*1024*1024),
+		),
+	)
+	if err != nil {
+		return nil, errors.Wrap(err, "create parallel data client")
 	}
 
 	client := protobufs.NewDataIPCServiceClient(conn)
@@ -22,7 +22,7 @@ replace github.com/cockroachdb/pebble => ../pebble
 require (
 	filippo.io/edwards25519 v1.0.0-rc.1
 	github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
-	github.com/libp2p/go-libp2p v0.31.0
+	github.com/libp2p/go-libp2p v0.35.1
 	github.com/libp2p/go-libp2p-gostream v0.6.0
 	github.com/libp2p/go-libp2p-kad-dht v0.23.0
 	google.golang.org/protobuf v1.34.1
@@ -34,6 +34,7 @@ require (
 )
 
 require (
+	github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
 	github.com/pion/ice/v2 v2.3.24 // indirect
@@ -203,6 +203,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
node/main.go (22 changes)
@@ -113,6 +113,11 @@ var (
 		0,
 		"specifies the parent process pid for a data worker",
 	)
+	integrityCheck = flag.Bool(
+		"integrity-check",
+		false,
+		"runs an integrity check on the store, helpful for confirming backups are not corrupted (defaults to false)",
+	)
 )
 
 var signatories = []string{
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Loading ceremony state and starting node...")
|
fmt.Println("Loading ceremony state and starting node...")
|
||||||
|
|
||||||
|
if !*integrityCheck {
|
||||||
go spawnDataWorkers(nodeConfig)
|
go spawnDataWorkers(nodeConfig)
|
||||||
|
}
|
||||||
|
|
||||||
kzg.Init()
|
kzg.Init()
|
||||||
|
|
||||||
@@ -422,6 +430,13 @@ func main() {
 		panic(err)
 	}
 
+	if *integrityCheck {
+		fmt.Println("Running integrity check...")
+		node.VerifyProofIntegrity()
+		fmt.Println("Integrity check passed!")
+		return
+	}
+
 	repair(*configDirectory, node)
 
 	if nodeConfig.ListenGRPCMultiaddr != "" {
|
|||||||
for i := 1; i <= cores-1; i++ {
|
for i := 1; i <= cores-1; i++ {
|
||||||
i := i
|
i := i
|
||||||
go func() {
|
go func() {
|
||||||
|
for {
|
||||||
args := []string{
|
args := []string{
|
||||||
fmt.Sprintf("--core=%d", i),
|
fmt.Sprintf("--core=%d", i),
|
||||||
fmt.Sprintf("--parent-process=%d", os.Getpid()),
|
fmt.Sprintf("--parent-process=%d", os.Getpid()),
|
||||||
@ -491,6 +507,10 @@ func spawnDataWorkers(nodeConfig *config.Config) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dataWorkers[i-1] = cmd
|
dataWorkers[i-1] = cmd
|
||||||
|
cmd.Wait()
|
||||||
|
time.Sleep(25 * time.Millisecond)
|
||||||
|
fmt.Printf("Data worker %d stopped, restarting...\n", i)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -912,5 +932,5 @@ func printVersion() {
|
|||||||
patchString = fmt.Sprintf("-p%d", patch)
|
patchString = fmt.Sprintf("-p%d", patch)
|
||||||
}
|
}
|
||||||
fmt.Println(" ")
|
fmt.Println(" ")
|
||||||
fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " – Betelgeuse")
|
fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " – Solstice")
|
||||||
}
|
}
|
||||||
|
@@ -22,6 +22,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-libp2p/p2p/discovery/routing"
 	"github.com/libp2p/go-libp2p/p2p/discovery/util"
+	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
 	"github.com/mr-tron/base58"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/pkg/errors"
@@ -32,6 +33,7 @@ import (
 	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
 	"source.quilibrium.com/quilibrium/monorepo/node/config"
 	"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
+	"source.quilibrium.com/quilibrium/monorepo/node/store"
 )
 
 type BlossomSub struct {
@@ -61,7 +63,7 @@ var BITMASK_ALL = []byte{
 // While we iterate through these next phases, we're going to aggressively
 // enforce keeping updated. This will be achieved through announce strings
 // that will vary with each update
-var ANNOUNCE_PREFIX = "quilibrium-1.4.19-betelgeuse-"
+var ANNOUNCE_PREFIX = "quilibrium-1.4.20-solstice-"
 
 func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
 	peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
|
|||||||
|
|
||||||
func NewBlossomSub(
|
func NewBlossomSub(
|
||||||
p2pConfig *config.P2PConfig,
|
p2pConfig *config.P2PConfig,
|
||||||
|
peerstore store.Peerstore,
|
||||||
logger *zap.Logger,
|
logger *zap.Logger,
|
||||||
) *BlossomSub {
|
) *BlossomSub {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
@@ -123,6 +126,13 @@ func NewBlossomSub(
 		opts = append(opts, libp2p.Identity(privKey))
 	}
 
+	ps, err := pstoreds.NewPeerstore(ctx, peerstore, pstoreds.DefaultOpts())
+	if err != nil {
+		panic(err)
+	}
+
+	opts = append(opts, libp2p.Peerstore(ps))
+
 	bs := &BlossomSub{
 		ctx:    ctx,
 		logger: logger,
@@ -182,7 +192,7 @@ func NewBlossomSub(
 			BehaviourPenaltyDecay: .5,
 			DecayInterval:         10 * time.Second,
 			DecayToZero:           .1,
-			RetainScore:           5 * time.Minute,
+			RetainScore:           60 * time.Minute,
 			AppSpecificScore: func(p peer.ID) float64 {
 				return float64(bs.GetPeerScore([]byte(p)))
 			},
@@ -199,13 +209,13 @@ func NewBlossomSub(
 
 	params := mergeDefaults(p2pConfig)
 	rt := blossomsub.NewBlossomSubRouter(h, params)
-	ps, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
+	pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
 	if err != nil {
 		panic(err)
 	}
 
 	peerID := h.ID()
-	bs.ps = ps
+	bs.ps = pubsub
 	bs.peerID = peerID
 	bs.h = h
 	bs.signKey = privKey
@@ -158,7 +158,9 @@ func (r *RPCServer) GetNodeInfo(
 		PeerId:    peerID.String(),
 		MaxFrame:  r.masterClock.GetFrame().GetFrameNumber(),
 		PeerScore: uint64(peerScore),
-		Version:   config.GetVersion(),
+		Version: append(
+			append([]byte{}, config.GetVersion()...), config.GetPatchNumber(),
+		),
 	}, nil
 }
 
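The Version field change copies GetVersion's bytes into a fresh slice before appending the patch number, so the appended byte can never land in a backing array shared with other callers. A small standalone demo of the pattern (not part of this commit):

package main

import "fmt"

func version() []byte { return []byte{0x01, 0x04, 0x14} }

func main() {
	// Copy-then-append, as in GetNodeInfo above: the patch byte is written
	// into a fresh backing array, never into one shared with other callers.
	v := append(append([]byte{}, version()...), 0x00)
	fmt.Printf("%x\n", v) // 01041400
}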
@@ -1,6 +1,7 @@
 package store
 
 import (
+	"bytes"
 	"errors"
 	"io"
 	"math/rand"
@@ -92,6 +93,24 @@ func (i *InMemKVDBIterator) Next() bool {
 	return found
 }
 
+func (i *InMemKVDBIterator) Last() bool {
+	if !i.open {
+		return false
+	}
+	i.db.storeMx.Lock()
+	found := false
+	final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
+	if len(i.db.sortedKeys) == final ||
+		!bytes.Equal([]byte(i.db.sortedKeys[final]), i.end) {
+		final--
+	}
+	i.pos = final
+	found = true
+	i.db.storeMx.Unlock()
+
+	return found
+}
+
 func (i *InMemKVDBIterator) Prev() bool {
 	if !i.open {
 		return false
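The new Last() relies on sort.SearchStrings, which returns the insertion index for the upper bound; when the bound is absent from the key list (or past the final key), stepping back one lands on the last key inside the range. A small standalone demo of that index arithmetic (not part of this commit):

package main

import (
	"fmt"
	"sort"
)

func main() {
	keys := []string{"a", "b", "d"}
	end := "c" // upper bound, as with the iterator's end key
	// SearchStrings returns the insertion index for end; when end is not
	// itself a stored key (or is past the last key), stepping back one
	// yields the last key below the bound -- the arithmetic Last() uses.
	final := sort.SearchStrings(keys, end)
	if final == len(keys) || keys[final] != end {
		final--
	}
	fmt.Println(keys[final]) // b
}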
@@ -162,6 +181,26 @@ func (i *InMemKVDBIterator) SeekLT(lt []byte) bool {
 	return found
 }
 
+func (t *InMemKVDBTransaction) Get(key []byte) ([]byte, io.Closer, error) {
+	if !t.db.open {
+		return nil, nil, errors.New("inmem db closed")
+	}
+
+	for _, c := range t.changes {
+		if bytes.Equal(c.key, key) {
+			return c.value, io.NopCloser(nil), nil
+		}
+	}
+
+	t.db.storeMx.Lock()
+	b, ok := t.db.store[string(key)]
+	t.db.storeMx.Unlock()
+	if !ok {
+		return nil, nil, pebble.ErrNotFound
+	}
+	return b, io.NopCloser(nil), nil
+}
+
 func (t *InMemKVDBTransaction) Set(key []byte, value []byte) error {
 	if !t.db.open {
 		return errors.New("inmem db closed")
@@ -212,6 +251,23 @@ func (t *InMemKVDBTransaction) Delete(key []byte) error {
 	return nil
 }
 
+func (t *InMemKVDBTransaction) NewIter(lowerBound []byte, upperBound []byte) (
+	Iterator,
+	error,
+) {
+	if !t.db.open {
+		return nil, errors.New("inmem db closed")
+	}
+
+	return &InMemKVDBIterator{
+		open:  true,
+		db:    t.db,
+		start: lowerBound,
+		end:   upperBound,
+		pos:   -1,
+	}, nil
+}
+
 func (t *InMemKVDBTransaction) Abort() error {
 	return nil
 }
@@ -11,6 +11,7 @@ type Iterator interface {
 	Value() []byte
 	Close() error
 	SeekLT([]byte) bool
+	Last() bool
 }
 
 type TypedIterator[T proto.Message] interface {
@@ -88,16 +88,22 @@ func (p *PebbleDB) CompactAll() error {
 var _ KVDB = (*PebbleDB)(nil)
 
 type Transaction interface {
+	Get(key []byte) ([]byte, io.Closer, error)
 	Set(key []byte, value []byte) error
 	Commit() error
 	Delete(key []byte) error
 	Abort() error
+	NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
 }
 
 type PebbleTransaction struct {
 	b *pebble.Batch
 }
 
+func (t *PebbleTransaction) Get(key []byte) ([]byte, io.Closer, error) {
+	return t.b.Get(key)
+}
+
 func (t *PebbleTransaction) Set(key []byte, value []byte) error {
 	return t.b.Set(key, value, &pebble.WriteOptions{Sync: true})
 }
@@ -114,6 +120,16 @@ func (t *PebbleTransaction) Abort() error {
 	return t.b.Close()
 }
 
+func (t *PebbleTransaction) NewIter(lowerBound []byte, upperBound []byte) (
+	Iterator,
+	error,
+) {
+	return t.b.NewIter(&pebble.IterOptions{
+		LowerBound: lowerBound,
+		UpperBound: upperBound,
+	})
+}
+
 var _ Transaction = (*PebbleTransaction)(nil)
 
 func rightAlign(data []byte, size int) []byte {
node/store/peerstore.go (new file, 318 lines)
@@ -0,0 +1,318 @@
+package store
+
+import (
+	"context"
+
+	"github.com/cockroachdb/pebble"
+	ds "github.com/ipfs/go-datastore"
+	dsq "github.com/ipfs/go-datastore/query"
+	"github.com/pkg/errors"
+)
+
+// shim structs for go-datastore
+type batch struct {
+	b  *transaction
+	db KVDB
+}
+
+type transaction struct {
+	tx Transaction
+}
+
+type PeerstoreDatastore struct {
+	db KVDB
+}
+
+const (
+	PEERSTORE = 0x06
+)
+
+type Peerstore interface {
+	ds.TxnDatastore
+	ds.PersistentDatastore
+	ds.Batching
+}
+
+var _ ds.Datastore = (*PeerstoreDatastore)(nil)
+var _ ds.TxnDatastore = (*PeerstoreDatastore)(nil)
+var _ ds.Txn = (*transaction)(nil)
+var _ ds.PersistentDatastore = (*PeerstoreDatastore)(nil)
+var _ ds.Batching = (*PeerstoreDatastore)(nil)
+var _ ds.Batch = (*batch)(nil)
+var _ Peerstore = (*PeerstoreDatastore)(nil)
+
+func NewPeerstoreDatastore(db KVDB) (*PeerstoreDatastore, error) {
+	ds := PeerstoreDatastore{
+		db: db,
+	}
+	return &ds, nil
+}
+
+func (d *PeerstoreDatastore) Put(
+	ctx context.Context,
+	key ds.Key,
+	value []byte,
+) (err error) {
+	return d.db.Set(
+		append([]byte{PEERSTORE}, key.Bytes()...),
+		value,
+	)
+}
+
+func (d *PeerstoreDatastore) Sync(ctx context.Context, prefix ds.Key) error {
+	return nil
+}
+
+func (d *PeerstoreDatastore) Get(
+	ctx context.Context,
+	key ds.Key,
+) (value []byte, err error) {
+	val, closer, err := d.db.Get(append([]byte{PEERSTORE}, key.Bytes()...))
+	if err != nil {
+		if err == pebble.ErrNotFound {
+			return nil, ds.ErrNotFound
+		}
+		return nil, err
+	}
+
+	out := make([]byte, len(val))
+	copy(out[:], val[:])
+	closer.Close()
+
+	return val, nil
+}
+
+func (d *PeerstoreDatastore) Has(
+	ctx context.Context,
+	key ds.Key,
+) (exists bool, err error) {
+	if _, err := d.Get(ctx, key); err != nil {
+		if err == ds.ErrNotFound {
+			return false, nil
+		}
+		return false, errors.Wrap(err, "has")
+	}
+
+	return true, nil
+}
+
+func (d *PeerstoreDatastore) GetSize(
+	ctx context.Context,
+	key ds.Key,
+) (size int, err error) {
+	return ds.GetBackedSize(ctx, d, key)
+}
+
+func (d *PeerstoreDatastore) Delete(
+	ctx context.Context,
+	key ds.Key,
+) (err error) {
+	return d.db.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
+}
+
+func (d *PeerstoreDatastore) Query(ctx context.Context, q dsq.Query) (
+	dsq.Results,
+	error,
+) {
+	rnge := []byte{PEERSTORE}
+
+	qNaive := q
+	prefix := ds.NewKey(q.Prefix).String()
+	if prefix != "/" {
+		rnge = append(rnge, []byte(prefix+"/")...)
+		qNaive.Prefix = ""
+	}
+
+	i, err := d.db.NewIter(rnge, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "query")
+	}
+
+	next := i.Next
+	if len(q.Orders) > 0 {
+		switch q.Orders[0].(type) {
+		case dsq.OrderByKey, *dsq.OrderByKey:
+			qNaive.Orders = nil
+			i.First()
+		case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
+			next = func() bool {
+				next = i.Prev
+				return i.Last()
+			}
+			qNaive.Orders = nil
+		default:
+			i.First()
+		}
+	} else {
+		i.First()
+	}
+	r := dsq.ResultsFromIterator(q, dsq.Iterator{
+		Next: func() (dsq.Result, bool) {
+			if !next() {
+				return dsq.Result{}, false
+			}
+			k := string(i.Key()[1:])
+			e := dsq.Entry{Key: k, Size: len(i.Value())}
+
+			if !q.KeysOnly {
+				buf := make([]byte, len(i.Value()))
+				copy(buf, i.Value())
+				e.Value = buf
+			}
+			return dsq.Result{Entry: e}, true
+		},
+		Close: func() error {
+			return i.Close()
+		},
+	})
+	return dsq.NaiveQueryApply(qNaive, r), nil
+}
+
+// TODO: get disk usage of peerstore later
+func (d *PeerstoreDatastore) DiskUsage(ctx context.Context) (uint64, error) {
+	return 0, nil
+}
+
+// Closing is not done here:
+func (d *PeerstoreDatastore) Close() (err error) {
+	return nil
+}
+
+func (d *PeerstoreDatastore) Batch(ctx context.Context) (ds.Batch, error) {
+	return &batch{
+		b:  &transaction{tx: d.db.NewBatch()},
+		db: d.db,
+	}, nil
+}
+
+func (d *PeerstoreDatastore) NewTransaction(
+	ctx context.Context,
+	readOnly bool,
+) (ds.Txn, error) {
+	tx := d.db.NewBatch()
+	return &transaction{tx}, nil
+}
+
+func (b *batch) Put(ctx context.Context, key ds.Key, value []byte) error {
+	b.b.Put(ctx, key, value)
+	return nil
+}
+
+func (b *batch) Commit(ctx context.Context) error {
+	return b.b.Commit(ctx)
+}
+
+func (b *batch) Delete(ctx context.Context, key ds.Key) error {
+	b.b.Delete(ctx, key)
+	return nil
+}
+
+func (t *transaction) Commit(ctx context.Context) error {
+	return t.tx.Commit()
+}
+
+func (t *transaction) Discard(ctx context.Context) {
+	t.tx.Abort()
+}
+
+func (t *transaction) Get(
+	ctx context.Context,
+	key ds.Key,
+) (value []byte, err error) {
+	b, closer, err := t.tx.Get(append([]byte{PEERSTORE}, key.Bytes()...))
+	if err != nil {
+		if err == pebble.ErrNotFound {
+			return nil, ds.ErrNotFound
+		}
+		return nil, errors.Wrap(err, "get")
+	}
+
+	out := make([]byte, len(b))
+	copy(out[:], b[:])
+	closer.Close()
+
+	return b, nil
+}
+
+func (t *transaction) Put(ctx context.Context, key ds.Key, value []byte) error {
+	return t.tx.Set(append([]byte{PEERSTORE}, key.Bytes()...), value)
+}
+
+func (t *transaction) Has(ctx context.Context, key ds.Key) (
+	exists bool,
+	err error,
+) {
+	if _, err := t.Get(ctx, key); err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return false, nil
+		}
+
+		return false, errors.Wrap(err, "has")
+	}
+
+	return true, nil
+}
+
+func (t *transaction) GetSize(
+	ctx context.Context,
+	key ds.Key,
+) (size int, err error) {
+	return ds.GetBackedSize(ctx, t, key)
+}
+
+func (t *transaction) Delete(ctx context.Context, key ds.Key) (err error) {
+	return t.tx.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
+}
+
+func (t *transaction) Query(ctx context.Context, q dsq.Query) (
+	dsq.Results,
+	error,
+) {
+	rnge := []byte{PEERSTORE}
+	qNaive := q
+	prefix := ds.NewKey(q.Prefix).String()
+	if prefix != "/" {
+		rnge = append(rnge, []byte(prefix+"/")...)
+		qNaive.Prefix = ""
+	}
+
+	i, err := t.tx.NewIter(rnge, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "query")
+	}
+
+	next := i.Next
+	if len(q.Orders) > 0 {
+		switch q.Orders[0].(type) {
+		case dsq.OrderByKey, *dsq.OrderByKey:
+			qNaive.Orders = nil
+		case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
+			next = func() bool {
+				next = i.Prev
+				return i.Last()
+			}
+			qNaive.Orders = nil
+		default:
+		}
+	}
+	r := dsq.ResultsFromIterator(q, dsq.Iterator{
+		Next: func() (dsq.Result, bool) {
+			if !next() {
+				return dsq.Result{}, false
+			}
+			k := string(i.Key()[1:])
+			e := dsq.Entry{Key: k, Size: len(i.Value())}
+
+			if !q.KeysOnly {
+				buf := make([]byte, len(i.Value()))
+				copy(buf, i.Value())
+				e.Value = buf
+			}
+			return dsq.Result{Entry: e}, true
+		},
+		Close: func() error {
+			return i.Close()
+		},
+	})
+	return dsq.NaiveQueryApply(qNaive, r), nil
+}
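Taken together with the wire_gen.go and blossomsub.go hunks above, the new datastore is constructed from the shared Pebble KVDB (namespaced under the 0x06 PEERSTORE key prefix) and handed to BlossomSub, which wraps it in a pstoreds-backed libp2p peerstore so peer records survive restarts. A sketch of the assembly (hypothetical helper pieced together from this commit's hunks, not a file in the repo):

package example

import (
	"go.uber.org/zap"

	"source.quilibrium.com/quilibrium/monorepo/node/config"
	"source.quilibrium.com/quilibrium/monorepo/node/p2p"
	"source.quilibrium.com/quilibrium/monorepo/node/store"
)

// buildPubSub mirrors the generated wiring: the shared Pebble KVDB backs
// the new PeerstoreDatastore, which NewBlossomSub wraps via pstoreds.
func buildPubSub(cfg *config.Config, logger *zap.Logger) (*p2p.BlossomSub, error) {
	pebbleDB := store.NewPebbleDB(cfg.DB)
	peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
	if err != nil {
		return nil, err
	}
	return p2p.NewBlossomSub(cfg.P2P, peerstoreDatastore, logger), nil
}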