Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git (synced 2024-12-25 16:15:17 +00:00)

Commit: 26195cef5f ("v1.4.20 base")
Parent: 35561a9e41
@@ -21,8 +21,8 @@ import (
)

const (
-   // BlossomSubID_v12 is the protocol ID for version 1.2.0 of the BlossomSub protocol.
-   BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.0")
+   // BlossomSubID_v12 is the protocol ID for version 1.2.1 of the BlossomSub protocol.
+   BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.1")
)

// Defines the default BlossomSub parameters.
@@ -52,7 +52,7 @@ var (
    BlossomSubOpportunisticGraftPeers = 2
    BlossomSubGraftFloodThreshold = 10 * time.Second
    BlossomSubMaxIHaveLength = 5000
-   BlossomSubMaxIHaveMessages = 100
+   BlossomSubMaxIHaveMessages = 10
    BlossomSubIWantFollowupTime = 3 * time.Second
)
@@ -1,5 +1,2 @@
-// Deprecated: The database-backed peerstore will be removed from go-libp2p in the future.
-// Use the memory peerstore (pstoremem) instead.
-// For more details see https://github.com/libp2p/go-libp2p/issues/2329
-// and https://github.com/libp2p/go-libp2p/issues/2355.
+// libp2p deprecated this, we disagree
package pstoreds
@@ -725,11 +725,11 @@ func logoVersion(width int) string {
        out += " ''---.. ...---'' ##\n"
        out += " ''----------''\n"
        out += " \n"
-       out += " Quilibrium Node - v" + config.GetVersionString() + " – Betelgeuse\n"
+       out += " Quilibrium Node - v" + config.GetVersionString() + " – Solstice\n"
        out += " \n"
        out += " DB Console\n"
    } else {
-       out = "Quilibrium Node - v" + config.GetVersionString() + " – Betelgeuse - DB Console\n"
+       out = "Quilibrium Node - v" + config.GetVersionString() + " – Solstice - DB Console\n"
    }
    return out
}
@@ -89,7 +89,7 @@ func (n *Node) VerifyProofIntegrity() {
        }

        if !v {
-           panic("bad kzg proof")
+           panic(fmt.Sprintf("bad kzg proof at increment %d", i))
        }
        wp := []byte{}
        wp = append(wp, n.pubSub.GetPeerID()...)
@@ -97,7 +97,7 @@ func (n *Node) VerifyProofIntegrity() {
        fmt.Printf("%x\n", wp)
        v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
        if !v {
-           panic("bad weso proof")
+           panic(fmt.Sprintf("bad weso proof at increment %d", i))
        }
    }
}
@@ -56,9 +56,11 @@ var storeSet = wire.NewSet(
    store.NewPebbleClockStore,
    store.NewPebbleKeyStore,
    store.NewPebbleDataProofStore,
+   store.NewPeerstoreDatastore,
    wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
    wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
    wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)),
+   wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)),
)

var pubSubSet = wire.NewSet(
@@ -89,6 +91,7 @@ var consensusSet = wire.NewSet(
func NewDHTNode(*config.Config) (*DHTNode, error) {
    panic(wire.Build(
        debugLoggerSet,
+       storeSet,
        pubSubSet,
        newDHTNode,
    ))
@@ -24,8 +24,14 @@ import (

func NewDHTNode(configConfig *config.Config) (*DHTNode, error) {
    p2PConfig := configConfig.P2P
+   dbConfig := configConfig.DB
+   pebbleDB := store.NewPebbleDB(dbConfig)
+   peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+   if err != nil {
+       return nil, err
+   }
    zapLogger := debugLogger()
-   blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+   blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
    dhtNode, err := newDHTNode(blossomSub)
    if err != nil {
        return nil, err
@@ -42,7 +48,11 @@ func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTes
    keyConfig := configConfig.Key
    fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
    p2PConfig := configConfig.P2P
-   blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+   peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+   if err != nil {
+       return nil, err
+   }
+   blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
    engineConfig := configConfig.Engine
    kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
    wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@@ -65,7 +75,11 @@ func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestRepo
    keyConfig := configConfig.Key
    fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
    p2PConfig := configConfig.P2P
-   blossomSub := p2p.NewBlossomSub(p2PConfig, zapLogger)
+   peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
+   if err != nil {
+       return nil, err
+   }
+   blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
    engineConfig := configConfig.Engine
    kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
    wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
@@ -125,7 +139,7 @@ var debugLoggerSet = wire.NewSet(

var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))

-var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)))
+var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))

var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))
@@ -14,7 +14,7 @@ func GetMinimumVersion() []byte {
}

func GetVersion() []byte {
-   return []byte{0x01, 0x04, 0x13}
+   return []byte{0x01, 0x04, 0x14}
}

func GetVersionString() string {
@@ -22,12 +22,19 @@ func GetVersionString() string {
}

func FormatVersion(version []byte) string {
-   return fmt.Sprintf(
-       "%d.%d.%d",
-       version[0], version[1], version[2],
-   )
+   if len(version) == 3 {
+       return fmt.Sprintf(
+           "%d.%d.%d",
+           version[0], version[1], version[2],
+       )
+   } else {
+       return fmt.Sprintf(
+           "%d.%d.%d-p%d",
+           version[0], version[1], version[2], version[3],
+       )
+   }
}

func GetPatchNumber() byte {
-   return 0x01
+   return 0x00
}
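For reference, a minimal standalone sketch (not part of the commit) of how the new version helpers combine; the lowercase formatVersion below is an illustrative copy of the FormatVersion logic added above:

package main

import "fmt"

// formatVersion mirrors the branching added to FormatVersion: three bytes
// render as major.minor.patch, a fourth byte is rendered as a -pN suffix.
func formatVersion(version []byte) string {
    if len(version) == 3 {
        return fmt.Sprintf("%d.%d.%d", version[0], version[1], version[2])
    }
    return fmt.Sprintf("%d.%d.%d-p%d", version[0], version[1], version[2], version[3])
}

func main() {
    fmt.Println(formatVersion([]byte{0x01, 0x04, 0x14}))       // 1.4.20
    fmt.Println(formatVersion([]byte{0x01, 0x04, 0x14, 0x01})) // 1.4.20-p1 (hypothetical patch byte)
}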
@@ -37,14 +37,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
    }

    switch any.TypeUrl {
-   case protobufs.ClockFrameType:
-       if err := e.handleClockFrameData(
-           message.From,
-           any,
-       ); err != nil {
-           return errors.Wrap(err, "handle message")
-       }
-       return nil
    case protobufs.SelfTestReportType:
        if err := e.handleSelfTestReport(
            message.From,
@@ -58,60 +50,6 @@ func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
    return errors.Wrap(errors.New("invalid message"), "handle message")
}

-func (e *MasterClockConsensusEngine) handleClockFrameData(
-   peerID []byte,
-   any *anypb.Any,
-) error {
-   frame := &protobufs.ClockFrame{}
-   if err := any.UnmarshalTo(frame); err != nil {
-       return errors.Wrap(err, "handle clock frame data")
-   }
-
-   head, err := e.masterTimeReel.Head()
-   if err != nil {
-       panic(err)
-   }
-
-   if frame.FrameNumber < head.FrameNumber {
-       return nil
-   }
-
-   if e.difficulty != frame.Difficulty {
-       e.logger.Debug(
-           "frame difficulty mismatched",
-           zap.Uint32("difficulty", frame.Difficulty),
-       )
-       return errors.Wrap(
-           errors.New("frame difficulty"),
-           "handle clock frame data",
-       )
-   }
-
-   e.logger.Debug(
-       "got clock frame",
-       zap.Binary("sender", peerID),
-       zap.Binary("filter", frame.Filter),
-       zap.Uint64("frame_number", frame.FrameNumber),
-       zap.Int("proof_count", len(frame.AggregateProofs)),
-   )
-
-   go func() {
-       select {
-       case e.frameValidationCh <- frame:
-       default:
-           e.logger.Debug(
-               "dropped frame due to overwhelmed queue",
-               zap.Binary("sender", peerID),
-               zap.Binary("filter", frame.Filter),
-               zap.Uint64("frame_number", frame.FrameNumber),
-               zap.Int("proof_count", len(frame.AggregateProofs)),
-           )
-       }
-   }()
-
-   return nil
-}
-
func (e *MasterClockConsensusEngine) handleSelfTestReport(
    peerID []byte,
    any *anypb.Any,
@@ -252,7 +190,6 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
    return nil
}

// This does not publish any longer, frames strictly are picked up from sync
func (e *MasterClockConsensusEngine) publishProof(
    frame *protobufs.ClockFrame,
) error {
@@ -1,12 +1,9 @@
package master

import (
    "context"
    "time"

    "github.com/mr-tron/base58"
    "github.com/pkg/errors"
    "go.uber.org/zap"
    "source.quilibrium.com/quilibrium/monorepo/node/consensus"
    "source.quilibrium.com/quilibrium/monorepo/node/p2p"
    "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
@@ -65,87 +62,10 @@ func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
func (e *MasterClockConsensusEngine) collect(
    currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
    e.logger.Debug("collecting vdf proofs")

    latest, err := e.masterTimeReel.Head()
    if err != nil {
        panic(err)
    }

-   // With the increase of network size, constrain down to top thirty
-   peers, err := e.GetMostAheadPeers()
-   if err != nil {
-       return latest, nil
-   }
-
-   for i := 0; i < len(peers); i++ {
-       peer := peers[i]
-       e.logger.Info("setting syncing target", zap.Binary("peer_id", peer))
-
-       cc, err := e.pubSub.GetDirectChannel(peer, "validation")
-       if err != nil {
-           e.logger.Error(
-               "could not connect for sync",
-               zap.String("peer_id", base58.Encode(peer)),
-           )
-           continue
-       }
-       client := protobufs.NewValidationServiceClient(cc)
-       syncClient, err := client.Sync(
-           context.Background(),
-           &protobufs.SyncRequest{
-               FramesRequest: &protobufs.ClockFramesRequest{
-                   Filter: e.filter,
-                   FromFrameNumber: latest.FrameNumber,
-                   ToFrameNumber: 0,
-               },
-           },
-       )
-       if err != nil {
-           cc.Close()
-           continue
-       }
-
-       for msg, err := syncClient.Recv(); msg != nil &&
-           err == nil; msg, err = syncClient.Recv() {
-           if msg.FramesResponse == nil {
-               break
-           }
-
-           for _, frame := range msg.FramesResponse.ClockFrames {
-               frame := frame
-
-               if frame.FrameNumber < latest.FrameNumber {
-                   continue
-               }
-
-               if e.difficulty != frame.Difficulty {
-                   e.logger.Debug(
-                       "frame difficulty mismatched",
-                       zap.Uint32("difficulty", frame.Difficulty),
-                   )
-                   break
-               }
-
-               if err := e.frameProver.VerifyMasterClockFrame(frame); err != nil {
-                   e.logger.Error(
-                       "peer returned invalid frame",
-                       zap.String("peer_id", base58.Encode(peer)))
-                   e.pubSub.SetPeerScore(peer, -1000)
-                   break
-               }
-
-               e.masterTimeReel.Insert(frame, false)
-               latest = frame
-           }
-       }
-       if err != nil {
-           cc.Close()
-           break
-       }
-       cc.Close()
-       break
-   }

    return latest, nil
}
@@ -421,39 +421,18 @@ func (e *MasterClockConsensusEngine) Start() <-chan error {
    }()

    go func() {
-       newFrameCh := e.masterTimeReel.NewFrameCh()
-
        for e.state < consensus.EngineStateStopping {
-           var err error
-           select {
-           case frame := <-newFrameCh:
-               currentFrame := frame
-               latestFrame := frame
-               if latestFrame, err = e.collect(currentFrame); err != nil {
-                   e.logger.Error("could not collect", zap.Error(err))
-                   latestFrame = currentFrame
-                   continue
-               }
-               if latestFrame, err = e.prove(latestFrame); err != nil {
-                   e.logger.Error("could not prove", zap.Error(err))
-                   latestFrame = currentFrame
-               }
-               if err = e.publishProof(latestFrame); err != nil {
-                   e.logger.Error("could not publish", zap.Error(err))
-               }
-           case <-time.After(20 * time.Second):
-               frame, err := e.masterTimeReel.Head()
-               if err != nil {
-                   panic(err)
-               }
+           frame, err := e.masterTimeReel.Head()
+           if err != nil {
+               panic(err)
+           }

-               if frame, err = e.prove(frame); err != nil {
-                   e.logger.Error("could not prove", zap.Error(err))
-                   continue
-               }
-               if err = e.publishProof(frame); err != nil {
-                   e.logger.Error("could not publish", zap.Error(err))
-               }
+           if frame, err = e.prove(frame); err != nil {
+               e.logger.Error("could not prove", zap.Error(err))
+               continue
+           }
+           if err = e.publishProof(frame); err != nil {
+               e.logger.Error("could not publish", zap.Error(err))
+           }
        }
    }()
@@ -511,7 +490,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
    for i := uint32(0); i < parallelism; i++ {
        i := i
        go func() {
-           for j := 3; j > 0; j-- {
+           for j := 3; j >= 0; j-- {
                resp, err :=
                    clients[i].CalculateChallengeProof(
                        context.Background(),
@@ -522,7 +501,7 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
                        },
                    )
                if err != nil {
-                   if j == 1 || len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
+                   if j == 0 {
                        panic(err)
                    }
                    if len(e.engineConfig.DataWorkerMultiaddrs) != 0 {
@@ -533,7 +512,18 @@ func (e *MasterClockConsensusEngine) PerformTimeProof(
                        time.Sleep(50 * time.Millisecond)
                        clients[i], err = e.createParallelDataClientsFromListAndIndex(i)
                        if err != nil {
-                           panic(err)
+                           e.logger.Error("failed to reconnect", zap.Error(err))
                        }
+                   } else if len(e.engineConfig.DataWorkerMultiaddrs) == 0 {
+                       e.logger.Error(
+                           "client failed, reconnecting after 50ms",
+                           zap.Uint32("client", i),
+                       )
+                       time.Sleep(50 * time.Millisecond)
+                       clients[i], err =
+                           e.createParallelDataClientsFromBaseMultiaddrAndIndex(i)
+                       if err != nil {
+                           e.logger.Error("failed to reconnect", zap.Error(err))
+                       }
                    }
                    continue
@@ -593,12 +583,12 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
) {
    ma, err := multiaddr.NewMultiaddr(e.engineConfig.DataWorkerMultiaddrs[index])
    if err != nil {
-       panic(err)
+       return nil, errors.Wrap(err, "create parallel data client")
    }

    _, addr, err := mn.DialArgs(ma)
    if err != nil {
-       panic(err)
+       return nil, errors.Wrap(err, "create parallel data client")
    }

    conn, err := grpc.Dial(
@@ -612,7 +602,66 @@ func (e *MasterClockConsensusEngine) createParallelDataClientsFromListAndIndex(
        ),
    )
    if err != nil {
-       panic(err)
+       return nil, errors.Wrap(err, "create parallel data client")
    }

    client := protobufs.NewDataIPCServiceClient(conn)

+   e.logger.Info(
+       "connected to data worker process",
+       zap.Uint32("client", index),
+   )
+   return client, nil
+}
+
+func (
+   e *MasterClockConsensusEngine,
+) createParallelDataClientsFromBaseMultiaddrAndIndex(
+   index uint32,
+) (
+   protobufs.DataIPCServiceClient,
+   error,
+) {
+   e.logger.Info(
+       "re-connecting to data worker process",
+       zap.Uint32("client", index),
+   )
+
+   if e.engineConfig.DataWorkerBaseListenMultiaddr == "" {
+       e.engineConfig.DataWorkerBaseListenMultiaddr = "/ip4/127.0.0.1/tcp/%d"
+   }
+
+   if e.engineConfig.DataWorkerBaseListenPort == 0 {
+       e.engineConfig.DataWorkerBaseListenPort = 40000
+   }
+
+   ma, err := multiaddr.NewMultiaddr(
+       fmt.Sprintf(
+           e.engineConfig.DataWorkerBaseListenMultiaddr,
+           int(e.engineConfig.DataWorkerBaseListenPort)+int(index),
+       ),
+   )
+   if err != nil {
+       return nil, errors.Wrap(err, "create parallel data client")
+   }
+
+   _, addr, err := mn.DialArgs(ma)
+   if err != nil {
+       return nil, errors.Wrap(err, "create parallel data client")
+   }
+
+   conn, err := grpc.Dial(
+       addr,
+       grpc.WithTransportCredentials(
+           insecure.NewCredentials(),
+       ),
+       grpc.WithDefaultCallOptions(
+           grpc.MaxCallSendMsgSize(10*1024*1024),
+           grpc.MaxCallRecvMsgSize(10*1024*1024),
+       ),
+   )
+   if err != nil {
+       return nil, errors.Wrap(err, "create parallel data client")
+   }
+
+   client := protobufs.NewDataIPCServiceClient(conn)
@@ -22,7 +22,7 @@ replace github.com/cockroachdb/pebble => ../pebble
require (
    filippo.io/edwards25519 v1.0.0-rc.1
    github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
-   github.com/libp2p/go-libp2p v0.31.0
+   github.com/libp2p/go-libp2p v0.35.1
    github.com/libp2p/go-libp2p-gostream v0.6.0
    github.com/libp2p/go-libp2p-kad-dht v0.23.0
    google.golang.org/protobuf v1.34.1
@@ -34,6 +34,7 @@ require (
)

require (
+   github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
    github.com/pion/datachannel v1.5.6 // indirect
    github.com/pion/dtls/v2 v2.2.11 // indirect
    github.com/pion/ice/v2 v2.3.24 // indirect
@@ -203,6 +203,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
node/main.go (50 changed lines)
@@ -113,6 +113,11 @@ var (
        0,
        "specifies the parent process pid for a data worker",
    )
+   integrityCheck = flag.Bool(
+       "integrity-check",
+       false,
+       "runs an integrity check on the store, helpful for confirming backups are not corrupted (defaults to false)",
+   )
)

var signatories = []string{
@@ -402,7 +407,10 @@ func main() {
    }

    fmt.Println("Loading ceremony state and starting node...")
-   go spawnDataWorkers(nodeConfig)
+
+   if !*integrityCheck {
+       go spawnDataWorkers(nodeConfig)
+   }

    kzg.Init()
@@ -422,6 +430,13 @@ func main() {
        panic(err)
    }

+   if *integrityCheck {
+       fmt.Println("Running integrity check...")
+       node.VerifyProofIntegrity()
+       fmt.Println("Integrity check passed!")
+       return
+   }
+
    repair(*configDirectory, node)

    if nodeConfig.ListenGRPCMultiaddr != "" {
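Usage illustration (not part of the diff; the binary name is assumed): starting the node with the new flag, e.g. "./node --integrity-check", skips spawning data workers, builds the node, runs node.VerifyProofIntegrity() against the local store, prints "Integrity check passed!" and exits; without the flag, startup proceeds as before.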
@@ -477,20 +492,25 @@ func spawnDataWorkers(nodeConfig *config.Config) {
    for i := 1; i <= cores-1; i++ {
        i := i
        go func() {
-           args := []string{
-               fmt.Sprintf("--core=%d", i),
-               fmt.Sprintf("--parent-process=%d", os.Getpid()),
-           }
-           args = append(args, os.Args[1:]...)
-           cmd := exec.Command(process, args...)
-           cmd.Stdout = os.Stdout
-           cmd.Stderr = os.Stdout
-           err := cmd.Start()
-           if err != nil {
-               panic(err)
-           }
+           for {
+               args := []string{
+                   fmt.Sprintf("--core=%d", i),
+                   fmt.Sprintf("--parent-process=%d", os.Getpid()),
+               }
+               args = append(args, os.Args[1:]...)
+               cmd := exec.Command(process, args...)
+               cmd.Stdout = os.Stdout
+               cmd.Stderr = os.Stdout
+               err := cmd.Start()
+               if err != nil {
+                   panic(err)
+               }

-           dataWorkers[i-1] = cmd
+               dataWorkers[i-1] = cmd
+               cmd.Wait()
+               time.Sleep(25 * time.Millisecond)
+               fmt.Printf("Data worker %d stopped, restarting...\n", i)
+           }
        }()
    }
}
@@ -912,5 +932,5 @@ func printVersion() {
        patchString = fmt.Sprintf("-p%d", patch)
    }
    fmt.Println(" ")
-   fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " – Betelgeuse")
+   fmt.Println(" Quilibrium Node - v" + config.GetVersionString() + patchString + " – Solstice")
}
@@ -22,6 +22,7 @@ import (
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/p2p/discovery/routing"
    "github.com/libp2p/go-libp2p/p2p/discovery/util"
+   "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
    "github.com/mr-tron/base58"
    "github.com/multiformats/go-multiaddr"
    "github.com/pkg/errors"
@@ -32,6 +33,7 @@ import (
    "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
    "source.quilibrium.com/quilibrium/monorepo/node/config"
    "source.quilibrium.com/quilibrium/monorepo/node/protobufs"
+   "source.quilibrium.com/quilibrium/monorepo/node/store"
)

type BlossomSub struct {
@@ -61,7 +63,7 @@ var BITMASK_ALL = []byte{
// While we iterate through these next phases, we're going to aggressively
// enforce keeping updated. This will be achieved through announce strings
// that will vary with each update
-var ANNOUNCE_PREFIX = "quilibrium-1.4.19-betelgeuse-"
+var ANNOUNCE_PREFIX = "quilibrium-1.4.20-solstice-"

func getPeerID(p2pConfig *config.P2PConfig) peer.ID {
    peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
@@ -85,6 +87,7 @@ func getPeerID(p2pConfig *config.P2PConfig) peer.ID {

func NewBlossomSub(
    p2pConfig *config.P2PConfig,
+   peerstore store.Peerstore,
    logger *zap.Logger,
) *BlossomSub {
    ctx := context.Background()
@@ -123,6 +126,13 @@ func NewBlossomSub(
        opts = append(opts, libp2p.Identity(privKey))
    }

+   ps, err := pstoreds.NewPeerstore(ctx, peerstore, pstoreds.DefaultOpts())
+   if err != nil {
+       panic(err)
+   }
+
+   opts = append(opts, libp2p.Peerstore(ps))
+
    bs := &BlossomSub{
        ctx: ctx,
        logger: logger,
@@ -182,7 +192,7 @@ func NewBlossomSub(
        BehaviourPenaltyDecay: .5,
        DecayInterval: 10 * time.Second,
        DecayToZero: .1,
-       RetainScore: 5 * time.Minute,
+       RetainScore: 60 * time.Minute,
        AppSpecificScore: func(p peer.ID) float64 {
            return float64(bs.GetPeerScore([]byte(p)))
        },
@@ -199,13 +209,13 @@ func NewBlossomSub(

    params := mergeDefaults(p2pConfig)
    rt := blossomsub.NewBlossomSubRouter(h, params)
-   ps, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
+   pubsub, err := blossomsub.NewBlossomSubWithRouter(ctx, h, rt, blossomOpts...)
    if err != nil {
        panic(err)
    }

    peerID := h.ID()
-   bs.ps = ps
+   bs.ps = pubsub
    bs.peerID = peerID
    bs.h = h
    bs.signKey = privKey
@@ -158,7 +158,9 @@ func (r *RPCServer) GetNodeInfo(
        PeerId: peerID.String(),
        MaxFrame: r.masterClock.GetFrame().GetFrameNumber(),
        PeerScore: uint64(peerScore),
-       Version: config.GetVersion(),
+       Version: append(
+           append([]byte{}, config.GetVersion()...), config.GetPatchNumber(),
+       ),
    }, nil
}
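Illustrative note (not part of the diff): with the values introduced in this commit, config.GetVersion() returns {0x01, 0x04, 0x14} and config.GetPatchNumber() returns 0x00, so the NodeInfo Version field now carries the four bytes {0x01, 0x04, 0x14, 0x00}, which the updated FormatVersion would render as "1.4.20-p0". Copying GetVersion() into a fresh slice before appending keeps the patch byte out of any shared backing array.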
@@ -1,6 +1,7 @@
package store

import (
    "bytes"
    "errors"
    "io"
    "math/rand"
@@ -92,6 +93,24 @@ func (i *InMemKVDBIterator) Next() bool {
    return found
}

+func (i *InMemKVDBIterator) Last() bool {
+   if !i.open {
+       return false
+   }
+   i.db.storeMx.Lock()
+   found := false
+   final := sort.SearchStrings(i.db.sortedKeys, string(i.end))
+   if len(i.db.sortedKeys) == final ||
+       !bytes.Equal([]byte(i.db.sortedKeys[final]), i.end) {
+       final--
+   }
+   i.pos = final
+   found = true
+   i.db.storeMx.Unlock()
+
+   return found
+}
+
func (i *InMemKVDBIterator) Prev() bool {
    if !i.open {
        return false
@@ -162,6 +181,26 @@ func (i *InMemKVDBIterator) SeekLT(lt []byte) bool {
    return found
}

+func (t *InMemKVDBTransaction) Get(key []byte) ([]byte, io.Closer, error) {
+   if !t.db.open {
+       return nil, nil, errors.New("inmem db closed")
+   }
+
+   for _, c := range t.changes {
+       if bytes.Equal(c.key, key) {
+           return c.value, io.NopCloser(nil), nil
+       }
+   }
+
+   t.db.storeMx.Lock()
+   b, ok := t.db.store[string(key)]
+   t.db.storeMx.Unlock()
+   if !ok {
+       return nil, nil, pebble.ErrNotFound
+   }
+   return b, io.NopCloser(nil), nil
+}
+
func (t *InMemKVDBTransaction) Set(key []byte, value []byte) error {
    if !t.db.open {
        return errors.New("inmem db closed")
@@ -212,6 +251,23 @@ func (t *InMemKVDBTransaction) Delete(key []byte) error {
    return nil
}

+func (t *InMemKVDBTransaction) NewIter(lowerBound []byte, upperBound []byte) (
+   Iterator,
+   error,
+) {
+   if !t.db.open {
+       return nil, errors.New("inmem db closed")
+   }
+
+   return &InMemKVDBIterator{
+       open: true,
+       db: t.db,
+       start: lowerBound,
+       end: upperBound,
+       pos: -1,
+   }, nil
+}
+
func (t *InMemKVDBTransaction) Abort() error {
    return nil
}
@@ -11,6 +11,7 @@ type Iterator interface {
    Value() []byte
    Close() error
    SeekLT([]byte) bool
+   Last() bool
}

type TypedIterator[T proto.Message] interface {
@@ -88,16 +88,22 @@ func (p *PebbleDB) CompactAll() error {
var _ KVDB = (*PebbleDB)(nil)

type Transaction interface {
+   Get(key []byte) ([]byte, io.Closer, error)
    Set(key []byte, value []byte) error
    Commit() error
    Delete(key []byte) error
    Abort() error
+   NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
}

type PebbleTransaction struct {
    b *pebble.Batch
}

+func (t *PebbleTransaction) Get(key []byte) ([]byte, io.Closer, error) {
+   return t.b.Get(key)
+}
+
func (t *PebbleTransaction) Set(key []byte, value []byte) error {
    return t.b.Set(key, value, &pebble.WriteOptions{Sync: true})
}
@@ -114,6 +120,16 @@ func (t *PebbleTransaction) Abort() error {
    return t.b.Close()
}

+func (t *PebbleTransaction) NewIter(lowerBound []byte, upperBound []byte) (
+   Iterator,
+   error,
+) {
+   return t.b.NewIter(&pebble.IterOptions{
+       LowerBound: lowerBound,
+       UpperBound: upperBound,
+   })
+}
+
var _ Transaction = (*PebbleTransaction)(nil)

func rightAlign(data []byte, size int) []byte {
node/store/peerstore.go (new file, 318 lines)
@@ -0,0 +1,318 @@
package store

import (
    "context"

    "github.com/cockroachdb/pebble"
    ds "github.com/ipfs/go-datastore"
    dsq "github.com/ipfs/go-datastore/query"
    "github.com/pkg/errors"
)

// shim structs for go-datastore
type batch struct {
    b *transaction
    db KVDB
}

type transaction struct {
    tx Transaction
}

type PeerstoreDatastore struct {
    db KVDB
}

const (
    PEERSTORE = 0x06
)

type Peerstore interface {
    ds.TxnDatastore
    ds.PersistentDatastore
    ds.Batching
}

var _ ds.Datastore = (*PeerstoreDatastore)(nil)
var _ ds.TxnDatastore = (*PeerstoreDatastore)(nil)
var _ ds.Txn = (*transaction)(nil)
var _ ds.PersistentDatastore = (*PeerstoreDatastore)(nil)
var _ ds.Batching = (*PeerstoreDatastore)(nil)
var _ ds.Batch = (*batch)(nil)
var _ Peerstore = (*PeerstoreDatastore)(nil)

func NewPeerstoreDatastore(db KVDB) (*PeerstoreDatastore, error) {
    ds := PeerstoreDatastore{
        db: db,
    }
    return &ds, nil
}

func (d *PeerstoreDatastore) Put(
    ctx context.Context,
    key ds.Key,
    value []byte,
) (err error) {
    return d.db.Set(
        append([]byte{PEERSTORE}, key.Bytes()...),
        value,
    )
}

func (d *PeerstoreDatastore) Sync(ctx context.Context, prefix ds.Key) error {
    return nil
}

func (d *PeerstoreDatastore) Get(
    ctx context.Context,
    key ds.Key,
) (value []byte, err error) {
    val, closer, err := d.db.Get(append([]byte{PEERSTORE}, key.Bytes()...))
    if err != nil {
        if err == pebble.ErrNotFound {
            return nil, ds.ErrNotFound
        }
        return nil, err
    }

    out := make([]byte, len(val))
    copy(out[:], val[:])
    closer.Close()

    return val, nil
}

func (d *PeerstoreDatastore) Has(
    ctx context.Context,
    key ds.Key,
) (exists bool, err error) {
    if _, err := d.Get(ctx, key); err != nil {
        if err == ds.ErrNotFound {
            return false, nil
        }
        return false, errors.Wrap(err, "has")
    }

    return true, nil
}

func (d *PeerstoreDatastore) GetSize(
    ctx context.Context,
    key ds.Key,
) (size int, err error) {
    return ds.GetBackedSize(ctx, d, key)
}

func (d *PeerstoreDatastore) Delete(
    ctx context.Context,
    key ds.Key,
) (err error) {
    return d.db.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
}

func (d *PeerstoreDatastore) Query(ctx context.Context, q dsq.Query) (
    dsq.Results,
    error,
) {
    rnge := []byte{PEERSTORE}

    qNaive := q
    prefix := ds.NewKey(q.Prefix).String()
    if prefix != "/" {
        rnge = append(rnge, []byte(prefix+"/")...)
        qNaive.Prefix = ""
    }

    i, err := d.db.NewIter(rnge, nil)
    if err != nil {
        return nil, errors.Wrap(err, "query")
    }

    next := i.Next
    if len(q.Orders) > 0 {
        switch q.Orders[0].(type) {
        case dsq.OrderByKey, *dsq.OrderByKey:
            qNaive.Orders = nil
            i.First()
        case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
            next = func() bool {
                next = i.Prev
                return i.Last()
            }
            qNaive.Orders = nil
        default:
            i.First()
        }
    } else {
        i.First()
    }
    r := dsq.ResultsFromIterator(q, dsq.Iterator{
        Next: func() (dsq.Result, bool) {
            if !next() {
                return dsq.Result{}, false
            }
            k := string(i.Key()[1:])
            e := dsq.Entry{Key: k, Size: len(i.Value())}

            if !q.KeysOnly {
                buf := make([]byte, len(i.Value()))
                copy(buf, i.Value())
                e.Value = buf
            }
            return dsq.Result{Entry: e}, true
        },
        Close: func() error {
            return i.Close()
        },
    })
    return dsq.NaiveQueryApply(qNaive, r), nil
}

// TODO: get disk usage of peerstore later
func (d *PeerstoreDatastore) DiskUsage(ctx context.Context) (uint64, error) {
    return 0, nil
}

// Closing is not done here:
func (d *PeerstoreDatastore) Close() (err error) {
    return nil
}

func (d *PeerstoreDatastore) Batch(ctx context.Context) (ds.Batch, error) {
    return &batch{
        b: &transaction{tx: d.db.NewBatch()},
        db: d.db,
    }, nil
}

func (d *PeerstoreDatastore) NewTransaction(
    ctx context.Context,
    readOnly bool,
) (ds.Txn, error) {
    tx := d.db.NewBatch()
    return &transaction{tx}, nil
}

func (b *batch) Put(ctx context.Context, key ds.Key, value []byte) error {
    b.b.Put(ctx, key, value)
    return nil
}

func (b *batch) Commit(ctx context.Context) error {
    return b.b.Commit(ctx)
}

func (b *batch) Delete(ctx context.Context, key ds.Key) error {
    b.b.Delete(ctx, key)
    return nil
}

func (t *transaction) Commit(ctx context.Context) error {
    return t.tx.Commit()
}

func (t *transaction) Discard(ctx context.Context) {
    t.tx.Abort()
}

func (t *transaction) Get(
    ctx context.Context,
    key ds.Key,
) (value []byte, err error) {
    b, closer, err := t.tx.Get(append([]byte{PEERSTORE}, key.Bytes()...))
    if err != nil {
        if err == pebble.ErrNotFound {
            return nil, ds.ErrNotFound
        }
        return nil, errors.Wrap(err, "get")
    }

    out := make([]byte, len(b))
    copy(out[:], b[:])
    closer.Close()

    return b, nil
}

func (t *transaction) Put(ctx context.Context, key ds.Key, value []byte) error {
    return t.tx.Set(append([]byte{PEERSTORE}, key.Bytes()...), value)
}

func (t *transaction) Has(ctx context.Context, key ds.Key) (
    exists bool,
    err error,
) {
    if _, err := t.Get(ctx, key); err != nil {
        if errors.Is(err, ErrNotFound) {
            return false, nil
        }

        return false, errors.Wrap(err, "has")
    }

    return true, nil
}

func (t *transaction) GetSize(
    ctx context.Context,
    key ds.Key,
) (size int, err error) {
    return ds.GetBackedSize(ctx, t, key)
}

func (t *transaction) Delete(ctx context.Context, key ds.Key) (err error) {
    return t.tx.Delete(append([]byte{PEERSTORE}, key.Bytes()...))
}

func (t *transaction) Query(ctx context.Context, q dsq.Query) (
    dsq.Results,
    error,
) {
    rnge := []byte{PEERSTORE}
    qNaive := q
    prefix := ds.NewKey(q.Prefix).String()
    if prefix != "/" {
        rnge = append(rnge, []byte(prefix+"/")...)
        qNaive.Prefix = ""
    }

    i, err := t.tx.NewIter(rnge, nil)
    if err != nil {
        return nil, errors.Wrap(err, "query")
    }

    next := i.Next
    if len(q.Orders) > 0 {
        switch q.Orders[0].(type) {
        case dsq.OrderByKey, *dsq.OrderByKey:
            qNaive.Orders = nil
        case dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending:
            next = func() bool {
                next = i.Prev
                return i.Last()
            }
            qNaive.Orders = nil
        default:
        }
    }
    r := dsq.ResultsFromIterator(q, dsq.Iterator{
        Next: func() (dsq.Result, bool) {
            if !next() {
                return dsq.Result{}, false
            }
            k := string(i.Key()[1:])
            e := dsq.Entry{Key: k, Size: len(i.Value())}

            if !q.KeysOnly {
                buf := make([]byte, len(i.Value()))
                copy(buf, i.Value())
                e.Value = buf
            }
            return dsq.Result{Entry: e}, true
        },
        Close: func() error {
            return i.Close()
        },
    })
    return dsq.NaiveQueryApply(qNaive, r), nil
}
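A minimal usage sketch (not part of the commit): peerstore records are written under the one-byte PEERSTORE (0x06) prefix, so they share the node's key-value store with the other column families. The in-memory KVDB constructor name below is an assumption for illustration; any store.KVDB works.

package main

import (
    "context"
    "fmt"

    ds "github.com/ipfs/go-datastore"
    "source.quilibrium.com/quilibrium/monorepo/node/store"
)

func main() {
    kv := store.NewInMemKVDB() // assumed helper; substitute the PebbleDB-backed KVDB in the node
    pstore, err := store.NewPeerstoreDatastore(kv)
    if err != nil {
        panic(err)
    }

    ctx := context.Background()
    key := ds.NewKey("/peers/example")

    // Records land under the {0x06} prefix in the shared KVDB.
    if err := pstore.Put(ctx, key, []byte("addr-record")); err != nil {
        panic(err)
    }
    val, _ := pstore.Get(ctx, key)
    fmt.Printf("%s\n", val) // addr-record
}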