* v1.4.12

* small logging adjust
This commit is contained in:
Cassandra Heart 2024-03-23 15:26:57 -05:00 committed by GitHub
parent 4781e85fd6
commit 5afabc401b
GPG Key ID: B5690EEEBB952194
15 changed files with 726 additions and 335 deletions

View File

@ -49,7 +49,6 @@ func newNode(
logger.Info("running compaction")
if err := clockStore.Compact(
bytes.Repeat([]byte{0xff}, 32),
intrinsicFilter,
); err != nil {
panic(err)
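
Note: with this commit the ClockStore.Compact signature drops its masterFilter argument (see the store changes below), so the startup call passes only the intrinsic filter. A minimal sketch of the narrowed call shape, with a no-op store and a placeholder filter standing in for the node's actual wiring:

	package main

	// Sketch of the narrowed interface; noopStore and the zeroed filter are
	// illustrative stand-ins, not the repo's implementations.
	type ClockStore interface {
		Compact(dataFilter []byte) error
	}

	type noopStore struct{}

	func (noopStore) Compact(dataFilter []byte) error { return nil }

	func main() {
		var cs ClockStore = noopStore{}
		intrinsicFilter := make([]byte, 32) // placeholder for the node's intrinsic filter
		if err := cs.Compact(intrinsicFilter); err != nil {
			panic(err)
		}
	}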

View File

@ -14,7 +14,7 @@ func GetMinimumVersion() []byte {
}
func GetVersion() []byte {
return []byte{0x01, 0x04, 0x0B}
return []byte{0x01, 0x04, 0x0C}
}
func GetVersionString() string {
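
Note: the version is encoded one byte per component, so the bump from {0x01, 0x04, 0x0B} to {0x01, 0x04, 0x0C} is exactly v1.4.11 to v1.4.12. A tiny sketch with a hypothetical helper (not the repo's GetVersionString) rendering the bytes:

	package main

	import "fmt"

	// versionString is a hypothetical helper mirroring the byte-per-component
	// scheme above: {0x01, 0x04, 0x0C} encodes v1.4.12.
	func versionString(v []byte) string {
		return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
	}

	func main() {
		fmt.Println(versionString([]byte{0x01, 0x04, 0x0C})) // v1.4.12
	}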

View File

@ -320,6 +320,7 @@ func (e *CeremonyDataClockConsensusEngine) Start() <-chan error {
panic(err)
}
e.latestFrameReceived = frame.FrameNumber
e.logger.Info(
"preparing peer announce",
zap.Uint64("frame_number", frame.FrameNumber),
@ -463,9 +464,12 @@ func (e *CeremonyDataClockConsensusEngine) runLoop() {
latestFrame = dataFrame
}
if e.latestFrameReceived < latestFrame.FrameNumber {
e.latestFrameReceived = latestFrame.FrameNumber
go func() {
e.frameChan <- latestFrame
}()
}
var nextFrame *protobufs.ClockFrame
if nextFrame, err = e.prove(latestFrame); err != nil {
@ -505,9 +509,12 @@ func (e *CeremonyDataClockConsensusEngine) runLoop() {
}
}
if e.latestFrameReceived < latestFrame.FrameNumber {
e.latestFrameReceived = latestFrame.FrameNumber
go func() {
e.frameChan <- latestFrame
}()
}
var nextFrame *protobufs.ClockFrame
if nextFrame, err = e.prove(latestFrame); err != nil {
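
Note: the frame hand-off is wrapped in a goroutine so the consensus run loop never blocks when the frame channel has no ready receiver. A minimal, self-contained sketch of the pattern; the 50 ms consumer delay is illustrative only:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		frameCh := make(chan int) // unbuffered: a plain send blocks until received

		for frame := 1; frame <= 3; frame++ {
			frame := frame // capture the loop variable for the goroutine
			go func() {
				frameCh <- frame // hand off without stalling the run loop
			}()
		}

		// A slow consumer drains the channel; note that sends from separate
		// goroutines carry no ordering guarantee.
		for i := 0; i < 3; i++ {
			time.Sleep(50 * time.Millisecond)
			fmt.Println("received frame", <-frameCh)
		}
	}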

View File

@ -513,22 +513,24 @@ func (e *CeremonyDataClockConsensusEngine) collect(
latest := currentFramePublished
// With the increase of network size, constrain down to top thirty
for i := 0; i < 30; i++ {
for {
peerId, maxFrame, err := e.GetMostAheadPeer()
if err != nil {
e.logger.Warn("no peers available, skipping sync")
break
} else if peerId == nil {
e.logger.Info("currently up to date, skipping sync")
break
} else if maxFrame-2 > latest.FrameNumber {
if maxFrame > latest.FrameNumber {
e.syncingStatus = SyncStatusSynchronizing
if err != nil {
e.logger.Info("no peers available for sync, waiting")
time.Sleep(5 * time.Second)
} else if maxFrame-2 > latest.FrameNumber {
latest, err = e.sync(latest, maxFrame, peerId)
if err == nil {
if err != nil {
time.Sleep(30 * time.Second)
} else {
break
}
}
} else {
break
}
}
e.syncingStatus = SyncStatusNotSyncing
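
Note: the bounded for i < 30 loop becomes an open retry loop: wait 5 s when no peers are available, back off 30 s after a failed sync, and exit once caught up. A runnable sketch of that shape, with getMostAheadPeer and syncTo as stand-ins for the engine's methods:

	package main

	import (
		"fmt"
		"time"
	)

	// Stand-ins for the engine's methods; these succeed immediately.
	func getMostAheadPeer() (peer []byte, maxFrame uint64, err error) {
		return []byte{0x01}, 10, nil
	}

	func syncTo(latest, maxFrame uint64, peer []byte) (uint64, error) {
		return maxFrame, nil
	}

	func main() {
		latest := uint64(0)
		for {
			peer, maxFrame, err := getMostAheadPeer()
			if err != nil {
				time.Sleep(5 * time.Second) // no peers yet: wait and retry
				continue
			}
			if peer == nil || maxFrame <= latest {
				break // already up to date
			}
			if latest, err = syncTo(latest, maxFrame, peer); err != nil {
				time.Sleep(30 * time.Second) // failed sync: back off, try again
				continue
			}
			break
		}
		fmt.Println("synced to frame", latest)
	}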

View File

@ -331,6 +331,12 @@ func (e *CeremonyDataClockConsensusEngine) handleClockFrameData(
zap.Uint64("frame_number", frame.FrameNumber),
)
if e.latestFrameReceived < frame.FrameNumber {
e.latestFrameReceived = frame.FrameNumber
go func() {
e.frameChan <- frame
}()
}
e.dataTimeReel.Insert(frame, isSync)
return nil
}
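
Note: the added guard forwards a frame to the channel only when its number exceeds the highest seen so far, so duplicate and stale frames are dropped before they reach the time reel. A deterministic sketch of the guard; it uses a buffered channel where the engine uses a goroutine hand-off:

	package main

	import "fmt"

	func main() {
		out := make(chan uint64, 8)
		var latest uint64
		for _, n := range []uint64{1, 3, 2, 4, 4} {
			if latest < n {
				latest = n
				out <- n // buffered here for determinism; the engine sends via goroutine
			}
		}
		close(out)
		for n := range out {
			fmt.Println("forwarded frame", n) // 1, 3, 4
		}
	}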

View File

@ -40,7 +40,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
) error {
e.currentReceivingSyncPeersMx.Lock()
if e.currentReceivingSyncPeers > int(
memory.TotalMemory()/uint64(2147483648)-4,
memory.TotalMemory()/uint64(4294967296)-4,
) {
e.currentReceivingSyncPeersMx.Unlock()
@ -117,7 +117,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
// account for skew
if dist > 30000 {
e.logger.Warn(
e.logger.Debug(
"peer provided challenge with too great of a distance",
zap.Int64("distance", dist),
)
@ -128,7 +128,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
authentication.Authentication.Response.PublicKey.KeyValue,
)
if err != nil {
e.logger.Warn(
e.logger.Debug(
"peer provided invalid pubkey",
zap.Binary(
"public_key",
@ -139,7 +139,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
}
if !(peer.ID(authentication.Authentication.PeerId)).MatchesPublicKey(key) {
e.logger.Warn(
e.logger.Debug(
"peer id does not match pubkey",
zap.Binary("peer_id", authentication.Authentication.PeerId),
zap.Binary(
@ -155,7 +155,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
authentication.Authentication.Response.Signature,
)
if err != nil || !b {
e.logger.Warn(
e.logger.Debug(
"peer provided invalid signature",
zap.Binary("peer_id", authentication.Authentication.PeerId),
zap.Binary(
@ -174,7 +174,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
authentication.Authentication.PeerId,
)
if manifest == nil || manifest.Bandwidth <= 1048576 {
e.logger.Warn(
e.logger.Debug(
"peer manifest was null or bandwidth was low",
zap.Binary("peer_id", authentication.Authentication.PeerId),
)
@ -243,7 +243,7 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
ParentSelector: selector.FillBytes(make([]byte, 32)),
},
)
rangeSubtract := uint64(16)
rangeSubtract := uint64(4)
for {
parentNumber := to - uint64(rangeSubtract)
@ -323,9 +323,9 @@ func (e *CeremonyDataClockConsensusEngine) NegotiateCompressedSyncFrames(
}
for {
if to == 0 || to-from > 16 {
if max > from+15 {
to = from + 16
if to == 0 || to-from > 4 {
if max > from+3 {
to = from + 4
} else {
to = max + 1
}
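
Note: the divisor change halves the admission rate: roughly one concurrent inbound sync peer per 4 GiB of total memory (previously 2 GiB), minus a fixed overhead, and the sync window constants shrink from 16 to 4 frames. A sketch of the cap arithmetic, with maxSyncPeers standing in for the inline check (memory.TotalMemory() is the handler's source of totalMemory):

	package main

	import "fmt"

	// maxSyncPeers mirrors the admission check above: one concurrent inbound
	// sync per 4 GiB of RAM past a fixed overhead. Note the check assumes at
	// least 16 GiB; below that the uint64 arithmetic would underflow.
	func maxSyncPeers(totalMemory uint64) int {
		return int(totalMemory/uint64(4294967296) - 4)
	}

	func main() {
		for _, gib := range []uint64{16, 32, 64} {
			fmt.Printf("%2d GiB -> %d concurrent sync peers\n", gib, maxSyncPeers(gib<<30))
		}
	}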

View File

@ -127,6 +127,20 @@ func (e *MasterClockConsensusEngine) handleSelfTestReport(
info := e.peerInfoManager.GetPeerInfo(peerID)
if info != nil {
info.MasterHeadFrame = report.MasterHeadFrame
if info.Bandwidth <= 1048576 {
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
defer cancel()
ch := e.pubSub.GetMultiaddrOfPeerStream(ctx, peerID)
select {
case <-ch:
go func() {
e.bandwidthTestCh <- peerID
}()
case <-ctx.Done():
}
}()
}
return nil
}
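
Note: for peers reporting low bandwidth, the added block waits up to five minutes for a multiaddr from the peer stream before queueing a re-test, using a context deadline so the waiting goroutine cannot leak. A compressed sketch of that guard, with timings shortened for demonstration:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
		defer cancel()

		ch := make(chan string)
		go func() {
			time.Sleep(50 * time.Millisecond) // simulated multiaddr discovery
			ch <- "/ip4/127.0.0.1/tcp/8336"   // illustrative address
		}()

		select {
		case addr := <-ch:
			fmt.Println("got multiaddr, queueing bandwidth test:", addr)
		case <-ctx.Done():
			fmt.Println("gave up:", ctx.Err())
		}
	}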

View File

@ -326,9 +326,10 @@ func (e *MasterClockConsensusEngine) performBandwidthTest(peerID []byte) {
cc, err := e.pubSub.GetDirectChannel(peerID, "validation")
if err != nil {
e.logger.Info(
"could not connect for validation",
"could not connect to peer for validation",
zap.String("peer_id", base58.Encode(peerID)),
)
// tag: dusk nuke this peer for now
e.pubSub.SetPeerScore(peerID, -1000)
return
@ -360,7 +361,7 @@ func (e *MasterClockConsensusEngine) performBandwidthTest(peerID []byte) {
if !bytes.Equal(verification, validation.Validation) {
e.logger.Info(
"provided invalid verification",
"peer provided invalid verification",
zap.String("peer_id", base58.Encode(peerID)),
)
// tag: dusk nuke this peer for now
@ -370,7 +371,7 @@ func (e *MasterClockConsensusEngine) performBandwidthTest(peerID []byte) {
if end-start > 2000 {
e.logger.Info(
"slow bandwidth, scoring out",
"peer has slow bandwidth, scoring out",
zap.String("peer_id", base58.Encode(peerID)),
)
// tag: dusk nuke this peer for now

View File

@ -571,7 +571,8 @@ func (e *CeremonyExecutionEngine) ProcessMessage(
func (e *CeremonyExecutionEngine) RunWorker() {
frameChan := e.clock.GetFrameChannel()
for {
frame := <-frameChan
select {
case frame := <-frameChan:
e.activeClockFrame = frame
e.logger.Info(
"evaluating next frame",
@ -812,6 +813,7 @@ func (e *CeremonyExecutionEngine) RunWorker() {
}
}
}
}
}
func (e *CeremonyExecutionEngine) publishMessage(
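
Note: replacing the bare channel receive with a select keeps the worker loop structured for additional cases later. A minimal sketch of that shape; the stop channel here is an illustrative assumption, not part of this commit:

	package main

	import "fmt"

	func main() {
		frameCh := make(chan int)
		stop := make(chan struct{}) // assumed for illustration only

		go func() {
			for f := 1; f <= 2; f++ {
				frameCh <- f
			}
			close(stop)
		}()

		for {
			select {
			case frame := <-frameCh:
				fmt.Println("evaluating next frame", frame)
			case <-stop:
				fmt.Println("worker stopping")
				return
			}
		}
	}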

View File

@ -60,6 +60,11 @@ var (
false,
"print the peer id to stdout from the config and exit",
)
cpuprofile = flag.String(
"cpuprofile",
"",
"write cpu profile to file",
)
memprofile = flag.String(
"memprofile",
"",
@ -84,6 +89,15 @@ func main() {
}()
}
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
if *balance {
config, err := config.LoadConfig(*configDirectory, "")
if err != nil {
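
Note: this is the stock runtime/pprof pattern. Two subtleties in the hunk above: StartCPUProfile's error is discarded, and the deferred StopCPUProfile only runs when main returns normally. A self-contained sketch with the error checked:

	package main

	import (
		"flag"
		"log"
		"os"
		"runtime/pprof"
	)

	var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")

	func main() {
		flag.Parse()
		if *cpuprofile != "" {
			f, err := os.Create(*cpuprofile)
			if err != nil {
				log.Fatal(err)
			}
			defer f.Close()
			// Unlike the hunk above, check the error from StartCPUProfile.
			if err := pprof.StartCPUProfile(f); err != nil {
				log.Fatal(err)
			}
			defer pprof.StopCPUProfile()
		}
		// ... the work to be profiled would run here ...
	}

The resulting file can then be inspected with go tool pprof <binary> <profile>.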

View File

@ -4,12 +4,14 @@ import (
"bytes"
"encoding/binary"
"math/big"
"sort"
"github.com/cockroachdb/pebble"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
@ -74,7 +76,6 @@ type ClockStore interface {
ResetMasterClockFrames(filter []byte) error
ResetDataClockFrames(filter []byte) error
Compact(
masterFilter []byte,
dataFilter []byte,
) error
GetTotalDistance(
@ -281,6 +282,7 @@ const CLOCK_DATA_FRAME_DATA = 0x01
const CLOCK_DATA_FRAME_CANDIDATE_DATA = 0x02
const CLOCK_DATA_FRAME_FRECENCY_DATA = 0x03
const CLOCK_DATA_FRAME_DISTANCE_DATA = 0x04
const CLOCK_COMPACTION_DATA = 0x05
const CLOCK_MASTER_FRAME_INDEX_EARLIEST = 0x10 | CLOCK_MASTER_FRAME_DATA
const CLOCK_MASTER_FRAME_INDEX_LATEST = 0x20 | CLOCK_MASTER_FRAME_DATA
const CLOCK_MASTER_FRAME_INDEX_PARENT = 0x30 | CLOCK_MASTER_FRAME_DATA
@ -1200,19 +1202,20 @@ func (p *PebbleClockStore) ResetDataClockFrames(filter []byte) error {
}
func (p *PebbleClockStore) Compact(
masterFilter []byte,
dataFilter []byte,
) error {
if masterFilter != nil {
if err := p.db.Compact(
clockMasterFrameKey(masterFilter, 0),
clockMasterFrameKey(masterFilter, 1000000),
true,
); err != nil {
return errors.Wrap(err, "compact")
version, closer, err := p.db.Get([]byte{CLOCK_COMPACTION_DATA})
cleared := true
if err != nil {
cleared = false
} else {
if bytes.Compare(version, config.GetVersion()) < 0 {
cleared = false
}
closer.Close()
}
if !cleared {
// If this node has been around since the early days, this is going to free
// up a lot of cruft.
if err := p.db.DeleteRange(
@ -1248,37 +1251,313 @@ func (p *PebbleClockStore) Compact(
); err != nil {
return errors.Wrap(err, "compact")
}
}
if dataFilter != nil {
if err := p.db.Compact(
dataProofMetadataKey(
dataFilter,
make([]byte, 74),
),
dataProofMetadataKey(
dataFilter,
bytes.Repeat([]byte{0xff}, 74),
),
true,
); err != nil {
if dataFilter != nil && !cleared {
parents := [][]byte{}
proofs := map[string]struct{}{}
commits := map[string]struct{}{}
data := map[string]struct{}{}
idxValue, closer, err := p.db.Get(clockDataLatestIndex(dataFilter))
if err != nil {
if errors.Is(err, pebble.ErrNotFound) {
return errors.Wrap(err, "compact")
}
return errors.Wrap(err, "compact")
}
last := binary.BigEndian.Uint64(idxValue)
closer.Close()
for frameNumber := uint64(1); frameNumber <= last; frameNumber++ {
value, closer, err := p.db.Get(clockDataFrameKey(dataFilter, frameNumber))
if err != nil {
if errors.Is(err, pebble.ErrNotFound) {
return errors.Wrap(err, "compact")
}
return errors.Wrap(err, "compact")
}
frame := &protobufs.ClockFrame{}
if len(value) == (len(dataFilter) + 42) {
frameValue, frameCloser, err := p.db.Get(value)
if err != nil {
return errors.Wrap(err, "compact")
}
if err := proto.Unmarshal(frameValue, frame); err != nil {
return errors.Wrap(err, "compact")
}
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
parents = append(parents,
clockDataParentIndexKey(dataFilter, frameNumber, selector.FillBytes(
make([]byte, 32),
)),
)
closer.Close()
frameCloser.Close()
} else {
if err := proto.Unmarshal(value, frame); err != nil {
return errors.Wrap(err, "compact")
}
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
err = p.db.Set(
clockDataParentIndexKey(dataFilter, frameNumber, selector.FillBytes(
make([]byte, 32),
)),
value,
)
if err != nil {
return errors.Wrap(err, "compact")
}
err = p.db.Set(
clockDataFrameKey(dataFilter, frameNumber),
clockDataParentIndexKey(dataFilter, frameNumber, selector.FillBytes(
make([]byte, 32),
)),
)
parents = append(parents,
clockDataParentIndexKey(dataFilter, frameNumber, selector.FillBytes(
make([]byte, 32),
)),
)
closer.Close()
}
for i := 0; i < len(frame.Input[516:])/74; i++ {
p.logger.Info(
"preparing indexes for frame compaction",
zap.Uint64("frame_number", frameNumber),
zap.Uint64("max_frame_number", last),
)
commit := frame.Input[516+(i*74) : 516+((i+1)*74)]
frameProofs, frameCommits, frameData, err :=
internalListAggregateProofKeys(
p.db,
dataFilter,
commit,
frameNumber,
)
if err != nil {
return errors.Wrap(err, "compact")
}
for _, proof := range frameProofs {
proof := proof
proofs[string(proof)] = struct{}{}
}
for _, comm := range frameCommits {
comm := comm
commits[string(comm)] = struct{}{}
}
for _, d := range frameData {
d := d
data[string(d)] = struct{}{}
}
}
}
p.logger.Info("sorting indexes for bulk clear")
sortedProofKeys := [][]byte{}
for k := range proofs {
k := k
sortedProofKeys = append(sortedProofKeys, []byte(k))
}
proofs = nil
sort.Slice(sortedProofKeys, func(i, j int) bool {
return bytes.Compare(sortedProofKeys[i], sortedProofKeys[j]) < 0
})
sortedCommitKeys := [][]byte{}
for k := range commits {
k := k
sortedCommitKeys = append(sortedCommitKeys, []byte(k))
}
commits = nil
sort.Slice(sortedCommitKeys, func(i, j int) bool {
return bytes.Compare(sortedCommitKeys[i], sortedCommitKeys[j]) < 0
})
sortedDataKeys := [][]byte{}
for k := range data {
k := k
sortedDataKeys = append(sortedDataKeys, []byte(k))
}
data = nil
sort.Slice(sortedDataKeys, func(i, j int) bool {
return bytes.Compare(sortedDataKeys[i], sortedDataKeys[j]) < 0
})
for i := uint64(0); i < uint64(len(parents)); i++ {
p.logger.Info(
"clearing orphaned frames for frame number",
zap.Uint64("frame_number", i+1),
zap.Int("max_frame_number", len(parents)),
)
pre := clockDataParentIndexKey(
dataFilter,
i+1,
bytes.Repeat([]byte{0x00}, 32),
)
err := p.db.DeleteRange(
pre,
parents[i],
)
if err != nil {
return errors.Wrap(err, "compact")
}
start := new(big.Int).SetBytes(parents[i])
start.Add(start, big.NewInt(1))
startBytes := start.FillBytes(make([]byte, len(parents[i])))
post := clockDataParentIndexKey(
dataFilter,
i+1,
bytes.Repeat([]byte{0xff}, 32),
)
err = p.db.DeleteRange(
startBytes,
post,
)
if err != nil {
return errors.Wrap(err, "compact")
}
}
for i := -1; i < len(sortedProofKeys); i++ {
p.logger.Info(
"clearing orphaned proof metadata",
zap.Int("proof_range_index", i+1),
zap.Int("max_proof_range_index", len(sortedProofKeys)),
)
var start, end []byte
if i == -1 {
start = dataProofMetadataKey(
dataFilter,
bytes.Repeat([]byte{0x00}, 74),
)
} else {
startBI := new(big.Int).SetBytes(sortedProofKeys[i])
startBI.Add(startBI, big.NewInt(1))
start = startBI.FillBytes(make([]byte, len(sortedProofKeys[i])))
}
if i == len(sortedProofKeys)-1 {
end = dataProofMetadataKey(
dataFilter,
bytes.Repeat([]byte{0xff}, 74),
)
} else {
end = sortedProofKeys[i+1]
}
err := p.db.DeleteRange(
start,
end,
)
if err != nil {
return errors.Wrap(err, "compact")
}
}
for i := -1; i < len(sortedCommitKeys); i++ {
p.logger.Info(
"clearing orphaned commits",
zap.Int("commit_range_index", i+1),
zap.Int("max_commit_range_index", len(sortedProofKeys)),
)
var start, end []byte
if i == -1 {
start = dataProofInclusionKey(
dataFilter,
bytes.Repeat([]byte{0x00}, 74),
0,
)
} else {
start = make([]byte, len(sortedCommitKeys[i]))
copy(start[:], sortedCommitKeys[i][:])
start[74] = 0xff
start[75] = 0xff
start[76] = 0xff
start[77] = 0xff
start[78] = 0xff
start[79] = 0xff
start[80] = 0xff
start[81] = 0xff
}
if i == len(sortedCommitKeys)-1 {
end = dataProofInclusionKey(
dataFilter,
bytes.Repeat([]byte{0xff}, 74),
0xffffffffffffffff,
)
} else {
end = sortedCommitKeys[i+1]
}
err := p.db.DeleteRange(
start,
end,
)
if err != nil {
return errors.Wrap(err, "compact")
}
}
for i := -1; i < len(sortedDataKeys); i++ {
p.logger.Info(
"clearing orphaned data",
zap.Int("data_range_index", i+1),
zap.Int("max_data_range_index", len(sortedProofKeys)),
)
var start, end []byte
if i == -1 {
start = dataProofSegmentKey(
dataFilter,
bytes.Repeat([]byte{0x00}, 32),
)
} else {
startBI := new(big.Int).SetBytes(sortedDataKeys[i])
startBI.Add(startBI, big.NewInt(1))
start = startBI.FillBytes(make([]byte, len(sortedDataKeys[i])))
}
if i == len(sortedDataKeys)-1 {
end = dataProofSegmentKey(
dataFilter,
bytes.Repeat([]byte{0xff}, 32),
)
} else {
end = sortedDataKeys[i+1]
}
err := p.db.DeleteRange(
start,
end,
)
if err != nil {
return errors.Wrap(err, "compact")
}
}
if err := p.db.DeleteRange(
clockDataCandidateFrameKey(
@ -1297,49 +1576,16 @@ func (p *PebbleClockStore) Compact(
return errors.Wrap(err, "compact")
}
if err := p.db.Compact(
clockDataFrameKey(dataFilter, 0),
clockDataFrameKey(dataFilter, 1000000),
true,
); err != nil {
err = p.db.Set([]byte{CLOCK_COMPACTION_DATA}, config.GetVersion())
if err != nil {
return errors.Wrap(err, "compact")
}
}
if err := p.db.Compact(
clockDataCandidateFrameKey(
dataFilter,
0,
make([]byte, 32),
make([]byte, 32),
),
clockDataCandidateFrameKey(
dataFilter,
1000000,
bytes.Repeat([]byte{0xff}, 32),
bytes.Repeat([]byte{0xff}, 32),
),
true,
); err != nil {
if err := p.db.CompactAll(); err != nil {
return errors.Wrap(err, "compact")
}
if err := p.db.Compact(
clockDataParentIndexKey(
dataFilter,
0,
make([]byte, 32),
),
clockDataParentIndexKey(
dataFilter,
1000000,
bytes.Repeat([]byte{0xff}, 32),
),
true,
); err != nil {
return errors.Wrap(err, "compact")
}
}
return nil
}
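
Note: the rewritten Compact is gated on a version marker stored under CLOCK_COMPACTION_DATA, so the expensive orphan cleanup runs at most once per release; once it succeeds, the marker is written and CompactAll finishes the pass. A minimal sketch of the version-gate pattern, with a map standing in for the Pebble wrappers:

	package main

	import (
		"bytes"
		"fmt"
	)

	// A map stands in for the Pebble-backed store.
	var store = map[string][]byte{}

	func get(k []byte) ([]byte, bool) { v, ok := store[string(k)]; return v, ok }
	func set(k, v []byte)             { store[string(k)] = v }

	var compactionKey = []byte{0x05} // CLOCK_COMPACTION_DATA
	var version = []byte{0x01, 0x04, 0x0C}

	func compact() {
		if v, ok := get(compactionKey); ok && bytes.Compare(v, version) >= 0 {
			fmt.Println("already compacted at this version; skipping heavy cleanup")
			return
		}
		fmt.Println("running one-time cleanup for this version")
		// ... delete orphaned parent indexes, proofs, commits, and data ...
		set(compactionKey, version)
	}

	func main() {
		compact()
		compact() // no-op until the version bumps again
	}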

View File

@ -2,6 +2,7 @@ package store
import (
"encoding/binary"
"fmt"
"github.com/cockroachdb/pebble"
"github.com/pkg/errors"
@ -203,6 +204,75 @@ func internalGetAggregateProof(
return aggregate, nil
}
func internalListAggregateProofKeys(
db KVDB,
filter []byte,
commitment []byte,
frameNumber uint64,
) ([][]byte, [][]byte, [][]byte, error) {
proofs := [][]byte{dataProofMetadataKey(filter, commitment)}
commits := [][]byte{}
data := [][]byte{}
value, closer, err := db.Get(dataProofMetadataKey(filter, commitment))
if err != nil {
fmt.Println("proof lookup failed")
if errors.Is(err, pebble.ErrNotFound) {
return nil, nil, nil, ErrNotFound
}
return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
}
defer closer.Close()
copied := make([]byte, len(value[8:]))
limit := binary.BigEndian.Uint64(value[0:8])
copy(copied, value[8:])
iter, err := db.NewIter(
dataProofInclusionKey(filter, commitment, 0),
dataProofInclusionKey(filter, commitment, limit+1),
)
if err != nil {
fmt.Println("inclusion lookup failed")
return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
}
i := uint32(0)
commits = append(commits, dataProofInclusionKey(filter, commitment, 0))
for iter.First(); iter.Valid(); iter.Next() {
incCommit := iter.Value()
urlLength := binary.BigEndian.Uint16(incCommit[:2])
commitLength := binary.BigEndian.Uint16(incCommit[2:4])
url := make([]byte, urlLength)
copy(url, incCommit[4:urlLength+4])
commit := make([]byte, commitLength)
copy(commit, incCommit[urlLength+4:urlLength+4+commitLength])
remainder := int(urlLength + 4 + commitLength)
for j := 0; j < (len(incCommit)-remainder)/32; j++ {
start := remainder + (j * 32)
end := remainder + ((j + 1) * 32)
data = append(data, dataProofSegmentKey(filter, incCommit[start:end]))
}
i++
}
if err = iter.Close(); err != nil {
return nil, nil, nil, errors.Wrap(err, "list aggregate proof")
}
return proofs, commits, data, nil
}
func (p *PebbleDataProofStore) GetAggregateProof(
filter []byte,
commitment []byte,
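
Note: the parsing loop above implies an inclusion-commit value layout of [urlLen:2][commitLen:2][url][commit][32-byte segment hashes...]. A small decoder sketch of that assumed layout; the field names are illustrative, not the repo's:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// decode splits an inclusion-commit value per the layout implied by the
	// iterator loop above. Assumed layout, inferred from the parsing code.
	func decode(incCommit []byte) (url, commit []byte, segments [][]byte) {
		urlLength := binary.BigEndian.Uint16(incCommit[:2])
		commitLength := binary.BigEndian.Uint16(incCommit[2:4])
		url = incCommit[4 : 4+urlLength]
		commit = incCommit[4+urlLength : 4+urlLength+commitLength]
		rest := incCommit[4+urlLength+commitLength:]
		for len(rest) >= 32 {
			segments = append(segments, rest[:32])
			rest = rest[32:]
		}
		return url, commit, segments
	}

	func main() {
		buf := []byte{0x00, 0x03, 0x00, 0x02}
		buf = append(buf, []byte("url")...)
		buf = append(buf, []byte("cc")...)
		buf = append(buf, make([]byte, 64)...) // two zeroed 32-byte segments
		u, c, segs := decode(buf)
		fmt.Println(string(u), string(c), len(segs)) // url cc 2
	}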

View File

@ -344,4 +344,8 @@ func (d *InMemKVDB) DeleteRange(start, end []byte) error {
return nil
}
func (d *InMemKVDB) CompactAll() error {
return nil
}
var _ KVDB = (*InMemKVDB)(nil)

View File

@ -11,6 +11,7 @@ type KVDB interface {
NewBatch() Transaction
NewIter(lowerBound []byte, upperBound []byte) (Iterator, error)
Compact(start, end []byte, parallelize bool) error
CompactAll() error
Close() error
DeleteRange(start, end []byte) error
}

View File

@ -4,6 +4,7 @@ import (
"io"
"github.com/cockroachdb/pebble"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/config"
)
@ -60,6 +61,30 @@ func (p *PebbleDB) DeleteRange(start, end []byte) error {
return p.db.DeleteRange(start, end, &pebble.WriteOptions{Sync: true})
}
func (p *PebbleDB) CompactAll() error {
iter, err := p.db.NewIter(nil)
if err != nil {
return errors.Wrap(err, "compact all")
}
var first, last []byte
if iter.First() {
first = append(first, iter.Key()...)
}
if iter.Last() {
last = append(last, iter.Key()...)
}
if err := iter.Close(); err != nil {
return errors.Wrap(err, "compact all")
}
if err := p.Compact(first, last, true); err != nil {
return errors.Wrap(err, "compact all")
}
return nil
}
var _ KVDB = (*PebbleDB)(nil)
type Transaction interface {