Mirror of https://source.quilibrium.com/quilibrium/ceremonyclient.git, synced 2024-12-24 23:55:18 +00:00

Commit ffab09ae6b (parent ff6715575f): Add ceremony-targeted go-libp2p-blossomsub
go-libp2p-blossomsub/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
.idea/
.vscode/
go-libp2p-blossomsub/LICENSE (new file, 12 lines)
@@ -0,0 +1,12 @@
This project heavily borrows code from go-libp2p-pubsub's codebase and therefore must
be subject to the license the pubsub repo utilizes. This has been kept verbatim for
reference:

This project is transitioning from an MIT-only license to a dual MIT/Apache-2.0 license.
Unless otherwise noted, all code contributed prior to 2019-05-06 and not contributed by
a user listed in [this signoff issue](https://github.com/ipfs/go-ipfs/issues/6302) is
licensed under MIT-only. All new contributions (and past contributions since 2019-05-06)
are licensed under a dual MIT/Apache-2.0 license.

MIT: https://www.opensource.org/licenses/mit
Apache-2.0: https://www.apache.org/licenses/license-2.0
go-libp2p-blossomsub/LICENSE-APACHE (new file, 5 lines)
@@ -0,0 +1,5 @@
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
go-libp2p-blossomsub/LICENSE-MIT (new file, 19 lines)
@@ -0,0 +1,19 @@
The MIT License (MIT)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
go-libp2p-blossomsub/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
# go-libp2p-blossomsub

First pass of BlossomSub: a rudimentary fork of gossipsub. It does not merge subscriptions, so bloom filtering needs to happen at the publish level. This will be updated post-ceremony with the full bloom filter version.

## License

The go-libp2p-blossomsub project, being forked from pubsub, inherits the dual license under Apache 2.0 and MIT terms:

- Apache License, Version 2.0 ([LICENSE-APACHE](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](./LICENSE-MIT) or http://opensource.org/licenses/MIT)
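For orientation, here is a minimal usage sketch. It is not part of this commit: the import path is inferred from the `pb` imports in the files below, host construction follows standard go-libp2p usage, and local delivery of a node's own publish is assumed to behave as in upstream pubsub.

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New() // a default libp2p host
	if err != nil {
		panic(err)
	}

	ps, err := blossomsub.NewBlossomSub(ctx, h)
	if err != nil {
		panic(err)
	}

	// Bitmasks are raw bytes rather than topic strings.
	bitmask := []byte{0xff, 0x00, 0x00, 0x00}
	sub, err := ps.Subscribe(bitmask)
	if err != nil {
		panic(err)
	}

	if err := ps.Publish(bitmask, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.Data))
}
```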
go-libp2p-blossomsub/backoff.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package blossomsub

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

const (
	MinBackoffDelay        = 100 * time.Millisecond
	MaxBackoffDelay        = 10 * time.Second
	TimeToLive             = 10 * time.Minute
	BackoffCleanupInterval = 1 * time.Minute
	BackoffMultiplier      = 2
	MaxBackoffJitterCoff   = 100
	MaxBackoffAttempts     = 4
)

type backoffHistory struct {
	duration  time.Duration
	lastTried time.Time
	attempts  int
}

type backoff struct {
	mu          sync.Mutex
	info        map[peer.ID]*backoffHistory
	ct          int           // size threshold that kicks off the cleaner
	ci          time.Duration // cleanup intervals
	maxAttempts int           // maximum backoff attempts prior to ejection
}

func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Duration, maxAttempts int) *backoff {
	b := &backoff{
		mu:          sync.Mutex{},
		ct:          sizeThreshold,
		ci:          cleanupInterval,
		maxAttempts: maxAttempts,
		info:        make(map[peer.ID]*backoffHistory),
	}

	rand.Seed(time.Now().UnixNano()) // used for jitter
	go b.cleanupLoop(ctx)

	return b
}

func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	h, ok := b.info[id]
	switch {
	case !ok || time.Since(h.lastTried) > TimeToLive:
		// first request goes immediately.
		h = &backoffHistory{
			duration: time.Duration(0),
			attempts: 0,
		}
	case h.attempts >= b.maxAttempts:
		return 0, fmt.Errorf("peer %s has reached its maximum backoff attempts", id)

	case h.duration < MinBackoffDelay:
		h.duration = MinBackoffDelay

	case h.duration < MaxBackoffDelay:
		jitter := rand.Intn(MaxBackoffJitterCoff)
		h.duration = (BackoffMultiplier * h.duration) + time.Duration(jitter)*time.Millisecond
		if h.duration > MaxBackoffDelay || h.duration < 0 {
			h.duration = MaxBackoffDelay
		}
	}

	h.attempts += 1
	h.lastTried = time.Now()
	b.info[id] = h
	return h.duration, nil
}

func (b *backoff) cleanup() {
	b.mu.Lock()
	defer b.mu.Unlock()

	for id, h := range b.info {
		if time.Since(h.lastTried) > TimeToLive {
			delete(b.info, id)
		}
	}
}

func (b *backoff) cleanupLoop(ctx context.Context) {
	ticker := time.NewTicker(b.ci)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return // pubsub shutting down
		case <-ticker.C:
			b.cleanup()
		}
	}
}
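The backoff above is driven entirely through `newBackoff` and `updateAndGet`. As a minimal in-package sketch (illustrative only; `retryWithBackoff` is not part of the commit), a caller gates its retries like this:

```go
// retryWithBackoff is a hypothetical in-package helper showing the
// intended call pattern for the backoff type above.
func retryWithBackoff(ctx context.Context, id peer.ID, attempt func() error) error {
	b := newBackoff(ctx, 10, BackoffCleanupInterval, MaxBackoffAttempts)
	for {
		delay, err := b.updateAndGet(id)
		if err != nil {
			return err // the peer exhausted its maximum backoff attempts
		}
		// Zero on the first call, then MinBackoffDelay, then roughly
		// doubling (plus jitter) up to MaxBackoffDelay.
		time.Sleep(delay)
		if err := attempt(); err == nil {
			return nil
		}
	}
}
```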
go-libp2p-blossomsub/backoff_test.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package blossomsub

import (
	"context"
	"fmt"
	"math"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestBackoff_Update(t *testing.T) {
	id1 := peer.ID("peer-1")
	id2 := peer.ID("peer-2")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	size := 10
	cleanupInterval := 5 * time.Second
	maxBackoffAttempts := 10

	b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts)

	if len(b.info) > 0 {
		t.Fatal("non-empty info map for backoff")
	}

	if d, err := b.updateAndGet(id1); d != time.Duration(0) || err != nil {
		t.Fatalf("invalid initialization: %v, \t, %s", d, err)
	}
	if d, err := b.updateAndGet(id2); d != time.Duration(0) || err != nil {
		t.Fatalf("invalid initialization: %v, \t, %s", d, err)
	}

	for i := 0; i < maxBackoffAttempts-1; i++ {
		got, err := b.updateAndGet(id1)
		if err != nil {
			t.Fatalf("unexpected error post update: %s", err)
		}

		expected := time.Duration(math.Pow(BackoffMultiplier, float64(i)) *
			float64(MinBackoffDelay+MaxBackoffJitterCoff*time.Millisecond))
		if expected > MaxBackoffDelay {
			expected = MaxBackoffDelay
		}

		if expected < got { // considering jitter, expected backoff must always be greater than or equal to actual.
			t.Fatalf("invalid backoff result, expected: %v, got: %v", expected, got)
		}
	}

	// trying once more beyond the threshold, hence expecting exceeding threshold
	if _, err := b.updateAndGet(id1); err == nil {
		t.Fatalf("expected an error for going beyond threshold but got nil")
	}

	got, err := b.updateAndGet(id2)
	if err != nil {
		t.Fatalf("unexpected error post update: %s", err)
	}
	if got != MinBackoffDelay {
		t.Fatalf("invalid backoff result, expected: %v, got: %v", MinBackoffDelay, got)
	}

	// sets last tried of id2 to long ago that it resets back upon next try.
	// update attempts on id2 are below threshold, hence peer should never go beyond backoff attempt threshold.
	b.info[id2].lastTried = time.Now().Add(-TimeToLive)
	got, err = b.updateAndGet(id2)
	if err != nil {
		t.Fatalf("unexpected error post update: %s", err)
	}
	if got != time.Duration(0) {
		t.Fatalf("invalid ttl expiration, expected: %v, got: %v", time.Duration(0), got)
	}

	if len(b.info) != 2 {
		t.Fatalf("pre-invalidation attempt, info map size mismatch, expected: %d, got: %d", 2, len(b.info))
	}
}

func TestBackoff_Clean(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	size := 10
	cleanupInterval := 2 * time.Second
	maxBackoffAttempts := 100 // setting attempts to a high number hence testing cleanup logic.
	b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts)

	for i := 0; i < size; i++ {
		id := peer.ID(fmt.Sprintf("peer-%d", i))
		_, err := b.updateAndGet(id)
		if err != nil {
			t.Fatalf("unexpected error post update: %s", err)
		}
		b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry
	}

	if len(b.info) != size {
		t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info))
	}

	// waits for a cleanup loop to kick-in
	time.Sleep(2 * cleanupInterval)

	// next update should trigger cleanup
	got, err := b.updateAndGet(peer.ID("some-new-peer"))
	if err != nil {
		t.Fatalf("unexpected error post update: %s", err)
	}
	if got != time.Duration(0) {
		t.Fatalf("invalid backoff result, expected: %v, got: %v", time.Duration(0), got)
	}

	// except "some-new-peer" every other records must be cleaned up
	if len(b.info) != 1 {
		t.Fatalf("info map size mismatch, expected: %d, got: %d", 1, len(b.info))
	}
}
go-libp2p-blossomsub/bitmask.go (new file, 477 lines)
@@ -0,0 +1,477 @@
package blossomsub

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/peer"
)

// ErrBitmaskClosed is returned if a Bitmask is utilized after it has been closed
var ErrBitmaskClosed = errors.New("this Bitmask is closed, try opening a new one")

// ErrNilSignKey is returned if a nil private key was provided
var ErrNilSignKey = errors.New("nil sign key")

// ErrEmptyPeerID is returned if an empty peer ID was provided
var ErrEmptyPeerID = errors.New("empty peer ID")

// Bitmask is the handle for a pubsub bitmask
type Bitmask struct {
	p       *PubSub
	bitmask []byte

	evtHandlerMux sync.RWMutex
	evtHandlers   map[*BitmaskEventHandler]struct{}

	mux    sync.RWMutex
	closed bool
}

// Bitmask returns the bitmask associated with t
func (t *Bitmask) Bitmask() []byte {
	return t.bitmask
}

// SetScoreParams sets the bitmask score parameters if the pubsub router supports peer
// scoring
func (t *Bitmask) SetScoreParams(p *BitmaskScoreParams) error {
	err := p.validate()
	if err != nil {
		return fmt.Errorf("invalid bitmask score parameters: %w", err)
	}

	t.mux.Lock()
	defer t.mux.Unlock()

	if t.closed {
		return ErrBitmaskClosed
	}

	result := make(chan error, 1)
	update := func() {
		gs, ok := t.p.rt.(*BlossomSubRouter)
		if !ok {
			result <- fmt.Errorf("pubsub router is not BlossomSub")
			return
		}

		if gs.score == nil {
			result <- fmt.Errorf("peer scoring is not enabled in router")
			return
		}

		err := gs.score.SetBitmaskScoreParams(t.bitmask, p)
		result <- err
	}

	select {
	case t.p.eval <- update:
		err = <-result
		return err

	case <-t.p.ctx.Done():
		return t.p.ctx.Err()
	}
}

// EventHandler creates a handle for bitmask specific events
// Multiple event handlers may be created and will operate independently of each other
func (t *Bitmask) EventHandler(opts ...BitmaskEventHandlerOpt) (*BitmaskEventHandler, error) {
	t.mux.RLock()
	defer t.mux.RUnlock()
	if t.closed {
		return nil, ErrBitmaskClosed
	}

	h := &BitmaskEventHandler{
		bitmask: t,
		err:     nil,

		evtLog:   make(map[peer.ID]EventType),
		evtLogCh: make(chan struct{}, 1),
	}

	for _, opt := range opts {
		err := opt(h)
		if err != nil {
			return nil, err
		}
	}

	done := make(chan struct{}, 1)

	select {
	case t.p.eval <- func() {
		tmap := t.p.bitmasks[string(t.bitmask)]
		for p := range tmap {
			h.evtLog[p] = PeerJoin
		}

		t.evtHandlerMux.Lock()
		t.evtHandlers[h] = struct{}{}
		t.evtHandlerMux.Unlock()
		done <- struct{}{}
	}:
	case <-t.p.ctx.Done():
		return nil, t.p.ctx.Err()
	}

	<-done

	return h, nil
}

func (t *Bitmask) sendNotification(evt PeerEvent) {
	t.evtHandlerMux.RLock()
	defer t.evtHandlerMux.RUnlock()

	for h := range t.evtHandlers {
		h.sendNotification(evt)
	}
}

// Subscribe returns a new Subscription for the bitmask.
// Note that subscription is not an instantaneous operation. It may take some time
// before the subscription is processed by the pubsub main loop and propagated to our peers.
func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
	t.mux.RLock()
	defer t.mux.RUnlock()
	if t.closed {
		return nil, ErrBitmaskClosed
	}

	sub := &Subscription{
		bitmask: t.bitmask,
		ctx:     t.p.ctx,
	}

	for _, opt := range opts {
		err := opt(sub)
		if err != nil {
			return nil, err
		}
	}

	if sub.ch == nil {
		// apply the default size
		sub.ch = make(chan *Message, 32)
	}

	out := make(chan *Subscription, 1)

	t.p.disc.Discover(sub.bitmask)

	select {
	case t.p.addSub <- &addSubReq{
		sub:  sub,
		resp: out,
	}:
	case <-t.p.ctx.Done():
		return nil, t.p.ctx.Err()
	}

	return <-out, nil
}

// Relay enables message relaying for the bitmask and returns a reference
// cancel function. Subsequent calls increase the reference counter.
// To completely disable the relay, all references must be cancelled.
func (t *Bitmask) Relay() (RelayCancelFunc, error) {
	t.mux.RLock()
	defer t.mux.RUnlock()
	if t.closed {
		return nil, ErrBitmaskClosed
	}

	out := make(chan RelayCancelFunc, 1)

	t.p.disc.Discover(t.bitmask)

	select {
	case t.p.addRelay <- &addRelayReq{
		bitmask: t.bitmask,
		resp:    out,
	}:
	case <-t.p.ctx.Done():
		return nil, t.p.ctx.Err()
	}

	return <-out, nil
}

// RouterReady is a function that decides if a router is ready to publish
type RouterReady func(rt PubSubRouter, bitmask []byte) (bool, error)

// ProvideKey is a function that provides a private key and its associated peer ID when publishing a new message
type ProvideKey func() (crypto.PrivKey, peer.ID)

type PublishOptions struct {
	ready     RouterReady
	customKey ProvideKey
	local     bool
}

type PubOpt func(pub *PublishOptions) error

// Publish publishes data to bitmask.
func (t *Bitmask) Publish(ctx context.Context, data []byte, opts ...PubOpt) error {
	t.mux.RLock()
	defer t.mux.RUnlock()
	if t.closed {
		return ErrBitmaskClosed
	}

	pid := t.p.signID
	key := t.p.signKey

	pub := &PublishOptions{}
	for _, opt := range opts {
		err := opt(pub)
		if err != nil {
			return err
		}
	}

	if pub.customKey != nil && !pub.local {
		key, pid = pub.customKey()
		if key == nil {
			return ErrNilSignKey
		}
		if len(pid) == 0 {
			return ErrEmptyPeerID
		}
	}

	m := &pb.Message{
		Data:    data,
		Bitmask: t.bitmask,
		From:    nil,
		Seqno:   nil,
	}
	if pid != "" {
		m.From = []byte(pid)
		m.Seqno = t.p.nextSeqno()
	}
	if key != nil {
		m.From = []byte(pid)
		err := signMessage(pid, key, m)
		if err != nil {
			return err
		}
	}

	if pub.ready != nil {
		if t.p.disc.discovery != nil {
			t.p.disc.Bootstrap(ctx, t.bitmask, pub.ready)
		} else {
			// TODO: we could likely do better than polling every 200ms.
			// For example, block this goroutine on a channel,
			// and check again whenever events tell us that the number of
			// peers has increased.
			var ticker *time.Ticker
		readyLoop:
			for {
				// Check if ready for publishing.
				// Similar to what disc.Bootstrap does.
				res := make(chan bool, 1)
				select {
				case t.p.eval <- func() {
					done, _ := pub.ready(t.p.rt, t.bitmask)
					res <- done
				}:
					if <-res {
						break readyLoop
					}
				case <-t.p.ctx.Done():
					return t.p.ctx.Err()
				case <-ctx.Done():
					return ctx.Err()
				}
				if ticker == nil {
					ticker = time.NewTicker(200 * time.Millisecond)
					defer ticker.Stop()
				}

				select {
				case <-ticker.C:
				case <-ctx.Done():
					return fmt.Errorf("router is not ready: %w", ctx.Err())
				}
			}
		}
	}

	return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local})
}

// WithReadiness returns a publishing option for only publishing when the router is ready.
// This option is not useful unless PubSub is also using WithDiscovery
func WithReadiness(ready RouterReady) PubOpt {
	return func(pub *PublishOptions) error {
		pub.ready = ready
		return nil
	}
}

// WithLocalPublication returns a publishing option to notify in-process subscribers only.
// It prevents message publication to mesh peers.
// Useful in edge cases where the msg needs to be only delivered to the in-process subscribers,
// e.g. not to spam the network with outdated msgs.
// Should not be used specifically for in-process pubsubing.
func WithLocalPublication(local bool) PubOpt {
	return func(pub *PublishOptions) error {
		pub.local = local
		return nil
	}
}

// WithSecretKeyAndPeerId returns a publishing option for providing a custom private key and its corresponding peer ID
// This option is useful when we want to send messages from "virtual", never-connectable peers in the network
func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt {
	return func(pub *PublishOptions) error {
		pub.customKey = func() (crypto.PrivKey, peer.ID) {
			return key, pid
		}

		return nil
	}
}

// Close closes down the bitmask. Will return an error unless there are no active event handlers or subscriptions.
// Does not error if the bitmask is already closed.
func (t *Bitmask) Close() error {
	t.mux.Lock()
	defer t.mux.Unlock()
	if t.closed {
		return nil
	}

	req := &rmBitmaskReq{t, make(chan error, 1)}

	select {
	case t.p.rmBitmask <- req:
	case <-t.p.ctx.Done():
		return t.p.ctx.Err()
	}

	err := <-req.resp

	if err == nil {
		t.closed = true
	}

	return err
}

// ListPeers returns a list of peers we are connected to in the given bitmask.
func (t *Bitmask) ListPeers() []peer.ID {
	t.mux.RLock()
	defer t.mux.RUnlock()
	if t.closed {
		return []peer.ID{}
	}

	return t.p.ListPeers(t.bitmask)
}

type EventType int

const (
	PeerJoin EventType = iota
	PeerLeave
)

// BitmaskEventHandler is used to manage bitmask specific events. No Subscription is required to receive events.
type BitmaskEventHandler struct {
	bitmask *Bitmask
	err     error

	evtLogMx sync.Mutex
	evtLog   map[peer.ID]EventType
	evtLogCh chan struct{}
}

type BitmaskEventHandlerOpt func(t *BitmaskEventHandler) error

type PeerEvent struct {
	Type EventType
	Peer peer.ID
}

// Cancel closes the bitmask event handler
func (t *BitmaskEventHandler) Cancel() {
	bitmask := t.bitmask
	t.err = fmt.Errorf("bitmask event handler cancelled by calling handler.Cancel()")

	bitmask.evtHandlerMux.Lock()
	delete(bitmask.evtHandlers, t)
	t.bitmask.evtHandlerMux.Unlock()
}

func (t *BitmaskEventHandler) sendNotification(evt PeerEvent) {
	t.evtLogMx.Lock()
	t.addToEventLog(evt)
	t.evtLogMx.Unlock()
}

// addToEventLog assumes a lock has been taken to protect the event log
func (t *BitmaskEventHandler) addToEventLog(evt PeerEvent) {
	e, ok := t.evtLog[evt.Peer]
	if !ok {
		t.evtLog[evt.Peer] = evt.Type
		// send signal that an event has been added to the event log
		select {
		case t.evtLogCh <- struct{}{}:
		default:
		}
	} else if e != evt.Type {
		delete(t.evtLog, evt.Peer)
	}
}

// pullFromEventLog assumes a lock has been taken to protect the event log
func (t *BitmaskEventHandler) pullFromEventLog() (PeerEvent, bool) {
	for k, v := range t.evtLog {
		evt := PeerEvent{Peer: k, Type: v}
		delete(t.evtLog, k)
		return evt, true
	}
	return PeerEvent{}, false
}

// NextPeerEvent returns the next event regarding subscribed peers
// Guarantees: Peer Join and Peer Leave events for a given peer will fire in order.
// Unless a peer both Joins and Leaves before NextPeerEvent emits either event
// all events will eventually be received from NextPeerEvent.
func (t *BitmaskEventHandler) NextPeerEvent(ctx context.Context) (PeerEvent, error) {
	for {
		t.evtLogMx.Lock()
		evt, ok := t.pullFromEventLog()
		if ok {
			// make sure an event log signal is available if there are events in the event log
			if len(t.evtLog) > 0 {
				select {
				case t.evtLogCh <- struct{}{}:
				default:
				}
			}
			t.evtLogMx.Unlock()
			return evt, nil
		}
		t.evtLogMx.Unlock()

		select {
		case <-t.evtLogCh:
			continue
		case <-ctx.Done():
			return PeerEvent{}, ctx.Err()
		}
	}
}
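The `PubOpt` helpers above compose per publish call. A short in-package sketch (illustrative; it assumes the caller already holds a `*Bitmask` handle, which this file does not show how to obtain):

```go
// examplePublishOptions is a hypothetical in-package helper showing how
// the publish options above combine on a Bitmask handle.
func examplePublishOptions(ctx context.Context, bm *Bitmask, key crypto.PrivKey, pid peer.ID) error {
	// Deliver only to in-process subscribers; nothing is sent to mesh peers.
	if err := bm.Publish(ctx, []byte("local only"), WithLocalPublication(true)); err != nil {
		return err
	}
	// Sign as a "virtual", never-connectable peer with a caller-supplied key.
	return bm.Publish(ctx, []byte("from a virtual peer"), WithSecretKeyAndPeerId(key, pid))
}
```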
go-libp2p-blossomsub/bitmask_test.go (new file, 1064 lines)
File diff suppressed because it is too large.
go-libp2p-blossomsub/blacklist.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package blossomsub

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"

	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache"
)

// Blacklist is an interface for peer blacklisting.
type Blacklist interface {
	Add(peer.ID) bool
	Contains(peer.ID) bool
}

// MapBlacklist is a blacklist implementation using a perfect map
type MapBlacklist map[peer.ID]struct{}

// NewMapBlacklist creates a new MapBlacklist
func NewMapBlacklist() Blacklist {
	return MapBlacklist(make(map[peer.ID]struct{}))
}

func (b MapBlacklist) Add(p peer.ID) bool {
	b[p] = struct{}{}
	return true
}

func (b MapBlacklist) Contains(p peer.ID) bool {
	_, ok := b[p]
	return ok
}

// TimeCachedBlacklist is a blacklist implementation using a time cache
type TimeCachedBlacklist struct {
	tc timecache.TimeCache
}

// NewTimeCachedBlacklist creates a new TimeCachedBlacklist with the given expiry duration
func NewTimeCachedBlacklist(expiry time.Duration) (Blacklist, error) {
	b := &TimeCachedBlacklist{tc: timecache.NewTimeCache(expiry)}
	return b, nil
}

// Add returns a bool saying whether Add of peer was successful
func (b *TimeCachedBlacklist) Add(p peer.ID) bool {
	s := p.String()
	if b.tc.Has(s) {
		return false
	}
	b.tc.Add(s)
	return true
}

func (b *TimeCachedBlacklist) Contains(p peer.ID) bool {
	return b.tc.Has(p.String())
}
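Both implementations satisfy the same `Blacklist` interface; the choice is between permanent entries and entries that age out. A tiny in-package sketch (illustrative only):

```go
// exampleBlacklists is a hypothetical helper contrasting the two
// Blacklist implementations above.
func exampleBlacklists() {
	permanent := NewMapBlacklist()                          // entries never expire
	expiring, _ := NewTimeCachedBlacklist(10 * time.Minute) // entries age out after 10 minutes

	p := peer.ID("some-peer")
	permanent.Add(p)
	expiring.Add(p) // returns false if p is already cached and unexpired
	_ = permanent.Contains(p)
	_ = expiring.Contains(p)
}
```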
go-libp2p-blossomsub/blacklist_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package blossomsub

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestMapBlacklist(t *testing.T) {
	b := NewMapBlacklist()

	p := peer.ID("test")

	b.Add(p)
	if !b.Contains(p) {
		t.Fatal("peer not in the blacklist")
	}
}

func TestTimeCachedBlacklist(t *testing.T) {
	b, err := NewTimeCachedBlacklist(10 * time.Minute)
	if err != nil {
		t.Fatal(err)
	}

	p := peer.ID("test")

	b.Add(p)
	if !b.Contains(p) {
		t.Fatal("peer not in the blacklist")
	}
}

func TestBlacklist(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 2)
	psubs := getPubsubs(ctx, hosts)
	connect(t, hosts[0], hosts[1])

	sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 100)
	psubs[1].BlacklistPeer(hosts[0].ID())
	time.Sleep(time.Millisecond * 100)

	psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))

	wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()
	_, err = sub.Next(wctx)

	if err == nil {
		t.Fatal("got message from blacklisted peer")
	}
}

func TestBlacklist2(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 2)
	psubs := getPubsubs(ctx, hosts)
	connect(t, hosts[0], hosts[1])

	_, err := psubs[0].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
	if err != nil {
		t.Fatal(err)
	}

	sub1, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 100)
	psubs[1].BlacklistPeer(hosts[0].ID())
	time.Sleep(time.Millisecond * 100)

	psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))

	wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()
	_, err = sub1.Next(wctx)

	if err == nil {
		t.Fatal("got message from blacklisted peer")
	}
}

func TestBlacklist3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 2)
	psubs := getPubsubs(ctx, hosts)

	psubs[1].BlacklistPeer(hosts[0].ID())
	time.Sleep(time.Millisecond * 100)
	connect(t, hosts[0], hosts[1])

	sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 100)

	psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))

	wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
	defer cancel()
	_, err = sub.Next(wctx)

	if err == nil {
		t.Fatal("got message from blacklisted peer")
	}
}
go-libp2p-blossomsub/blossomsub.go (new file, 1984 lines)
File diff suppressed because it is too large.
go-libp2p-blossomsub/blossomsub_connmgr_test.go (new file, 172 lines)
@@ -0,0 +1,172 @@
package blossomsub

import (
	"context"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/libp2p/go-libp2p/core/host"
	swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"

	"github.com/libp2p/go-libp2p/core/peer"
	bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
	t.Skip("Test disabled with go-libp2p v0.22.0") // TODO: reenable test when updating to v0.23.0
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	oldBlossomSubD := BlossomSubD
	oldBlossomSubDlo := BlossomSubDlo
	oldBlossomSubDHi := BlossomSubDhi
	oldBlossomSubConnTagDecayInterval := BlossomSubConnTagDecayInterval
	oldBlossomSubConnTagMessageDeliveryCap := BlossomSubConnTagMessageDeliveryCap

	// set the BlossomSub D parameters low, so that we have some peers outside the mesh
	BlossomSubDlo = 3
	BlossomSubD = 3
	BlossomSubDhi = 3
	// also set the tag decay interval so we don't have to wait forever for tests
	BlossomSubConnTagDecayInterval = time.Second

	// set the cap for deliveries above BlossomSubConnTagValueMeshPeer, so the sybils
	// will be forced out even if they end up in someone's mesh
	BlossomSubConnTagMessageDeliveryCap = 50

	// reset globals after test
	defer func() {
		BlossomSubD = oldBlossomSubD
		BlossomSubDlo = oldBlossomSubDlo
		BlossomSubDhi = oldBlossomSubDHi
		BlossomSubConnTagDecayInterval = oldBlossomSubConnTagDecayInterval
		BlossomSubConnTagMessageDeliveryCap = oldBlossomSubConnTagMessageDeliveryCap
	}()

	decayClock := clock.NewMock()
	decayCfg := connmgr.DecayerCfg{
		Resolution: time.Second,
		Clock:      decayClock,
	}

	nHonest := 5
	nSquatter := 10
	connLimit := 10

	connmgrs := make([]*connmgr.BasicConnMgr, nHonest)
	honestHosts := make([]host.Host, nHonest)
	honestPeers := make(map[peer.ID]struct{})

	for i := 0; i < nHonest; i++ {
		var err error
		connmgrs[i], err = connmgr.NewConnManager(nHonest, connLimit,
			connmgr.WithGracePeriod(0),
			connmgr.WithSilencePeriod(time.Millisecond),
			connmgr.DecayerConfig(&decayCfg),
		)
		if err != nil {
			t.Fatal(err)
		}

		netw := swarmt.GenSwarm(t)
		defer netw.Close()
		h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i]))
		honestHosts[i] = h
		honestPeers[h.ID()] = struct{}{}
	}

	// use flood publishing, so non-mesh peers will still be delivering messages
	// to everyone
	psubs := getBlossomSubs(ctx, honestHosts,
		WithFloodPublish(true))

	// sybil squatters to be connected later
	sybilHosts := getNetHosts(t, ctx, nSquatter)
	for _, h := range sybilHosts {
		squatter := &sybilSquatter{h: h}
		h.SetStreamHandler(BlossomSubID_v11, squatter.handleStream)
	}

	// connect the honest hosts
	connectAll(t, honestHosts)

	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest-1 {
			t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns()))
		}
	}

	// subscribe everyone to the bitmask
	bitmask := []byte{0xff, 0x00, 0x00, 0x00}
	for _, ps := range psubs {
		_, err := ps.Subscribe(bitmask)
		if err != nil {
			t.Fatal(err)
		}
	}

	// sleep to allow meshes to form
	time.Sleep(2 * time.Second)

	// have all the hosts publish enough messages to ensure that they get some delivery credit
	nMessages := BlossomSubConnTagMessageDeliveryCap * 2
	for _, ps := range psubs {
		for i := 0; i < nMessages; i++ {
			ps.Publish(bitmask, []byte("hello"))
		}
	}

	// advance the fake time for the tag decay
	decayClock.Add(time.Second)

	// verify that they've given each other delivery connection tags
	tag := "pubsub-deliveries:test"
	for _, h := range honestHosts {
		for _, h2 := range honestHosts {
			if h.ID() == h2.ID() {
				continue
			}
			val := getTagValue(h.ConnManager(), h2.ID(), tag)
			if val == 0 {
				t.Errorf("Expected non-zero delivery tag value for peer %s", h2.ID())
			}
		}
	}

	// now connect the sybils to put pressure on the real hosts' connection managers
	allHosts := append(honestHosts, sybilHosts...)
	connectAll(t, allHosts)

	// verify that we have a bunch of connections
	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest+nSquatter-1 {
			t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns()))
		}
	}

	// force the connection managers to trim, so we don't need to muck about with timing as much
	for _, cm := range connmgrs {
		cm.TrimOpenConns(ctx)
	}

	// we should still have conns to all the honest peers, but not the sybils
	for _, h := range honestHosts {
		nHonestConns := 0
		nDishonestConns := 0
		for _, conn := range h.Network().Conns() {
			if _, ok := honestPeers[conn.RemotePeer()]; !ok {
				nDishonestConns++
			} else {
				nHonestConns++
			}
		}
		if nDishonestConns > connLimit-nHonest {
			t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns)
		}
		if nHonestConns != nHonest-1 {
			t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns)
		}
	}
}
go-libp2p-blossomsub/blossomsub_feat.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package blossomsub

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/protocol"
)

// BlossomSubFeatureTest is a feature test function; it takes a feature and a protocol ID and
// should return true if the feature is supported by the protocol
type BlossomSubFeatureTest = func(BlossomSubFeature, protocol.ID) bool

// BlossomSubFeature is a feature discriminant enum
type BlossomSubFeature int

const (
	// Protocol supports basic BlossomSub Mesh -- BlossomSub-v1.1 compatible
	BlossomSubFeatureMesh = iota
	// Protocol supports Peer eXchange on prune -- BlossomSub-v1.1 compatible
	BlossomSubFeaturePX
)

// BlossomSubDefaultProtocols is the default BlossomSub router protocol list
var BlossomSubDefaultProtocols = []protocol.ID{BlossomSubID_v11, FloodSubID}

// BlossomSubDefaultFeatures is the feature test function for the default BlossomSub protocols
func BlossomSubDefaultFeatures(feat BlossomSubFeature, proto protocol.ID) bool {
	switch feat {
	case BlossomSubFeatureMesh:
		return proto == BlossomSubID_v11
	case BlossomSubFeaturePX:
		return proto == BlossomSubID_v11
	default:
		return false
	}
}

// WithBlossomSubProtocols is a BlossomSub router option that configures a custom protocol list
// and feature test function
func WithBlossomSubProtocols(protos []protocol.ID, feature BlossomSubFeatureTest) Option {
	return func(ps *PubSub) error {
		gs, ok := ps.rt.(*BlossomSubRouter)
		if !ok {
			return fmt.Errorf("pubsub router is not BlossomSub")
		}

		gs.protos = protos
		gs.feature = feature

		return nil
	}
}
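The protocol list and feature test are the extension point here. As a sketch, an application could advertise its own protocol ID as fully featured while falling back to the defaults (`myAppProtocol` and `myAppFeatures` are hypothetical names, not from this commit):

```go
// myAppProtocol is a hypothetical application-specific protocol ID.
var myAppProtocol = protocol.ID("/myapp-blossomsub/1.0.0")

// myAppFeatures treats the custom protocol as supporting everything,
// and defers to BlossomSubDefaultFeatures for the stock protocols.
func myAppFeatures(feat BlossomSubFeature, proto protocol.ID) bool {
	return proto == myAppProtocol || BlossomSubDefaultFeatures(feat, proto)
}

// Wired up at construction time:
//   NewBlossomSub(ctx, host, WithBlossomSubProtocols(
//       append([]protocol.ID{myAppProtocol}, BlossomSubDefaultProtocols...),
//       myAppFeatures))
```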
go-libp2p-blossomsub/blossomsub_feat_test.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package blossomsub

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/protocol"
)

func TestDefaultBlossomSubFeatures(t *testing.T) {
	if BlossomSubDefaultFeatures(BlossomSubFeatureMesh, FloodSubID) {
		t.Fatal("floodsub should not support Mesh")
	}
	if !BlossomSubDefaultFeatures(BlossomSubFeatureMesh, BlossomSubID_v11) {
		t.Fatal("BlossomSub-v1.1 should support Mesh")
	}

	if BlossomSubDefaultFeatures(BlossomSubFeaturePX, FloodSubID) {
		t.Fatal("floodsub should not support PX")
	}
	if !BlossomSubDefaultFeatures(BlossomSubFeaturePX, BlossomSubID_v11) {
		t.Fatal("BlossomSub-v1.1 should support PX")
	}
}

func TestBlossomSubCustomProtocols(t *testing.T) {
	customsub := protocol.ID("customsub/1.0.0")
	protos := []protocol.ID{customsub, FloodSubID}
	features := func(feat BlossomSubFeature, proto protocol.ID) bool {
		return proto == customsub
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	hosts := getNetHosts(t, ctx, 3)

	gsubs := getBlossomSubs(ctx, hosts[:2], WithBlossomSubProtocols(protos, features))
	fsub := getPubsub(ctx, hosts[2])
	psubs := append(gsubs, fsub)

	connectAll(t, hosts)

	bitmask := []byte{0xff, 0x00, 0x00, 0x00}
	var subs []*Subscription
	for _, ps := range psubs {
		subch, err := ps.Subscribe(bitmask)
		if err != nil {
			t.Fatal(err)
		}

		subs = append(subs, subch)
	}

	// wait for heartbeats to build mesh
	time.Sleep(time.Second * 2)

	// check the meshes of the gsubs, the BlossomSub meshes should include each other but not the
	// floodsub peer
	gsubs[0].eval <- func() {
		gs := gsubs[0].rt.(*BlossomSubRouter)

		_, ok := gs.mesh[string(bitmask)][hosts[1].ID()]
		if !ok {
			t.Fatal("expected gs0 to have gs1 in its mesh")
		}

		_, ok = gs.mesh[string(bitmask)][hosts[2].ID()]
		if ok {
			t.Fatal("expected gs0 to not have fs in its mesh")
		}
	}

	gsubs[1].eval <- func() {
		gs := gsubs[1].rt.(*BlossomSubRouter)

		_, ok := gs.mesh[string(bitmask)][hosts[0].ID()]
		if !ok {
			t.Fatal("expected gs1 to have gs0 in its mesh")
		}

		_, ok = gs.mesh[string(bitmask)][hosts[2].ID()]
		if ok {
			t.Fatal("expected gs1 to not have fs in its mesh")
		}
	}

	// send some messages
	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("%d it's not quite a floooooood %d", i, i))

		owner := rand.Intn(len(psubs))

		psubs[owner].Publish(bitmask, msg)

		for _, sub := range subs {
			got, err := sub.Next(ctx)
			if err != nil {
				t.Fatal(sub.err)
			}
			if !bytes.Equal(msg, got.Data) {
				t.Fatal("got wrong message!")
			}
		}
	}
}
go-libp2p-blossomsub/blossomsub_matchfn_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package blossomsub

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/protocol"
)

func TestBlossomSubMatchingFn(t *testing.T) {
	customsubA100 := protocol.ID("/customsub_a/1.0.0")
	customsubA101Beta := protocol.ID("/customsub_a/1.0.1-beta")
	customsubB100 := protocol.ID("/customsub_b/1.0.0")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h := getNetHosts(t, ctx, 4)
	psubs := []*PubSub{
		getBlossomSub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA100, BlossomSubID_v11}, BlossomSubDefaultFeatures)),
		getBlossomSub(ctx, h[1], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA101Beta}, BlossomSubDefaultFeatures)),
		getBlossomSub(ctx, h[2], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{BlossomSubID_v11}, BlossomSubDefaultFeatures)),
		getBlossomSub(ctx, h[3], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubB100}, BlossomSubDefaultFeatures)),
	}

	connect(t, h[0], h[1])
	connect(t, h[0], h[2])
	connect(t, h[0], h[3])

	// verify that the peers are connected
	time.Sleep(2 * time.Second)
	for i := 1; i < len(h); i++ {
		if len(h[0].Network().ConnsToPeer(h[i].ID())) == 0 {
			t.Fatal("expected a connection between peers")
		}
	}

	// build the mesh
	var subs []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
		if err != nil {
			t.Fatal(err)
		}
		subs = append(subs, sub)
	}

	time.Sleep(time.Second)

	// publish a message
	msg := []byte("message")
	psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg)

	assertReceive(t, subs[0], msg)
	assertReceive(t, subs[1], msg) // Should match via semver over CustomSub name, ignoring the version
	assertReceive(t, subs[2], msg) // Should match via BlossomSubID_v11

	// No message should be received because customsubA and customsubB have different names
	ctxTimeout, timeoutCancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer timeoutCancel()
	received := false
	for {
		msg, err := subs[3].Next(ctxTimeout)
		if err != nil {
			break
		}
		if msg != nil {
			received = true
		}
	}
	if received {
		t.Fatal("Should not have received a message")
	}
}

func protocolNameMatch(base protocol.ID) func(protocol.ID) bool {
	return func(check protocol.ID) bool {
		baseName := strings.Split(string(base), "/")[1]
		checkName := strings.Split(string(check), "/")[1]
		return baseName == checkName
	}
}
813
go-libp2p-blossomsub/blossomsub_spam_test.go
Normal file
813
go-libp2p-blossomsub/blossomsub_spam_test.go
Normal file
@ -0,0 +1,813 @@
|
|||||||
|
package blossomsub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"math/rand"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-libp2p/core/host"
|
||||||
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
"github.com/libp2p/go-libp2p/core/protocol"
|
||||||
|
|
||||||
|
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-msgio/protoio"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test that when BlossomSub receives too many IWANT messages from a peer
|
||||||
|
// for the same message ID, it cuts off the peer
|
||||||
|
func TestBlossomSubAttackSpamIWANT(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Create legitimate and attacker hosts
|
||||||
|
hosts := getNetHosts(t, ctx, 2)
|
||||||
|
legit := hosts[0]
|
||||||
|
attacker := hosts[1]
|
||||||
|
|
||||||
|
// Set up BlossomSub on the legit host
|
||||||
|
ps, err := NewBlossomSub(ctx, legit)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe to mybitmask on the legit host
|
||||||
|
mybitmask := []byte{0xff, 0x00, 0x00}
|
||||||
|
_, err = ps.Subscribe(mybitmask)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Used to publish a message with random data
|
||||||
|
publishMsg := func() {
|
||||||
|
data := make([]byte, 16)
|
||||||
|
rand.Read(data)
|
||||||
|
|
||||||
|
if err = ps.Publish(mybitmask, data); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait a bit after the last message before checking we got the
|
||||||
|
// right number of messages
|
||||||
|
msgWaitMax := time.Second
|
||||||
|
msgCount := 0
|
||||||
|
msgTimer := time.NewTimer(msgWaitMax)
|
||||||
|
|
||||||
|
// Checks we received the right number of messages
|
||||||
|
checkMsgCount := func() {
|
||||||
|
// After the original message from the legit host, we keep sending
|
||||||
|
// IWANT until it stops replying. So the number of messages is
|
||||||
|
// <original message> + BlossomSubGossipRetransmission
|
||||||
|
exp := 1 + BlossomSubGossipRetransmission
|
||||||
|
if msgCount != exp {
|
||||||
|
t.Fatalf("Expected %d messages, got %d", exp, msgCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for the timer to expire
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-msgTimer.C:
|
||||||
|
checkMsgCount()
|
||||||
|
cancel()
|
||||||
|
return
|
||||||
|
case <-ctx.Done():
|
||||||
|
checkMsgCount()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
|
||||||
|
// When the legit host connects it will send us its subscriptions
|
||||||
|
for _, sub := range irpc.GetSubscriptions() {
|
||||||
|
if sub.GetSubscribe() {
|
||||||
|
// Reply by subcribing to the bitmask and grafting to the peer
|
||||||
|
writeMsg(&pb.RPC{
|
||||||
|
Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
|
||||||
|
Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}},
|
||||||
|
})
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// Wait for a short interval to make sure the legit host
|
||||||
|
// received and processed the subscribe + graft
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
// Publish a message from the legit host
|
||||||
|
publishMsg()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Each time the legit host sends a message
|
||||||
|
for _, msg := range irpc.GetPublish() {
|
||||||
|
// Increment the number of messages and reset the timer
|
||||||
|
msgCount++
|
||||||
|
msgTimer.Reset(msgWaitMax)
|
||||||
|
|
||||||
|
// Shouldn't get more than the expected number of messages
|
||||||
|
exp := 1 + BlossomSubGossipRetransmission
|
||||||
|
if msgCount > exp {
|
||||||
|
cancel()
|
||||||
|
t.Fatal("Received too many responses")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send an IWANT with the message ID, causing the legit host
|
||||||
|
// to send another message (until it cuts off the attacker for
|
||||||
|
// being spammy)
|
||||||
|
iwantlst := []string{DefaultMsgIdFn(msg)}
|
||||||
|
iwant := []*pb.ControlIWant{{MessageIDs: iwantlst}}
|
||||||
|
orpc := rpcWithControl(nil, nil, iwant, nil, nil)
|
||||||
|
writeMsg(&orpc.RPC)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
connect(t, hosts[0], hosts[1])
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
}

// Test that BlossomSub only responds to IHAVE with IWANT once per heartbeat
func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
	originalBlossomSubIWantFollowupTime := BlossomSubIWantFollowupTime
	BlossomSubIWantFollowupTime = 10 * time.Second
	defer func() {
		BlossomSubIWantFollowupTime = originalBlossomSubIWantFollowupTime
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create legitimate and attacker hosts
	hosts := getNetHosts(t, ctx, 2)
	legit := hosts[0]
	attacker := hosts[1]

	// Set up BlossomSub on the legit host
	ps, err := NewBlossomSub(ctx, legit,
		WithPeerScore(
			&PeerScoreParams{
				AppSpecificScore:       func(peer.ID) float64 { return 0 },
				BehaviourPenaltyWeight: -1,
				BehaviourPenaltyDecay:  ScoreParameterDecay(time.Minute),
				DecayInterval:          DefaultDecayInterval,
				DecayToZero:            DefaultDecayToZero,
			},
			&PeerScoreThresholds{
				GossipThreshold:   -100,
				PublishThreshold:  -500,
				GraylistThreshold: -1000,
			}))
	if err != nil {
		t.Fatal(err)
	}

	// Subscribe to mybitmask on the legit host
	mybitmask := []byte{0xff, 0x00, 0x00}
	_, err = ps.Subscribe(mybitmask)
	if err != nil {
		t.Fatal(err)
	}

	iWantCount := 0
	iWantCountMx := sync.Mutex{}
	getIWantCount := func() int {
		iWantCountMx.Lock()
		defer iWantCountMx.Unlock()
		return iWantCount
	}
	addIWantCount := func(i int) {
		iWantCountMx.Lock()
		defer iWantCountMx.Unlock()
		iWantCount += i
	}

	newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
		// When the legit host connects it will send us its subscriptions
		for _, sub := range irpc.GetSubscriptions() {
			if sub.GetSubscribe() {
				// Reply by subscribing to the bitmask and grafting to the peer
				writeMsg(&pb.RPC{
					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
					Control:       &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}},
				})

				go func() {
					defer cancel()

					// Wait for a short interval to make sure the legit host
					// received and processed the subscribe + graft
					time.Sleep(20 * time.Millisecond)

					// Send a bunch of IHAVEs
					for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ {
						ihavelst := []string{"someid" + strconv.Itoa(i)}
						ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}}
						orpc := rpcWithControl(nil, ihave, nil, nil, nil)
						writeMsg(&orpc.RPC)
					}

					select {
					case <-ctx.Done():
						return
					case <-time.After(BlossomSubHeartbeatInterval):
					}

					// Should have hit the maximum number of IWANTs per peer
					// per heartbeat
					iwc := getIWantCount()
					if iwc > BlossomSubMaxIHaveLength {
						t.Errorf("Expecting max %d IWANTs per heartbeat but received %d", BlossomSubMaxIHaveLength, iwc)
						return // cannot call t.Fatalf in a non-test goroutine
					}
					firstBatchCount := iwc

					// the score should still be 0 because we haven't broken any promises yet
					score := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
					if score != 0 {
						t.Errorf("Expected 0 score, but got %f", score)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Send a bunch of IHAVEs
					for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ {
						ihavelst := []string{"someid" + strconv.Itoa(i+100)}
						ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}}
						orpc := rpcWithControl(nil, ihave, nil, nil, nil)
						writeMsg(&orpc.RPC)
					}

					select {
					case <-ctx.Done():
						return
					case <-time.After(BlossomSubHeartbeatInterval):
					}

					// Should have sent more IWANTs after the heartbeat
					iwc = getIWantCount()
					if iwc == firstBatchCount {
						t.Error("Expecting to receive more IWANTs after heartbeat but did not")
						return // cannot call t.Fatalf in a non-test goroutine
					}
					// Should not be more than the maximum per heartbeat
					if iwc-firstBatchCount > BlossomSubMaxIHaveLength {
						t.Errorf("Expecting max %d IWANTs per heartbeat but received %d", BlossomSubMaxIHaveLength, iwc-firstBatchCount)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					select {
					case <-ctx.Done():
						return
					case <-time.After(BlossomSubIWantFollowupTime):
					}

					// The score should now be negative because of broken promises
					score = ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
					if score >= 0 {
						t.Errorf("Expected negative score, but got %f", score)
						return // cannot call t.Fatalf in a non-test goroutine
					}
				}()
			}
		}

		// Record the count of received IWANT messages
		if ctl := irpc.GetControl(); ctl != nil {
			addIWantCount(len(ctl.GetIwant()))
		}
	})

	connect(t, hosts[0], hosts[1])

	<-ctx.Done()
}

// Test that when BlossomSub receives GRAFT for an unknown bitmask, it ignores
// the request
func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create legitimate and attacker hosts
	hosts := getNetHosts(t, ctx, 2)
	legit := hosts[0]
	attacker := hosts[1]

	// Set up BlossomSub on the legit host
	ps, err := NewBlossomSub(ctx, legit)
	if err != nil {
		t.Fatal(err)
	}

	// Subscribe to mybitmask on the legit host
	mybitmask := []byte{0xff, 0x00, 0x00}
	_, err = ps.Subscribe(mybitmask)
	if err != nil {
		t.Fatal(err)
	}

	// Checks that we haven't received any PRUNE message
	pruneCount := 0
	checkForPrune := func() {
		// We send a GRAFT for a non-existent bitmask so we shouldn't
		// receive a PRUNE in response
		if pruneCount != 0 {
			t.Fatalf("Got %d unexpected PRUNE messages", pruneCount)
		}
	}

	newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
		// When the legit host connects it will send us its subscriptions
		for _, sub := range irpc.GetSubscriptions() {
			if sub.GetSubscribe() {
				// Reply by subscribing to the bitmask and grafting to the peer
				writeMsg(&pb.RPC{
					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
					Control:       &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}},
				})

				// Graft to the peer on a non-existent bitmask
				nonExistentBitmask := []byte{0xff, 0x00, 0x00, 0xff, 0xff, 0xff}
				writeMsg(&pb.RPC{
					Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: nonExistentBitmask}}},
				})

				go func() {
					// Wait for a short interval to make sure the legit host
					// received and processed the subscribe + graft
					time.Sleep(100 * time.Millisecond)

					// We shouldn't get any prune messages because the bitmask
					// doesn't exist
					checkForPrune()
					cancel()
				}()
			}
		}

		// Record the count of received PRUNE messages
		if ctl := irpc.GetControl(); ctl != nil {
			pruneCount += len(ctl.GetPrune())
		}
	})

	connect(t, hosts[0], hosts[1])

	<-ctx.Done()
}

// Test that when BlossomSub receives GRAFT for a peer that has been PRUNED,
// it penalizes through P7 and eventually graylists and ignores the requests if the
// GRAFTs are coming too fast
func TestBlossomSubAttackGRAFTDuringBackoff(t *testing.T) {
	originalBlossomSubPruneBackoff := BlossomSubPruneBackoff
	BlossomSubPruneBackoff = 200 * time.Millisecond
	originalBlossomSubGraftFloodThreshold := BlossomSubGraftFloodThreshold
	BlossomSubGraftFloodThreshold = 100 * time.Millisecond
	defer func() {
		BlossomSubPruneBackoff = originalBlossomSubPruneBackoff
		BlossomSubGraftFloodThreshold = originalBlossomSubGraftFloodThreshold
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create legitimate and attacker hosts
	hosts := getNetHosts(t, ctx, 2)
	legit := hosts[0]
	attacker := hosts[1]

	// Set up BlossomSub on the legit host
	ps, err := NewBlossomSub(ctx, legit,
		WithPeerScore(
			&PeerScoreParams{
				AppSpecificScore:       func(peer.ID) float64 { return 0 },
				BehaviourPenaltyWeight: -100,
				BehaviourPenaltyDecay:  ScoreParameterDecay(time.Minute),
				DecayInterval:          DefaultDecayInterval,
				DecayToZero:            DefaultDecayToZero,
			},
			&PeerScoreThresholds{
				GossipThreshold:   -100,
				PublishThreshold:  -500,
				GraylistThreshold: -1000,
			}))
	if err != nil {
		t.Fatal(err)
	}

	// Subscribe to mybitmask on the legit host
	mybitmask := []byte{0xff, 0x00, 0x00}
	_, err = ps.Subscribe(mybitmask)
	if err != nil {
		t.Fatal(err)
	}

	pruneCount := 0
	pruneCountMx := sync.Mutex{}
	getPruneCount := func() int {
		pruneCountMx.Lock()
		defer pruneCountMx.Unlock()
		return pruneCount
	}
	addPruneCount := func(i int) {
		pruneCountMx.Lock()
		defer pruneCountMx.Unlock()
		pruneCount += i
	}

	newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
		// When the legit host connects it will send us its subscriptions
		for _, sub := range irpc.GetSubscriptions() {
			if sub.GetSubscribe() {
				// Reply by subscribing to the bitmask and grafting to the peer
				graft := []*pb.ControlGraft{{Bitmask: sub.Bitmask}}
				writeMsg(&pb.RPC{
					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
					Control:       &pb.ControlMessage{Graft: graft},
				})

				go func() {
					defer cancel()

					// Wait for a short interval to make sure the legit host
					// received and processed the subscribe + graft
					time.Sleep(20 * time.Millisecond)

					// No PRUNE should have been sent at this stage
					pc := getPruneCount()
					if pc != 0 {
						t.Errorf("Expected %d PRUNE messages but got %d", 0, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Send a PRUNE to remove the attacker node from the legit
					// host's mesh
					var prune []*pb.ControlPrune
					prune = append(prune, &pb.ControlPrune{Bitmask: sub.Bitmask})
					writeMsg(&pb.RPC{
						Control: &pb.ControlMessage{Prune: prune},
					})

					select {
					case <-ctx.Done():
						return
					case <-time.After(20 * time.Millisecond):
					}

					// No PRUNE should have been sent at this stage
					pc = getPruneCount()
					if pc != 0 {
						t.Errorf("Expected %d PRUNE messages but got %d", 0, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// wait for the BlossomSubGraftFloodThreshold to pass before attempting another graft
					time.Sleep(BlossomSubGraftFloodThreshold + time.Millisecond)

					// Send a GRAFT to attempt to rejoin the mesh
					writeMsg(&pb.RPC{
						Control: &pb.ControlMessage{Graft: graft},
					})

					select {
					case <-ctx.Done():
						return
					case <-time.After(20 * time.Millisecond):
					}

					// We should have been penalized by the peer for sending before the backoff has expired
					// but should still receive a PRUNE because we haven't dropped below GraylistThreshold
					// yet.
					pc = getPruneCount()
					if pc != 1 {
						t.Errorf("Expected %d PRUNE messages but got %d", 1, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					score1 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
					if score1 >= 0 {
						t.Errorf("Expected negative score, but got %f", score1)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Send a GRAFT again to attempt to rejoin the mesh
					writeMsg(&pb.RPC{
						Control: &pb.ControlMessage{Graft: graft},
					})

					select {
					case <-ctx.Done():
						return
					case <-time.After(20 * time.Millisecond):
					}

					// we are before the flood threshold so we should be penalized twice, but still get
					// a PRUNE because we are before the flood threshold
					pc = getPruneCount()
					if pc != 2 {
						t.Errorf("Expected %d PRUNE messages but got %d", 2, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					score2 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
					if score2 >= score1 {
						t.Errorf("Expected score below %f, but got %f", score1, score2)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Send another GRAFT; this should get us a PRUNE, but penalize us below the graylist threshold
					writeMsg(&pb.RPC{
						Control: &pb.ControlMessage{Graft: graft},
					})

					select {
					case <-ctx.Done():
						return
					case <-time.After(20 * time.Millisecond):
					}

					pc = getPruneCount()
					if pc != 3 {
						t.Errorf("Expected %d PRUNE messages but got %d", 3, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					score3 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
					if score3 >= score2 {
						t.Errorf("Expected score below %f, but got %f", score2, score3)
						return // cannot call t.Fatalf in a non-test goroutine
					}
					if score3 >= -1000 {
						t.Errorf("Expected score below %f, but got %f", -1000.0, score3)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Wait for the PRUNE backoff to expire and try again; this time we should fail
					// because we are below the graylist threshold, so our RPC should be ignored and
					// we should get no PRUNE back
					select {
					case <-ctx.Done():
						return
					case <-time.After(BlossomSubPruneBackoff + time.Millisecond):
					}

					writeMsg(&pb.RPC{
						Control: &pb.ControlMessage{Graft: graft},
					})

					select {
					case <-ctx.Done():
						return
					case <-time.After(20 * time.Millisecond):
					}

					pc = getPruneCount()
					if pc != 3 {
						t.Errorf("Expected %d PRUNE messages but got %d", 3, pc)
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// make sure we are _not_ in the mesh
					res := make(chan bool)
					ps.eval <- func() {
						mesh := ps.rt.(*BlossomSubRouter).mesh[string(mybitmask)]
						_, inMesh := mesh[attacker.ID()]
						res <- inMesh
					}

					inMesh := <-res
					if inMesh {
						t.Error("Expected to not be in the mesh of the legitimate host")
						return // cannot call t.Fatal in a non-test goroutine
					}
				}()
			}
		}

		if ctl := irpc.GetControl(); ctl != nil {
			addPruneCount(len(ctl.GetPrune()))
		}
	})

	connect(t, hosts[0], hosts[1])

	<-ctx.Done()
}
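
The score arithmetic this test relies on is the P7 behaviour penalty. Assuming this fork keeps go-libp2p-pubsub's formula (a counter of backoff violations, squared and multiplied by BehaviourPenaltyWeight once past a threshold), the contribution looks roughly like the sketch below; the function name and threshold handling are illustrative, not this repo's exact code:

// Hypothetical sketch of the P7 behaviour penalty term.
// With BehaviourPenaltyWeight = -100 (as in the test above), each extra
// GRAFT during backoff grows the counter, and the squared term drives the
// score quadratically below GraylistThreshold.
func behaviourPenaltyTerm(weight, counter, threshold float64) float64 {
	excess := counter - threshold
	if excess <= 0 {
		return 0
	}
	return weight * excess * excess
}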

type gsAttackInvalidMsgTracer struct {
	rejectCount int
}

func (t *gsAttackInvalidMsgTracer) Trace(evt *pb.TraceEvent) {
	// fmt.Printf(" %s %s\n", evt.Type, evt)
	if evt.GetType() == pb.TraceEvent_REJECT_MESSAGE {
		t.rejectCount++
	}
}

// Test that when BlossomSub receives a lot of invalid messages from
// a peer it should graylist the peer
func TestBlossomSubAttackInvalidMessageSpam(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create legitimate and attacker hosts
	hosts := getNetHosts(t, ctx, 2)
	legit := hosts[0]
	attacker := hosts[1]

	mybitmask := []byte{0xff, 0x00, 0x00}

	// Create parameters with reasonable default values
	params := &PeerScoreParams{
		AppSpecificScore:            func(peer.ID) float64 { return 0 },
		IPColocationFactorWeight:    0,
		IPColocationFactorThreshold: 1,
		DecayInterval:               5 * time.Second,
		DecayToZero:                 0.01,
		RetainScore:                 10 * time.Second,
		Bitmasks:                    make(map[string]*BitmaskScoreParams),
	}
	params.Bitmasks[string(mybitmask)] = &BitmaskScoreParams{
		BitmaskWeight:                   0.25,
		TimeInMeshWeight:                0.0027,
		TimeInMeshQuantum:               time.Second,
		TimeInMeshCap:                   3600,
		FirstMessageDeliveriesWeight:    0.664,
		FirstMessageDeliveriesDecay:     0.9916,
		FirstMessageDeliveriesCap:       1500,
		MeshMessageDeliveriesWeight:     -0.25,
		MeshMessageDeliveriesDecay:      0.97,
		MeshMessageDeliveriesCap:        400,
		MeshMessageDeliveriesThreshold:  100,
		MeshMessageDeliveriesActivation: 30 * time.Second,
		MeshMessageDeliveriesWindow:     5 * time.Minute,
		MeshFailurePenaltyWeight:        -0.25,
		MeshFailurePenaltyDecay:         0.997,
		InvalidMessageDeliveriesWeight:  -99,
		InvalidMessageDeliveriesDecay:   0.9994,
	}
	thresholds := &PeerScoreThresholds{
		GossipThreshold:   -100,
		PublishThreshold:  -200,
		GraylistThreshold: -300,
		AcceptPXThreshold: 0,
	}

	// Set up BlossomSub on the legit host
	tracer := &gsAttackInvalidMsgTracer{}
	ps, err := NewBlossomSub(ctx, legit,
		WithEventTracer(tracer),
		WithPeerScore(params, thresholds),
	)
	if err != nil {
		t.Fatal(err)
	}

	attackerScore := func() float64 {
		return ps.rt.(*BlossomSubRouter).score.Score(attacker.ID())
	}

	// Subscribe to mybitmask on the legit host
	_, err = ps.Subscribe(mybitmask)
	if err != nil {
		t.Fatal(err)
	}

	pruneCount := 0
	pruneCountMx := sync.Mutex{}
	getPruneCount := func() int {
		pruneCountMx.Lock()
		defer pruneCountMx.Unlock()
		return pruneCount
	}
	addPruneCount := func(i int) {
		pruneCountMx.Lock()
		defer pruneCountMx.Unlock()
		pruneCount += i
	}

	newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
		// When the legit host connects it will send us its subscriptions
		for _, sub := range irpc.GetSubscriptions() {
			if sub.GetSubscribe() {
				// Reply by subscribing to the bitmask and grafting to the peer
				writeMsg(&pb.RPC{
					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
					Control:       &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}},
				})

				go func() {
					defer cancel()

					// Attacker score should start at zero
					if attackerScore() != 0 {
						t.Errorf("Expected attacker score to be zero but it's %f", attackerScore())
						return // cannot call t.Fatalf in a non-test goroutine
					}

					// Send a bunch of messages with no signature (these will
					// fail validation and reduce the attacker's score)
					for i := 0; i < 100; i++ {
						msg := &pb.Message{
							Data:    []byte("some data" + strconv.Itoa(i)),
							Bitmask: mybitmask,
							From:    []byte(attacker.ID()),
							Seqno:   []byte{byte(i + 1)},
						}
						writeMsg(&pb.RPC{
							Publish: []*pb.Message{msg},
						})
					}

					// Wait for the initial heartbeat, plus a bit of padding
					select {
					case <-ctx.Done():
						return
					case <-time.After(100*time.Millisecond + BlossomSubHeartbeatInitialDelay):
					}

					// The attacker's score should now have fallen below zero
					if attackerScore() >= 0 {
						t.Errorf("Expected attacker score to be less than zero but it's %f", attackerScore())
						return // cannot call t.Fatalf in a non-test goroutine
					}
					// There should be several rejected messages (because the signature was invalid)
					if tracer.rejectCount == 0 {
						t.Error("Expected message rejection but got none")
						return // cannot call t.Fatal in a non-test goroutine
					}
					// The legit node should have sent a PRUNE message
					pc := getPruneCount()
					if pc == 0 {
						t.Error("Expected attacker node to be PRUNED when score drops low enough")
						return // cannot call t.Fatal in a non-test goroutine
					}
				}()
			}
		}

		if ctl := irpc.GetControl(); ctl != nil {
			addPruneCount(len(ctl.GetPrune()))
		}
	})

	connect(t, hosts[0], hosts[1])

	<-ctx.Done()
}

type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC)

func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) {
	// Listen on the BlossomSub protocol
	const BlossomSubID = protocol.ID("/meshsub/1.0.0")
	const maxMessageSize = 1024 * 1024
	attacker.SetStreamHandler(BlossomSubID, func(stream network.Stream) {
		// When an incoming stream is opened, set up an outgoing stream
		p := stream.Conn().RemotePeer()
		ostream, err := attacker.NewStream(ctx, p, BlossomSubID)
		if err != nil {
			t.Fatal(err)
		}

		r := protoio.NewDelimitedReader(stream, maxMessageSize)
		w := protoio.NewDelimitedWriter(ostream)

		var irpc pb.RPC

		writeMsg := func(rpc *pb.RPC) {
			if err = w.WriteMsg(rpc); err != nil {
				t.Fatalf("error writing RPC: %s", err)
			}
		}

		// Keep reading messages and responding
		for {
			// Bail out when the test finishes
			if ctx.Err() != nil {
				return
			}

			irpc.Reset()

			err := r.ReadMsg(&irpc)

			// Bail out when the test finishes
			if ctx.Err() != nil {
				return
			}

			if err != nil {
				t.Fatal(err)
			}

			onReadMsg(writeMsg, &irpc)
		}
	})
}
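
The attack tests above all follow the same shape around this helper: mirror the legit host's subscription (plus whatever control traffic the scenario needs) from the onReadMsg callback, and tally what comes back. A condensed sketch of that pattern, assuming the same test context; the counting is illustrative:

// Sketch: minimal onReadMsg callback in the style of the tests above.
publishes := 0
newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
	for _, sub := range irpc.GetSubscriptions() {
		if sub.GetSubscribe() {
			// Mirror the subscription so the legit host keeps talking to us.
			writeMsg(&pb.RPC{
				Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}},
			})
		}
	}
	publishes += len(irpc.GetPublish())
})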
2502
go-libp2p-blossomsub/blossomsub_test.go
Normal file
File diff suppressed because it is too large
230
go-libp2p-blossomsub/comm.go
Normal file
@ -0,0 +1,230 @@
package blossomsub

import (
	"context"
	"encoding/binary"
	"io"
	"time"

	pool "github.com/libp2p/go-buffer-pool"
	"github.com/multiformats/go-varint"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-msgio"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// get the initial RPC containing all of our subscriptions to send to new peers
func (p *PubSub) getHelloPacket() *RPC {
	var rpc RPC

	subscriptions := make(map[string]bool)

	for t := range p.mySubs {
		subscriptions[t] = true
	}

	for t := range p.myRelays {
		subscriptions[t] = true
	}

	for t := range subscriptions {
		as := &pb.RPC_SubOpts{
			Bitmask:   []byte(t),
			Subscribe: true,
		}
		rpc.Subscriptions = append(rpc.Subscriptions, as)
	}
	return &rpc
}

func (p *PubSub) handleNewStream(s network.Stream) {
	peer := s.Conn().RemotePeer()

	p.inboundStreamsMx.Lock()
	other, dup := p.inboundStreams[peer]
	if dup {
		log.Debugf("duplicate inbound stream from %s; resetting other stream", peer)
		other.Reset()
	}
	p.inboundStreams[peer] = s
	p.inboundStreamsMx.Unlock()

	defer func() {
		p.inboundStreamsMx.Lock()
		if p.inboundStreams[peer] == s {
			delete(p.inboundStreams, peer)
		}
		p.inboundStreamsMx.Unlock()
	}()

	r := msgio.NewVarintReaderSize(s, p.maxMessageSize)
	for {
		msgbytes, err := r.ReadMsg()
		if err != nil {
			r.ReleaseMsg(msgbytes)
			if err != io.EOF {
				s.Reset()
				log.Debugf("error reading rpc from %s: %s", s.Conn().RemotePeer(), err)
			} else {
				// Just be nice. They probably won't read this
				// but it doesn't hurt to send it.
				s.Close()
			}

			return
		}

		rpc := new(RPC)
		err = rpc.Unmarshal(msgbytes)
		r.ReleaseMsg(msgbytes)
		if err != nil {
			s.Reset()
			log.Warnf("bogus rpc from %s: %s", s.Conn().RemotePeer(), err)
			return
		}

		rpc.from = peer
		select {
		case p.incoming <- rpc:
		case <-p.ctx.Done():
			// Close is useless because the other side isn't reading.
			s.Reset()
			return
		}
	}
}

func (p *PubSub) notifyPeerDead(pid peer.ID) {
	p.peerDeadPrioLk.RLock()
	p.peerDeadMx.Lock()
	p.peerDeadPend[pid] = struct{}{}
	p.peerDeadMx.Unlock()
	p.peerDeadPrioLk.RUnlock()

	select {
	case p.peerDead <- struct{}{}:
	default:
	}
}

func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) {
	s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...)
	if err != nil {
		log.Debug("opening new stream to peer: ", err, pid)

		select {
		case p.newPeerError <- pid:
		case <-ctx.Done():
		}

		return
	}

	go p.handleSendingMessages(ctx, s, outgoing)
	go p.handlePeerDead(s)
	select {
	case p.newPeerStream <- s:
	case <-ctx.Done():
	}
}

func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) {
	select {
	case <-time.After(backoff):
		p.handleNewPeer(ctx, pid, outgoing)
	case <-ctx.Done():
		return
	}
}

func (p *PubSub) handlePeerDead(s network.Stream) {
	pid := s.Conn().RemotePeer()

	_, err := s.Read([]byte{0})
	if err == nil {
		log.Debugf("unexpected message from %s", pid)
	}

	s.Reset()
	p.notifyPeerDead(pid)
}

func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) {
	writeRpc := func(rpc *RPC) error {
		size := uint64(rpc.Size())

		buf := pool.Get(varint.UvarintSize(size) + int(size))
		defer pool.Put(buf)

		n := binary.PutUvarint(buf, size)
		_, err := rpc.MarshalTo(buf[n:])
		if err != nil {
			return err
		}

		_, err = s.Write(buf)
		return err
	}

	defer s.Close()
	for {
		select {
		case rpc, ok := <-outgoing:
			if !ok {
				return
			}

			err := writeRpc(rpc)
			if err != nil {
				s.Reset()
				log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err)
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

func rpcWithSubs(subs ...*pb.RPC_SubOpts) *RPC {
	return &RPC{
		RPC: pb.RPC{
			Subscriptions: subs,
		},
	}
}

func rpcWithMessages(msgs ...*pb.Message) *RPC {
	return &RPC{RPC: pb.RPC{Publish: msgs}}
}

func rpcWithControl(msgs []*pb.Message,
	ihave []*pb.ControlIHave,
	iwant []*pb.ControlIWant,
	graft []*pb.ControlGraft,
	prune []*pb.ControlPrune) *RPC {
	return &RPC{
		RPC: pb.RPC{
			Publish: msgs,
			Control: &pb.ControlMessage{
				Ihave: ihave,
				Iwant: iwant,
				Graft: graft,
				Prune: prune,
			},
		},
	}
}

func copyRPC(rpc *RPC) *RPC {
	res := new(RPC)
	*res = *rpc
	if rpc.Control != nil {
		res.Control = new(pb.ControlMessage)
		*res.Control = *rpc.Control
	}
	return res
}
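
For reference, handleSendingMessages and handleNewStream above agree on a simple wire format: each RPC is a uvarint length prefix followed by the protobuf-encoded body. A minimal standalone sketch of that framing, using the same varint and binary packages already imported; the helper name is illustrative:

// frame is a hypothetical helper showing the framing writeRpc produces:
// uvarint(len(payload)) followed by payload. handleNewStream's
// msgio.NewVarintReaderSize reads exactly this format back.
func frame(payload []byte) []byte {
	size := uint64(len(payload))
	buf := make([]byte, varint.UvarintSize(size)+int(size))
	n := binary.PutUvarint(buf, size)
	copy(buf[n:], payload)
	return buf
}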
348
go-libp2p-blossomsub/discovery.go
Normal file
@ -0,0 +1,348 @@
package blossomsub

import (
	"context"
	"math/rand"
	"time"

	"github.com/libp2p/go-libp2p/core/discovery"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	discimpl "github.com/libp2p/go-libp2p/p2p/discovery/backoff"
)

var (
	// poll interval

	// DiscoveryPollInitialDelay is how long the discovery system waits after it first starts before polling
	DiscoveryPollInitialDelay = 0 * time.Millisecond
	// DiscoveryPollInterval is approximately how long the discovery system waits in between checks for whether
	// more peers are needed for any bitmask
	DiscoveryPollInterval = 1 * time.Second
)

// interval at which to retry advertisements when they fail.
const discoveryAdvertiseRetryInterval = 2 * time.Minute

type DiscoverOpt func(*discoverOptions) error

type discoverOptions struct {
	connFactory BackoffConnectorFactory
	opts        []discovery.Option
}

func defaultDiscoverOptions() *discoverOptions {
	rngSrc := rand.NewSource(rand.Int63())
	minBackoff, maxBackoff := time.Second*10, time.Hour
	cacheSize := 100
	dialTimeout := time.Minute * 2
	discoverOpts := &discoverOptions{
		connFactory: func(host host.Host) (*discimpl.BackoffConnector, error) {
			backoff := discimpl.NewExponentialBackoff(minBackoff, maxBackoff, discimpl.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc))
			return discimpl.NewBackoffConnector(host, cacheSize, dialTimeout, backoff)
		},
	}

	return discoverOpts
}

// discover represents the discovery pipeline.
// The discovery pipeline handles advertising and discovery of peers
type discover struct {
	p *PubSub

	// discovery assists in discovering and advertising peers for a bitmask
	discovery discovery.Discovery

	// advertising tracks which bitmasks are being advertised
	advertising map[string]context.CancelFunc

	// discoverQ handles continuing peer discovery
	discoverQ chan *discoverReq

	// ongoing tracks ongoing discovery requests
	ongoing map[string]struct{}

	// done handles completion of a discovery request
	done chan string

	// connector handles connecting to new peers found via discovery
	connector *discimpl.BackoffConnector

	// options are the set of options to be used to complete struct construction in Start
	options *discoverOptions
}

// MinBitmaskSize returns a function that checks if a router is ready for publishing based on the bitmask size.
// The router ultimately decides whether it is ready or not, the given size is just a suggestion. Note
// that the bitmask size does not include the router in the count.
func MinBitmaskSize(size int) RouterReady {
	return func(rt PubSubRouter, bitmask []byte) (bool, error) {
		return rt.EnoughPeers(bitmask, size), nil
	}
}

// Start attaches the discovery pipeline to a pubsub instance, initializes discovery and starts event loop
func (d *discover) Start(p *PubSub, opts ...DiscoverOpt) error {
	if d.discovery == nil || p == nil {
		return nil
	}

	d.p = p
	d.advertising = make(map[string]context.CancelFunc)
	d.discoverQ = make(chan *discoverReq, 32)
	d.ongoing = make(map[string]struct{})
	d.done = make(chan string)

	conn, err := d.options.connFactory(p.host)
	if err != nil {
		return err
	}
	d.connector = conn

	go d.discoverLoop()
	go d.pollTimer()

	return nil
}

func (d *discover) pollTimer() {
	select {
	case <-time.After(DiscoveryPollInitialDelay):
	case <-d.p.ctx.Done():
		return
	}

	select {
	case d.p.eval <- d.requestDiscovery:
	case <-d.p.ctx.Done():
		return
	}

	ticker := time.NewTicker(DiscoveryPollInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			select {
			case d.p.eval <- d.requestDiscovery:
			case <-d.p.ctx.Done():
				return
			}
		case <-d.p.ctx.Done():
			return
		}
	}
}

func (d *discover) requestDiscovery() {
	for _, b := range d.p.myBitmasks {
		if !d.p.rt.EnoughPeers(b.bitmask, 0) {
			d.discoverQ <- &discoverReq{bitmask: b.bitmask, done: make(chan struct{}, 1)}
		}
	}
}

func (d *discover) discoverLoop() {
	for {
		select {
		case discover := <-d.discoverQ:
			bitmask := discover.bitmask

			if _, ok := d.ongoing[string(bitmask)]; ok {
				discover.done <- struct{}{}
				continue
			}

			d.ongoing[string(bitmask)] = struct{}{}

			go func() {
				d.handleDiscovery(d.p.ctx, bitmask, discover.opts)
				select {
				case d.done <- string(bitmask):
				case <-d.p.ctx.Done():
				}
				discover.done <- struct{}{}
			}()
		case bitmask := <-d.done:
			delete(d.ongoing, bitmask)
		case <-d.p.ctx.Done():
			return
		}
	}
}

// Advertise advertises this node's interest in a bitmask to a discovery service. Advertise is not thread-safe.
func (d *discover) Advertise(bitmask []byte) {
	if d.discovery == nil {
		return
	}

	advertisingCtx, cancel := context.WithCancel(d.p.ctx)

	if _, ok := d.advertising[string(bitmask)]; ok {
		cancel()
		return
	}
	d.advertising[string(bitmask)] = cancel

	go func() {
		next, err := d.discovery.Advertise(advertisingCtx, string(bitmask))
		if err != nil {
			log.Warnf("bootstrap: error providing rendezvous for %s: %s", bitmask, err.Error())
			if next == 0 {
				next = discoveryAdvertiseRetryInterval
			}
		}

		t := time.NewTimer(next)
		defer t.Stop()

		for advertisingCtx.Err() == nil {
			select {
			case <-t.C:
				next, err = d.discovery.Advertise(advertisingCtx, string(bitmask))
				if err != nil {
					log.Warnf("bootstrap: error providing rendezvous for %s: %s", bitmask, err.Error())
					if next == 0 {
						next = discoveryAdvertiseRetryInterval
					}
				}
				t.Reset(next)
			case <-advertisingCtx.Done():
				return
			}
		}
	}()
}

// StopAdvertise stops advertising this node's interest in a bitmask. StopAdvertise is not thread-safe.
func (d *discover) StopAdvertise(bitmask []byte) {
	if d.discovery == nil {
		return
	}

	if advertiseCancel, ok := d.advertising[string(bitmask)]; ok {
		advertiseCancel()
		delete(d.advertising, string(bitmask))
	}
}

// Discover searches for additional peers interested in a given bitmask
func (d *discover) Discover(bitmask []byte, opts ...discovery.Option) {
	if d.discovery == nil {
		return
	}

	d.discoverQ <- &discoverReq{bitmask, opts, make(chan struct{}, 1)}
}

// Bootstrap attempts to bootstrap to a given bitmask. Returns true if bootstrapped successfully, false otherwise.
func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterReady, opts ...discovery.Option) bool {
	if d.discovery == nil {
		return true
	}

	t := time.NewTimer(time.Hour)
	if !t.Stop() {
		<-t.C
	}
	defer t.Stop()

	for {
		// Check if ready for publishing
		bootstrapped := make(chan bool, 1)
		select {
		case d.p.eval <- func() {
			done, _ := ready(d.p.rt, bitmask)
			bootstrapped <- done
		}:
			if <-bootstrapped {
				return true
			}
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		// If not ready discover more peers
		disc := &discoverReq{bitmask, opts, make(chan struct{}, 1)}
		select {
		case d.discoverQ <- disc:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		select {
		case <-disc.done:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		t.Reset(time.Millisecond * 100)
		select {
		case <-t.C:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}
	}
}

func (d *discover) handleDiscovery(ctx context.Context, bitmask []byte, opts []discovery.Option) {
	discoverCtx, cancel := context.WithTimeout(ctx, time.Second*10)
	defer cancel()

	peerCh, err := d.discovery.FindPeers(discoverCtx, string(bitmask), opts...)
	if err != nil {
		log.Debugf("error finding peers for bitmask %s: %v", bitmask, err)
		return
	}

	d.connector.Connect(ctx, peerCh)
}

type discoverReq struct {
	bitmask []byte
	opts    []discovery.Option
	done    chan struct{}
}

type pubSubDiscovery struct {
	discovery.Discovery
	opts []discovery.Option
}

func (d *pubSubDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
	return d.Discovery.Advertise(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
}

func (d *pubSubDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
	return d.Discovery.FindPeers(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
}

// WithDiscoveryOpts passes libp2p Discovery options into the PubSub discovery subsystem
func WithDiscoveryOpts(opts ...discovery.Option) DiscoverOpt {
	return func(d *discoverOptions) error {
		d.opts = opts
		return nil
	}
}

// BackoffConnectorFactory creates a BackoffConnector that is attached to a given host
type BackoffConnectorFactory func(host host.Host) (*discimpl.BackoffConnector, error)

// WithDiscoverConnector adds a custom connector that deals with how the discovery subsystem connects to peers
func WithDiscoverConnector(connFactory BackoffConnectorFactory) DiscoverOpt {
	return func(d *discoverOptions) error {
		d.connFactory = connFactory
		return nil
	}
}
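
To tie the pieces together: a node opts into this pipeline with the WithDiscovery option, and can gate publishing on mesh size through WithReadiness(MinBitmaskSize(n)), exactly as the tests in the next file do. A minimal sketch, assuming h is a live libp2p host and disc is some discovery.Discovery implementation (both assumptions, not part of this file):

// Sketch: wire a discovery implementation into BlossomSub and gate
// publishing on bitmask size. `h` and `disc` are assumed to exist.
ps, err := NewBlossomSub(ctx, h, WithDiscovery(disc))
if err != nil {
	panic(err)
}
bm, err := ps.Join([]byte{0xff, 0x00, 0x00})
if err != nil {
	panic(err)
}
// Block until the router reports at least 3 peers in the bitmask.
if err := bm.Publish(ctx, []byte("hello"), WithReadiness(MinBitmaskSize(3))); err != nil {
	panic(err)
}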
309
go-libp2p-blossomsub/discovery_test.go
Normal file
@ -0,0 +1,309 @@
package blossomsub

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/discovery"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
)

type mockDiscoveryServer struct {
	mx sync.Mutex
	db map[string]map[peer.ID]*discoveryRegistration
}

type discoveryRegistration struct {
	info peer.AddrInfo
	ttl  time.Duration
}

func newDiscoveryServer() *mockDiscoveryServer {
	return &mockDiscoveryServer{
		db: make(map[string]map[peer.ID]*discoveryRegistration),
	}
}

func (s *mockDiscoveryServer) Advertise(ns string, info peer.AddrInfo, ttl time.Duration) (time.Duration, error) {
	s.mx.Lock()
	defer s.mx.Unlock()

	peers, ok := s.db[ns]
	if !ok {
		peers = make(map[peer.ID]*discoveryRegistration)
		s.db[ns] = peers
	}
	peers[info.ID] = &discoveryRegistration{info, ttl}
	return ttl, nil
}

func (s *mockDiscoveryServer) FindPeers(ns string, limit int) (<-chan peer.AddrInfo, error) {
	s.mx.Lock()
	defer s.mx.Unlock()

	peers, ok := s.db[ns]
	if !ok || len(peers) == 0 {
		emptyCh := make(chan peer.AddrInfo)
		close(emptyCh)
		return emptyCh, nil
	}

	count := len(peers)
	if count > limit {
		count = limit
	}
	ch := make(chan peer.AddrInfo, count)
	numSent := 0
	for _, reg := range peers {
		if numSent == count {
			break
		}
		numSent++
		ch <- reg.info
	}
	close(ch)

	return ch, nil
}

func (s *mockDiscoveryServer) hasPeerRecord(ns string, pid peer.ID) bool {
	s.mx.Lock()
	defer s.mx.Unlock()

	if peers, ok := s.db[ns]; ok {
		_, ok := peers[pid]
		return ok
	}
	return false
}

type mockDiscoveryClient struct {
	host   host.Host
	server *mockDiscoveryServer
}

func (d *mockDiscoveryClient) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
	var options discovery.Options
	err := options.Apply(opts...)
	if err != nil {
		return 0, err
	}

	return d.server.Advertise(ns, *host.InfoFromHost(d.host), options.Ttl)
}

func (d *mockDiscoveryClient) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
	var options discovery.Options
	err := options.Apply(opts...)
	if err != nil {
		return nil, err
	}

	return d.server.FindPeers(ns, options.Limit)
}

type dummyDiscovery struct{}

func (d *dummyDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
	return time.Hour, nil
}

func (d *dummyDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
	retCh := make(chan peer.AddrInfo)
	go func() {
		time.Sleep(time.Second)
		close(retCh)
	}()
	return retCh, nil
}

func TestSimpleDiscovery(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Setup Discovery server and pubsub clients
	const numHosts = 20
	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}

	server := newDiscoveryServer()
	discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(1 * time.Minute)}

	hosts := getNetHosts(t, ctx, numHosts)
	psubs := make([]*PubSub, numHosts)
	bitmaskHandlers := make([]*Bitmask, numHosts)

	for i, h := range hosts {
		disc := &mockDiscoveryClient{h, server}
		ps := getPubsub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...)))
		psubs[i] = ps
		bitmaskHandlers[i], _ = ps.Join(bitmask)
	}

	// Subscribe with all but one pubsub instance
	msgs := make([]*Subscription, numHosts)
	for i, th := range bitmaskHandlers[1:] {
		subch, err := th.Subscribe()
		if err != nil {
			t.Fatal(err)
		}

		msgs[i+1] = subch
	}

	// Wait for the advertisements to go through then check that they did
	for {
		server.mx.Lock()
		numPeers := len(server.db["floodsub:"+string(bitmask)])
		server.mx.Unlock()
		if numPeers == numHosts-1 {
			break
		} else {
			time.Sleep(time.Millisecond * 100)
		}
	}

	for i, h := range hosts[1:] {
		if !server.hasPeerRecord("floodsub:"+string(bitmask), h.ID()) {
			t.Fatalf("Server did not register host %d with ID: %s", i+1, h.ID().Pretty())
		}
	}

	// Try subscribing followed by publishing a single message
	subch, err := bitmaskHandlers[0].Subscribe()
	if err != nil {
		t.Fatal(err)
	}
	msgs[0] = subch

	msg := []byte("first message")
	if err := bitmaskHandlers[0].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
		t.Fatal(err)
	}

	for _, sub := range msgs {
		got, err := sub.Next(ctx)
		if err != nil {
			t.Fatal(sub.err)
		}
		if !bytes.Equal(msg, got.Data) {
			t.Fatal("got wrong message!")
		}
	}

	// Try random peers sending messages and make sure they are received
	for i := 0; i < 100; i++ {
		msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i))

		owner := rand.Intn(len(psubs))

		if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(1))); err != nil {
			t.Fatal(err)
		}

		for _, sub := range msgs {
			got, err := sub.Next(ctx)
			if err != nil {
				t.Fatal(sub.err)
			}
			if !bytes.Equal(msg, got.Data) {
				t.Fatal("got wrong message!")
			}
		}
	}
}

func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
	t.Skip("flaky test disabled")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Setup Discovery server and pubsub clients
	partitionSize := BlossomSubDlo - 1
	numHosts := partitionSize * 2
	const ttl = 1 * time.Minute

	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}

	server1, server2 := newDiscoveryServer(), newDiscoveryServer()
	discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(ttl)}

	// Put the pubsub clients into two partitions
	hosts := getNetHosts(t, ctx, numHosts)
	psubs := make([]*PubSub, numHosts)
	bitmaskHandlers := make([]*Bitmask, numHosts)

	for i, h := range hosts {
		s := server1
		if i >= partitionSize {
			s = server2
		}
		disc := &mockDiscoveryClient{h, s}
		ps := getBlossomSub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...)))
		psubs[i] = ps
		bitmaskHandlers[i], _ = ps.Join(bitmask)
	}

	msgs := make([]*Subscription, numHosts)
	for i, th := range bitmaskHandlers {
		subch, err := th.Subscribe()
		if err != nil {
			t.Fatal(err)
		}

		msgs[i] = subch
	}

	// Wait for network to finish forming then join the partitions via discovery
	for _, ps := range psubs {
		waitUntilBlossomSubMeshCount(ps, bitmask, partitionSize-1)
	}

	for i := 0; i < partitionSize; i++ {
		if _, err := server1.Advertise("floodsub:"+string(bitmask), *host.InfoFromHost(hosts[i+partitionSize]), ttl); err != nil {
			t.Fatal(err)
		}
	}

	// test the mesh
	for i := 0; i < 100; i++ {
		msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i))

		owner := rand.Intn(numHosts)

		if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
			t.Fatal(err)
		}

		for _, sub := range msgs {
			got, err := sub.Next(ctx)
			if err != nil {
				t.Fatal(sub.err)
			}
			if !bytes.Equal(msg, got.Data) {
				t.Fatal("got wrong message!")
			}
		}
	}
}

//lint:ignore U1000 used only by skipped tests at present
func waitUntilBlossomSubMeshCount(ps *PubSub, bitmask []byte, count int) {
	done := false
	doneCh := make(chan bool, 1)
	rt := ps.rt.(*BlossomSubRouter)
	for !done {
		ps.eval <- func() {
			doneCh <- len(rt.mesh[string(bitmask)]) == count
		}
		done = <-doneCh
		if !done {
			time.Sleep(100 * time.Millisecond)
		}
	}
}
108
go-libp2p-blossomsub/floodsub.go
Normal file
@ -0,0 +1,108 @@
package blossomsub

import (
	"context"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

const (
	FloodSubID                = protocol.ID("/floodsub/1.0.0")
	FloodSubBitmaskSearchSize = 5
)

// NewFloodsubWithProtocols returns a new floodsub-enabled PubSub object using the protocols specified in ps.
func NewFloodsubWithProtocols(ctx context.Context, h host.Host, ps []protocol.ID, opts ...Option) (*PubSub, error) {
	rt := &FloodSubRouter{
		protocols: ps,
	}
	return NewPubSub(ctx, h, rt, opts...)
}

// NewFloodSub returns a new PubSub object using the FloodSubRouter.
func NewFloodSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
	return NewFloodsubWithProtocols(ctx, h, []protocol.ID{FloodSubID}, opts...)
}

type FloodSubRouter struct {
	p         *PubSub
	protocols []protocol.ID
	tracer    *pubsubTracer
}

func (fs *FloodSubRouter) Protocols() []protocol.ID {
	return fs.protocols
}

func (fs *FloodSubRouter) Attach(p *PubSub) {
	fs.p = p
	fs.tracer = p.tracer
}

func (fs *FloodSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
	fs.tracer.AddPeer(p, proto)
}

func (fs *FloodSubRouter) RemovePeer(p peer.ID) {
	fs.tracer.RemovePeer(p)
}

func (fs *FloodSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
	// check all peers in the bitmask
	tmap, ok := fs.p.bitmasks[string(bitmask)]
	if !ok {
		return false
	}

	if suggested == 0 {
		suggested = FloodSubBitmaskSearchSize
	}

	if len(tmap) >= suggested {
		return true
	}

	return false
}

func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus {
	return AcceptAll
}

func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {}

func (fs *FloodSubRouter) Publish(msg *Message) {
	from := msg.ReceivedFrom
	bitmask := msg.GetBitmask()

	out := rpcWithMessages(msg.Message)
	for pid := range fs.p.bitmasks[string(bitmask)] {
		if pid == from || pid == peer.ID(msg.GetFrom()) {
			continue
		}

		mch, ok := fs.p.peers[pid]
		if !ok {
			continue
		}

		select {
		case mch <- out:
			fs.tracer.SendRPC(out, pid)
		default:
			log.Infof("dropping message to peer %s: queue full", pid)
			fs.tracer.DropRPC(out, pid)
			// Drop it. The peer is too slow.
		}
	}
}
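
// Editorial note (not upstream code): Publish implements pure flooding: every
// peer subscribed to the bitmask receives a copy, except the hop the message
// arrived from and its original author. When a peer's outbound queue is full
// the RPC is dropped rather than blocking, so a single slow peer cannot stall
// delivery to everyone else.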

func (fs *FloodSubRouter) Join(bitmask []byte) {
	fs.tracer.Join(bitmask)
}

func (fs *FloodSubRouter) Leave(bitmask []byte) {
	fs.tracer.Leave(bitmask)
}
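
For orientation, a minimal sketch of wiring this router into a libp2p host. It assumes the fork keeps go-libp2p-pubsub's top-level Subscribe/Publish helpers with a []byte bitmask in place of the topic string; the listen address and bitmask value are arbitrary examples:

package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	// a plain libp2p host; the listen address is an arbitrary choice
	h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// PubSub backed by the FloodSubRouter defined above
	ps, err := blossomsub.NewFloodSub(ctx, h)
	if err != nil {
		panic(err)
	}

	bitmask := []byte{0x01} // arbitrary example bitmask
	sub, err := ps.Subscribe(bitmask)
	if err != nil {
		panic(err)
	}

	if err := ps.Publish(bitmask, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received: %s\n", msg.Data)
}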
1282
go-libp2p-blossomsub/floodsub_test.go
Normal file
File diff suppressed because it is too large
87
go-libp2p-blossomsub/go.mod
Normal file
@@ -0,0 +1,87 @@
module source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub

go 1.18

require (
	github.com/benbjohnson/clock v1.3.0
	github.com/gogo/protobuf v1.3.2
	github.com/ipfs/go-log/v2 v2.5.1
	github.com/libp2p/go-buffer-pool v0.1.0
	github.com/libp2p/go-libp2p v0.25.0
	github.com/libp2p/go-libp2p-testing v0.12.0
	github.com/libp2p/go-msgio v0.3.0
	github.com/multiformats/go-multiaddr v0.8.0
	github.com/multiformats/go-varint v0.0.7
	google.golang.org/protobuf v1.28.1
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/containerd/cgroups v1.0.4 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/elastic/gosigar v0.14.2 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/golang/mock v1.6.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
	github.com/ipfs/go-cid v0.3.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/klauspost/compress v1.15.15 // indirect
	github.com/klauspost/cpuid/v2 v2.2.1 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-netroute v0.2.1 // indirect
	github.com/libp2p/go-reuseport v0.2.0 // indirect
	github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/miekg/dns v1.1.50 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/sha256-simd v1.0.0 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
	github.com/multiformats/go-multibase v0.1.1 // indirect
	github.com/multiformats/go-multicodec v0.7.0 // indirect
	github.com/multiformats/go-multihash v0.2.1 // indirect
	github.com/multiformats/go-multistream v0.4.0 // indirect
	github.com/onsi/ginkgo/v2 v2.5.1 // indirect
	github.com/opencontainers/runtime-spec v1.0.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_golang v1.14.0 // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.39.0 // indirect
	github.com/prometheus/procfs v0.9.0 // indirect
	github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
	github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
	github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
	github.com/quic-go/quic-go v0.32.0 // indirect
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/stretchr/testify v1.8.1 // indirect
	go.uber.org/atomic v1.10.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.24.0 // indirect
	golang.org/x/crypto v0.4.0 // indirect
	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
	golang.org/x/mod v0.9.0 // indirect
	golang.org/x/net v0.8.0 // indirect
	golang.org/x/sys v0.6.0 // indirect
	golang.org/x/tools v0.7.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.1.7 // indirect
)
443
go-libp2p-blossomsub/go.sum
Normal file
@@ -0,0 +1,443 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.25.0 h1:ND6Hc6ZYCzC8S++C4mOD7LdPnLXRkNbr12/8FXgUfIo=
github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
github.com/multiformats/go-multistream v0.4.0 h1:5i4JbawClkbuaX+mIVXiHQYVPxUW+zjv6w7jtSRukxc=
github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI=
github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
200
go-libp2p-blossomsub/gossip_tracer.go
Normal file
@@ -0,0 +1,200 @@
package blossomsub

import (
	"math/rand"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

// gossipTracer is an internal tracer that tracks IWANT requests in order to penalize
// peers who don't follow up on IWANT requests after an IHAVE advertisement.
// The tracking of promises is probabilistic to avoid using too much memory.
type gossipTracer struct {
	sync.Mutex

	idGen *msgIDGenerator

	followUpTime time.Duration

	// promises for messages by message ID; for each message tracked, we track the promise
	// expiration time for each peer.
	promises map[string]map[peer.ID]time.Time
	// promises for each peer; for each peer, we track the promised message IDs.
	// this index allows us to quickly void promises when a peer is throttled.
	peerPromises map[peer.ID]map[string]struct{}
}

func newGossipTracer() *gossipTracer {
	return &gossipTracer{
		idGen:        newMsgIdGenerator(),
		promises:     make(map[string]map[peer.ID]time.Time),
		peerPromises: make(map[peer.ID]map[string]struct{}),
	}
}

func (gt *gossipTracer) Start(gs *BlossomSubRouter) {
	if gt == nil {
		return
	}

	gt.idGen = gs.p.idGen
	gt.followUpTime = gs.params.IWantFollowupTime
}

// track a promise to deliver a message from a list of msgIDs we are requesting
func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) {
	if gt == nil {
		return
	}

	idx := rand.Intn(len(msgIDs))
	mid := msgIDs[idx]

	gt.Lock()
	defer gt.Unlock()

	promises, ok := gt.promises[mid]
	if !ok {
		promises = make(map[peer.ID]time.Time)
		gt.promises[mid] = promises
	}

	_, ok = promises[p]
	if !ok {
		promises[p] = time.Now().Add(gt.followUpTime)
		peerPromises, ok := gt.peerPromises[p]
		if !ok {
			peerPromises = make(map[string]struct{})
			gt.peerPromises[p] = peerPromises
		}
		peerPromises[mid] = struct{}{}
	}
}
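
// Editorial note (not upstream logic, added for clarity): AddPromise samples a
// single message ID per IWANT batch via rand.Intn, so each request creates at
// most one tracked promise regardless of batch size. Memory therefore grows
// with the number of IWANT exchanges rather than the number of requested
// messages, while a peer that routinely ignores IWANTs is still caught with
// high probability across repeated batches.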

// returns the number of broken promises for each peer who didn't follow up
// on an IWANT request.
func (gt *gossipTracer) GetBrokenPromises() map[peer.ID]int {
	if gt == nil {
		return nil
	}

	gt.Lock()
	defer gt.Unlock()

	var res map[peer.ID]int
	now := time.Now()

	// find broken promises from peers
	for mid, promises := range gt.promises {
		for p, expire := range promises {
			if expire.Before(now) {
				if res == nil {
					res = make(map[peer.ID]int)
				}
				res[p]++

				delete(promises, p)

				peerPromises := gt.peerPromises[p]
				delete(peerPromises, mid)
				if len(peerPromises) == 0 {
					delete(gt.peerPromises, p)
				}
			}
		}

		if len(promises) == 0 {
			delete(gt.promises, mid)
		}
	}

	return res
}

var _ RawTracer = (*gossipTracer)(nil)

func (gt *gossipTracer) fulfillPromise(msg *Message) {
	mid := gt.idGen.ID(msg)

	gt.Lock()
	defer gt.Unlock()

	promises, ok := gt.promises[mid]
	if !ok {
		return
	}
	delete(gt.promises, mid)

	// delete the promise for all peers that promised it, as they have no way to fulfill it.
	for p := range promises {
		peerPromises, ok := gt.peerPromises[p]
		if ok {
			delete(peerPromises, mid)
			if len(peerPromises) == 0 {
				delete(gt.peerPromises, p)
			}
		}
	}
}

func (gt *gossipTracer) DeliverMessage(msg *Message) {
	// someone delivered a message, fulfill promises for it
	gt.fulfillPromise(msg)
}

func (gt *gossipTracer) RejectMessage(msg *Message, reason string) {
	// A message got rejected, so we can fulfill promises and let the score penalty apply
	// from invalid message delivery.
	// We do take exception and apply promise penalty regardless in the following cases, where
	// the peer delivered an obviously invalid message.
	switch reason {
	case RejectMissingSignature:
		return
	case RejectInvalidSignature:
		return
	}

	gt.fulfillPromise(msg)
}

func (gt *gossipTracer) ValidateMessage(msg *Message) {
	// we consider the promise fulfilled as soon as the message begins validation
	// if it was a case of signature issue it would have been rejected immediately
	// without triggering the Validate trace
	gt.fulfillPromise(msg)
}

func (gt *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {}
func (gt *gossipTracer) RemovePeer(p peer.ID)                 {}
func (gt *gossipTracer) Join(bitmask []byte)                  {}
func (gt *gossipTracer) Leave(bitmask []byte)                 {}
func (gt *gossipTracer) Graft(p peer.ID, bitmask []byte)      {}
func (gt *gossipTracer) Prune(p peer.ID, bitmask []byte)      {}
func (gt *gossipTracer) DuplicateMessage(msg *Message)        {}
func (gt *gossipTracer) RecvRPC(rpc *RPC)                     {}
func (gt *gossipTracer) SendRPC(rpc *RPC, p peer.ID)          {}
func (gt *gossipTracer) DropRPC(rpc *RPC, p peer.ID)          {}
func (gt *gossipTracer) UndeliverableMessage(msg *Message)    {}

func (gt *gossipTracer) ThrottlePeer(p peer.ID) {
	gt.Lock()
	defer gt.Unlock()

	peerPromises, ok := gt.peerPromises[p]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for mid := range peerPromises {
|
||||||
|
promises := gt.promises[mid]
|
||||||
|
delete(promises, p)
|
||||||
|
if len(promises) == 0 {
|
||||||
|
delete(gt.promises, mid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(gt.peerPromises, p)
|
||||||
|
}
|
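The tracer above relies on two mutually indexed maps: promises (message ID → peer → expiry) lets GetBrokenPromises expire promises per message, while peerPromises (peer → message IDs) lets ThrottlePeer void all of a peer's promises without scanning every tracked message. A minimal standalone sketch of that dual-index bookkeeping, with hypothetical names (promiseTracker, add, throttle) and plain strings in place of peer.ID:

package main

import (
	"fmt"
	"time"
)

// promiseTracker is a stripped-down illustration of the dual-index scheme
// used by gossipTracer: one map keyed by message ID, one keyed by peer.
type promiseTracker struct {
	promises     map[string]map[string]time.Time // mid -> peer -> expiry
	peerPromises map[string]map[string]struct{}  // peer -> set of mids
}

func newPromiseTracker() *promiseTracker {
	return &promiseTracker{
		promises:     make(map[string]map[string]time.Time),
		peerPromises: make(map[string]map[string]struct{}),
	}
}

// add records that peer p promised to deliver message mid by the deadline,
// keeping both indexes in sync.
func (t *promiseTracker) add(p, mid string, deadline time.Time) {
	if t.promises[mid] == nil {
		t.promises[mid] = make(map[string]time.Time)
	}
	if _, ok := t.promises[mid][p]; !ok {
		t.promises[mid][p] = deadline
		if t.peerPromises[p] == nil {
			t.peerPromises[p] = make(map[string]struct{})
		}
		t.peerPromises[p][mid] = struct{}{}
	}
}

// throttle voids every outstanding promise of peer p via the reverse index,
// without scanning all tracked messages.
func (t *promiseTracker) throttle(p string) {
	for mid := range t.peerPromises[p] {
		delete(t.promises[mid], p)
		if len(t.promises[mid]) == 0 {
			delete(t.promises, mid)
		}
	}
	delete(t.peerPromises, p)
}

func main() {
	t := newPromiseTracker()
	t.add("peerA", "msg1", time.Now().Add(time.Second))
	t.add("peerA", "msg2", time.Now().Add(time.Second))
	t.throttle("peerA")
	fmt.Println(len(t.promises), len(t.peerPromises)) // 0 0: both indexes vacated
}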
103
go-libp2p-blossomsub/gossip_tracer_test.go
Normal file
@@ -0,0 +1,103 @@
package blossomsub

import (
	"testing"
	"time"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestBrokenPromises(t *testing.T) {
	// tests that unfulfilled promises are tracked correctly
	gt := newGossipTracer()
	gt.followUpTime = 100 * time.Millisecond

	peerA := peer.ID("A")
	peerB := peer.ID("B")
	peerC := peer.ID("C")

	var mids []string
	for i := 0; i < 100; i++ {
		m := makeTestMessage(i)
		m.From = []byte(peerA)
		mid := DefaultMsgIdFn(m)
		mids = append(mids, mid)
	}

	gt.AddPromise(peerA, mids)
	gt.AddPromise(peerB, mids)
	gt.AddPromise(peerC, mids)

	// no broken promises yet
	brokenPromises := gt.GetBrokenPromises()
	if brokenPromises != nil {
		t.Fatal("expected no broken promises")
	}

	// throttle one of the peers to void its promises
	gt.ThrottlePeer(peerC)

	// make promises break
	time.Sleep(gt.followUpTime + time.Millisecond)

	brokenPromises = gt.GetBrokenPromises()
	if len(brokenPromises) != 2 {
		t.Fatalf("expected 2 broken promises, got %d", len(brokenPromises))
	}

	brokenPromisesA := brokenPromises[peerA]
	if brokenPromisesA != 1 {
		t.Fatalf("expected 1 broken promise from A, got %d", brokenPromisesA)
	}

	brokenPromisesB := brokenPromises[peerB]
	if brokenPromisesB != 1 {
		t.Fatalf("expected 1 broken promise from B, got %d", brokenPromisesB)
	}

	// verify that the peerPromises map has been vacated
	if len(gt.peerPromises) != 0 {
		t.Fatal("expected empty peerPromises map")
	}
}

func TestNoBrokenPromises(t *testing.T) {
	// like above, but this time we deliver messages to fulfill the promises
	gt := newGossipTracer()
	gt.followUpTime = 100 * time.Millisecond

	peerA := peer.ID("A")
	peerB := peer.ID("B")

	var msgs []*pb.Message
	var mids []string
	for i := 0; i < 100; i++ {
		m := makeTestMessage(i)
		m.From = []byte(peerA)
		msgs = append(msgs, m)
		mid := DefaultMsgIdFn(m)
		mids = append(mids, mid)
	}

	gt.AddPromise(peerA, mids)
	gt.AddPromise(peerB, mids)

	for _, m := range msgs {
		gt.DeliverMessage(&Message{Message: m})
	}

	time.Sleep(gt.followUpTime + time.Millisecond)

	// there should be no broken promises
	brokenPromises := gt.GetBrokenPromises()
	if brokenPromises != nil {
		t.Fatal("expected no broken promises")
	}

	// verify that the peerPromises map has been vacated
	if len(gt.peerPromises) != 0 {
		t.Fatal("expected empty peerPromises map")
	}
}
105
go-libp2p-blossomsub/mcache.go
Normal file
@@ -0,0 +1,105 @@
package blossomsub

import (
	"bytes"
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// NewMessageCache creates a sliding window cache that remembers messages for as
// long as `history` slots.
//
// When queried for messages to advertise, the cache only returns messages in
// the last `gossip` slots.
//
// The `gossip` parameter must be smaller or equal to `history`, or this
// function will panic.
//
// The slack between `gossip` and `history` accounts for the reaction time
// between when a message is advertised via IHAVE gossip, and the peer pulls it
// via an IWANT command.
func NewMessageCache(gossip, history int) *MessageCache {
	if gossip > history {
		err := fmt.Errorf("invalid parameters for message cache; gossip slots (%d) cannot be larger than history slots (%d)",
			gossip, history)
		panic(err)
	}
	return &MessageCache{
		msgs:    make(map[string]*Message),
		peertx:  make(map[string]map[peer.ID]int),
		history: make([][]CacheEntry, history),
		gossip:  gossip,
		msgID: func(msg *Message) string {
			return DefaultMsgIdFn(msg.Message)
		},
	}
}

type MessageCache struct {
	msgs    map[string]*Message
	peertx  map[string]map[peer.ID]int
	history [][]CacheEntry
	gossip  int
	msgID   func(*Message) string
}

func (mc *MessageCache) SetMsgIdFn(msgID func(*Message) string) {
	mc.msgID = msgID
}

type CacheEntry struct {
	mid     string
	bitmask []byte
}

func (mc *MessageCache) Put(msg *Message) {
	mid := mc.msgID(msg)
	mc.msgs[mid] = msg
	mc.history[0] = append(mc.history[0], CacheEntry{mid: mid, bitmask: msg.GetBitmask()})
}

func (mc *MessageCache) Get(mid string) (*Message, bool) {
	m, ok := mc.msgs[mid]
	return m, ok
}

func (mc *MessageCache) GetForPeer(mid string, p peer.ID) (*Message, int, bool) {
	m, ok := mc.msgs[mid]
	if !ok {
		return nil, 0, false
	}

	tx, ok := mc.peertx[mid]
	if !ok {
		tx = make(map[peer.ID]int)
		mc.peertx[mid] = tx
	}
	tx[p]++

	return m, tx[p], true
}

func (mc *MessageCache) GetGossipIDs(bitmask []byte) []string {
	var mids []string
	for _, entries := range mc.history[:mc.gossip] {
		for _, entry := range entries {
			if bytes.Equal(entry.bitmask, bitmask) {
				mids = append(mids, entry.mid)
			}
		}
	}
	return mids
}

func (mc *MessageCache) Shift() {
	last := mc.history[len(mc.history)-1]
	for _, entry := range last {
		delete(mc.msgs, entry.mid)
		delete(mc.peertx, entry.mid)
	}
	for i := len(mc.history) - 2; i >= 0; i-- {
		mc.history[i+1] = mc.history[i]
	}
	mc.history[0] = nil
}
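NewMessageCache's window semantics are easiest to see in isolation: entries survive `history` calls to Shift but are only advertised while they sit in the first `gossip` slots. A minimal sketch of the same sliding-window idea, independent of the package's Message type (windowCache and its method names are illustrative, not part of this commit):

package main

import "fmt"

// windowCache mirrors MessageCache's shift semantics for plain strings:
// entries live for `history` shifts but are only advertised from the
// first `gossip` slots.
type windowCache struct {
	history [][]string
	gossip  int
}

func newWindowCache(gossip, history int) *windowCache {
	return &windowCache{history: make([][]string, history), gossip: gossip}
}

func (c *windowCache) put(id string) { c.history[0] = append(c.history[0], id) }

// shift ages every slot by one; the oldest slot falls out entirely.
func (c *windowCache) shift() {
	for i := len(c.history) - 2; i >= 0; i-- {
		c.history[i+1] = c.history[i]
	}
	c.history[0] = nil
}

// gossipIDs returns only the entries young enough to advertise via IHAVE.
func (c *windowCache) gossipIDs() []string {
	var ids []string
	for _, slot := range c.history[:c.gossip] {
		ids = append(ids, slot...)
	}
	return ids
}

func main() {
	c := newWindowCache(2, 3)
	c.put("m1")
	c.shift()
	c.put("m2")
	c.shift()
	c.put("m3")
	// m1 has aged past the gossip window but is still retained in history.
	fmt.Println(c.gossipIDs()) // [m3 m2]
}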
167
go-libp2p-blossomsub/mcache_test.go
Normal file
@@ -0,0 +1,167 @@
package blossomsub

import (
	"encoding/binary"
	"fmt"
	"testing"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

func TestMessageCache(t *testing.T) {
	mcache := NewMessageCache(3, 5)
	msgID := DefaultMsgIdFn

	msgs := make([]*pb.Message, 60)
	for i := range msgs {
		msgs[i] = makeTestMessage(i)
	}

	for i := 0; i < 10; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	for i := 0; i < 10; i++ {
		mid := msgID(msgs[i])
		m, ok := mcache.Get(mid)
		if !ok {
			t.Fatalf("Message %d not in cache", i)
		}

		if m.Message != msgs[i] {
			t.Fatalf("Message %d does not match cache", i)
		}
	}

	gids := mcache.GetGossipIDs([]byte{0x7e, 0x57})
	if len(gids) != 10 {
		t.Fatalf("Expected 10 gossip IDs; got %d", len(gids))
	}

	for i := 0; i < 10; i++ {
		mid := msgID(msgs[i])
		if mid != gids[i] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}

	mcache.Shift()
	for i := 10; i < 20; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	for i := 0; i < 20; i++ {
		mid := msgID(msgs[i])
		m, ok := mcache.Get(mid)
		if !ok {
			t.Fatalf("Message %d not in cache", i)
		}

		if m.Message != msgs[i] {
			t.Fatalf("Message %d does not match cache", i)
		}
	}

	gids = mcache.GetGossipIDs([]byte{0x7e, 0x57})
	if len(gids) != 20 {
		t.Fatalf("Expected 20 gossip IDs; got %d", len(gids))
	}

	for i := 0; i < 10; i++ {
		mid := msgID(msgs[i])
		if mid != gids[10+i] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}

	for i := 10; i < 20; i++ {
		mid := msgID(msgs[i])
		if mid != gids[i-10] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}

	mcache.Shift()
	for i := 20; i < 30; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	mcache.Shift()
	for i := 30; i < 40; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	mcache.Shift()
	for i := 40; i < 50; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	mcache.Shift()
	for i := 50; i < 60; i++ {
		mcache.Put(&Message{Message: msgs[i]})
	}

	if len(mcache.msgs) != 50 {
		t.Fatalf("Expected 50 messages in the cache; got %d", len(mcache.msgs))
	}

	for i := 0; i < 10; i++ {
		mid := msgID(msgs[i])
		_, ok := mcache.Get(mid)
		if ok {
			t.Fatalf("Message %d still in cache", i)
		}
	}

	for i := 10; i < 60; i++ {
		mid := msgID(msgs[i])
		m, ok := mcache.Get(mid)
		if !ok {
			t.Fatalf("Message %d not in cache", i)
		}

		if m.Message != msgs[i] {
			t.Fatalf("Message %d does not match cache", i)
		}
	}

	gids = mcache.GetGossipIDs([]byte{0x7e, 0x57})
	if len(gids) != 30 {
		t.Fatalf("Expected 30 gossip IDs; got %d", len(gids))
	}

	for i := 0; i < 10; i++ {
		mid := msgID(msgs[50+i])
		if mid != gids[i] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}

	for i := 10; i < 20; i++ {
		mid := msgID(msgs[30+i])
		if mid != gids[i] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}

	for i := 20; i < 30; i++ {
		mid := msgID(msgs[10+i])
		if mid != gids[i] {
			t.Fatalf("GossipID mismatch for message %d", i)
		}
	}
}

func makeTestMessage(n int) *pb.Message {
	seqno := make([]byte, 8)
	binary.BigEndian.PutUint64(seqno, uint64(n))
	data := []byte(fmt.Sprintf("%d", n))
	bitmask := []byte{0x7e, 0x57}
	return &pb.Message{
		Data:    data,
		Bitmask: bitmask,
		From:    []byte{0x7e, 0x57},
		Seqno:   seqno,
	}
}
52
go-libp2p-blossomsub/midgen.go
Normal file
@@ -0,0 +1,52 @@
package blossomsub

import (
	"sync"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// msgIDGenerator handles computing IDs for msgs.
// It allows setting custom generators (MsgIdFunction) per bitmask.
type msgIDGenerator struct {
	Default MsgIdFunction

	bitmaskGensLk sync.RWMutex
	bitmaskGens   map[string]MsgIdFunction
}

func newMsgIdGenerator() *msgIDGenerator {
	return &msgIDGenerator{
		Default:     DefaultMsgIdFn,
		bitmaskGens: make(map[string]MsgIdFunction),
	}
}

// Set sets a custom id generator (MsgIdFunction) for the bitmask.
func (m *msgIDGenerator) Set(bitmask []byte, gen MsgIdFunction) {
	m.bitmaskGensLk.Lock()
	m.bitmaskGens[string(bitmask)] = gen
	m.bitmaskGensLk.Unlock()
}

// ID computes ID for the msg or short-circuits with the cached value.
func (m *msgIDGenerator) ID(msg *Message) string {
	if msg.ID != "" {
		return msg.ID
	}

	msg.ID = m.RawID(msg.Message)
	return msg.ID
}

// RawID computes ID for the proto 'msg'.
func (m *msgIDGenerator) RawID(msg *pb.Message) string {
	m.bitmaskGensLk.RLock()
	gen, ok := m.bitmaskGens[string(msg.GetBitmask())]
	m.bitmaskGensLk.RUnlock()
	if !ok {
		gen = m.Default
	}

	return gen(msg)
}
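Since msgIDGenerator is unexported, a custom per-bitmask ID function would be wired up from inside the package. A hedged sketch, assuming in-package code; hashMsgID and exampleIDGen are illustrative names, not part of this commit:

package blossomsub

import (
	"crypto/sha256"
	"encoding/hex"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// hashMsgID is an illustrative MsgIdFunction that identifies a message by
// the hash of its payload rather than by sender and seqno.
func hashMsgID(m *pb.Message) string {
	h := sha256.Sum256(m.GetData())
	return hex.EncodeToString(h[:])
}

// exampleIDGen shows the lookup order: a generator registered for the
// message's exact bitmask wins, otherwise Default is used.
func exampleIDGen() {
	gen := newMsgIdGenerator()
	gen.Set([]byte{0x7e, 0x57}, hashMsgID)

	msg := &pb.Message{Data: []byte("hello"), Bitmask: []byte{0x7e, 0x57}}
	_ = gen.RawID(msg) // uses hashMsgID, registered for this bitmask

	other := &pb.Message{Data: []byte("hello"), Bitmask: []byte{0x00}}
	_ = gen.RawID(other) // falls back to DefaultMsgIdFn
}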
75
go-libp2p-blossomsub/notify.go
Normal file
@@ -0,0 +1,75 @@
package blossomsub

import (
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

var _ network.Notifiee = (*PubSubNotif)(nil)

type PubSubNotif PubSub

func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) {
}

func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) {
}

func (p *PubSubNotif) Connected(n network.Network, c network.Conn) {
	// ignore transient connections
	if c.Stat().Transient {
		return
	}

	go func() {
		p.newPeersPrioLk.RLock()
		p.newPeersMx.Lock()
		p.newPeersPend[c.RemotePeer()] = struct{}{}
		p.newPeersMx.Unlock()
		p.newPeersPrioLk.RUnlock()

		select {
		case p.newPeers <- struct{}{}:
		default:
		}
	}()
}

func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) {
}

func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) {
}

func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) {
}

func (p *PubSubNotif) Initialize() {
	isTransient := func(pid peer.ID) bool {
		for _, c := range p.host.Network().ConnsToPeer(pid) {
			if !c.Stat().Transient {
				return false
			}
		}

		return true
	}

	p.newPeersPrioLk.RLock()
	p.newPeersMx.Lock()
	for _, pid := range p.host.Network().Peers() {
		if isTransient(pid) {
			continue
		}

		p.newPeersPend[pid] = struct{}{}
	}
	p.newPeersMx.Unlock()
	p.newPeersPrioLk.RUnlock()

	select {
	case p.newPeers <- struct{}{}:
	default:
	}
}
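Both Connected and Initialize finish with the same coalescing idiom: pending peers accumulate in a locked map, and the newPeers channel acts as a doorbell whose extra rings are dropped by the select/default. A standalone sketch of that pattern under hypothetical names (doorbell, enqueue, drain), not part of this commit:

package main

import (
	"fmt"
	"sync"
)

// doorbell coalesces many "work available" signals into at most one
// pending wakeup, the same pattern PubSubNotif uses with newPeers.
type doorbell struct {
	mu      sync.Mutex
	pending map[string]struct{}
	wake    chan struct{}
}

func newDoorbell() *doorbell {
	return &doorbell{
		pending: make(map[string]struct{}),
		wake:    make(chan struct{}, 1),
	}
}

func (d *doorbell) enqueue(id string) {
	d.mu.Lock()
	d.pending[id] = struct{}{}
	d.mu.Unlock()

	// Non-blocking send: if a wakeup is already queued, drop this one;
	// the consumer will drain the whole pending set anyway.
	select {
	case d.wake <- struct{}{}:
	default:
	}
}

func (d *doorbell) drain() []string {
	<-d.wake
	d.mu.Lock()
	defer d.mu.Unlock()
	var ids []string
	for id := range d.pending {
		ids = append(ids, id)
	}
	d.pending = make(map[string]struct{})
	return ids
}

func main() {
	d := newDoorbell()
	d.enqueue("peerA")
	d.enqueue("peerB") // second signal coalesces with the first
	fmt.Println(len(d.drain()), "peers to process")
}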
11
go-libp2p-blossomsub/pb/Makefile
Normal file
@@ -0,0 +1,11 @@
PB = $(wildcard *.proto)
GO = $(PB:.proto=.pb.go)

all: $(GO)

%.pb.go: %.proto
	protoc --go_out=paths=source_relative:. $<

clean:
	rm -f *.pb.go
	rm -f *.go
57
go-libp2p-blossomsub/pb/extensions.go
Normal file
@@ -0,0 +1,57 @@
package pb

import "google.golang.org/protobuf/proto"

func (r *RPC) Size() int {
	return proto.Size(r)
}

func (r *RPC_SubOpts) Size() int {
	return proto.Size(r)
}

func (i *ControlGraft) Size() int {
	return proto.Size(i)
}

func (i *ControlIHave) Size() int {
	return proto.Size(i)
}

func (i *ControlIWant) Size() int {
	return proto.Size(i)
}

func (i *ControlMessage) Size() int {
	return proto.Size(i)
}

func (i *ControlPrune) Size() int {
	return proto.Size(i)
}

func (m *Message) Size() int {
	return proto.Size(m)
}

func (c *ControlMessage) Marshal() ([]byte, error) {
	return proto.Marshal(c)
}

func (r *RPC) MarshalTo(buf []byte) (int, error) {
	data, err := proto.Marshal(r)
	if err != nil {
		return 0, err
	}

	n := copy(buf, data)
	return n, nil
}

func (r *RPC) Unmarshal(buf []byte) error {
	return proto.Unmarshal(buf, r)
}

func (m *Message) Marshal() ([]byte, error) {
	return proto.Marshal(m)
}
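The wrappers above only delegate to google.golang.org/protobuf. A small round-trip sketch of how they compose (an illustrative test, not part of this commit; it assumes the module path above resolves):

package pb_test

import (
	"testing"

	"google.golang.org/protobuf/proto"
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// TestRPCRoundTrip walks a message through the wrappers: Size reports the
// encoded length, MarshalTo fills a caller-owned buffer, Unmarshal decodes.
func TestRPCRoundTrip(t *testing.T) {
	in := &pb.RPC{
		Publish: []*pb.Message{{Data: []byte("hi"), Bitmask: []byte{0x7e, 0x57}}},
	}

	buf := make([]byte, in.Size())
	if _, err := in.MarshalTo(buf); err != nil {
		t.Fatal(err)
	}

	out := &pb.RPC{}
	if err := out.Unmarshal(buf); err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(in, out) {
		t.Fatal("round trip mismatch")
	}
}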
818
go-libp2p-blossomsub/pb/rpc.pb.go
Normal file
@@ -0,0 +1,818 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.30.0
// 	protoc        v3.21.12
// source: rpc.proto

package pb

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type RPC struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Subscriptions []*RPC_SubOpts  `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"`
	Publish       []*Message      `protobuf:"bytes,2,rep,name=publish,proto3" json:"publish,omitempty"`
	Control       *ControlMessage `protobuf:"bytes,3,opt,name=control,proto3" json:"control,omitempty"`
}

func (x *RPC) Reset() {
	*x = RPC{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RPC) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RPC) ProtoMessage() {}

func (x *RPC) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RPC.ProtoReflect.Descriptor instead.
func (*RPC) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{0}
}

func (x *RPC) GetSubscriptions() []*RPC_SubOpts {
	if x != nil {
		return x.Subscriptions
	}
	return nil
}

func (x *RPC) GetPublish() []*Message {
	if x != nil {
		return x.Publish
	}
	return nil
}

func (x *RPC) GetControl() *ControlMessage {
	if x != nil {
		return x.Control
	}
	return nil
}

type Message struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	From      []byte `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"`
	Data      []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	Seqno     []byte `protobuf:"bytes,3,opt,name=seqno,proto3" json:"seqno,omitempty"`
	Bitmask   []byte `protobuf:"bytes,4,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
	Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"`
	Key       []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"`
}

func (x *Message) Reset() {
	*x = Message{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Message) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Message) ProtoMessage() {}

func (x *Message) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{1}
}

func (x *Message) GetFrom() []byte {
	if x != nil {
		return x.From
	}
	return nil
}

func (x *Message) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

func (x *Message) GetSeqno() []byte {
	if x != nil {
		return x.Seqno
	}
	return nil
}

func (x *Message) GetBitmask() []byte {
	if x != nil {
		return x.Bitmask
	}
	return nil
}

func (x *Message) GetSignature() []byte {
	if x != nil {
		return x.Signature
	}
	return nil
}

func (x *Message) GetKey() []byte {
	if x != nil {
		return x.Key
	}
	return nil
}

type ControlMessage struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave,proto3" json:"ihave,omitempty"`
	Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant,proto3" json:"iwant,omitempty"`
	Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft,proto3" json:"graft,omitempty"`
	Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune,proto3" json:"prune,omitempty"`
}

func (x *ControlMessage) Reset() {
	*x = ControlMessage{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ControlMessage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ControlMessage) ProtoMessage() {}

func (x *ControlMessage) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ControlMessage.ProtoReflect.Descriptor instead.
func (*ControlMessage) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{2}
}

func (x *ControlMessage) GetIhave() []*ControlIHave {
	if x != nil {
		return x.Ihave
	}
	return nil
}

func (x *ControlMessage) GetIwant() []*ControlIWant {
	if x != nil {
		return x.Iwant
	}
	return nil
}

func (x *ControlMessage) GetGraft() []*ControlGraft {
	if x != nil {
		return x.Graft
	}
	return nil
}

func (x *ControlMessage) GetPrune() []*ControlPrune {
	if x != nil {
		return x.Prune
	}
	return nil
}

type ControlIHave struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
	// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
	MessageIDs []string `protobuf:"bytes,2,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
}

func (x *ControlIHave) Reset() {
	*x = ControlIHave{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ControlIHave) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ControlIHave) ProtoMessage() {}

func (x *ControlIHave) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ControlIHave.ProtoReflect.Descriptor instead.
func (*ControlIHave) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{3}
}

func (x *ControlIHave) GetBitmask() []byte {
	if x != nil {
		return x.Bitmask
	}
	return nil
}

func (x *ControlIHave) GetMessageIDs() []string {
	if x != nil {
		return x.MessageIDs
	}
	return nil
}

type ControlIWant struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
	MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
}

func (x *ControlIWant) Reset() {
	*x = ControlIWant{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ControlIWant) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ControlIWant) ProtoMessage() {}

func (x *ControlIWant) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ControlIWant.ProtoReflect.Descriptor instead.
func (*ControlIWant) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{4}
}

func (x *ControlIWant) GetMessageIDs() []string {
	if x != nil {
		return x.MessageIDs
	}
	return nil
}

type ControlGraft struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
}

func (x *ControlGraft) Reset() {
	*x = ControlGraft{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ControlGraft) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ControlGraft) ProtoMessage() {}

func (x *ControlGraft) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ControlGraft.ProtoReflect.Descriptor instead.
func (*ControlGraft) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{5}
}

func (x *ControlGraft) GetBitmask() []byte {
	if x != nil {
		return x.Bitmask
	}
	return nil
}

type ControlPrune struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Bitmask []byte      `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
	Peers   []*PeerInfo `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"`
	Backoff uint64      `protobuf:"varint,3,opt,name=backoff,proto3" json:"backoff,omitempty"`
}

func (x *ControlPrune) Reset() {
	*x = ControlPrune{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ControlPrune) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ControlPrune) ProtoMessage() {}

func (x *ControlPrune) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ControlPrune.ProtoReflect.Descriptor instead.
func (*ControlPrune) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{6}
}

func (x *ControlPrune) GetBitmask() []byte {
	if x != nil {
		return x.Bitmask
	}
	return nil
}

func (x *ControlPrune) GetPeers() []*PeerInfo {
	if x != nil {
		return x.Peers
	}
	return nil
}

func (x *ControlPrune) GetBackoff() uint64 {
	if x != nil {
		return x.Backoff
	}
	return 0
}

type PeerInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	PeerID           []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"`
	SignedPeerRecord []byte `protobuf:"bytes,2,opt,name=signedPeerRecord,proto3,oneof" json:"signedPeerRecord,omitempty"`
}

func (x *PeerInfo) Reset() {
	*x = PeerInfo{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *PeerInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PeerInfo) ProtoMessage() {}

func (x *PeerInfo) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PeerInfo.ProtoReflect.Descriptor instead.
func (*PeerInfo) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{7}
}

func (x *PeerInfo) GetPeerID() []byte {
	if x != nil {
		return x.PeerID
	}
	return nil
}

func (x *PeerInfo) GetSignedPeerRecord() []byte {
	if x != nil {
		return x.SignedPeerRecord
	}
	return nil
}

type RPC_SubOpts struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Subscribe bool   `protobuf:"varint,1,opt,name=subscribe,proto3" json:"subscribe,omitempty"` // subscribe or unsubscribe
	Bitmask   []byte `protobuf:"bytes,2,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
}

func (x *RPC_SubOpts) Reset() {
	*x = RPC_SubOpts{}
	if protoimpl.UnsafeEnabled {
		mi := &file_rpc_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RPC_SubOpts) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RPC_SubOpts) ProtoMessage() {}

func (x *RPC_SubOpts) ProtoReflect() protoreflect.Message {
	mi := &file_rpc_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RPC_SubOpts.ProtoReflect.Descriptor instead.
func (*RPC_SubOpts) Descriptor() ([]byte, []int) {
	return file_rpc_proto_rawDescGZIP(), []int{0, 0}
}

func (x *RPC_SubOpts) GetSubscribe() bool {
	if x != nil {
		return x.Subscribe
	}
	return false
}

func (x *RPC_SubOpts) GetBitmask() []byte {
	if x != nil {
		return x.Bitmask
	}
	return nil
}

var File_rpc_proto protoreflect.FileDescriptor

var file_rpc_proto_rawDesc = []byte{
	0x0a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x62, 0x6c, 0x6f,
	0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x22, 0xf5, 0x01, 0x0a, 0x03, 0x52,
	0x50, 0x43, 0x12, 0x40, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
	0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x6f, 0x73,
	0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x50, 0x43, 0x2e, 0x53, 0x75,
	0x62, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x18,
	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73,
	0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x70,
	0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
	0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f,
	0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d,
	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a,
	0x41, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75,
	0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73,
	0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d,
	0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
	0x73, 0x6b, 0x22, 0x91, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12,
	0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x66, 0x72,
	0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
	0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f, 0x18,
	0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f, 0x12, 0x18, 0x0a, 0x07,
	0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62,
	0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
	0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
	0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28,
	0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
	0x6f, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x68, 0x61,
	0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73,
	0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
	0x49, 0x48, 0x61, 0x76, 0x65, 0x52, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, 0x12, 0x31, 0x0a, 0x05,
	0x69, 0x77, 0x61, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c,
	0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
	0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x69, 0x77, 0x61, 0x6e, 0x74, 0x12,
	0x31, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x66, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b,
	0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43,
	0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61,
	0x66, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28,
	0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70,
	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x52, 0x05,
	0x70, 0x72, 0x75, 0x6e, 0x65, 0x22, 0x48, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
	0x49, 0x48, 0x61, 0x76, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x12,
	0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20,
	0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
	0x2e, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x12,
	0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20,
	0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
	0x28, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12,
	0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
	0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x0c, 0x43, 0x6f, 0x6e,
	0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74,
	0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d,
	0x61, 0x73, 0x6b, 0x12, 0x2d, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03,
	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e,
	0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65,
	0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20,
	0x01, 0x28, 0x04, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x78, 0x0a, 0x08,
	0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72,
	0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72,
	0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50,
	0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48,
	0x01, 0x52, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63,
	0x6f, 0x72, 0x64, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49,
	0x44, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72,
	0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x42, 0x43, 0x5a, 0x41, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
	0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
	0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72,
	0x65, 0x70, 0x6f, 0x2f, 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2d, 0x62, 0x6c,
	0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x33,
}

var (
	file_rpc_proto_rawDescOnce sync.Once
	file_rpc_proto_rawDescData = file_rpc_proto_rawDesc
)

func file_rpc_proto_rawDescGZIP() []byte {
	file_rpc_proto_rawDescOnce.Do(func() {
		file_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpc_proto_rawDescData)
	})
	return file_rpc_proto_rawDescData
}

var file_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_rpc_proto_goTypes = []interface{}{
	(*RPC)(nil),            // 0: blossomsub.pb.RPC
	(*Message)(nil),        // 1: blossomsub.pb.Message
	(*ControlMessage)(nil), // 2: blossomsub.pb.ControlMessage
	(*ControlIHave)(nil),   // 3: blossomsub.pb.ControlIHave
	(*ControlIWant)(nil),   // 4: blossomsub.pb.ControlIWant
	(*ControlGraft)(nil),   // 5: blossomsub.pb.ControlGraft
	(*ControlPrune)(nil),   // 6: blossomsub.pb.ControlPrune
	(*PeerInfo)(nil),       // 7: blossomsub.pb.PeerInfo
	(*RPC_SubOpts)(nil),    // 8: blossomsub.pb.RPC.SubOpts
}
var file_rpc_proto_depIdxs = []int32{
	8, // 0: blossomsub.pb.RPC.subscriptions:type_name -> blossomsub.pb.RPC.SubOpts
	1, // 1: blossomsub.pb.RPC.publish:type_name -> blossomsub.pb.Message
	2, // 2: blossomsub.pb.RPC.control:type_name -> blossomsub.pb.ControlMessage
	3, // 3: blossomsub.pb.ControlMessage.ihave:type_name -> blossomsub.pb.ControlIHave
	4, // 4: blossomsub.pb.ControlMessage.iwant:type_name -> blossomsub.pb.ControlIWant
	5, // 5: blossomsub.pb.ControlMessage.graft:type_name -> blossomsub.pb.ControlGraft
	6, // 6: blossomsub.pb.ControlMessage.prune:type_name -> blossomsub.pb.ControlPrune
	7, // 7: blossomsub.pb.ControlPrune.peers:type_name -> blossomsub.pb.PeerInfo
	8, // [8:8] is the sub-list for method output_type
	8, // [8:8] is the sub-list for method input_type
	8, // [8:8] is the sub-list for extension type_name
	8, // [8:8] is the sub-list for extension extendee
	0, // [0:8] is the sub-list for field type_name
}

func init() { file_rpc_proto_init() }
func file_rpc_proto_init() {
	if File_rpc_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*RPC); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Message); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ControlMessage); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ControlIHave); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ControlIWant); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ControlGraft); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ControlPrune); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PeerInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_rpc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*RPC_SubOpts); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	file_rpc_proto_msgTypes[7].OneofWrappers = []interface{}{}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_rpc_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_rpc_proto_goTypes,
		DependencyIndexes: file_rpc_proto_depIdxs,
		MessageInfos:      file_rpc_proto_msgTypes,
	}.Build()
	File_rpc_proto = out.File
	file_rpc_proto_rawDesc = nil
	file_rpc_proto_goTypes = nil
	file_rpc_proto_depIdxs = nil
}
59
go-libp2p-blossomsub/pb/rpc.proto
Normal file
@@ -0,0 +1,59 @@
syntax = "proto3";

package blossomsub.pb;

option go_package = "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb";

message RPC {
	repeated SubOpts subscriptions = 1;
	repeated Message publish = 2;

	message SubOpts {
		bool subscribe = 1; // subscribe or unsubscribe
		bytes bitmask = 2;
	}

	ControlMessage control = 3;
}

message Message {
	bytes from = 1;
	bytes data = 2;
	bytes seqno = 3;
	bytes bitmask = 4;
	bytes signature = 5;
	bytes key = 6;
}

message ControlMessage {
	repeated ControlIHave ihave = 1;
	repeated ControlIWant iwant = 2;
	repeated ControlGraft graft = 3;
	repeated ControlPrune prune = 4;
}

message ControlIHave {
	bytes bitmask = 1;
	// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
	repeated string messageIDs = 2;
}

message ControlIWant {
	// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
	repeated string messageIDs = 1;
}

message ControlGraft {
	bytes bitmask = 1;
}

message ControlPrune {
	bytes bitmask = 1;
	repeated PeerInfo peers = 2;
	uint64 backoff = 3;
}

message PeerInfo {
	optional bytes peerID = 1;
	optional bytes signedPeerRecord = 2;
}
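To see how these messages compose on the wire, a hedged sketch: the field and type names below are assumed from standard protoc-gen-go output for the definitions above (the nested SubOpts becomes RPC_SubOpts, proto3 scalars become value-typed fields), not copied from the generated file.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

func main() {
	// Announce a subscription to the two-byte bitmask the tests in this
	// commit use; RPC_SubOpts is the assumed generated name for the
	// nested SubOpts message.
	rpc := &pb.RPC{
		Subscriptions: []*pb.RPC_SubOpts{
			{Subscribe: true, Bitmask: []byte{0x7e, 0x57}},
		},
	}

	buf, err := proto.Marshal(rpc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded RPC: %d bytes\n", len(buf))
}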
2146
go-libp2p-blossomsub/pb/trace.pb.go
Normal file
File diff suppressed because it is too large
152
go-libp2p-blossomsub/pb/trace.proto
Normal file
@ -0,0 +1,152 @@
syntax = "proto3";

package blossomsub.pb;

option go_package = "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb";

message TraceEvent {
	optional Type type = 1;
	optional bytes peerID = 2;
	optional int64 timestamp = 3;

	optional PublishMessage publishMessage = 4;
	optional RejectMessage rejectMessage = 5;
	optional DuplicateMessage duplicateMessage = 6;
	optional DeliverMessage deliverMessage = 7;
	optional AddPeer addPeer = 8;
	optional RemovePeer removePeer = 9;
	optional RecvRPC recvRPC = 10;
	optional SendRPC sendRPC = 11;
	optional DropRPC dropRPC = 12;
	optional Join join = 13;
	optional Leave leave = 14;
	optional Graft graft = 15;
	optional Prune prune = 16;

	enum Type {
		PUBLISH_MESSAGE = 0;
		REJECT_MESSAGE = 1;
		DUPLICATE_MESSAGE = 2;
		DELIVER_MESSAGE = 3;
		ADD_PEER = 4;
		REMOVE_PEER = 5;
		RECV_RPC = 6;
		SEND_RPC = 7;
		DROP_RPC = 8;
		JOIN = 9;
		LEAVE = 10;
		GRAFT = 11;
		PRUNE = 12;
	}

	message PublishMessage {
		optional bytes messageID = 1;
		optional bytes bitmask = 2;
	}

	message RejectMessage {
		optional bytes messageID = 1;
		optional bytes receivedFrom = 2;
		optional string reason = 3;
		optional bytes bitmask = 4;
	}

	message DuplicateMessage {
		optional bytes messageID = 1;
		optional bytes receivedFrom = 2;
		optional bytes bitmask = 3;
	}

	message DeliverMessage {
		optional bytes messageID = 1;
		optional bytes bitmask = 2;
		optional bytes receivedFrom = 3;
	}

	message AddPeer {
		optional bytes peerID = 1;
		optional string proto = 2;
	}

	message RemovePeer {
		optional bytes peerID = 1;
	}

	message RecvRPC {
		optional bytes receivedFrom = 1;
		optional RPCMeta meta = 2;
	}

	message SendRPC {
		optional bytes sendTo = 1;
		optional RPCMeta meta = 2;
	}

	message DropRPC {
		optional bytes sendTo = 1;
		optional RPCMeta meta = 2;
	}

	message Join {
		optional bytes bitmask = 1;
	}

	message Leave {
		optional bytes bitmask = 2;
	}

	message Graft {
		optional bytes peerID = 1;
		optional bytes bitmask = 2;
	}

	message Prune {
		optional bytes peerID = 1;
		optional bytes bitmask = 2;
	}

	message RPCMeta {
		repeated MessageMeta messages = 1;
		repeated SubMeta subscription = 2;
		optional ControlMeta control = 3;
	}

	message MessageMeta {
		optional bytes messageID = 1;
		optional bytes bitmask = 2;
	}

	message SubMeta {
		optional bool subscribe = 1;
		optional bytes bitmask = 2;
	}

	message ControlMeta {
		repeated ControlIHaveMeta ihave = 1;
		repeated ControlIWantMeta iwant = 2;
		repeated ControlGraftMeta graft = 3;
		repeated ControlPruneMeta prune = 4;
	}

	message ControlIHaveMeta {
		optional bytes bitmask = 1;
		repeated bytes messageIDs = 2;
	}

	message ControlIWantMeta {
		repeated bytes messageIDs = 1;
	}

	message ControlGraftMeta {
		optional bytes bitmask = 1;
	}

	message ControlPruneMeta {
		optional bytes bitmask = 1;
		repeated bytes peers = 2;
	}
}

message TraceEventBatch {
	repeated TraceEvent batch = 1;
}
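A hedged sketch of emitting one of these events; since trace.pb.go is suppressed in this diff, the Go names below (TraceEvent_Type, TraceEvent_JOIN, TraceEvent_Join, pointer-valued optional fields) are assumed from protoc-gen-go's nesting conventions, not read from the generated file.

package main

import (
	"time"

	"google.golang.org/protobuf/proto"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// joinEvent builds a JOIN trace event for a bitmask, mirroring the
// TraceEvent shape defined above.
func joinEvent(peerID, bitmask []byte) *pb.TraceEvent {
	return &pb.TraceEvent{
		Type:      pb.TraceEvent_JOIN.Enum(),
		PeerID:    peerID,
		Timestamp: proto.Int64(time.Now().UnixNano()),
		Join:      &pb.TraceEvent_Join{Bitmask: bitmask},
	}
}

func main() {
	_ = joinEvent([]byte("peer"), []byte{0x7e, 0x57})
}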
453
go-libp2p-blossomsub/peer_gater.go
Normal file
@ -0,0 +1,453 @@
package blossomsub

import (
	"context"
	"fmt"
	"math/rand"
	"sort"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	manet "github.com/multiformats/go-multiaddr/net"
)

var (
	DefaultPeerGaterRetainStats     = 6 * time.Hour
	DefaultPeerGaterQuiet           = time.Minute
	DefaultPeerGaterDuplicateWeight = 0.125
	DefaultPeerGaterIgnoreWeight    = 1.0
	DefaultPeerGaterRejectWeight    = 16.0
	DefaultPeerGaterThreshold       = 0.33
	DefaultPeerGaterGlobalDecay     = ScoreParameterDecay(2 * time.Minute)
	DefaultPeerGaterSourceDecay     = ScoreParameterDecay(time.Hour)
)

// PeerGaterParams groups together parameters that control the operation of the peer gater
type PeerGaterParams struct {
	// when the ratio of throttled/validated messages exceeds this threshold, the gater turns on
	Threshold float64
	// (geometric) decay parameters for gater counters
	GlobalDecay float64 // global counter decay
	SourceDecay float64 // per IP counter decay
	// decay interval
	DecayInterval time.Duration
	// counter zeroing threshold
	DecayToZero float64
	// how long to retain stats
	RetainStats time.Duration
	// quiet interval before turning off the gater; if there are no validation throttle events
	// for this interval, the gater turns off
	Quiet time.Duration
	// weight of duplicate message deliveries
	DuplicateWeight float64
	// weight of ignored messages
	IgnoreWeight float64
	// weight of rejected messages
	RejectWeight float64

	// priority bitmask delivery weights
	BitmaskDeliveryWeights map[string]float64
}

func (p *PeerGaterParams) validate() error {
	if p.Threshold <= 0 {
		return fmt.Errorf("invalid Threshold; must be > 0")
	}
	if p.GlobalDecay <= 0 || p.GlobalDecay >= 1 {
		return fmt.Errorf("invalid GlobalDecay; must be between 0 and 1")
	}
	if p.SourceDecay <= 0 || p.SourceDecay >= 1 {
		return fmt.Errorf("invalid SourceDecay; must be between 0 and 1")
	}
	if p.DecayInterval < time.Second {
		return fmt.Errorf("invalid DecayInterval; must be at least 1s")
	}
	if p.DecayToZero <= 0 || p.DecayToZero >= 1 {
		return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
	}
	// no need to check stats retention; a value of 0 means we don't retain stats
	if p.Quiet < time.Second {
		return fmt.Errorf("invalid Quiet interval; must be at least 1s")
	}
	if p.DuplicateWeight <= 0 {
		return fmt.Errorf("invalid DuplicateWeight; must be > 0")
	}
	if p.IgnoreWeight < 1 {
		return fmt.Errorf("invalid IgnoreWeight; must be >= 1")
	}
	if p.RejectWeight < 1 {
		return fmt.Errorf("invalid RejectWeight; must be >= 1")
	}

	return nil
}

// WithBitmaskDeliveryWeights is a fluid setter for the priority bitmask delivery weights
func (p *PeerGaterParams) WithBitmaskDeliveryWeights(w map[string]float64) *PeerGaterParams {
	p.BitmaskDeliveryWeights = w
	return p
}

// NewPeerGaterParams creates a new PeerGaterParams struct, using the specified threshold and decay
// parameters and default values for all other parameters.
func NewPeerGaterParams(threshold, globalDecay, sourceDecay float64) *PeerGaterParams {
	return &PeerGaterParams{
		Threshold:       threshold,
		GlobalDecay:     globalDecay,
		SourceDecay:     sourceDecay,
		DecayToZero:     DefaultDecayToZero,
		DecayInterval:   DefaultDecayInterval,
		RetainStats:     DefaultPeerGaterRetainStats,
		Quiet:           DefaultPeerGaterQuiet,
		DuplicateWeight: DefaultPeerGaterDuplicateWeight,
		IgnoreWeight:    DefaultPeerGaterIgnoreWeight,
		RejectWeight:    DefaultPeerGaterRejectWeight,
	}
}

// DefaultPeerGaterParams creates a new PeerGaterParams struct using default values
func DefaultPeerGaterParams() *PeerGaterParams {
	return NewPeerGaterParams(DefaultPeerGaterThreshold, DefaultPeerGaterGlobalDecay, DefaultPeerGaterSourceDecay)
}
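Since this file only shows the router type, here is a minimal wiring sketch for the gater. It assumes a NewBlossomSub constructor analogous to gossipsub's NewGossipSub (not among the files shown in this diff) and the module path implied by the pb go_package option; only WithPeerGater, DefaultPeerGaterParams, and WithBitmaskDeliveryWeights are taken from this file.

package main

import (
	"context"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	// Hypothetical priority bitmask whose deliveries count double.
	params := blossomsub.DefaultPeerGaterParams().
		WithBitmaskDeliveryWeights(map[string]float64{
			string([]byte{0x7e, 0x57}): 2.0,
		})

	// NewBlossomSub is assumed to mirror gossipsub's NewGossipSub; it is
	// not part of the files shown in this diff.
	ps, err := blossomsub.NewBlossomSub(ctx, h, blossomsub.WithPeerGater(params))
	if err != nil {
		panic(err)
	}
	_ = ps
}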
// the gater object.
type peerGater struct {
	sync.Mutex

	host host.Host

	// gater parameters
	params *PeerGaterParams

	// counters
	validate, throttle float64

	// time of last validation throttle
	lastThrottle time.Time

	// stats per peer.ID -- multiple peer IDs may share the same stats object if they are
	// colocated in the same IP
	peerStats map[peer.ID]*peerGaterStats
	// stats per IP
	ipStats map[string]*peerGaterStats

	// for unit tests
	getIP func(peer.ID) string
}

type peerGaterStats struct {
	// number of connected peer IDs mapped to this stat object
	connected int
	// stats expiration time -- only valid if connected = 0
	expire time.Time

	// counters
	deliver, duplicate, ignore, reject float64
}

// WithPeerGater is a BlossomSub router option that enables reactive validation queue
// management.
// The Gater is activated if the ratio of throttled/validated messages exceeds the specified
// threshold.
// Once active, the Gater probabilistically throttles peers _before_ they enter the validation
// queue, performing Random Early Drop.
// The throttle decision is randomized, with the probability of allowing messages to enter the
// validation queue derived from statistical observations of the performance of all peers
// sharing the gated peer's IP address.
// The Gater deactivates if there is no validation throttling occurring for the specified quiet
// interval.
func WithPeerGater(params *PeerGaterParams) Option {
	return func(ps *PubSub) error {
		gs, ok := ps.rt.(*BlossomSubRouter)
		if !ok {
			return fmt.Errorf("pubsub router is not BlossomSub")
		}

		err := params.validate()
		if err != nil {
			return err
		}

		gs.gate = newPeerGater(ps.ctx, ps.host, params)

		// hook the tracer
		if ps.tracer != nil {
			ps.tracer.raw = append(ps.tracer.raw, gs.gate)
		} else {
			ps.tracer = &pubsubTracer{
				raw:   []RawTracer{gs.gate},
				pid:   ps.host.ID(),
				idGen: ps.idGen,
			}
		}

		return nil
	}
}

func newPeerGater(ctx context.Context, host host.Host, params *PeerGaterParams) *peerGater {
	pg := &peerGater{
		params:    params,
		peerStats: make(map[peer.ID]*peerGaterStats),
		ipStats:   make(map[string]*peerGaterStats),
		host:      host,
	}
	go pg.background(ctx)
	return pg
}

func (pg *peerGater) background(ctx context.Context) {
	tick := time.NewTicker(pg.params.DecayInterval)

	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			pg.decayStats()
		case <-ctx.Done():
			return
		}
	}
}

func (pg *peerGater) decayStats() {
	pg.Lock()
	defer pg.Unlock()

	pg.validate *= pg.params.GlobalDecay
	if pg.validate < pg.params.DecayToZero {
		pg.validate = 0
	}

	pg.throttle *= pg.params.GlobalDecay
	if pg.throttle < pg.params.DecayToZero {
		pg.throttle = 0
	}

	now := time.Now()
	for ip, st := range pg.ipStats {
		if st.connected > 0 {
			st.deliver *= pg.params.SourceDecay
			if st.deliver < pg.params.DecayToZero {
				st.deliver = 0
			}

			st.duplicate *= pg.params.SourceDecay
			if st.duplicate < pg.params.DecayToZero {
				st.duplicate = 0
			}

			st.ignore *= pg.params.SourceDecay
			if st.ignore < pg.params.DecayToZero {
				st.ignore = 0
			}

			st.reject *= pg.params.SourceDecay
			if st.reject < pg.params.DecayToZero {
				st.reject = 0
			}
		} else if st.expire.Before(now) {
			delete(pg.ipStats, ip)
		}
	}
}

func (pg *peerGater) getPeerStats(p peer.ID) *peerGaterStats {
	st, ok := pg.peerStats[p]
	if !ok {
		st = pg.getIPStats(p)
		pg.peerStats[p] = st
	}
	return st
}

func (pg *peerGater) getIPStats(p peer.ID) *peerGaterStats {
	ip := pg.getPeerIP(p)
	st, ok := pg.ipStats[ip]
	if !ok {
		st = &peerGaterStats{}
		pg.ipStats[ip] = st
	}
	return st
}

func (pg *peerGater) getPeerIP(p peer.ID) string {
	if pg.getIP != nil {
		return pg.getIP(p)
	}

	connToIP := func(c network.Conn) string {
		remote := c.RemoteMultiaddr()
		ip, err := manet.ToIP(remote)
		if err != nil {
			log.Warnf("error determining IP for remote peer in %s: %s", remote, err)
			return "<unknown>"
		}
		return ip.String()
	}

	conns := pg.host.Network().ConnsToPeer(p)
	switch len(conns) {
	case 0:
		return "<unknown>"
	case 1:
		return connToIP(conns[0])
	default:
		// we have multiple connections -- order by number of streams and use the one with the
		// most streams; it's a nightmare to track multiple IPs per peer, so pick the best one.
		streams := make(map[string]int)
		for _, c := range conns {
			if c.Stat().Transient {
				// ignore transient
				continue
			}
			streams[c.ID()] = len(c.GetStreams())
		}
		sort.Slice(conns, func(i, j int) bool {
			return streams[conns[i].ID()] > streams[conns[j].ID()]
		})
		return connToIP(conns[0])
	}
}

// router interface
func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus {
	if pg == nil {
		return AcceptAll
	}

	pg.Lock()
	defer pg.Unlock()

	// check the quiet period; if the validation queue has not throttled for more than the Quiet
	// interval, we turn off the circuit breaker and accept.
	if time.Since(pg.lastThrottle) > pg.params.Quiet {
		return AcceptAll
	}

	// no throttle events -- or they have decayed; accept.
	if pg.throttle == 0 {
		return AcceptAll
	}

	// check the throttle/validate ratio; if it is below threshold we accept.
	if pg.validate != 0 && pg.throttle/pg.validate < pg.params.Threshold {
		return AcceptAll
	}

	st := pg.getPeerStats(p)

	// compute the goodput of the peer; the denominator is the weighted mix of message counters
	total := st.deliver + pg.params.DuplicateWeight*st.duplicate + pg.params.IgnoreWeight*st.ignore + pg.params.RejectWeight*st.reject
	if total == 0 {
		return AcceptAll
	}

	// we make a randomized decision based on the goodput of the peer.
	// the probability is biased by adding 1 to the delivery counter so that we don't unconditionally
	// throttle in the first negative event; it also ensures that a peer always has a chance of being
	// accepted; this is not a sinkhole/blacklist.
	threshold := (1 + st.deliver) / (1 + total)
	if rand.Float64() < threshold {
		return AcceptAll
	}

	log.Debugf("throttling peer %s with threshold %f", p, threshold)
	return AcceptControl
}
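To make the randomized admission concrete, a worked instance using only the defaults defined above: with DuplicateWeight 0.125, IgnoreWeight 1.0 and RejectWeight 16.0, a gated peer whose IP stats show deliver = 10, duplicate = 8, ignore = 2 and reject = 0.5 yields total = 10 + 0.125*8 + 1.0*2 + 16.0*0.5 = 21, so each of its messages enters the validation queue with probability (1 + 10) / (1 + 21) = 0.5. A peer with no deliveries at all still gets probability 1/(1 + total) rather than zero, which is the anti-sinkhole bias the comment describes.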
// -- RawTracer interface methods
var _ RawTracer = (*peerGater)(nil)

// tracer interface
func (pg *peerGater) AddPeer(p peer.ID, proto protocol.ID) {
	pg.Lock()
	defer pg.Unlock()

	st := pg.getPeerStats(p)
	st.connected++
}

func (pg *peerGater) RemovePeer(p peer.ID) {
	pg.Lock()
	defer pg.Unlock()

	st := pg.getPeerStats(p)
	st.connected--
	st.expire = time.Now().Add(pg.params.RetainStats)

	delete(pg.peerStats, p)
}

func (pg *peerGater) Join(bitmask []byte)             {}
func (pg *peerGater) Leave(bitmask []byte)            {}
func (pg *peerGater) Graft(p peer.ID, bitmask []byte) {}
func (pg *peerGater) Prune(p peer.ID, bitmask []byte) {}

func (pg *peerGater) ValidateMessage(msg *Message) {
	pg.Lock()
	defer pg.Unlock()

	pg.validate++
}

func (pg *peerGater) DeliverMessage(msg *Message) {
	pg.Lock()
	defer pg.Unlock()

	st := pg.getPeerStats(msg.ReceivedFrom)

	bitmask := msg.GetBitmask()
	weight := pg.params.BitmaskDeliveryWeights[string(bitmask)]

	if weight == 0 {
		weight = 1
	}

	st.deliver += weight
}

func (pg *peerGater) RejectMessage(msg *Message, reason string) {
	pg.Lock()
	defer pg.Unlock()

	switch reason {
	case RejectValidationQueueFull:
		fallthrough
	case RejectValidationThrottled:
		pg.lastThrottle = time.Now()
		pg.throttle++

	case RejectValidationIgnored:
		st := pg.getPeerStats(msg.ReceivedFrom)
		st.ignore++

	default:
		st := pg.getPeerStats(msg.ReceivedFrom)
		st.reject++
	}
}

func (pg *peerGater) DuplicateMessage(msg *Message) {
	pg.Lock()
	defer pg.Unlock()

	st := pg.getPeerStats(msg.ReceivedFrom)
	st.duplicate++
}

func (pg *peerGater) ThrottlePeer(p peer.ID) {}

func (pg *peerGater) RecvRPC(rpc *RPC) {}

func (pg *peerGater) SendRPC(rpc *RPC, p peer.ID) {}

func (pg *peerGater) DropRPC(rpc *RPC, p peer.ID) {}

func (pg *peerGater) UndeliverableMessage(msg *Message) {}
128
go-libp2p-blossomsub/peer_gater_test.go
Normal file
@ -0,0 +1,128 @@
package blossomsub

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestPeerGater(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	peerA := peer.ID("A")
	peerAip := "1.2.3.4"

	params := NewPeerGaterParams(.1, .9, .999)
	err := params.validate()
	if err != nil {
		t.Fatal(err)
	}

	pg := newPeerGater(ctx, nil, params)
	pg.getIP = func(p peer.ID) string {
		switch p {
		case peerA:
			return peerAip
		default:
			return "<wtf>"
		}
	}

	pg.AddPeer(peerA, "")

	status := pg.AcceptFrom(peerA)
	if status != AcceptAll {
		t.Fatal("expected AcceptAll")
	}

	msg := &Message{ReceivedFrom: peerA}

	pg.ValidateMessage(msg)
	status = pg.AcceptFrom(peerA)
	if status != AcceptAll {
		t.Fatal("expected AcceptAll")
	}

	pg.RejectMessage(msg, RejectValidationQueueFull)
	status = pg.AcceptFrom(peerA)
	if status != AcceptAll {
		t.Fatal("expected AcceptAll")
	}

	pg.RejectMessage(msg, RejectValidationThrottled)
	status = pg.AcceptFrom(peerA)
	if status != AcceptAll {
		t.Fatal("expected AcceptAll")
	}

	for i := 0; i < 100; i++ {
		pg.RejectMessage(msg, RejectValidationIgnored)
		pg.RejectMessage(msg, RejectValidationFailed)
	}

	accepted := false
	for i := 0; !accepted && i < 1000; i++ {
		status = pg.AcceptFrom(peerA)
		if status == AcceptControl {
			accepted = true
		}
	}
	if !accepted {
		t.Fatal("expected AcceptControl")
	}

	for i := 0; i < 100; i++ {
		pg.DeliverMessage(msg)
	}

	accepted = false
	for i := 0; !accepted && i < 1000; i++ {
		status = pg.AcceptFrom(peerA)
		if status == AcceptAll {
			accepted = true
		}
	}
	if !accepted {
		t.Fatal("expected to accept at least once")
	}

	for i := 0; i < 100; i++ {
		pg.decayStats()
	}

	status = pg.AcceptFrom(peerA)
	if status != AcceptAll {
		t.Fatal("expected AcceptAll")
	}

	pg.RemovePeer(peerA)
	pg.Lock()
	_, ok := pg.peerStats[peerA]
	pg.Unlock()
	if ok {
		t.Fatal("still have a stat record for peerA")
	}

	pg.Lock()
	_, ok = pg.ipStats[peerAip]
	pg.Unlock()
	if !ok {
		t.Fatal("expected to still have a stat record for peerA's ip")
	}

	pg.Lock()
	pg.ipStats[peerAip].expire = time.Now()
	pg.Unlock()

	time.Sleep(2 * time.Second)

	pg.Lock()
	_, ok = pg.ipStats["1.2.3.4"]
	pg.Unlock()
	if ok {
		t.Fatal("still have a stat record for peerA's ip")
	}
}
1422
go-libp2p-blossomsub/pubsub.go
Normal file
File diff suppressed because it is too large
49
go-libp2p-blossomsub/pubsub_test.go
Normal file
@ -0,0 +1,49 @@
package blossomsub

import (
	"context"
	"testing"
	"time"
)

// See https://source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/issues/426
func TestPubSubRemovesBlacklistedPeer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	hosts := getNetHosts(t, ctx, 2)

	bl := NewMapBlacklist()

	psubs0 := getPubsub(ctx, hosts[0])
	psubs1 := getPubsub(ctx, hosts[1], WithBlacklist(bl))
	connect(t, hosts[0], hosts[1])

	// Bad peer is blacklisted after it has connected.
	// Calling p.BlacklistPeer directly does the right thing but we should also clean
	// up the peer if it has been added to the blacklist by another means.
	bl.Add(hosts[0].ID())

	_, err := psubs0.Subscribe([]byte{0x7e, 0x57})
	if err != nil {
		t.Fatal(err)
	}

	sub1, err := psubs1.Subscribe([]byte{0x7e, 0x57})
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 100)

	psubs0.Publish([]byte{0x7e, 0x57}, []byte("message"))

	wctx, cancel2 := context.WithTimeout(ctx, 1*time.Second)
	defer cancel2()

	_, _ = sub1.Next(wctx)

	// Explicitly cancel context so PubSub cleans up peer channels.
	// Issue 426 reports a panic due to a peer channel being closed twice.
	cancel()
	time.Sleep(time.Millisecond * 100)
}
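For the non-test path, a minimal sketch of the same wiring. NewMapBlacklist and WithBlacklist are used exactly as the test above uses them; the module path and the NewRandomSub constructor (defined in randomsub.go below) are the only pieces taken from elsewhere in this commit.

package main

import (
	"context"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	// Keep a handle on the blacklist so peers can be banned after they
	// connect, which is exactly the path the test above exercises.
	bl := blossomsub.NewMapBlacklist()

	ps, err := blossomsub.NewRandomSub(ctx, h, 10, blossomsub.WithBlacklist(bl))
	if err != nil {
		panic(err)
	}
	_ = ps

	// later, from any ban logic: bl.Add(offendingPeerID)
}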
168
go-libp2p-blossomsub/randomsub.go
Normal file
@ -0,0 +1,168 @@
package blossomsub

import (
	"context"
	"math"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

const (
	RandomSubID = protocol.ID("/randomsub/1.0.0")
)

var (
	RandomSubD = 6
)

// NewRandomSub returns a new PubSub object using RandomSubRouter as the router.
func NewRandomSub(ctx context.Context, h host.Host, size int, opts ...Option) (*PubSub, error) {
	rt := &RandomSubRouter{
		size:  size,
		peers: make(map[peer.ID]protocol.ID),
	}
	return NewPubSub(ctx, h, rt, opts...)
}
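A minimal usage sketch, assuming Publish, Subscribe, and Subscription.Next keep go-libp2p-pubsub's shapes with byte-slice bitmasks in place of topic strings (consistent with the tests in this commit), and the module path implied by the pb go_package option.

package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	// size is the expected network size; it drives the sqrt(N) fanout
	// computed in Publish below.
	ps, err := blossomsub.NewRandomSub(ctx, h, 64)
	if err != nil {
		panic(err)
	}

	bitmask := []byte{0x7e, 0x57} // the bitmask used throughout the tests
	sub, err := ps.Subscribe(bitmask)
	if err != nil {
		panic(err)
	}

	if err := ps.Publish(bitmask, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx) // local delivery of our own publish
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %q\n", msg.Data)
}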
// RandomSubRouter is a router that implements a random propagation strategy.
// For each message it selects ceil(sqrt(network size)) peers, with a minimum of RandomSubD,
// and forwards the message to them.
type RandomSubRouter struct {
	p      *PubSub
	peers  map[peer.ID]protocol.ID
	size   int
	tracer *pubsubTracer
}

func (rs *RandomSubRouter) Protocols() []protocol.ID {
	return []protocol.ID{RandomSubID, FloodSubID}
}

func (rs *RandomSubRouter) Attach(p *PubSub) {
	rs.p = p
	rs.tracer = p.tracer
}

func (rs *RandomSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
	rs.tracer.AddPeer(p, proto)
	rs.peers[p] = proto
}

func (rs *RandomSubRouter) RemovePeer(p peer.ID) {
	rs.tracer.RemovePeer(p)
	delete(rs.peers, p)
}

func (rs *RandomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
	// check all peers in the bitmask
	tmap, ok := rs.p.bitmasks[string(bitmask)]
	if !ok {
		return false
	}

	fsPeers := 0
	rsPeers := 0

	// count floodsub and randomsub peers
	for p := range tmap {
		switch rs.peers[p] {
		case FloodSubID:
			fsPeers++
		case RandomSubID:
			rsPeers++
		}
	}

	if suggested == 0 {
		suggested = RandomSubD
	}

	if fsPeers+rsPeers >= suggested {
		return true
	}

	if rsPeers >= RandomSubD {
		return true
	}

	return false
}

func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus {
	return AcceptAll
}

func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {}

func (rs *RandomSubRouter) Publish(msg *Message) {
	from := msg.ReceivedFrom

	tosend := make(map[peer.ID]struct{})
	rspeers := make(map[peer.ID]struct{})
	src := peer.ID(msg.GetFrom())

	bitmask := msg.GetBitmask()
	tmap, ok := rs.p.bitmasks[string(bitmask)]
	if !ok {
		return
	}

	for p := range tmap {
		if p == from || p == src {
			continue
		}

		if rs.peers[p] == FloodSubID {
			tosend[p] = struct{}{}
		} else {
			rspeers[p] = struct{}{}
		}
	}

	if len(rspeers) > RandomSubD {
		target := RandomSubD
		sqrt := int(math.Ceil(math.Sqrt(float64(rs.size))))
		if sqrt > target {
			target = sqrt
		}
		if target > len(rspeers) {
			target = len(rspeers)
		}
		xpeers := peerMapToList(rspeers)
		shufflePeers(xpeers)
		xpeers = xpeers[:target]
		for _, p := range xpeers {
			tosend[p] = struct{}{}
		}
	} else {
		for p := range rspeers {
			tosend[p] = struct{}{}
		}
	}

	out := rpcWithMessages(msg.Message)
	for p := range tosend {
		mch, ok := rs.p.peers[p]
		if !ok {
			continue
		}

		select {
		case mch <- out:
			rs.tracer.SendRPC(out, p)
		default:
			log.Infof("dropping message to peer %s: queue full", p)
			rs.tracer.DropRPC(out, p)
		}
	}
}
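To ground the selection arithmetic in Publish: with size = 64 passed to NewRandomSub, ceil(sqrt(64)) = 8 exceeds RandomSubD = 6, so up to 8 randomly shuffled randomsub peers receive each message (floodsub peers always do); with size = 25, ceil(sqrt(25)) = 5 falls below the RandomSubD floor, so the target stays at 6. In both cases the target is capped at the number of candidate peers.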
func (rs *RandomSubRouter) Join(bitmask []byte) {
	rs.tracer.Join(bitmask)
}

func (rs *RandomSubRouter) Leave(bitmask []byte) {
	rs.tracer.Leave(bitmask)
}
192
go-libp2p-blossomsub/randomsub_test.go
Normal file
@ -0,0 +1,192 @@
package blossomsub

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
)

func getRandomsub(ctx context.Context, h host.Host, size int, opts ...Option) *PubSub {
	ps, err := NewRandomSub(ctx, h, size, opts...)
	if err != nil {
		panic(err)
	}
	return ps
}

func getRandomsubs(ctx context.Context, hs []host.Host, size int, opts ...Option) []*PubSub {
	var psubs []*PubSub
	for _, h := range hs {
		psubs = append(psubs, getRandomsub(ctx, h, size, opts...))
	}
	return psubs
}

func tryReceive(sub *Subscription) *Message {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	m, err := sub.Next(ctx)
	if err != nil {
		return nil
	} else {
		return m
	}
}

func TestRandomsubSmall(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 10)
	psubs := getRandomsubs(ctx, hosts, 10)

	connectAll(t, hosts)

	var subs []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
		if err != nil {
			t.Fatal(err)
		}
		subs = append(subs, sub)
	}

	time.Sleep(time.Second)

	count := 0
	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("message %d", i))
		psubs[i].Publish([]byte{0x7e, 0x57}, msg)

		for _, sub := range subs {
			if tryReceive(sub) != nil {
				count++
			}
		}
	}

	if count < 7*len(hosts) {
		t.Fatalf("received too few messages; expected at least %d but got %d", 7*len(hosts), count)
	}
}

func TestRandomsubBig(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 50)
	psubs := getRandomsubs(ctx, hosts, 50)

	connectSome(t, hosts, 12)

	var subs []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
		if err != nil {
			t.Fatal(err)
		}
		subs = append(subs, sub)
	}

	time.Sleep(time.Second)

	count := 0
	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("message %d", i))
		psubs[i].Publish([]byte{0x7e, 0x57}, msg)

		for _, sub := range subs {
			if tryReceive(sub) != nil {
				count++
			}
		}
	}

	if count < 7*len(hosts) {
		t.Fatalf("received too few messages; expected at least %d but got %d", 7*len(hosts), count)
	}
}

func TestRandomsubMixed(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 40)
	fsubs := getPubsubs(ctx, hosts[:10])
	rsubs := getRandomsubs(ctx, hosts[10:], 30)
	psubs := append(fsubs, rsubs...)

	connectSome(t, hosts, 12)

	var subs []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
		if err != nil {
			t.Fatal(err)
		}
		subs = append(subs, sub)
	}

	time.Sleep(time.Second)

	count := 0
	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("message %d", i))
		psubs[i].Publish([]byte{0x7e, 0x57}, msg)

		for _, sub := range subs {
			if tryReceive(sub) != nil {
				count++
			}
		}
	}

	if count < 7*len(hosts) {
		t.Fatalf("received too few messages; expected at least %d but got %d", 7*len(hosts), count)
	}
}

func TestRandomsubEnoughPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 40)
	fsubs := getPubsubs(ctx, hosts[:10])
	rsubs := getRandomsubs(ctx, hosts[10:], 30)
	psubs := append(fsubs, rsubs...)

	connectSome(t, hosts, 12)

	for _, ps := range psubs {
		_, err := ps.Subscribe([]byte{0x7e, 0x57})
		if err != nil {
			t.Fatal(err)
		}
	}

	time.Sleep(time.Second)

	res := make(chan bool, 1)
	rsubs[0].eval <- func() {
		rs := rsubs[0].rt.(*RandomSubRouter)
		res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 0)
	}

	enough := <-res
	if !enough {
		t.Fatal("expected enough peers")
	}

	rsubs[0].eval <- func() {
		rs := rsubs[0].rt.(*RandomSubRouter)
		res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 100)
	}

	enough = <-res
	if !enough {
		t.Fatal("expected enough peers")
	}
}
1081
go-libp2p-blossomsub/score.go
Normal file
File diff suppressed because it is too large
423
go-libp2p-blossomsub/score_params.go
Normal file
@ -0,0 +1,423 @@
package blossomsub

import (
	"fmt"
	"math"
	"net"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

type PeerScoreThresholds struct {
	// whether it is allowed to just set some params and not all of them.
	SkipAtomicValidation bool

	// GossipThreshold is the score threshold below which gossip propagation is suppressed;
	// should be negative.
	GossipThreshold float64

	// PublishThreshold is the score threshold below which we shouldn't publish when using flood
	// publishing (also applies to fanout and floodsub peers); should be negative and <= GossipThreshold.
	PublishThreshold float64

	// GraylistThreshold is the score threshold below which message processing is suppressed altogether,
	// implementing an effective gray list according to peer score; should be negative and <= PublishThreshold.
	GraylistThreshold float64

	// AcceptPXThreshold is the score threshold below which PX will be ignored; this should be positive
	// and limited to scores attainable by bootstrappers and other trusted nodes.
	AcceptPXThreshold float64

	// OpportunisticGraftThreshold is the median mesh score threshold before triggering opportunistic
	// grafting; this should have a small positive value.
	OpportunisticGraftThreshold float64
}

func (p *PeerScoreThresholds) validate() error {
	if !p.SkipAtomicValidation || p.PublishThreshold != 0 || p.GossipThreshold != 0 || p.GraylistThreshold != 0 {
		if p.GossipThreshold > 0 || isInvalidNumber(p.GossipThreshold) {
			return fmt.Errorf("invalid gossip threshold; it must be <= 0 and a valid number")
		}
		if p.PublishThreshold > 0 || p.PublishThreshold > p.GossipThreshold || isInvalidNumber(p.PublishThreshold) {
			return fmt.Errorf("invalid publish threshold; it must be <= 0 and <= gossip threshold and a valid number")
		}
		if p.GraylistThreshold > 0 || p.GraylistThreshold > p.PublishThreshold || isInvalidNumber(p.GraylistThreshold) {
			return fmt.Errorf("invalid graylist threshold; it must be <= 0 and <= publish threshold and a valid number")
		}
	}

	if !p.SkipAtomicValidation || p.AcceptPXThreshold != 0 {
		if p.AcceptPXThreshold < 0 || isInvalidNumber(p.AcceptPXThreshold) {
			return fmt.Errorf("invalid accept PX threshold; it must be >= 0 and a valid number")
		}
	}

	if !p.SkipAtomicValidation || p.OpportunisticGraftThreshold != 0 {
		if p.OpportunisticGraftThreshold < 0 || isInvalidNumber(p.OpportunisticGraftThreshold) {
			return fmt.Errorf("invalid opportunistic grafting threshold; it must be >= 0 and a valid number")
		}
	}

	return nil
}
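An illustrative construction; the magnitudes are made up for demonstration and merely respect the sign and ordering constraints enforced by validate() above. Real values must be tuned against the range of the deployment's score function.

package main

import (
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// Illustrative magnitudes only.
var exampleThresholds = &blossomsub.PeerScoreThresholds{
	GossipThreshold:             -500,  // <= 0: suppress gossip below this score
	PublishThreshold:            -1000, // <= GossipThreshold
	GraylistThreshold:           -2500, // <= PublishThreshold
	AcceptPXThreshold:           100,   // positive: reachable by bootstrappers only
	OpportunisticGraftThreshold: 5,     // small positive median-score trigger
}

func main() { _ = exampleThresholds }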
type PeerScoreParams struct {
	// whether it is allowed to just set some params and not all of them.
	SkipAtomicValidation bool

	// Score parameters per bitmask.
	Bitmasks map[string]*BitmaskScoreParams

	// Aggregate bitmask score cap; this limits the total contribution of bitmasks towards a positive
	// score. It must be positive (or 0 for no cap).
	BitmaskScoreCap float64

	// P5: Application-specific peer scoring
	AppSpecificScore  func(p peer.ID) float64
	AppSpecificWeight float64

	// P6: IP-colocation factor.
	// The parameter has an associated counter which counts the number of peers with the same IP.
	// If the number of peers in the same IP exceeds IPColocationFactorThreshold, then the value
	// is the square of the difference, ie (PeersInSameIP - IPColocationThreshold)^2.
	// If the number of peers in the same IP is less than the threshold, then the value is 0.
	// The weight of the parameter MUST be negative, unless you want to disable for testing.
	// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0
	// thus disabling the IP colocation penalty.
	IPColocationFactorWeight    float64
	IPColocationFactorThreshold int
	IPColocationFactorWhitelist []*net.IPNet

	// P7: behavioural pattern penalties.
	// This parameter has an associated counter which tracks misbehaviour as detected by the
	// router. The router currently applies penalties for the following behaviors:
	// - attempting to re-graft before the prune backoff time has elapsed.
	// - not following up in IWANT requests for messages advertised with IHAVE.
	//
	// The value of the parameter is the square of the counter over the threshold, which decays with
	// BehaviourPenaltyDecay.
	// The weight of the parameter MUST be negative (or zero to disable).
	BehaviourPenaltyWeight, BehaviourPenaltyThreshold, BehaviourPenaltyDecay float64

	// the decay interval for parameter counters.
	DecayInterval time.Duration

	// counter value below which it is considered 0.
	DecayToZero float64

	// time to remember counters for a disconnected peer.
	RetainScore time.Duration

	// time to remember a message delivery for. Defaults to the global TimeCacheDuration if 0.
	SeenMsgTTL time.Duration
}

type BitmaskScoreParams struct {
	// whether it is allowed to just set some params and not all of them.
	SkipAtomicValidation bool

	// The weight of the bitmask.
	BitmaskWeight float64

	// P1: time in the mesh
	// This is the time the peer has been grafted in the mesh.
	// The value of the parameter is the time/TimeInMeshQuantum, capped by TimeInMeshCap.
	// The weight of the parameter MUST be positive (or zero to disable).
	TimeInMeshWeight  float64
	TimeInMeshQuantum time.Duration
	TimeInMeshCap     float64

	// P2: first message deliveries
	// This is the number of message deliveries in the bitmask.
	// The value of the parameter is a counter, decaying with FirstMessageDeliveriesDecay, and capped
	// by FirstMessageDeliveriesCap.
	// The weight of the parameter MUST be positive (or zero to disable).
	FirstMessageDeliveriesWeight, FirstMessageDeliveriesDecay float64
	FirstMessageDeliveriesCap                                 float64

	// P3: mesh message deliveries
	// This is the number of message deliveries in the mesh, within the MeshMessageDeliveriesWindow of
	// message validation; deliveries during validation also count and are retroactively applied
	// when validation succeeds.
	// This window accounts for the minimum time before a hostile mesh peer trying to game the score
	// could replay back a valid message we just sent them.
	// It effectively tracks first and near-first deliveries, i.e., a message seen from a mesh peer
	// before we have forwarded it to them.
	// The parameter has an associated counter, decaying with MeshMessageDeliveriesDecay.
	// If the counter exceeds the threshold, its value is 0.
	// If the counter is below the MeshMessageDeliveriesThreshold, the value is the square of
	// the deficit, ie (MessageDeliveriesThreshold - counter)^2
	// The penalty is only activated after MeshMessageDeliveriesActivation time in the mesh.
	// The weight of the parameter MUST be negative (or zero to disable).
	MeshMessageDeliveriesWeight, MeshMessageDeliveriesDecay      float64
	MeshMessageDeliveriesCap, MeshMessageDeliveriesThreshold     float64
	MeshMessageDeliveriesWindow, MeshMessageDeliveriesActivation time.Duration

	// P3b: sticky mesh propagation failures
	// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active
	// mesh message delivery penalty.
	// The weight of the parameter MUST be negative (or zero to disable)
	MeshFailurePenaltyWeight, MeshFailurePenaltyDecay float64

	// P4: invalid messages
	// This is the number of invalid messages in the bitmask.
	// The value of the parameter is the square of the counter, decaying with
	// InvalidMessageDeliveriesDecay.
	// The weight of the parameter MUST be negative (or zero to disable).
	InvalidMessageDeliveriesWeight, InvalidMessageDeliveriesDecay float64
}
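An illustrative per-bitmask configuration; every magnitude below is an assumption for demonstration, chosen only to satisfy the sign and range rules documented above (ScoreParameterDecay is defined later in this file).

package main

import (
	"time"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// Illustrative parameters for one bitmask; these are not recommended values.
var exampleBitmaskParams = &blossomsub.BitmaskScoreParams{
	BitmaskWeight: 1,

	// P1: reward time spent in the mesh, capped at an hour's worth of credit.
	TimeInMeshWeight:  0.01,
	TimeInMeshQuantum: time.Second,
	TimeInMeshCap:     3600,

	// P2: reward first deliveries, decaying over roughly ten minutes.
	FirstMessageDeliveriesWeight: 1,
	FirstMessageDeliveriesDecay:  blossomsub.ScoreParameterDecay(10 * time.Minute),
	FirstMessageDeliveriesCap:    100,

	// P3: penalize mesh peers delivering too little (note the negative weight).
	MeshMessageDeliveriesWeight:     -1,
	MeshMessageDeliveriesDecay:      blossomsub.ScoreParameterDecay(10 * time.Minute),
	MeshMessageDeliveriesCap:        100,
	MeshMessageDeliveriesThreshold:  20,
	MeshMessageDeliveriesWindow:     10 * time.Millisecond,
	MeshMessageDeliveriesActivation: 30 * time.Second,

	// P3b and P4: sticky and invalid-message penalties (negative weights).
	MeshFailurePenaltyWeight:       -1,
	MeshFailurePenaltyDecay:        blossomsub.ScoreParameterDecay(10 * time.Minute),
	InvalidMessageDeliveriesWeight: -10,
	InvalidMessageDeliveriesDecay:  blossomsub.ScoreParameterDecay(time.Hour),
}

func main() { _ = exampleBitmaskParams }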
// peer score parameter validation
func (p *PeerScoreParams) validate() error {
	for bitmask, params := range p.Bitmasks {
		err := params.validate()
		if err != nil {
			return fmt.Errorf("invalid score parameters for bitmask %s: %w", bitmask, err)
		}
	}

	if !p.SkipAtomicValidation || p.BitmaskScoreCap != 0 {
		// check that the bitmask score is 0 or something positive
		if p.BitmaskScoreCap < 0 || isInvalidNumber(p.BitmaskScoreCap) {
			return fmt.Errorf("invalid bitmask score cap; must be positive (or 0 for no cap) and a valid number")
		}
	}

	// check that we have an app specific score; the weight can be anything (but expected positive)
	if p.AppSpecificScore == nil {
		if p.SkipAtomicValidation {
			p.AppSpecificScore = func(p peer.ID) float64 {
				return 0
			}
		} else {
			return fmt.Errorf("missing application specific score function")
		}
	}

	if !p.SkipAtomicValidation || p.IPColocationFactorWeight != 0 {
		// check the IP colocation factor
		if p.IPColocationFactorWeight > 0 || isInvalidNumber(p.IPColocationFactorWeight) {
			return fmt.Errorf("invalid IPColocationFactorWeight; must be negative (or 0 to disable) and a valid number")
		}
		if p.IPColocationFactorWeight != 0 && p.IPColocationFactorThreshold < 1 {
			return fmt.Errorf("invalid IPColocationFactorThreshold; must be at least 1")
		}
	}

	// check the behaviour penalty
	if !p.SkipAtomicValidation || p.BehaviourPenaltyWeight != 0 || p.BehaviourPenaltyThreshold != 0 {
		if p.BehaviourPenaltyWeight > 0 || isInvalidNumber(p.BehaviourPenaltyWeight) {
			return fmt.Errorf("invalid BehaviourPenaltyWeight; must be negative (or 0 to disable) and a valid number")
		}
		if p.BehaviourPenaltyWeight != 0 && (p.BehaviourPenaltyDecay <= 0 || p.BehaviourPenaltyDecay >= 1 || isInvalidNumber(p.BehaviourPenaltyDecay)) {
			return fmt.Errorf("invalid BehaviourPenaltyDecay; must be between 0 and 1")
		}
		if p.BehaviourPenaltyThreshold < 0 || isInvalidNumber(p.BehaviourPenaltyThreshold) {
			return fmt.Errorf("invalid BehaviourPenaltyThreshold; must be >= 0 and a valid number")
		}
	}

	// check the decay parameters
	if !p.SkipAtomicValidation || p.DecayInterval != 0 || p.DecayToZero != 0 {
		if p.DecayInterval < time.Second {
			return fmt.Errorf("invalid DecayInterval; must be at least 1s")
		}
		if p.DecayToZero <= 0 || p.DecayToZero >= 1 || isInvalidNumber(p.DecayToZero) {
			return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
		}
	}

	// no need to check the score retention; a value of 0 means that we don't retain scores
	return nil
}

func (p *BitmaskScoreParams) validate() error {
	// make sure we have a sane bitmask weight
	if p.BitmaskWeight < 0 || isInvalidNumber(p.BitmaskWeight) {
		return fmt.Errorf("invalid bitmask weight; must be >= 0 and a valid number")
	}

	// check P1
	if err := p.validateTimeInMeshParams(); err != nil {
		return err
	}

	// check P2
	if err := p.validateMessageDeliveryParams(); err != nil {
		return err
	}
	// check P3
	if err := p.validateMeshMessageDeliveryParams(); err != nil {
		return err
	}

	// check P3b
	if err := p.validateMessageFailurePenaltyParams(); err != nil {
		return err
	}

	// check P4
	if err := p.validateInvalidMessageDeliveryParams(); err != nil {
		return err
	}

	return nil
}
func (p *BitmaskScoreParams) validateTimeInMeshParams() error {
	if p.SkipAtomicValidation {
		// in non-atomic mode, parameters at their zero values are dismissed from validation.
		if p.TimeInMeshWeight == 0 && p.TimeInMeshQuantum == 0 && p.TimeInMeshCap == 0 {
			return nil
		}
	}

	// either atomic validation mode, or some parameters have been set a value,
	// hence, proceed with normal validation of all related parameters in this context.

	if p.TimeInMeshQuantum == 0 {
		return fmt.Errorf("invalid TimeInMeshQuantum; must be non zero")
	}
	if p.TimeInMeshWeight < 0 || isInvalidNumber(p.TimeInMeshWeight) {
		return fmt.Errorf("invalid TimeInMeshWeight; must be positive (or 0 to disable) and a valid number")
	}
	if p.TimeInMeshWeight != 0 && p.TimeInMeshQuantum <= 0 {
		return fmt.Errorf("invalid TimeInMeshQuantum; must be positive")
	}
	if p.TimeInMeshWeight != 0 && (p.TimeInMeshCap <= 0 || isInvalidNumber(p.TimeInMeshCap)) {
		return fmt.Errorf("invalid TimeInMeshCap; must be positive and a valid number")
	}

	return nil
}

func (p *BitmaskScoreParams) validateMessageDeliveryParams() error {
	if p.SkipAtomicValidation {
		// in non-atomic mode, parameters at their zero values are dismissed from validation.
		if p.FirstMessageDeliveriesWeight == 0 && p.FirstMessageDeliveriesCap == 0 && p.FirstMessageDeliveriesDecay == 0 {
			return nil
		}
	}

	// either atomic validation mode, or some parameters have been set a value,
	// hence, proceed with normal validation of all related parameters in this context.

	if p.FirstMessageDeliveriesWeight < 0 || isInvalidNumber(p.FirstMessageDeliveriesWeight) {
		return fmt.Errorf("invalid FirstMessageDeliveriesWeight; must be positive (or 0 to disable) and a valid number")
	}
	if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesDecay <= 0 || p.FirstMessageDeliveriesDecay >= 1 || isInvalidNumber(p.FirstMessageDeliveriesDecay)) {
		return fmt.Errorf("invalid FirstMessageDeliveriesDecay; must be between 0 and 1")
	}
	if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesCap <= 0 || isInvalidNumber(p.FirstMessageDeliveriesCap)) {
		return fmt.Errorf("invalid FirstMessageDeliveriesCap; must be positive and a valid number")
	}

	return nil
}

func (p *BitmaskScoreParams) validateMeshMessageDeliveryParams() error {
	if p.SkipAtomicValidation {
		// in non-atomic mode, parameters at their zero values are dismissed from validation.
		if p.MeshMessageDeliveriesWeight == 0 &&
			p.MeshMessageDeliveriesCap == 0 &&
			p.MeshMessageDeliveriesDecay == 0 &&
			p.MeshMessageDeliveriesThreshold == 0 &&
			p.MeshMessageDeliveriesWindow == 0 &&
			p.MeshMessageDeliveriesActivation == 0 {
			return nil
		}
	}

	// either atomic validation mode, or some parameters have been set a value,
	// hence, proceed with normal validation of all related parameters in this context.

	if p.MeshMessageDeliveriesWeight > 0 || isInvalidNumber(p.MeshMessageDeliveriesWeight) {
		return fmt.Errorf("invalid MeshMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number")
	}
	if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesDecay <= 0 || p.MeshMessageDeliveriesDecay >= 1 || isInvalidNumber(p.MeshMessageDeliveriesDecay)) {
		return fmt.Errorf("invalid MeshMessageDeliveriesDecay; must be between 0 and 1")
	}
	if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesCap <= 0 || isInvalidNumber(p.MeshMessageDeliveriesCap)) {
		return fmt.Errorf("invalid MeshMessageDeliveriesCap; must be positive and a valid number")
	}
	if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesThreshold <= 0 || isInvalidNumber(p.MeshMessageDeliveriesThreshold)) {
		return fmt.Errorf("invalid MeshMessageDeliveriesThreshold; must be positive and a valid number")
	}
	if p.MeshMessageDeliveriesWindow < 0 {
		return fmt.Errorf("invalid MeshMessageDeliveriesWindow; must be non-negative")
	}
	if p.MeshMessageDeliveriesWeight != 0 && p.MeshMessageDeliveriesActivation < time.Second {
		return fmt.Errorf("invalid MeshMessageDeliveriesActivation; must be at least 1s")
	}

	return nil
}

func (p *BitmaskScoreParams) validateMessageFailurePenaltyParams() error {
	if p.SkipAtomicValidation {
		// in selective mode, parameters at their zero values are dismissed from validation.
		if p.MeshFailurePenaltyDecay == 0 && p.MeshFailurePenaltyWeight == 0 {
			return nil
		}
	}

	// either atomic validation mode, or some parameters have been set a value,
	// hence, proceed with normal validation of all related parameters in this context.

	if p.MeshFailurePenaltyWeight > 0 || isInvalidNumber(p.MeshFailurePenaltyWeight) {
		return fmt.Errorf("invalid MeshFailurePenaltyWeight; must be negative (or 0 to disable) and a valid number")
	}
	if p.MeshFailurePenaltyWeight != 0 && (isInvalidNumber(p.MeshFailurePenaltyDecay) || p.MeshFailurePenaltyDecay <= 0 || p.MeshFailurePenaltyDecay >= 1) {
		return fmt.Errorf("invalid MeshFailurePenaltyDecay; must be between 0 and 1")
	}

	return nil
}

func (p *BitmaskScoreParams) validateInvalidMessageDeliveryParams() error {
	if p.SkipAtomicValidation {
		// in selective mode, parameters at their zero values are dismissed from validation.
		if p.InvalidMessageDeliveriesDecay == 0 && p.InvalidMessageDeliveriesWeight == 0 {
			return nil
		}
	}

	// either atomic validation mode, or some parameters have been set a value,
	// hence, proceed with normal validation of all related parameters in this context.

	if p.InvalidMessageDeliveriesWeight > 0 || isInvalidNumber(p.InvalidMessageDeliveriesWeight) {
		return fmt.Errorf("invalid InvalidMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number")
	}
	if p.InvalidMessageDeliveriesDecay <= 0 || p.InvalidMessageDeliveriesDecay >= 1 || isInvalidNumber(p.InvalidMessageDeliveriesDecay) {
		return fmt.Errorf("invalid InvalidMessageDeliveriesDecay; must be between 0 and 1")
	}

	return nil
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
DefaultDecayInterval = time.Second
|
||||||
|
DefaultDecayToZero = 0.01
|
||||||
|
)
|
||||||
|
|
||||||
|
// ScoreParameterDecay computes the decay factor for a parameter, assuming the DecayInterval is 1s
|
||||||
|
// and that the value decays to zero if it drops below 0.01
|
||||||
|
func ScoreParameterDecay(decay time.Duration) float64 {
|
||||||
|
return ScoreParameterDecayWithBase(decay, DefaultDecayInterval, DefaultDecayToZero)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScoreParameterDecayWithBase computes the decay factor for a parameter using base as the DecayInterval
|
||||||
|
func ScoreParameterDecayWithBase(decay time.Duration, base time.Duration, decayToZero float64) float64 {
|
||||||
|
// the decay is linear, so after n ticks the value is factor^n
|
||||||
|
// so factor^n = decayToZero => factor = decayToZero^(1/n)
|
||||||
|
ticks := float64(decay / base)
|
||||||
|
return math.Pow(decayToZero, 1/ticks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// checks whether the provided floating-point number is `Not a Number`
|
||||||
|
// or an infinite number.
|
||||||
|
func isInvalidNumber(num float64) bool {
|
||||||
|
return math.IsNaN(num) || math.IsInf(num, 0)
|
||||||
|
}
|
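As a worked example of the decay computation above (an illustrative sketch, not part of this commit): with the default 1s interval and decay-to-zero of 0.01, decaying over one hour means n = 3600 ticks, so the per-tick factor is 0.01^(1/3600):

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// factor^n = decayToZero => factor = decayToZero^(1/n)
	ticks := float64(time.Hour / time.Second) // 3600 one-second ticks
	factor := math.Pow(0.01, 1/ticks)
	fmt.Println(factor) // prints 0.9987216039048303, matching TestScoreParameterDecay below
}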
739
go-libp2p-blossomsub/score_params_test.go
Normal file
@ -0,0 +1,739 @@
package blossomsub

import (
	"math"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestPeerScoreThreshold_AtomicValidation(t *testing.T) {
	testPeerScoreThresholdsValidation(t, false)
}

func TestPeerScoreThreshold_SkipAtomicValidation(t *testing.T) {
	testPeerScoreThresholdsValidation(t, true)
}

func testPeerScoreThresholdsValidation(t *testing.T, skipAtomicValidation bool) {
	if (&PeerScoreThresholds{
		SkipAtomicValidation: skipAtomicValidation,
		GossipThreshold:      1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation: skipAtomicValidation,
		PublishThreshold:     1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&PeerScoreThresholds{
		SkipAtomicValidation: skipAtomicValidation,
		GossipThreshold:      -1,
		PublishThreshold:     0,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation: skipAtomicValidation,
		GossipThreshold:      -1,
		PublishThreshold:     -2,
		GraylistThreshold:    0,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation: skipAtomicValidation,
		AcceptPXThreshold:    -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		OpportunisticGraftThreshold: -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             -1,
		PublishThreshold:            -2,
		GraylistThreshold:           -3,
		AcceptPXThreshold:           1,
		OpportunisticGraftThreshold: 2}).validate() != nil {
		t.Fatal("expected validation success")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             math.Inf(-1),
		PublishThreshold:            -2,
		GraylistThreshold:           -3,
		AcceptPXThreshold:           1,
		OpportunisticGraftThreshold: 2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             -1,
		PublishThreshold:            math.Inf(-1),
		GraylistThreshold:           -3,
		AcceptPXThreshold:           1,
		OpportunisticGraftThreshold: 2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             -1,
		PublishThreshold:            -2,
		GraylistThreshold:           math.Inf(-1),
		AcceptPXThreshold:           1,
		OpportunisticGraftThreshold: 2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             -1,
		PublishThreshold:            -2,
		GraylistThreshold:           -3,
		AcceptPXThreshold:           math.NaN(),
		OpportunisticGraftThreshold: 2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreThresholds{
		SkipAtomicValidation:        skipAtomicValidation,
		GossipThreshold:             -1,
		PublishThreshold:            -2,
		GraylistThreshold:           -3,
		AcceptPXThreshold:           1,
		OpportunisticGraftThreshold: math.Inf(0),
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
}

func TestBitmaskScoreParamsValidation_InvalidParams_AtomicValidation(t *testing.T) {
	testBitmaskScoreParamsValidationWithInvalidParameters(t, false)
}

func TestBitmaskScoreParamsValidation_InvalidParams_SkipAtomicValidation(t *testing.T) {
	testBitmaskScoreParamsValidationWithInvalidParameters(t, true)
}

func testBitmaskScoreParamsValidationWithInvalidParameters(t *testing.T, skipAtomicValidation bool) {

	if skipAtomicValidation {
		if (&BitmaskScoreParams{
			SkipAtomicValidation: true}).validate() != nil {
			t.Fatal("expected validation success")
		}
	} else {
		if (&BitmaskScoreParams{}).validate() == nil {
			t.Fatal("expected validation failure")
		}
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation: skipAtomicValidation,
		BitmaskWeight:        -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation: skipAtomicValidation,
		TimeInMeshWeight:     -1,
		TimeInMeshQuantum:    time.Second,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation: skipAtomicValidation,
		TimeInMeshWeight:     1,
		TimeInMeshQuantum:    -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation: skipAtomicValidation,
		TimeInMeshWeight:     1,
		TimeInMeshQuantum:    time.Second,
		TimeInMeshCap:        -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation:         skipAtomicValidation,
		TimeInMeshQuantum:            time.Second,
		FirstMessageDeliveriesWeight: -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:         skipAtomicValidation,
		TimeInMeshQuantum:            time.Second,
		FirstMessageDeliveriesWeight: 1,
		FirstMessageDeliveriesDecay:  -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:         skipAtomicValidation,
		TimeInMeshQuantum:            time.Second,
		FirstMessageDeliveriesWeight: 1,
		FirstMessageDeliveriesDecay:  2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:         skipAtomicValidation,
		TimeInMeshQuantum:            time.Second,
		FirstMessageDeliveriesWeight: 1,
		FirstMessageDeliveriesDecay:  .5,
		FirstMessageDeliveriesCap:    -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		TimeInMeshQuantum:           time.Second,
		MeshMessageDeliveriesWeight: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		TimeInMeshQuantum:           time.Second,
		MeshMessageDeliveriesWeight: -1,
		MeshMessageDeliveriesDecay:  -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		TimeInMeshQuantum:           time.Second,
		MeshMessageDeliveriesWeight: -1,
		MeshMessageDeliveriesDecay:  2}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		TimeInMeshQuantum:           time.Second,
		MeshMessageDeliveriesWeight: -1,
		MeshMessageDeliveriesDecay:  .5,
		MeshMessageDeliveriesCap:    -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:           skipAtomicValidation,
		TimeInMeshQuantum:              time.Second,
		MeshMessageDeliveriesWeight:    -1,
		MeshMessageDeliveriesDecay:     .5,
		MeshMessageDeliveriesCap:       5,
		MeshMessageDeliveriesThreshold: -3,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:           skipAtomicValidation,
		TimeInMeshQuantum:              time.Second,
		MeshMessageDeliveriesWeight:    -1,
		MeshMessageDeliveriesDecay:     .5,
		MeshMessageDeliveriesCap:       5,
		MeshMessageDeliveriesThreshold: 3,
		MeshMessageDeliveriesWindow:    -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:            skipAtomicValidation,
		TimeInMeshQuantum:               time.Second,
		MeshMessageDeliveriesWeight:     -1,
		MeshMessageDeliveriesDecay:      .5,
		MeshMessageDeliveriesCap:        5,
		MeshMessageDeliveriesThreshold:  3,
		MeshMessageDeliveriesWindow:     time.Millisecond,
		MeshMessageDeliveriesActivation: time.Millisecond}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation:     skipAtomicValidation,
		TimeInMeshQuantum:        time.Second,
		MeshFailurePenaltyWeight: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:     skipAtomicValidation,
		TimeInMeshQuantum:        time.Second,
		MeshFailurePenaltyWeight: -1,
		MeshFailurePenaltyDecay:  -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:     skipAtomicValidation,
		TimeInMeshQuantum:        time.Second,
		MeshFailurePenaltyWeight: -1,
		MeshFailurePenaltyDecay:  2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if (&BitmaskScoreParams{
		SkipAtomicValidation:           skipAtomicValidation,
		TimeInMeshQuantum:              time.Second,
		InvalidMessageDeliveriesWeight: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:           skipAtomicValidation,
		TimeInMeshQuantum:              time.Second,
		InvalidMessageDeliveriesWeight: -1,
		InvalidMessageDeliveriesDecay:  -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&BitmaskScoreParams{
		SkipAtomicValidation:           skipAtomicValidation,
		TimeInMeshQuantum:              time.Second,
		InvalidMessageDeliveriesWeight: -1,
		InvalidMessageDeliveriesDecay:  2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
}

func TestBitmaskScoreParamsValidation_ValidParams_AtomicValidation(t *testing.T) {
	// Don't use these params in production!
	if (&BitmaskScoreParams{
		SkipAtomicValidation:            false,
		BitmaskWeight:                   1,
		TimeInMeshWeight:                0.01,
		TimeInMeshQuantum:               time.Second,
		TimeInMeshCap:                   10,
		FirstMessageDeliveriesWeight:    1,
		FirstMessageDeliveriesDecay:     0.5,
		FirstMessageDeliveriesCap:       10,
		MeshMessageDeliveriesWeight:     -1,
		MeshMessageDeliveriesDecay:      0.5,
		MeshMessageDeliveriesCap:        10,
		MeshMessageDeliveriesThreshold:  5,
		MeshMessageDeliveriesWindow:     time.Millisecond,
		MeshMessageDeliveriesActivation: time.Second,
		MeshFailurePenaltyWeight:        -1,
		MeshFailurePenaltyDecay:         0.5,
		InvalidMessageDeliveriesWeight:  -1,
		InvalidMessageDeliveriesDecay:   0.5,
	}).validate() != nil {
		t.Fatal("expected validation success")
	}
}

func TestBitmaskScoreParamsValidation_NonAtomicValidation(t *testing.T) {
	// Don't use these params in production!
	// In non-atomic (selective) validation mode, the subset of parameters passes
	// validation if the individual parameter values pass validation.
	p := &BitmaskScoreParams{}
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.SkipAtomicValidation = true
	})
	// including bitmask weight.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.BitmaskWeight = 1
	})
	// including time in mesh parameters.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.TimeInMeshWeight = 0.01
		params.TimeInMeshQuantum = time.Second
		params.TimeInMeshCap = 10
	})
	// including first message delivery parameters.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.FirstMessageDeliveriesWeight = 1
		params.FirstMessageDeliveriesDecay = 0.5
		params.FirstMessageDeliveriesCap = 10
	})
	// including mesh message delivery parameters.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.MeshMessageDeliveriesWeight = -1
		params.MeshMessageDeliveriesDecay = 0.5
		params.MeshMessageDeliveriesCap = 10
		params.MeshMessageDeliveriesThreshold = 5
		params.MeshMessageDeliveriesWindow = time.Millisecond
		params.MeshMessageDeliveriesActivation = time.Second
	})
	// including mesh failure penalty parameters.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.MeshFailurePenaltyWeight = -1
		params.MeshFailurePenaltyDecay = 0.5
	})
	// including invalid message delivery parameters.
	setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) {
		params.InvalidMessageDeliveriesWeight = -1
		params.InvalidMessageDeliveriesDecay = 0.5
	})
}

func TestPeerScoreParamsValidation_InvalidParams_AtomicValidation(t *testing.T) {
	testPeerScoreParamsValidationWithInvalidParams(t, false)
}

func TestPeerScoreParamsValidation_InvalidParams_SkipAtomicValidation(t *testing.T) {
	testPeerScoreParamsValidationWithInvalidParams(t, true)
}

func testPeerScoreParamsValidationWithInvalidParams(t *testing.T, skipAtomicValidation bool) {
	appScore := func(peer.ID) float64 { return 0 }

	if (&PeerScoreParams{
		SkipAtomicValidation: skipAtomicValidation,
		BitmaskScoreCap:      -1,
		AppSpecificScore:     appScore,
		DecayInterval:        time.Second,
		DecayToZero:          0.01,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	if skipAtomicValidation {
		if (&PeerScoreParams{
			SkipAtomicValidation: skipAtomicValidation,
			BitmaskScoreCap:      1,
			DecayInterval:        time.Second,
			DecayToZero:          0.01,
		}).validate() != nil {
			t.Fatal("expected validation success")
		}
	} else {
		if (&PeerScoreParams{
			SkipAtomicValidation: skipAtomicValidation,
			BitmaskScoreCap:      1,
			DecayInterval:        time.Second,
			DecayToZero:          0.01,
		}).validate() == nil {
			t.Fatal("expected validation error")
		}
	}

	if (&PeerScoreParams{
		SkipAtomicValidation:     skipAtomicValidation,
		BitmaskScoreCap:          1,
		AppSpecificScore:         appScore,
		DecayInterval:            time.Second,
		DecayToZero:              0.01,
		IPColocationFactorWeight: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: -1}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Millisecond,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 -1,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 2,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:   skipAtomicValidation,
		AppSpecificScore:       appScore,
		DecayInterval:          time.Second,
		DecayToZero:            0.01,
		BehaviourPenaltyWeight: 1}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:   skipAtomicValidation,
		AppSpecificScore:       appScore,
		DecayInterval:          time.Second,
		DecayToZero:            0.01,
		BehaviourPenaltyWeight: -1,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}
	if (&PeerScoreParams{
		SkipAtomicValidation:   skipAtomicValidation,
		AppSpecificScore:       appScore,
		DecayInterval:          time.Second,
		DecayToZero:            0.01,
		BehaviourPenaltyWeight: -1,
		BehaviourPenaltyDecay:  2,
	}).validate() == nil {
		t.Fatal("expected validation error")
	}

	// Checks the bitmask parameters for invalid values such as infinite and
	// NaN numbers.
	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
		Bitmasks: map[string]*BitmaskScoreParams{
			"test": {
				BitmaskWeight:                   math.Inf(0),
				TimeInMeshWeight:                math.NaN(),
				TimeInMeshQuantum:               time.Second,
				TimeInMeshCap:                   10,
				FirstMessageDeliveriesWeight:    math.Inf(1),
				FirstMessageDeliveriesDecay:     0.5,
				FirstMessageDeliveriesCap:       10,
				MeshMessageDeliveriesWeight:     math.Inf(-1),
				MeshMessageDeliveriesDecay:      math.NaN(),
				MeshMessageDeliveriesCap:        math.Inf(0),
				MeshMessageDeliveriesThreshold:  5,
				MeshMessageDeliveriesWindow:     time.Millisecond,
				MeshMessageDeliveriesActivation: time.Second,
				MeshFailurePenaltyWeight:        -1,
				MeshFailurePenaltyDecay:         math.NaN(),
				InvalidMessageDeliveriesWeight:  math.Inf(0),
				InvalidMessageDeliveriesDecay:   math.NaN(),
			},
		},
	}).validate() == nil {
		t.Fatal("expected validation failure")
	}

	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 math.Inf(0),
		IPColocationFactorWeight:    math.Inf(-1),
		IPColocationFactorThreshold: 1,
		BehaviourPenaltyWeight:      math.Inf(0),
		BehaviourPenaltyDecay:       math.NaN(),
	}).validate() == nil {
		t.Fatal("expected validation failure")
	}

	if (&PeerScoreParams{
		SkipAtomicValidation:        skipAtomicValidation,
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
		Bitmasks: map[string]*BitmaskScoreParams{
			"test": {
				BitmaskWeight:                   -1,
				TimeInMeshWeight:                0.01,
				TimeInMeshQuantum:               time.Second,
				TimeInMeshCap:                   10,
				FirstMessageDeliveriesWeight:    1,
				FirstMessageDeliveriesDecay:     0.5,
				FirstMessageDeliveriesCap:       10,
				MeshMessageDeliveriesWeight:     -1,
				MeshMessageDeliveriesDecay:      0.5,
				MeshMessageDeliveriesCap:        10,
				MeshMessageDeliveriesThreshold:  5,
				MeshMessageDeliveriesWindow:     time.Millisecond,
				MeshMessageDeliveriesActivation: time.Second,
				MeshFailurePenaltyWeight:        -1,
				MeshFailurePenaltyDecay:         0.5,
				InvalidMessageDeliveriesWeight:  -1,
				InvalidMessageDeliveriesDecay:   0.5,
			},
		},
	}).validate() == nil {
		t.Fatal("expected validation failure")
	}
}

func TestPeerScoreParamsValidation_ValidParams_AtomicValidation(t *testing.T) {
	appScore := func(peer.ID) float64 { return 0 }

	// don't use these params in production!
	if (&PeerScoreParams{
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
		BehaviourPenaltyWeight:      -1,
		BehaviourPenaltyDecay:       0.999,
	}).validate() != nil {
		t.Fatal("expected validation success")
	}

	if (&PeerScoreParams{
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
		BehaviourPenaltyWeight:      -1,
		BehaviourPenaltyDecay:       0.999,
	}).validate() != nil {
		t.Fatal("expected validation success")
	}

	if (&PeerScoreParams{
		BitmaskScoreCap:             1,
		AppSpecificScore:            appScore,
		DecayInterval:               time.Second,
		DecayToZero:                 0.01,
		IPColocationFactorWeight:    -1,
		IPColocationFactorThreshold: 1,
		Bitmasks: map[string]*BitmaskScoreParams{
			"test": {
				BitmaskWeight:                   1,
				TimeInMeshWeight:                0.01,
				TimeInMeshQuantum:               time.Second,
				TimeInMeshCap:                   10,
				FirstMessageDeliveriesWeight:    1,
				FirstMessageDeliveriesDecay:     0.5,
				FirstMessageDeliveriesCap:       10,
				MeshMessageDeliveriesWeight:     -1,
				MeshMessageDeliveriesDecay:      0.5,
				MeshMessageDeliveriesCap:        10,
				MeshMessageDeliveriesThreshold:  5,
				MeshMessageDeliveriesWindow:     time.Millisecond,
				MeshMessageDeliveriesActivation: time.Second,
				MeshFailurePenaltyWeight:        -1,
				MeshFailurePenaltyDecay:         0.5,
				InvalidMessageDeliveriesWeight:  -1,
				InvalidMessageDeliveriesDecay:   0.5,
			},
		},
	}).validate() != nil {
		t.Fatal("expected validation success")
	}
}

func TestPeerScoreParamsValidation_ValidParams_SkipAtomicValidation(t *testing.T) {
	appScore := func(peer.ID) float64 { return 0 }

	// don't use these params in production!
	p := &PeerScoreParams{}
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.SkipAtomicValidation = true
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.AppSpecificScore = appScore
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.DecayInterval = time.Second
		params.DecayToZero = 0.01
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.IPColocationFactorWeight = -1
		params.IPColocationFactorThreshold = 1
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.BehaviourPenaltyWeight = -1
		params.BehaviourPenaltyDecay = 0.999
	})

	p = &PeerScoreParams{SkipAtomicValidation: true, AppSpecificScore: appScore}
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.BitmaskScoreCap = 1
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.DecayInterval = time.Second
		params.DecayToZero = 0.01
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.IPColocationFactorWeight = -1
		params.IPColocationFactorThreshold = 1
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.BehaviourPenaltyWeight = -1
		params.BehaviourPenaltyDecay = 0.999
	})
	setParamAndValidate(t, p, func(params *PeerScoreParams) {
		params.Bitmasks = map[string]*BitmaskScoreParams{
			"test": {
				BitmaskWeight:                   1,
				TimeInMeshWeight:                0.01,
				TimeInMeshQuantum:               time.Second,
				TimeInMeshCap:                   10,
				FirstMessageDeliveriesWeight:    1,
				FirstMessageDeliveriesDecay:     0.5,
				FirstMessageDeliveriesCap:       10,
				MeshMessageDeliveriesWeight:     -1,
				MeshMessageDeliveriesDecay:      0.5,
				MeshMessageDeliveriesCap:        10,
				MeshMessageDeliveriesThreshold:  5,
				MeshMessageDeliveriesWindow:     time.Millisecond,
				MeshMessageDeliveriesActivation: time.Second,
				MeshFailurePenaltyWeight:        -1,
				MeshFailurePenaltyDecay:         0.5,
				InvalidMessageDeliveriesWeight:  -1,
				InvalidMessageDeliveriesDecay:   0.5,
			},
		}
	})
}

func TestScoreParameterDecay(t *testing.T) {
	decay1hr := ScoreParameterDecay(time.Hour)
	if decay1hr != .9987216039048303 {
		t.Fatalf("expected .9987216039048303, got %f", decay1hr)
	}
}

func setParamAndValidate(t *testing.T, params *PeerScoreParams, set func(*PeerScoreParams)) {
	set(params)
	if err := params.validate(); err != nil {
		t.Fatalf("expected validation success, got: %s", err)
	}
}

func setBitmaskParamAndValidate(t *testing.T, params *BitmaskScoreParams, set func(bitmask *BitmaskScoreParams)) {
	set(params)
	if err := params.validate(); err != nil {
		t.Fatalf("expected validation success, got: %s", err)
	}
}
1080
go-libp2p-blossomsub/score_test.go
Normal file
File diff suppressed because it is too large
138
go-libp2p-blossomsub/sign.go
Normal file
@ -0,0 +1,138 @@
package blossomsub

import (
	"fmt"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

// MessageSignaturePolicy describes if signatures are produced, expected, and/or verified.
type MessageSignaturePolicy uint8

// LaxSign and LaxNoSign are deprecated. In the future msgSigning and msgVerification can be unified.
const (
	// msgSigning is set when the locally produced messages must be signed
	msgSigning MessageSignaturePolicy = 1 << iota
	// msgVerification is set when external messages must be verified
	msgVerification
)

const (
	// StrictSign produces signatures and expects and verifies incoming signatures
	StrictSign = msgSigning | msgVerification
	// StrictNoSign does not produce signatures and drops and penalises incoming messages that carry one
	StrictNoSign = msgVerification
	// LaxSign produces signatures and validates incoming signatures iff one is present
	// Deprecated: it is recommended to either strictly enable, or strictly disable, signatures.
	LaxSign = msgSigning
	// LaxNoSign does not produce signatures and validates incoming signatures iff one is present
	// Deprecated: it is recommended to either strictly enable, or strictly disable, signatures.
	LaxNoSign = 0
)

// mustVerify is true when a message signature must be verified.
// If signatures are not expected, verification checks if the signature is absent.
func (policy MessageSignaturePolicy) mustVerify() bool {
	return policy&msgVerification != 0
}

// mustSign is true when messages should be signed, and incoming messages are expected to have a signature.
func (policy MessageSignaturePolicy) mustSign() bool {
	return policy&msgSigning != 0
}

const SignPrefix = "libp2p-pubsub:"

func verifyMessageSignature(m *pb.Message) error {
	pubk, err := messagePubKey(m)
	if err != nil {
		return err
	}

	xm := *m
	xm.Signature = nil
	xm.Key = nil
	bytes, err := xm.Marshal()
	if err != nil {
		return err
	}

	bytes = withSignPrefix(bytes)

	valid, err := pubk.Verify(bytes, m.Signature)
	if err != nil {
		return err
	}

	if !valid {
		return fmt.Errorf("invalid signature")
	}

	return nil
}

func messagePubKey(m *pb.Message) (crypto.PubKey, error) {
	var pubk crypto.PubKey

	pid, err := peer.IDFromBytes(m.From)
	if err != nil {
		return nil, err
	}

	if m.Key == nil {
		// no attached key, it must be extractable from the source ID
		pubk, err = pid.ExtractPublicKey()
		if err != nil {
			return nil, fmt.Errorf("cannot extract signing key: %s", err.Error())
		}
		if pubk == nil {
			return nil, fmt.Errorf("cannot extract signing key")
		}
	} else {
		pubk, err = crypto.UnmarshalPublicKey(m.Key)
		if err != nil {
			return nil, fmt.Errorf("cannot unmarshal signing key: %s", err.Error())
		}

		// verify that the source ID matches the attached key
		if !pid.MatchesPublicKey(pubk) {
			return nil, fmt.Errorf("bad signing key; source ID %s doesn't match key", pid)
		}
	}

	return pubk, nil
}

func signMessage(pid peer.ID, key crypto.PrivKey, m *pb.Message) error {
	bytes, err := m.Marshal()
	if err != nil {
		return err
	}

	bytes = withSignPrefix(bytes)

	sig, err := key.Sign(bytes)
	if err != nil {
		return err
	}

	m.Signature = sig

	pk, _ := pid.ExtractPublicKey()
	if pk == nil {
		pubk, err := crypto.MarshalPublicKey(key.GetPublic())
		if err != nil {
			return err
		}
		m.Key = pubk
	}

	return nil
}

func withSignPrefix(bytes []byte) []byte {
	return append([]byte(SignPrefix), bytes...)
}
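To make the flag composition above concrete, here is a standalone sketch (illustrative only; the real constants are the unexported ones in sign.go) showing which behaviours each policy enables:

package main

import "fmt"

// Illustrative re-statement of the policy bit layout above.
type MessageSignaturePolicy uint8

const (
	msgSigning MessageSignaturePolicy = 1 << iota
	msgVerification
)

func main() {
	strictSign := msgSigning | msgVerification
	strictNoSign := msgVerification
	fmt.Println(strictSign&msgSigning != 0)        // true: StrictSign produces signatures
	fmt.Println(strictNoSign&msgSigning != 0)      // false: StrictNoSign never signs
	fmt.Println(strictNoSign&msgVerification != 0) // true: both policies verify incoming messages
}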
43
go-libp2p-blossomsub/sign_test.go
Normal file
@ -0,0 +1,43 @@
package blossomsub

import (
	"testing"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

func TestSigning(t *testing.T) {
	privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
	if err != nil {
		t.Fatal(err)
	}
	testSignVerify(t, privk)

	privk, _, err = crypto.GenerateKeyPair(crypto.Ed25519, 0)
	if err != nil {
		t.Fatal(err)
	}
	testSignVerify(t, privk)
}

func testSignVerify(t *testing.T, privk crypto.PrivKey) {
	id, err := peer.IDFromPublicKey(privk.GetPublic())
	if err != nil {
		t.Fatal(err)
	}
	bitmask := []byte{0xf0, 0x00}
	m := pb.Message{
		Data:    []byte("abc"),
		Bitmask: bitmask,
		From:    []byte(id),
		Seqno:   []byte("123"),
	}
	if err := signMessage(id, privk, &m); err != nil {
		t.Fatal(err)
	}
	err = verifyMessageSignature(&m)
	if err != nil {
		t.Fatal(err)
	}
}
51
go-libp2p-blossomsub/subscription.go
Normal file
@ -0,0 +1,51 @@
package blossomsub

import (
	"context"
	"sync"
)

// Subscription handles the details of a particular Bitmask subscription.
// There may be many subscriptions for a given Bitmask.
type Subscription struct {
	bitmask  []byte
	ch       chan *Message
	cancelCh chan<- *Subscription
	ctx      context.Context
	err      error
	once     sync.Once
}

// Bitmask returns the bitmask []byte associated with the Subscription
func (sub *Subscription) Bitmask() []byte {
	return sub.bitmask
}

// Next returns the next message in our subscription
func (sub *Subscription) Next(ctx context.Context) (*Message, error) {
	select {
	case msg, ok := <-sub.ch:
		if !ok {
			return msg, sub.err
		}

		return msg, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// Cancel closes the subscription. If this is the last active subscription then pubsub will send an unsubscribe
// announcement to the network.
func (sub *Subscription) Cancel() {
	select {
	case sub.cancelCh <- sub:
	case <-sub.ctx.Done():
	}
}

func (sub *Subscription) close() {
	sub.once.Do(func() {
		close(sub.ch)
	})
}
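A minimal consumer sketch for the Subscription API above (illustrative, in-package; how sub is obtained from a join/subscribe call is elided): read messages until the context is cancelled, then release the subscription so pubsub can announce the unsubscribe.

// consume drains a Subscription until ctx is cancelled or the subscription closes.
func consume(ctx context.Context, sub *Subscription) error {
	defer sub.Cancel()
	for {
		msg, err := sub.Next(ctx)
		if err != nil {
			// ctx.Err() on cancellation, or sub.err once the channel is closed
			return err
		}
		_ = msg // process the message here
	}
}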
125
go-libp2p-blossomsub/subscription_filter.go
Normal file
@ -0,0 +1,125 @@
package blossomsub

import (
	"errors"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/peer"
)

// ErrTooManySubscriptions may be returned by a SubscriptionFilter to signal that there are too many
// subscriptions to process.
var ErrTooManySubscriptions = errors.New("too many subscriptions")

// SubscriptionFilter is a function that tells us whether we are interested in allowing and tracking
// subscriptions for a given bitmask.
//
// The filter is consulted whenever a subscription notification is received from another peer; if the
// filter returns false, then the notification is ignored.
//
// The filter is also consulted when joining bitmasks; if the filter returns false, then the Join
// operation will result in an error.
type SubscriptionFilter interface {
	// CanSubscribe returns true if the bitmask is of interest and we can subscribe to it
	CanSubscribe(bitmask []byte) bool

	// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
	// It should filter only the subscriptions of interest and may return an error if (for instance)
	// there are too many subscriptions.
	FilterIncomingSubscriptions(peer.ID, []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error)
}

// WithSubscriptionFilter is a pubsub option that specifies a filter for subscriptions
// in bitmasks of interest.
func WithSubscriptionFilter(subFilter SubscriptionFilter) Option {
	return func(ps *PubSub) error {
		ps.subFilter = subFilter
		return nil
	}
}

// NewAllowlistSubscriptionFilter creates a subscription filter that only allows explicitly
// specified bitmasks for local subscriptions and incoming peer subscriptions.
func NewAllowlistSubscriptionFilter(bitmasks ...[]byte) SubscriptionFilter {
	allow := make(map[string]struct{})
	for _, bitmask := range bitmasks {
		allow[string(bitmask)] = struct{}{}
	}

	return &allowlistSubscriptionFilter{allow: allow}
}

type allowlistSubscriptionFilter struct {
	allow map[string]struct{}
}

var _ SubscriptionFilter = (*allowlistSubscriptionFilter)(nil)

func (f *allowlistSubscriptionFilter) CanSubscribe(bitmask []byte) bool {
	_, ok := f.allow[string(bitmask)]
	return ok
}

func (f *allowlistSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) {
	return FilterSubscriptions(subs, f.CanSubscribe), nil
}

// FilterSubscriptions filters (and deduplicates) a list of subscriptions.
// filter should return true if a bitmask is of interest.
func FilterSubscriptions(subs []*pb.RPC_SubOpts, filter func([]byte) bool) []*pb.RPC_SubOpts {
	accept := make(map[string]*pb.RPC_SubOpts)

	for _, sub := range subs {
		bitmask := sub.GetBitmask()

		if !filter(bitmask) {
			continue
		}

		otherSub, ok := accept[string(bitmask)]
		if ok {
			if sub.GetSubscribe() != otherSub.GetSubscribe() {
				delete(accept, string(bitmask))
			}
		} else {
			accept[string(bitmask)] = sub
		}
	}

	if len(accept) == 0 {
		return nil
	}

	result := make([]*pb.RPC_SubOpts, 0, len(accept))
	for _, sub := range accept {
		result = append(result, sub)
	}

	return result
}

// WrapLimitSubscriptionFilter wraps a subscription filter with a hard limit in the number of
// subscriptions allowed in an RPC message.
func WrapLimitSubscriptionFilter(filter SubscriptionFilter, limit int) SubscriptionFilter {
	return &limitSubscriptionFilter{filter: filter, limit: limit}
}

type limitSubscriptionFilter struct {
	filter SubscriptionFilter
	limit  int
}

var _ SubscriptionFilter = (*limitSubscriptionFilter)(nil)

func (f *limitSubscriptionFilter) CanSubscribe(bitmask []byte) bool {
	return f.filter.CanSubscribe(bitmask)
}

func (f *limitSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) {
	if len(subs) > f.limit {
		return nil, ErrTooManySubscriptions
	}

	return f.filter.FilterIncomingSubscriptions(from, subs)
}
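A minimal wiring sketch for the filters above (illustrative; the NewBlossomSub constructor name and the ctx/host setup are assumptions by analogy with gossipsub's NewGossipSub, not shown in this diff): allow two bitmasks and cap any single RPC at 16 subscription announcements.

filter := WrapLimitSubscriptionFilter(
	NewAllowlistSubscriptionFilter(
		[]byte{0xff, 0x00, 0x00, 0x00},
		[]byte{0x00, 0xff, 0x00, 0x00},
	),
	16, // RPCs announcing more than 16 subscriptions fail with ErrTooManySubscriptions
)
// Hypothetical constructor, assumed analogous to gossipsub's NewGossipSub:
ps, err := NewBlossomSub(ctx, host, WithSubscriptionFilter(filter))
if err != nil {
	panic(err)
}
_ = ps // Join/subscribe only succeeds for the two allowed bitmasks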
177
go-libp2p-blossomsub/subscription_filter_test.go
Normal file
@ -0,0 +1,177 @@
package blossomsub

import (
	"bytes"
	"context"
	"testing"
	"time"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestBasicSubscriptionFilter(t *testing.T) {
	peerA := peer.ID("A")

	bitmask1 := []byte{0xff, 0x00, 0x00, 0x00}
	bitmask2 := []byte{0x00, 0xff, 0x00, 0x00}
	bitmask3 := []byte{0x00, 0x00, 0xff, 0x00}
	yes := true
	subs := []*pb.RPC_SubOpts{
		&pb.RPC_SubOpts{
			Bitmask:   bitmask1,
			Subscribe: yes,
		},
		&pb.RPC_SubOpts{
			Bitmask:   bitmask2,
			Subscribe: yes,
		},
		&pb.RPC_SubOpts{
			Bitmask:   bitmask3,
			Subscribe: yes,
		},
	}

	filter := NewAllowlistSubscriptionFilter(bitmask1, bitmask2)
	canSubscribe := filter.CanSubscribe(bitmask1)
	if !canSubscribe {
		t.Fatal("expected allowed subscription")
	}
	canSubscribe = filter.CanSubscribe(bitmask2)
	if !canSubscribe {
		t.Fatal("expected allowed subscription")
	}
	canSubscribe = filter.CanSubscribe(bitmask3)
	if canSubscribe {
		t.Fatal("expected disallowed subscription")
	}
	allowedSubs, err := filter.FilterIncomingSubscriptions(peerA, subs)
	if err != nil {
		t.Fatal(err)
	}
	if len(allowedSubs) != 2 {
		t.Fatalf("expected 2 allowed subscriptions but got %d", len(allowedSubs))
	}
	for _, sub := range allowedSubs {
		if bytes.Equal(sub.GetBitmask(), bitmask3) {
			t.Fatal("unexpected subscription to bitmask3")
		}
	}

	limitFilter := WrapLimitSubscriptionFilter(filter, 2)
	_, err = limitFilter.FilterIncomingSubscriptions(peerA, subs)
	if err != ErrTooManySubscriptions {
		t.Fatal("expected rejection because of too many subscriptions")
	}
}

func TestSubscriptionFilterDeduplication(t *testing.T) {
	peerA := peer.ID("A")

	bitmask1 := []byte{0xff, 0x00, 0x00, 0x00}
	bitmask2 := []byte{0x00, 0xff, 0x00, 0x00}
	bitmask3 := []byte{0x00, 0x00, 0xff, 0x00}
	yes := true
	no := false
	subs := []*pb.RPC_SubOpts{
		&pb.RPC_SubOpts{
			Bitmask:   bitmask1,
			Subscribe: yes,
		},
		&pb.RPC_SubOpts{
			Bitmask:   bitmask1,
			Subscribe: yes,
		},

		&pb.RPC_SubOpts{
			Bitmask:   bitmask2,
			Subscribe: yes,
		},
		&pb.RPC_SubOpts{
			Bitmask:   bitmask2,
			Subscribe: no,
		},
		&pb.RPC_SubOpts{
			Bitmask:   bitmask3,
			Subscribe: yes,
		},
	}

	filter := NewAllowlistSubscriptionFilter(bitmask1, bitmask2)
	allowedSubs, err := filter.FilterIncomingSubscriptions(peerA, subs)
	if err != nil {
		t.Fatal(err)
	}
	if len(allowedSubs) != 1 {
		t.Fatalf("expected 1 allowed subscription but got %d", len(allowedSubs))
	}
	for _, sub := range allowedSubs {
		if bytes.Equal(sub.GetBitmask(), bitmask3) || bytes.Equal(sub.GetBitmask(), bitmask2) {
			t.Fatal("unexpected subscription")
		}
	}
}

func TestSubscriptionFilterRPC(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 2)
	ps1 := getPubsub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0xff, 0x00, 0x00, 0x00}, []byte{0x00, 0xff, 0x00, 0x00})))
	ps2 := getPubsub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0x00, 0xff, 0x00, 0x00}, []byte{0x00, 0x00, 0xff, 0x00})))

	_ = mustSubscribe(t, ps1, []byte{0xff, 0x00, 0x00, 0x00})
	_ = mustSubscribe(t, ps1, []byte{0x00, 0xff, 0x00, 0x00})
	_ = mustSubscribe(t, ps2, []byte{0x00, 0xff, 0x00, 0x00})
	_ = mustSubscribe(t, ps2, []byte{0x00, 0x00, 0xff, 0x00})

	// check the rejection as well
	_, err := ps1.Join([]byte{0x00, 0x00, 0xff, 0x00})
	if err == nil {
		t.Fatal("expected subscription error")
	}

	connect(t, hosts[0], hosts[1])

	time.Sleep(time.Second)

	var sub1, sub2, sub3 bool
	ready := make(chan struct{})

	ps1.eval <- func() {
		_, sub1 = ps1.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[1].ID()]
		_, sub2 = ps1.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[1].ID()]
		_, sub3 = ps1.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[1].ID()]
		ready <- struct{}{}
	}
	<-ready

	if sub1 {
		t.Fatal("expected no subscription for bitmask1")
	}
	if !sub2 {
		t.Fatal("expected subscription for bitmask2")
	}
	if sub3 {
		t.Fatal("expected no subscription for bitmask3")
	}

	ps2.eval <- func() {
		_, sub1 = ps2.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[0].ID()]
		_, sub2 = ps2.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[0].ID()]
		_, sub3 = ps2.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[0].ID()]
		ready <- struct{}{}
	}
	<-ready

	if sub1 {
		t.Fatal("expected no subscription for bitmask1")
	}
	if !sub2 {
		t.Fatal("expected subscription for bitmask2")
	}
	if sub3 {
		t.Fatal("expected no subscription for bitmask3")
	}
}
259
go-libp2p-blossomsub/tag_tracer.go
Normal file
@ -0,0 +1,259 @@
package blossomsub

import (
	"fmt"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

var (
	// BlossomSubConnTagBumpMessageDelivery is the amount to add to the connection manager
	// tag that tracks message deliveries. Each time a peer is the first to deliver a
	// message within a bitmask, we "bump" a tag by this amount, up to a maximum
	// of BlossomSubConnTagMessageDeliveryCap.
	// Note that the delivery tags decay over time, decreasing by BlossomSubConnTagDecayAmount
	// at every BlossomSubConnTagDecayInterval.
	BlossomSubConnTagBumpMessageDelivery = 1

	// BlossomSubConnTagDecayInterval is the decay interval for decaying connection manager tags.
	BlossomSubConnTagDecayInterval = 10 * time.Minute

	// BlossomSubConnTagDecayAmount is subtracted from decaying tag values at each decay interval.
	BlossomSubConnTagDecayAmount = 1

	// BlossomSubConnTagMessageDeliveryCap is the maximum value for the connection manager tags that
	// track message deliveries.
	BlossomSubConnTagMessageDeliveryCap = 15
)

// tagTracer is an internal tracer that applies connection manager tags to peer
// connections based on their behavior.
//
// We tag a peer's connections for the following reasons:
//   - Directly connected peers are tagged with BlossomSubConnTagValueDirectPeer (default 1000).
//   - Mesh peers are tagged with a value of BlossomSubConnTagValueMeshPeer (default 20).
//     If a peer is in multiple bitmask meshes, they'll be tagged for each.
//   - For each message that we receive, we bump a delivery tag for the peer that delivered
//     the message first.
//     The delivery tags have a maximum value, BlossomSubConnTagMessageDeliveryCap, and they decay at
//     a rate of BlossomSubConnTagDecayAmount / BlossomSubConnTagDecayInterval.
type tagTracer struct {
	sync.RWMutex

	cmgr     connmgr.ConnManager
	idGen    *msgIDGenerator
	decayer  connmgr.Decayer
	decaying map[string]connmgr.DecayingTag
	direct   map[peer.ID]struct{}

	// a map of message ids to the set of peers who delivered the message after the first delivery,
	// but before the message was finished validating
	nearFirst map[string]map[peer.ID]struct{}
}

func newTagTracer(cmgr connmgr.ConnManager) *tagTracer {
	decayer, ok := connmgr.SupportsDecay(cmgr)
	if !ok {
		log.Debugf("connection manager does not support decaying tags, delivery tags will not be applied")
	}
	return &tagTracer{
		cmgr:      cmgr,
		idGen:     newMsgIdGenerator(),
		decayer:   decayer,
		decaying:  make(map[string]connmgr.DecayingTag),
		nearFirst: make(map[string]map[peer.ID]struct{}),
	}
}

func (t *tagTracer) Start(gs *BlossomSubRouter) {
	if t == nil {
		return
	}

	t.idGen = gs.p.idGen
	t.direct = gs.direct
}

func (t *tagTracer) tagPeerIfDirect(p peer.ID) {
	if t.direct == nil {
		return
	}

	// tag peer if it is a direct peer
	_, direct := t.direct[p]
	if direct {
		t.cmgr.Protect(p, "pubsub:<direct>")
	}
}

func (t *tagTracer) tagMeshPeer(p peer.ID, bitmask []byte) {
	tag := bitmaskTag(bitmask)
	t.cmgr.Protect(p, tag)
}

func (t *tagTracer) untagMeshPeer(p peer.ID, bitmask []byte) {
	tag := bitmaskTag(bitmask)
	t.cmgr.Unprotect(p, tag)
}

func bitmaskTag(bitmask []byte) string {
	return fmt.Sprintf("pubsub:%s", bitmask)
}

func (t *tagTracer) addDeliveryTag(bitmask []byte) {
	if t.decayer == nil {
		return
	}

	name := fmt.Sprintf("pubsub-deliveries:%s", bitmask)
	t.Lock()
	defer t.Unlock()
	tag, err := t.decayer.RegisterDecayingTag(
		name,
		BlossomSubConnTagDecayInterval,
		connmgr.DecayFixed(BlossomSubConnTagDecayAmount),
		connmgr.BumpSumBounded(0, BlossomSubConnTagMessageDeliveryCap))

	if err != nil {
		log.Warnf("unable to create decaying delivery tag: %s", err)
		return
	}
	t.decaying[string(bitmask)] = tag
}

func (t *tagTracer) removeDeliveryTag(bitmask []byte) {
	t.Lock()
	defer t.Unlock()
	tag, ok := t.decaying[string(bitmask)]
	if !ok {
		return
	}
	err := tag.Close()
	if err != nil {
		log.Warnf("error closing decaying connmgr tag: %s", err)
	}
	delete(t.decaying, string(bitmask))
}

func (t *tagTracer) bumpDeliveryTag(p peer.ID, bitmask []byte) error {
	t.RLock()
	defer t.RUnlock()

	tag, ok := t.decaying[string(bitmask)]
	if !ok {
		return fmt.Errorf("no decaying tag registered for bitmask %s", bitmask)
	}
	return tag.Bump(p, BlossomSubConnTagBumpMessageDelivery)
}

func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) {
	bitmask := msg.GetBitmask()
	err := t.bumpDeliveryTag(p, bitmask)
	if err != nil {
		log.Warnf("error bumping delivery tag: %s", err)
	}
}

// nearFirstPeers returns the peers who delivered the message while it was still validating
func (t *tagTracer) nearFirstPeers(msg *Message) []peer.ID {
	t.Lock()
	defer t.Unlock()
	peersMap, ok := t.nearFirst[t.idGen.ID(msg)]
	if !ok {
		return nil
	}
	peers := make([]peer.ID, 0, len(peersMap))
	for p := range peersMap {
		peers = append(peers, p)
	}
	return peers
}

// -- RawTracer interface methods
var _ RawTracer = (*tagTracer)(nil)

func (t *tagTracer) AddPeer(p peer.ID, proto protocol.ID) {
	t.tagPeerIfDirect(p)
}

func (t *tagTracer) Join(bitmask []byte) {
	t.addDeliveryTag(bitmask)
}

func (t *tagTracer) DeliverMessage(msg *Message) {
	nearFirst := t.nearFirstPeers(msg)

	t.bumpTagsForMessage(msg.ReceivedFrom, msg)
	for _, p := range nearFirst {
		t.bumpTagsForMessage(p, msg)
	}

	// delete the delivery state for this message
	t.Lock()
	delete(t.nearFirst, t.idGen.ID(msg))
	t.Unlock()
}

func (t *tagTracer) Leave(bitmask []byte) {
	t.removeDeliveryTag(bitmask)
}

func (t *tagTracer) Graft(p peer.ID, bitmask []byte) {
	t.tagMeshPeer(p, bitmask)
}

func (t *tagTracer) Prune(p peer.ID, bitmask []byte) {
	t.untagMeshPeer(p, bitmask)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagTracer) ValidateMessage(msg *Message) {
|
||||||
|
t.Lock()
|
||||||
|
defer t.Unlock()
|
||||||
|
|
||||||
|
// create map to start tracking the peers who deliver while we're validating
|
||||||
|
id := t.idGen.ID(msg)
|
||||||
|
if _, exists := t.nearFirst[id]; exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.nearFirst[id] = make(map[peer.ID]struct{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagTracer) DuplicateMessage(msg *Message) {
|
||||||
|
t.Lock()
|
||||||
|
defer t.Unlock()
|
||||||
|
|
||||||
|
id := t.idGen.ID(msg)
|
||||||
|
peers, ok := t.nearFirst[id]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
peers[msg.ReceivedFrom] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagTracer) RejectMessage(msg *Message, reason string) {
|
||||||
|
t.Lock()
|
||||||
|
defer t.Unlock()
|
||||||
|
|
||||||
|
// We want to delete the near-first delivery tracking for messages that have passed through
|
||||||
|
// the validation pipeline. Other rejection reasons (missing signature, etc) skip the validation
|
||||||
|
// queue, so we don't want to remove the state in case the message is still validating.
|
||||||
|
switch reason {
|
||||||
|
case RejectValidationThrottled:
|
||||||
|
fallthrough
|
||||||
|
case RejectValidationIgnored:
|
||||||
|
fallthrough
|
||||||
|
case RejectValidationFailed:
|
||||||
|
delete(t.nearFirst, t.idGen.ID(msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagTracer) RemovePeer(peer.ID) {}
|
||||||
|
func (t *tagTracer) ThrottlePeer(p peer.ID) {}
|
||||||
|
func (t *tagTracer) RecvRPC(rpc *RPC) {}
|
||||||
|
func (t *tagTracer) SendRPC(rpc *RPC, p peer.ID) {}
|
||||||
|
func (t *tagTracer) DropRPC(rpc *RPC, p peer.ID) {}
|
||||||
|
func (t *tagTracer) UndeliverableMessage(msg *Message) {}
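
The constants above are easier to follow next to the connection-manager primitives they parameterize. Below is a small, self-contained sketch (not part of this commit) exercising SupportsDecay, RegisterDecayingTag, DecayFixed and BumpSumBounded from go-libp2p in isolation; the tag name and peer ID are invented for illustration:

package main

import (
	"fmt"
	"time"

	connmgri "github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func main() {
	// a decayer that ticks every second, so the example runs quickly
	cm, err := connmgr.NewConnManager(5, 10,
		connmgr.WithGracePeriod(time.Minute),
		connmgr.DecayerConfig(&connmgr.DecayerCfg{Resolution: time.Second}))
	if err != nil {
		panic(err)
	}

	decayer, ok := connmgri.SupportsDecay(cm)
	if !ok {
		panic("connmgr does not support decaying tags")
	}

	// same shape as the delivery tags above: decay by 1 per interval,
	// bumps summed and capped at 15
	tag, err := decayer.RegisterDecayingTag(
		"example-deliveries", // hypothetical tag name
		10*time.Minute,
		connmgri.DecayFixed(1),
		connmgri.BumpSumBounded(0, 15))
	if err != nil {
		panic(err)
	}

	p := peer.ID("some-peer")
	_ = tag.Bump(p, 1) // credit one delivery; applied on the decayer's next tick

	time.Sleep(2 * time.Second)
	if info := cm.GetTagInfo(p); info != nil {
		fmt.Println(info.Tags) // expect map[example-deliveries:1]
	}
}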
260
go-libp2p-blossomsub/tag_tracer_test.go
Normal file
@ -0,0 +1,260 @@
package blossomsub

import (
	"fmt"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	connmgri "github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

func TestTagTracerMeshTags(t *testing.T) {
	// test that tags are applied when the tagTracer sees graft and prune events

	cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute))
	if err != nil {
		t.Fatal(err)
	}
	tt := newTagTracer(cmgr)

	p := peer.ID("a-peer")
	bitmask := []byte{0xff, 0x00, 0xff, 0x00}

	tt.Join(bitmask)
	tt.Graft(p, bitmask)

	tag := "pubsub:" + string(bitmask)
	if !cmgr.IsProtected(p, tag) {
		t.Fatal("expected the mesh peer to be protected")
	}

	tt.Prune(p, bitmask)
	if cmgr.IsProtected(p, tag) {
		t.Fatal("expected the former mesh peer to be unprotected")
	}
}

func TestTagTracerDirectPeerTags(t *testing.T) {
	// test that we add a tag to direct peers
	cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute))
	if err != nil {
		t.Fatal(err)
	}
	tt := newTagTracer(cmgr)

	p1 := peer.ID("1")
	p2 := peer.ID("2")
	p3 := peer.ID("3")

	// in the real world, tagTracer.direct is set in the WithDirectPeers option function
	tt.direct = make(map[peer.ID]struct{})
	tt.direct[p1] = struct{}{}

	tt.AddPeer(p1, BlossomSubID_v11)
	tt.AddPeer(p2, BlossomSubID_v11)
	tt.AddPeer(p3, BlossomSubID_v11)

	tag := "pubsub:<direct>"
	if !cmgr.IsProtected(p1, tag) {
		t.Fatal("expected direct peer to be protected")
	}

	for _, p := range []peer.ID{p2, p3} {
		if cmgr.IsProtected(p, tag) {
			t.Fatal("expected non-direct peer to be unprotected")
		}
	}
}

func TestTagTracerDeliveryTags(t *testing.T) {
	t.Skip("flaky test temporarily disabled; TODO: fixme")
	// test decaying delivery tags

	// use fake time to test the tag decay
	clk := clock.NewMock()
	decayCfg := &connmgr.DecayerCfg{
		Clock:      clk,
		Resolution: time.Minute,
	}
	cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute), connmgr.DecayerConfig(decayCfg))
	if err != nil {
		t.Fatal(err)
	}

	tt := newTagTracer(cmgr)

	bitmask1 := []byte{0xff, 0x00, 0xff, 0x00}
	bitmask2 := []byte{0x00, 0xff, 0x00, 0xff}

	p := peer.ID("a-peer")

	tt.Join(bitmask1)
	tt.Join(bitmask2)

	for i := 0; i < 20; i++ {
		// deliver only 5 messages to bitmask 2 (less than the cap)
		bitmask := bitmask1
		if i < 5 {
			bitmask = bitmask2
		}
		msg := &Message{
			ReceivedFrom: p,
			Message: &pb.Message{
				From:    []byte(p),
				Data:    []byte("hello"),
				Bitmask: bitmask,
			},
		}
		tt.DeliverMessage(msg)
	}

	// we have to tick the fake clock once to apply the bump
	clk.Add(time.Minute)

	tag1 := "pubsub-deliveries:" + string(bitmask1)
	tag2 := "pubsub-deliveries:" + string(bitmask2)

	// the tag value for bitmask-1 should be capped at BlossomSubConnTagMessageDeliveryCap (default 15)
	val := getTagValue(cmgr, p, tag1)
	expected := BlossomSubConnTagMessageDeliveryCap
	if val != expected {
		t.Errorf("expected delivery tag to be capped at %d, was %d", expected, val)
	}

	// the value for bitmask-2 should equal the number of messages delivered (5), since it was less than the cap
	val = getTagValue(cmgr, p, tag2)
	expected = 5
	if val != expected {
		t.Errorf("expected delivery tag value = %d, got %d", expected, val)
	}

	// if we jump forward a few minutes, we should see the tags decrease by 1 / 10 minutes
	clk.Add(50 * time.Minute)
	time.Sleep(2 * time.Second)

	val = getTagValue(cmgr, p, tag1)
	expected = BlossomSubConnTagMessageDeliveryCap - 5
	// the expected value is BlossomSubConnTagMessageDeliveryCap - 5,
	// however due to timing issues on Travis, we consistently get BlossomSubConnTagMessageDeliveryCap - 4
	// there instead. So our assertion checks for the expected value +/- 1
	if val > expected+1 || val < expected-1 {
		t.Errorf("expected delivery tag value = %d ± 1, got %d", expected, val)
	}

	// the tag for bitmask-2 should have reset to zero by now, but again we add one for Travis since it's slow...
	val = getTagValue(cmgr, p, tag2)
	expected = 0
	if val > expected+1 || val < expected-1 {
		t.Errorf("expected delivery tag value = %d ± 1, got %d", expected, val)
	}

	// leaving the bitmask should remove the tag
	if !tagExists(cmgr, p, tag1) {
		t.Errorf("expected delivery tag %s to be applied to peer %s", tag1, p)
	}
	tt.Leave(bitmask1)
	// advance the real clock a bit to allow the connmgr to remove the tag async
	time.Sleep(time.Second)
	if tagExists(cmgr, p, tag1) {
		t.Errorf("expected delivery tag %s to be removed after leaving the bitmask", tag1)
	}
}

func TestTagTracerDeliveryTagsNearFirst(t *testing.T) {
	// use fake time to test the tag decay
	clk := clock.NewMock()
	decayCfg := &connmgr.DecayerCfg{
		Clock:      clk,
		Resolution: time.Minute,
	}
	cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute), connmgr.DecayerConfig(decayCfg))
	if err != nil {
		t.Fatal(err)
	}

	tt := newTagTracer(cmgr)

	bitmask := []byte{0x7e, 0x57}

	p := peer.ID("a-peer")
	p2 := peer.ID("another-peer")
	p3 := peer.ID("slow-peer")

	tt.Join(bitmask)

	for i := 0; i < BlossomSubConnTagMessageDeliveryCap+5; i++ {
		msg := &Message{
			ReceivedFrom: p,
			Message: &pb.Message{
				From:    []byte(p),
				Data:    []byte(fmt.Sprintf("msg-%d", i)),
				Bitmask: bitmask,
				Seqno:   []byte(fmt.Sprintf("%d", i)),
			},
		}

		// a duplicate of the message, received from p2
		dup := &Message{
			ReceivedFrom: p2,
			Message:      msg.Message,
		}

		// the message starts validating as soon as we receive it from p
		tt.ValidateMessage(msg)
		// p2 should get near-first credit for the duplicate message that arrives before
		// validation is complete
		tt.DuplicateMessage(dup)
		// DeliverMessage gets called when validation is complete
		tt.DeliverMessage(msg)

		// p3 delivers a duplicate after validation completes & gets no credit
		dup.ReceivedFrom = p3
		tt.DuplicateMessage(dup)
	}

	clk.Add(time.Minute)

	// both p and p2 should get delivery tags equal to the cap
	tag := "pubsub-deliveries:" + string(bitmask)
	val := getTagValue(cmgr, p, tag)
	if val != BlossomSubConnTagMessageDeliveryCap {
		t.Errorf("expected tag %s to have val %d, was %d", tag, BlossomSubConnTagMessageDeliveryCap, val)
	}
	val = getTagValue(cmgr, p2, tag)
	if val != BlossomSubConnTagMessageDeliveryCap {
		t.Errorf("expected tag %s for near-first peer to have val %d, was %d", tag, BlossomSubConnTagMessageDeliveryCap, val)
	}

	// p3 should have no delivery tag credit
	val = getTagValue(cmgr, p3, tag)
	if val != 0 {
		t.Errorf("expected tag %s for slow peer to have val %d, was %d", tag, 0, val)
	}
}

func getTagValue(mgr connmgri.ConnManager, p peer.ID, tag string) int {
	info := mgr.GetTagInfo(p)
	if info == nil {
		return 0
	}
	val, ok := info.Tags[tag]
	if !ok {
		return 0
	}
	return val
}

//lint:ignore U1000 used only by skipped tests at present
func tagExists(mgr connmgri.ConnManager, p peer.ID, tag string) bool {
	info := mgr.GetTagInfo(p)
	if info == nil {
		return false
	}
	_, exists := info.Tags[tag]
	return exists
}
56
go-libp2p-blossomsub/timecache/first_seen_cache.go
Normal file
@ -0,0 +1,56 @@
package timecache

import (
	"context"
	"sync"
	"time"
)

// FirstSeenCache is a time cache that only marks the expiry of a message when first added.
type FirstSeenCache struct {
	lk  sync.RWMutex
	m   map[string]time.Time
	ttl time.Duration

	done func()
}

var _ TimeCache = (*FirstSeenCache)(nil)

func newFirstSeenCache(ttl time.Duration) *FirstSeenCache {
	tc := &FirstSeenCache{
		m:   make(map[string]time.Time),
		ttl: ttl,
	}

	ctx, done := context.WithCancel(context.Background())
	tc.done = done
	go background(ctx, &tc.lk, tc.m)

	return tc
}

func (tc *FirstSeenCache) Done() {
	tc.done()
}

func (tc *FirstSeenCache) Has(s string) bool {
	tc.lk.RLock()
	defer tc.lk.RUnlock()

	_, ok := tc.m[s]
	return ok
}

func (tc *FirstSeenCache) Add(s string) bool {
	tc.lk.Lock()
	defer tc.lk.Unlock()

	_, ok := tc.m[s]
	if ok {
		return false
	}

	tc.m[s] = time.Now().Add(tc.ttl)
	return true
}
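
Because Add reports whether the id was newly inserted, a first-seen cache doubles as a fixed-window deduplicator. A minimal usage sketch (not part of this commit), assuming the exported NewTimeCache constructor defined later in this diff and this module's import path:

package main

import (
	"fmt"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache"
)

func main() {
	seen := timecache.NewTimeCache(30 * time.Second) // defaults to the first-seen strategy
	defer seen.Done()

	for _, id := range []string{"msg-1", "msg-2", "msg-1"} {
		if seen.Add(id) {
			fmt.Println("processing", id) // first sighting inside the TTL window
		} else {
			fmt.Println("duplicate, skipping", id)
		}
	}
}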
46
go-libp2p-blossomsub/timecache/first_seen_cache_test.go
Normal file
@ -0,0 +1,46 @@
package timecache

import (
	"fmt"
	"testing"
	"time"
)

func TestFirstSeenCacheFound(t *testing.T) {
	tc := newFirstSeenCache(time.Minute)

	tc.Add("test")

	if !tc.Has("test") {
		t.Fatal("should have this key")
	}
}

func TestFirstSeenCacheExpire(t *testing.T) {
	backgroundSweepInterval = time.Second

	tc := newFirstSeenCache(time.Second)
	for i := 0; i < 10; i++ {
		tc.Add(fmt.Sprint(i))
		time.Sleep(time.Millisecond * 100)
	}

	time.Sleep(2 * time.Second)
	for i := 0; i < 10; i++ {
		if tc.Has(fmt.Sprint(i)) {
			t.Fatalf("should have dropped this key: %s from the cache already", fmt.Sprint(i))
		}
	}
}

func TestFirstSeenCacheNotFoundAfterExpire(t *testing.T) {
	backgroundSweepInterval = time.Second

	tc := newFirstSeenCache(time.Second)
	tc.Add(fmt.Sprint(0))

	time.Sleep(2 * time.Second)
	if tc.Has(fmt.Sprint(0)) {
		t.Fatal("should have dropped this from the cache already")
	}
}
58
go-libp2p-blossomsub/timecache/last_seen_cache.go
Normal file
@ -0,0 +1,58 @@
package timecache

import (
	"context"
	"sync"
	"time"
)

// LastSeenCache is a time cache that extends the expiry of a seen message when it is added
// again, or when it is checked for presence with Has.
type LastSeenCache struct {
	lk  sync.Mutex
	m   map[string]time.Time
	ttl time.Duration

	done func()
}

var _ TimeCache = (*LastSeenCache)(nil)

func newLastSeenCache(ttl time.Duration) *LastSeenCache {
	tc := &LastSeenCache{
		m:   make(map[string]time.Time),
		ttl: ttl,
	}

	ctx, done := context.WithCancel(context.Background())
	tc.done = done
	go background(ctx, &tc.lk, tc.m)

	return tc
}

func (tc *LastSeenCache) Done() {
	tc.done()
}

func (tc *LastSeenCache) Add(s string) bool {
	tc.lk.Lock()
	defer tc.lk.Unlock()

	_, ok := tc.m[s]
	tc.m[s] = time.Now().Add(tc.ttl)

	return !ok
}

func (tc *LastSeenCache) Has(s string) bool {
	tc.lk.Lock()
	defer tc.lk.Unlock()

	_, ok := tc.m[s]
	if ok {
		tc.m[s] = time.Now().Add(tc.ttl)
	}

	return ok
}
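
The practical difference from FirstSeenCache: every Add, and every successful Has, pushes the entry's expiry out by a full TTL, while a first-seen entry keeps the expiry fixed at insertion time. A small sketch of the contrast (not part of this commit; note that physical eviction only happens at the background sweep defined in util.go below, so this is about when an entry becomes eligible for eviction, not about instant removal):

package main

import (
	"fmt"
	"time"

	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache"
)

func main() {
	ttl := 2 * time.Second

	first := timecache.NewTimeCacheWithStrategy(timecache.Strategy_FirstSeen, ttl)
	last := timecache.NewTimeCacheWithStrategy(timecache.Strategy_LastSeen, ttl)
	defer first.Done()
	defer last.Done()

	first.Add("id") // expiry fixed at now+ttl, regardless of later touches
	last.Add("id")  // expiry at now+ttl, for now...

	time.Sleep(time.Second)

	fmt.Println(first.Has("id")) // true; the lookup does not move the expiry
	fmt.Println(last.Has("id"))  // true; the lookup slides the expiry to a fresh now+ttl

	// One second later the first-seen entry is past its TTL and eligible for
	// eviction at the next sweep; the last-seen entry still has a second left.
}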
92
go-libp2p-blossomsub/timecache/last_seen_cache_test.go
Normal file
@ -0,0 +1,92 @@
package timecache

import (
	"fmt"
	"testing"
	"time"
)

func TestLastSeenCacheFound(t *testing.T) {
	tc := newLastSeenCache(time.Minute)

	tc.Add("test")

	if !tc.Has("test") {
		t.Fatal("should have this key")
	}
}

func TestLastSeenCacheExpire(t *testing.T) {
	backgroundSweepInterval = time.Second
	tc := newLastSeenCache(time.Second)
	for i := 0; i < 11; i++ {
		tc.Add(fmt.Sprint(i))
		time.Sleep(time.Millisecond * 100)
	}

	time.Sleep(2 * time.Second)
	for i := 0; i < 11; i++ {
		if tc.Has(fmt.Sprint(i)) {
			t.Fatalf("should have dropped this key: %s from the cache already", fmt.Sprint(i))
		}
	}
}

func TestLastSeenCacheSlideForward(t *testing.T) {
	t.Skip("timing is too fine grained to run in CI")

	tc := newLastSeenCache(time.Second)
	i := 0

	// T0ms: Add 8 entries with a 100ms sleep after each
	for i < 8 {
		tc.Add(fmt.Sprint(i))
		time.Sleep(time.Millisecond * 100)
		i++
	}

	// T800ms: Look up the first entry - this should slide the entry forward so that its expiration
	// is a full second later.
	if !tc.Has(fmt.Sprint(0)) {
		t.Fatal("should have this key")
	}

	// T800ms: Wait until after the first and second entries would have normally expired (had we not
	// looked the first entry up).
	time.Sleep(time.Millisecond * 400)

	// T1200ms: The first entry should still be present in the cache - this will also slide the entry forward.
	if !tc.Has(fmt.Sprint(0)) {
		t.Fatal("should still have this key")
	}

	// T1200ms: The second entry should have expired
	if tc.Has(fmt.Sprint(1)) {
		t.Fatal("should have dropped this from the cache already")
	}

	// T1200ms: Sleep until the first entry actually expires
	time.Sleep(time.Millisecond * 1100)

	// T2300ms: Now the first entry should have expired
	if tc.Has(fmt.Sprint(0)) {
		t.Fatal("should have dropped this from the cache already")
	}

	// And it should not have been added back
	if tc.Has(fmt.Sprint(0)) {
		t.Fatal("should have dropped this from the cache already")
	}
}

func TestLastSeenCacheNotFoundAfterExpire(t *testing.T) {
	backgroundSweepInterval = time.Second

	tc := newLastSeenCache(time.Second)
	tc.Add(fmt.Sprint(0))

	time.Sleep(2 * time.Second)
	if tc.Has(fmt.Sprint(0)) {
		t.Fatal("should have dropped this from the cache already")
	}
}
52
go-libp2p-blossomsub/timecache/time_cache.go
Normal file
@ -0,0 +1,52 @@
package timecache

import (
	"time"

	logger "github.com/ipfs/go-log/v2"
)

var log = logger.Logger("blossomsub/timecache")

// Strategy is the TimeCache expiration strategy to use.
type Strategy uint8

const (
	// Strategy_FirstSeen expires an entry from the time it was added.
	Strategy_FirstSeen Strategy = iota
	// Strategy_LastSeen expires an entry from the last time it was touched by an Add or Has.
	Strategy_LastSeen
)

// TimeCache is a cache of recently seen messages (by id).
type TimeCache interface {
	// Add adds an id into the cache, if it is not already there.
	// Returns true if the id was newly added to the cache.
	// Depending on the implementation strategy, it may or may not update the expiry of
	// an existing entry.
	Add(string) bool
	// Has checks the cache for the presence of an id.
	// Depending on the implementation strategy, it may or may not update the expiry of
	// an existing entry.
	Has(string) bool
	// Done signals that the user is done with this cache, at which point it may stop
	// background threads and relinquish resources.
	Done()
}

// NewTimeCache defaults to the original ("first seen") cache implementation
func NewTimeCache(ttl time.Duration) TimeCache {
	return NewTimeCacheWithStrategy(Strategy_FirstSeen, ttl)
}

func NewTimeCacheWithStrategy(strategy Strategy, ttl time.Duration) TimeCache {
	switch strategy {
	case Strategy_FirstSeen:
		return newFirstSeenCache(ttl)
	case Strategy_LastSeen:
		return newLastSeenCache(ttl)
	default:
		// Default to the original time cache implementation
		return newFirstSeenCache(ttl)
	}
}
35
go-libp2p-blossomsub/timecache/util.go
Normal file
@ -0,0 +1,35 @@
package timecache

import (
	"context"
	"sync"
	"time"
)

var backgroundSweepInterval = time.Minute

func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
	ticker := time.NewTicker(backgroundSweepInterval)
	defer ticker.Stop()

	for {
		select {
		case now := <-ticker.C:
			sweep(lk, m, now)

		case <-ctx.Done():
			return
		}
	}
}

func sweep(lk sync.Locker, m map[string]time.Time, now time.Time) {
	lk.Lock()
	defer lk.Unlock()

	for k, expiry := range m {
		if expiry.Before(now) {
			delete(m, k)
		}
	}
}
530
go-libp2p-blossomsub/trace.go
Normal file
@ -0,0 +1,530 @@
package blossomsub

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// EventTracer is a generic event tracer interface.
// This is a high level tracing interface which delivers tracing events, as defined by the protobuf
// schema in pb/trace.proto.
type EventTracer interface {
	Trace(evt *pb.TraceEvent)
}

// RawTracer is a low level tracing interface that allows an application to trace the internal
// operation of the pubsub subsystem.
//
// Note that the tracers are invoked synchronously, which means that application tracers must
// take care to not block or modify arguments.
//
// Warning: this interface is not fixed, we may be adding new methods as necessitated by the system
// in the future.
type RawTracer interface {
	// AddPeer is invoked when a new peer is added.
	AddPeer(p peer.ID, proto protocol.ID)
	// RemovePeer is invoked when a peer is removed.
	RemovePeer(p peer.ID)
	// Join is invoked when a new bitmask is joined.
	Join(bitmask []byte)
	// Leave is invoked when a bitmask is abandoned.
	Leave(bitmask []byte)
	// Graft is invoked when a new peer is grafted on the mesh (BlossomSub).
	Graft(p peer.ID, bitmask []byte)
	// Prune is invoked when a peer is pruned from the mesh (BlossomSub).
	Prune(p peer.ID, bitmask []byte)
	// ValidateMessage is invoked when a message first enters the validation pipeline.
	ValidateMessage(msg *Message)
	// DeliverMessage is invoked when a message is delivered.
	DeliverMessage(msg *Message)
	// RejectMessage is invoked when a message is Rejected or Ignored.
	// The reason argument can be one of the named strings Reject*.
	RejectMessage(msg *Message, reason string)
	// DuplicateMessage is invoked when a duplicate message is dropped.
	DuplicateMessage(msg *Message)
	// ThrottlePeer is invoked when a peer is throttled by the peer gater.
	ThrottlePeer(p peer.ID)
	// RecvRPC is invoked when an incoming RPC is received.
	RecvRPC(rpc *RPC)
	// SendRPC is invoked when an RPC is sent.
	SendRPC(rpc *RPC, p peer.ID)
	// DropRPC is invoked when an outbound RPC is dropped, typically because of a full queue.
	DropRPC(rpc *RPC, p peer.ID)
	// UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and
	// the pressure release mechanism triggers, dropping messages.
	UndeliverableMessage(msg *Message)
}

// pubsub tracer details
type pubsubTracer struct {
	tracer EventTracer
	raw    []RawTracer
	pid    peer.ID
	idGen  *msgIDGenerator
}

func (t *pubsubTracer) PublishMessage(msg *Message) {
	if t == nil {
		return
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_PUBLISH_MESSAGE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		PublishMessage: &pb.TraceEvent_PublishMessage{
			MessageID: []byte(t.idGen.ID(msg)),
			Bitmask:   msg.Message.Bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) ValidateMessage(msg *Message) {
	if t == nil {
		return
	}

	if msg.ReceivedFrom != t.pid {
		for _, tr := range t.raw {
			tr.ValidateMessage(msg)
		}
	}
}

func (t *pubsubTracer) RejectMessage(msg *Message, reason string) {
	if t == nil {
		return
	}

	if msg.ReceivedFrom != t.pid {
		for _, tr := range t.raw {
			tr.RejectMessage(msg, reason)
		}
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_REJECT_MESSAGE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		RejectMessage: &pb.TraceEvent_RejectMessage{
			MessageID:    []byte(t.idGen.ID(msg)),
			ReceivedFrom: []byte(msg.ReceivedFrom),
			Reason:       &reason,
			Bitmask:      msg.Bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) DuplicateMessage(msg *Message) {
	if t == nil {
		return
	}

	if msg.ReceivedFrom != t.pid {
		for _, tr := range t.raw {
			tr.DuplicateMessage(msg)
		}
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_DUPLICATE_MESSAGE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		DuplicateMessage: &pb.TraceEvent_DuplicateMessage{
			MessageID:    []byte(t.idGen.ID(msg)),
			ReceivedFrom: []byte(msg.ReceivedFrom),
			Bitmask:      msg.Bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) DeliverMessage(msg *Message) {
	if t == nil {
		return
	}

	if msg.ReceivedFrom != t.pid {
		for _, tr := range t.raw {
			tr.DeliverMessage(msg)
		}
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_DELIVER_MESSAGE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		DeliverMessage: &pb.TraceEvent_DeliverMessage{
			MessageID:    []byte(t.idGen.ID(msg)),
			Bitmask:      msg.Bitmask,
			ReceivedFrom: []byte(msg.ReceivedFrom),
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.AddPeer(p, proto)
	}

	if t.tracer == nil {
		return
	}

	protoStr := string(proto)
	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_ADD_PEER.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		AddPeer: &pb.TraceEvent_AddPeer{
			PeerID: []byte(p),
			Proto:  &protoStr,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) RemovePeer(p peer.ID) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.RemovePeer(p)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_REMOVE_PEER.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		RemovePeer: &pb.TraceEvent_RemovePeer{
			PeerID: []byte(p),
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) RecvRPC(rpc *RPC) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.RecvRPC(rpc)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_RECV_RPC.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		RecvRPC: &pb.TraceEvent_RecvRPC{
			ReceivedFrom: []byte(rpc.from),
			Meta:         t.traceRPCMeta(rpc),
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) SendRPC(rpc *RPC, p peer.ID) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.SendRPC(rpc, p)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_SEND_RPC.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		SendRPC: &pb.TraceEvent_SendRPC{
			SendTo: []byte(p),
			Meta:   t.traceRPCMeta(rpc),
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) DropRPC(rpc *RPC, p peer.ID) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.DropRPC(rpc, p)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_DROP_RPC.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		DropRPC: &pb.TraceEvent_DropRPC{
			SendTo: []byte(p),
			Meta:   t.traceRPCMeta(rpc),
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) UndeliverableMessage(msg *Message) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.UndeliverableMessage(msg)
	}
}

func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta {
	rpcMeta := new(pb.TraceEvent_RPCMeta)

	var msgs []*pb.TraceEvent_MessageMeta
	for _, m := range rpc.Publish {
		msgs = append(msgs, &pb.TraceEvent_MessageMeta{
			MessageID: []byte(t.idGen.RawID(m)),
			Bitmask:   m.Bitmask,
		})
	}
	rpcMeta.Messages = msgs

	var subs []*pb.TraceEvent_SubMeta
	for _, sub := range rpc.Subscriptions {
		subs = append(subs, &pb.TraceEvent_SubMeta{
			Subscribe: &sub.Subscribe,
			Bitmask:   sub.Bitmask,
		})
	}
	rpcMeta.Subscription = subs

	if rpc.Control != nil {
		var ihave []*pb.TraceEvent_ControlIHaveMeta
		for _, ctl := range rpc.Control.Ihave {
			var mids [][]byte
			for _, mid := range ctl.MessageIDs {
				mids = append(mids, []byte(mid))
			}
			ihave = append(ihave, &pb.TraceEvent_ControlIHaveMeta{
				Bitmask:    ctl.Bitmask,
				MessageIDs: mids,
			})
		}

		var iwant []*pb.TraceEvent_ControlIWantMeta
		for _, ctl := range rpc.Control.Iwant {
			var mids [][]byte
			for _, mid := range ctl.MessageIDs {
				mids = append(mids, []byte(mid))
			}
			iwant = append(iwant, &pb.TraceEvent_ControlIWantMeta{
				MessageIDs: mids,
			})
		}

		var graft []*pb.TraceEvent_ControlGraftMeta
		for _, ctl := range rpc.Control.Graft {
			graft = append(graft, &pb.TraceEvent_ControlGraftMeta{
				Bitmask: ctl.Bitmask,
			})
		}

		var prune []*pb.TraceEvent_ControlPruneMeta
		for _, ctl := range rpc.Control.Prune {
			peers := make([][]byte, 0, len(ctl.Peers))
			for _, pi := range ctl.Peers {
				peers = append(peers, pi.PeerID)
			}
			prune = append(prune, &pb.TraceEvent_ControlPruneMeta{
				Bitmask: ctl.Bitmask,
				Peers:   peers,
			})
		}

		rpcMeta.Control = &pb.TraceEvent_ControlMeta{
			Ihave: ihave,
			Iwant: iwant,
			Graft: graft,
			Prune: prune,
		}
	}

	return rpcMeta
}

func (t *pubsubTracer) Join(bitmask []byte) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.Join(bitmask)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_JOIN.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		Join: &pb.TraceEvent_Join{
			Bitmask: bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) Leave(bitmask []byte) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.Leave(bitmask)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_LEAVE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		Leave: &pb.TraceEvent_Leave{
			Bitmask: bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) Graft(p peer.ID, bitmask []byte) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.Graft(p, bitmask)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_GRAFT.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		Graft: &pb.TraceEvent_Graft{
			PeerID:  []byte(p),
			Bitmask: bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) Prune(p peer.ID, bitmask []byte) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.Prune(p, bitmask)
	}

	if t.tracer == nil {
		return
	}

	now := time.Now().UnixNano()
	evt := &pb.TraceEvent{
		Type:      pb.TraceEvent_PRUNE.Enum(),
		PeerID:    []byte(t.pid),
		Timestamp: &now,
		Prune: &pb.TraceEvent_Prune{
			PeerID:  []byte(p),
			Bitmask: bitmask,
		},
	}

	t.tracer.Trace(evt)
}

func (t *pubsubTracer) ThrottlePeer(p peer.ID) {
	if t == nil {
		return
	}

	for _, tr := range t.raw {
		tr.ThrottlePeer(p)
	}
}
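
Since EventTracer is a single-method interface, plugging in a custom sink is mostly boilerplate. A hedged sketch (not part of this commit) of an in-process tracer that counts events by type; the WithEventTracer option is the one exercised in trace_test.go below, and the NewBlossomSub call in the usage comment is assumed to exist analogously to pubsub's NewGossipSub:

package main

import (
	"sync"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// countingTracer tallies trace events by type. Trace is invoked by the pubsub
// system's tracing goroutine, so it only needs to be cheap and thread-safe.
type countingTracer struct {
	mx     sync.Mutex
	counts map[pb.TraceEvent_Type]int
}

func (c *countingTracer) Trace(evt *pb.TraceEvent) {
	c.mx.Lock()
	defer c.mx.Unlock()
	if c.counts == nil {
		c.counts = make(map[pb.TraceEvent_Type]int)
	}
	c.counts[evt.GetType()]++
}

var _ blossomsub.EventTracer = (*countingTracer)(nil)

// Usage sketch (inside node setup, h being a libp2p host; constructor name assumed):
//   ps, err := blossomsub.NewBlossomSub(ctx, h, blossomsub.WithEventTracer(&countingTracer{}))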
323
go-libp2p-blossomsub/trace_test.go
Normal file
@ -0,0 +1,323 @@
package blossomsub

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"

	bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
	swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"

	"github.com/libp2p/go-msgio/protoio"
)

func testWithTracer(t *testing.T, tracer EventTracer) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 20)
	psubs := getBlossomSubs(ctx, hosts,
		WithEventTracer(tracer),
		// to bootstrap from star topology
		WithPeerExchange(true),
		// to exercise the score paths in the tracer
		WithPeerScore(
			&PeerScoreParams{
				BitmaskScoreCap:  100,
				AppSpecificScore: func(peer.ID) float64 { return 0 },
				DecayInterval:    time.Second,
				DecayToZero:      0.01,
			},
			&PeerScoreThresholds{
				GossipThreshold:             -1,
				PublishThreshold:            -2,
				GraylistThreshold:           -3,
				OpportunisticGraftThreshold: 1,
			}))

	// add a validator that rejects some messages to exercise those code paths in the tracer
	for _, ps := range psubs {
		ps.RegisterBitmaskValidator([]byte{0x7e, 0x57}, func(ctx context.Context, p peer.ID, msg *Message) bool {
			if string(msg.Data) == "invalid!" {
				return false
			} else {
				return true
			}
		})
	}

	// this is the star topology test so that we make sure we get some PRUNEs and cover that code path

	// add all peer addresses to the peerstores
	// this is necessary because we can't have signed address records without identify
	// pushing them
	for i := range hosts {
		for j := range hosts {
			if i == j {
				continue
			}
			hosts[i].Peerstore().AddAddrs(hosts[j].ID(), hosts[j].Addrs(), peerstore.PermanentAddrTTL)
		}
	}

	// build the star
	for i := 1; i < 20; i++ {
		connect(t, hosts[0], hosts[i])
	}

	// build the mesh
	var subs []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
		if err != nil {
			t.Fatal(err)
		}
		go func(sub *Subscription) {
			for {
				_, err := sub.Next(ctx)
				if err != nil {
					return
				}
			}
		}(sub)
		subs = append(subs, sub)
	}

	// wait for the mesh to build
	time.Sleep(5 * time.Second)

	// publish some messages
	for i := 0; i < 20; i++ {
		if i%7 == 0 {
			psubs[i].Publish([]byte{0x7e, 0x57}, []byte("invalid!"))
		} else {
			msg := []byte(fmt.Sprintf("message %d", i))
			psubs[i].Publish([]byte{0x7e, 0x57}, msg)
		}
	}

	// wait a bit for propagation and call it a day
	time.Sleep(time.Second)

	// close all subscriptions to get some leave events
	for _, sub := range subs {
		sub.Cancel()
	}

	// wait for the leave to take effect
	time.Sleep(time.Second)
}

type traceStats struct {
	publish, reject, duplicate, deliver, add, remove, recv, send, drop, join, leave, graft, prune int
}

func (t *traceStats) process(evt *pb.TraceEvent) {
	// fmt.Printf("process event %s\n", evt.GetType())
	switch evt.GetType() {
	case pb.TraceEvent_PUBLISH_MESSAGE:
		t.publish++
	case pb.TraceEvent_REJECT_MESSAGE:
		t.reject++
	case pb.TraceEvent_DUPLICATE_MESSAGE:
		t.duplicate++
	case pb.TraceEvent_DELIVER_MESSAGE:
		t.deliver++
	case pb.TraceEvent_ADD_PEER:
		t.add++
	case pb.TraceEvent_REMOVE_PEER:
		t.remove++
	case pb.TraceEvent_RECV_RPC:
		t.recv++
	case pb.TraceEvent_SEND_RPC:
		t.send++
	case pb.TraceEvent_DROP_RPC:
		t.drop++
	case pb.TraceEvent_JOIN:
		t.join++
	case pb.TraceEvent_LEAVE:
		t.leave++
	case pb.TraceEvent_GRAFT:
		t.graft++
	case pb.TraceEvent_PRUNE:
		t.prune++
	}
}

func (ts *traceStats) check(t *testing.T) {
	if ts.publish == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.duplicate == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.deliver == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.reject == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.add == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.recv == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.send == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.join == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.leave == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.graft == 0 {
		t.Fatal("expected non-zero count")
	}
	if ts.prune == 0 {
		t.Fatal("expected non-zero count")
	}
}

func TestJSONTracer(t *testing.T) {
	tracer, err := NewJSONTracer("/tmp/trace.out.json")
	if err != nil {
		t.Fatal(err)
	}

	testWithTracer(t, tracer)
	time.Sleep(time.Second)
	tracer.Close()

	var stats traceStats
	var evt pb.TraceEvent

	f, err := os.Open("/tmp/trace.out.json")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	dec := json.NewDecoder(f)
	for {
		evt.Reset()
		err := dec.Decode(&evt)
		if err != nil {
			break
		}

		stats.process(&evt)
	}

	stats.check(t)
}

func TestPBTracer(t *testing.T) {
	tracer, err := NewPBTracer("/tmp/trace.out.pb")
	if err != nil {
		t.Fatal(err)
	}

	testWithTracer(t, tracer)
	time.Sleep(time.Second)
	tracer.Close()

	var stats traceStats
	var evt pb.TraceEvent

	f, err := os.Open("/tmp/trace.out.pb")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	r := protoio.NewDelimitedReader(f, 1<<20)
	for {
		evt.Reset()
		err := r.ReadMsg(&evt)
		if err != nil {
			break
		}

		stats.process(&evt)
	}

	stats.check(t)
}

type mockRemoteTracer struct {
	mx sync.Mutex
	ts traceStats
}

func (mrt *mockRemoteTracer) handleStream(s network.Stream) {
	defer s.Close()

	gzr, err := gzip.NewReader(s)
	if err != nil {
		panic(err)
	}

	r := protoio.NewDelimitedReader(gzr, 1<<24)

	var batch pb.TraceEventBatch
	for {
		batch.Reset()
		err := r.ReadMsg(&batch)
		if err != nil {
			if err != io.EOF {
				s.Reset()
			}
			return
		}

		mrt.mx.Lock()
		for _, evt := range batch.GetBatch() {
			mrt.ts.process(evt)
		}
		mrt.mx.Unlock()
	}
}

func (mrt *mockRemoteTracer) check(t *testing.T) {
	mrt.mx.Lock()
	defer mrt.mx.Unlock()
	mrt.ts.check(t)
}

func TestRemoteTracer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
	h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
	defer h1.Close()
	defer h2.Close()

	mrt := &mockRemoteTracer{}
	h1.SetStreamHandler(RemoteTracerProtoID, mrt.handleStream)

	tracer, err := NewRemoteTracer(ctx, h2, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()})
	if err != nil {
		t.Fatal(err)
	}

	testWithTracer(t, tracer)
	time.Sleep(time.Second)
	tracer.Close()

	mrt.check(t)
}
310
go-libp2p-blossomsub/tracer.go
Normal file
@ -0,0 +1,310 @@
|
|||||||
|
package blossomsub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-libp2p/core/host"
|
||||||
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||||
|
"github.com/libp2p/go-libp2p/core/protocol"
|
||||||
|
|
||||||
|
"github.com/libp2p/go-msgio/protoio"
|
||||||
|
)
|
||||||
|
|
||||||
|
var TraceBufferSize = 1 << 16 // 64K ought to be enough for everyone; famous last words.
|
||||||
|
var MinTraceBatchSize = 16
|
||||||
|
|
||||||
|
// rejection reasons
|
||||||
|
const (
|
||||||
|
RejectBlacklstedPeer = "blacklisted peer"
|
||||||
|
RejectBlacklistedSource = "blacklisted source"
|
||||||
|
RejectMissingSignature = "missing signature"
|
||||||
|
RejectUnexpectedSignature = "unexpected signature"
|
||||||
|
RejectUnexpectedAuthInfo = "unexpected auth info"
|
||||||
|
RejectInvalidSignature = "invalid signature"
|
||||||
|
RejectValidationQueueFull = "validation queue full"
|
||||||
|
RejectValidationThrottled = "validation throttled"
|
||||||
|
RejectValidationFailed = "validation failed"
|
||||||
|
RejectValidationIgnored = "validation ignored"
|
||||||
|
RejectSelfOrigin = "self originated message"
|
||||||
|
)
|
||||||
|
|
||||||
|
type basicTracer struct {
|
||||||
|
ch chan struct{}
|
||||||
|
mx sync.Mutex
|
||||||
|
buf []*pb.TraceEvent
|
||||||
|
lossy bool
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *basicTracer) Trace(evt *pb.TraceEvent) {
|
||||||
|
t.mx.Lock()
|
||||||
|
defer t.mx.Unlock()
|
||||||
|
|
||||||
|
if t.closed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.lossy && len(t.buf) > TraceBufferSize {
|
||||||
|
log.Debug("trace buffer overflow; dropping trace event")
|
||||||
|
} else {
|
||||||
|
t.buf = append(t.buf, evt)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case t.ch <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *basicTracer) Close() {
|
||||||
|
t.mx.Lock()
|
||||||
|
defer t.mx.Unlock()
|
||||||
|
if !t.closed {
|
||||||
|
t.closed = true
|
||||||
|
close(t.ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONTracer is a tracer that writes events to a file, encoded in ndjson.
|
||||||
|
type JSONTracer struct {
|
||||||
|
basicTracer
|
||||||
|
w io.WriteCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStdoutJSONTracer() (*JSONTracer, error) {
|
||||||
|
tr := &JSONTracer{w: os.Stdout, basicTracer: basicTracer{ch: make(chan struct{}, 1)}}
|
||||||
|
go tr.doWrite()
|
||||||
|
|
||||||
|
return tr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewJsonTracer creates a new JSONTracer writing traces to file.
|
||||||
|
func NewJSONTracer(file string) (*JSONTracer, error) {
|
||||||
|
return OpenJSONTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenJSONTracer creates a new JSONTracer, with explicit control of OpenFile flags and permissions.
|
||||||
|
func OpenJSONTracer(file string, flags int, perm os.FileMode) (*JSONTracer, error) {
|
||||||
|
f, err := os.OpenFile(file, flags, perm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tr := &JSONTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}}
|
||||||
|
go tr.doWrite()
|
||||||
|
|
||||||
|
return tr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *JSONTracer) doWrite() {
|
||||||
|
var buf []*pb.TraceEvent
|
||||||
|
enc := json.NewEncoder(t.w)
|
||||||
|
for {
|
||||||
|
_, ok := <-t.ch
|
||||||
|
|
||||||
|
t.mx.Lock()
|
||||||
|
tmp := t.buf
|
||||||
|
t.buf = buf[:0]
|
||||||
|
buf = tmp
|
||||||
|
t.mx.Unlock()
|
||||||
|
|
||||||
|
for i, evt := range buf {
|
||||||
|
err := enc.Encode(evt)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("error writing event trace: %s", err.Error())
|
||||||
|
}
|
||||||
|
buf[i] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
t.w.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ EventTracer = (*JSONTracer)(nil)
|
||||||
|
|
||||||
|

// PBTracer is a tracer that writes events to a file, as delimited protobufs.
type PBTracer struct {
	basicTracer
	w io.WriteCloser
}

func NewPBTracer(file string) (*PBTracer, error) {
	return OpenPBTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
}

// OpenPBTracer creates a new PBTracer, with explicit control of OpenFile flags and permissions.
func OpenPBTracer(file string, flags int, perm os.FileMode) (*PBTracer, error) {
	f, err := os.OpenFile(file, flags, perm)
	if err != nil {
		return nil, err
	}

	tr := &PBTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}}
	go tr.doWrite()

	return tr, nil
}

func (t *PBTracer) doWrite() {
	var buf []*pb.TraceEvent
	w := protoio.NewDelimitedWriter(t.w)
	for {
		_, ok := <-t.ch

		t.mx.Lock()
		tmp := t.buf
		t.buf = buf[:0]
		buf = tmp
		t.mx.Unlock()

		for i, evt := range buf {
			err := w.WriteMsg(evt)
			if err != nil {
				log.Warnf("error writing event trace: %s", err.Error())
			}
			buf[i] = nil
		}

		if !ok {
			t.w.Close()
			return
		}
	}
}

var _ EventTracer = (*PBTracer)(nil)

const RemoteTracerProtoID = protocol.ID("/libp2p/pubsub/tracer/1.0.0")

// RemoteTracer is a tracer that sends trace events to a remote peer
type RemoteTracer struct {
	basicTracer
	ctx  context.Context
	host host.Host
	peer peer.ID
}

// NewRemoteTracer constructs a RemoteTracer, tracing to the peer identified by pi
func NewRemoteTracer(ctx context.Context, host host.Host, pi peer.AddrInfo) (*RemoteTracer, error) {
	tr := &RemoteTracer{ctx: ctx, host: host, peer: pi.ID, basicTracer: basicTracer{ch: make(chan struct{}, 1), lossy: true}}
	host.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.PermanentAddrTTL)
	go tr.doWrite()
	return tr, nil
}
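
// Illustrative usage sketch (not part of the original file): tracing to a
// collector whose AddrInfo is known; a real collector must speak
// RemoteTracerProtoID (for example the go-libp2p-pubsub tracer daemon).
// NewBlossomSub and WithEventTracer are assumed API, mirroring go-libp2p-pubsub.
//
//	pi := peer.AddrInfo{ID: collector.ID(), Addrs: collector.Addrs()}
//	tracer, err := NewRemoteTracer(ctx, host, pi)
//	if err != nil {
//		return err
//	}
//	ps, err := NewBlossomSub(ctx, host, WithEventTracer(tracer))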

func (t *RemoteTracer) doWrite() {
	var buf []*pb.TraceEvent

	s, err := t.openStream()
	if err != nil {
		log.Debugf("error opening remote tracer stream: %s", err.Error())
		return
	}

	var batch pb.TraceEventBatch

	gzipW := gzip.NewWriter(s)
	w := protoio.NewDelimitedWriter(gzipW)

	for {
		_, ok := <-t.ch

		// deadline for batch accumulation
		deadline := time.Now().Add(time.Second)

		t.mx.Lock()
		for len(t.buf) < MinTraceBatchSize && time.Now().Before(deadline) {
			t.mx.Unlock()
			time.Sleep(100 * time.Millisecond)
			t.mx.Lock()
		}

		tmp := t.buf
		t.buf = buf[:0]
		buf = tmp
		t.mx.Unlock()

		if len(buf) == 0 {
			goto end
		}

		batch.Batch = buf

		err = w.WriteMsg(&batch)
		if err != nil {
			log.Debugf("error writing trace event batch: %s", err)
			goto end
		}

		err = gzipW.Flush()
		if err != nil {
			log.Debugf("error flushing gzip stream: %s", err)
			goto end
		}

	end:
		// nil out the buffer to gc consumed events
		for i := range buf {
			buf[i] = nil
		}

		if !ok {
			if err != nil {
				s.Reset()
			} else {
				gzipW.Close()
				s.Close()
			}
			return
		}

		if err != nil {
			s.Reset()
			s, err = t.openStream()
			if err != nil {
				log.Debugf("error opening remote tracer stream: %s", err.Error())
				return
			}

			gzipW.Reset(s)
		}
	}
}

func (t *RemoteTracer) openStream() (network.Stream, error) {
	for {
		ctx, cancel := context.WithTimeout(t.ctx, time.Minute)
		s, err := t.host.NewStream(ctx, t.peer, RemoteTracerProtoID)
		cancel()
		if err != nil {
			if t.ctx.Err() != nil {
				return nil, err
			}

			// wait a minute and try again, to account for transient server downtime
			select {
			case <-time.After(time.Minute):
				continue
			case <-t.ctx.Done():
				return nil, t.ctx.Err()
			}
		}

		return s, nil
	}
}

var _ EventTracer = (*RemoteTracer)(nil)

590
go-libp2p-blossomsub/validation.go
Normal file
@ -0,0 +1,590 @@
package blossomsub

import (
	"context"
	"fmt"
	"runtime"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

const (
	defaultValidateQueueSize   = 32
	defaultValidateConcurrency = 1024
	defaultValidateThrottle    = 8192
)

// ValidationError is an error that may be signalled from message publication when the message
// fails validation
type ValidationError struct {
	Reason string
}

func (e ValidationError) Error() string {
	return e.Reason
}

// Validator is a function that validates a message with a binary decision: accept or reject.
type Validator func(context.Context, peer.ID, *Message) bool

// ValidatorEx is an extended validation function that validates a message with an enumerated decision
type ValidatorEx func(context.Context, peer.ID, *Message) ValidationResult

// ValidationResult represents the decision of an extended validator
type ValidationResult int

const (
	// ValidationAccept is a validation decision that indicates a valid message that should be accepted and
	// delivered to the application and forwarded to the network.
	ValidationAccept = ValidationResult(0)
	// ValidationReject is a validation decision that indicates an invalid message that should not be
	// delivered to the application or forwarded to the network. Furthermore the peer that forwarded
	// the message should be penalized by peer scoring routers.
	ValidationReject = ValidationResult(1)
	// ValidationIgnore is a validation decision that indicates a message that should be ignored: it will
	// be neither delivered to the application nor forwarded to the network. However, in contrast to
	// ValidationReject, the peer that forwarded the message must not be penalized by peer scoring routers.
	ValidationIgnore = ValidationResult(2)
	// internal
	validationThrottled = ValidationResult(-1)
)
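
// Illustrative sketch (not part of the original file): an extended validator
// that distinguishes hard protocol violations (Reject, which penalizes the
// forwarding peer) from transient conditions (Ignore, which does not). The
// size limit is an arbitrary example value.
//
//	var sizeValidator ValidatorEx = func(ctx context.Context, from peer.ID, msg *Message) ValidationResult {
//		if len(msg.Data) > 1<<20 {
//			return ValidationReject
//		}
//		select {
//		case <-ctx.Done():
//			return ValidationIgnore // deadline hit; don't penalize the peer
//		default:
//			return ValidationAccept
//		}
//	}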

// ValidatorOpt is an option for RegisterBitmaskValidator.
type ValidatorOpt func(addVal *addValReq) error

// validation represents the validator pipeline.
// The validator pipeline performs signature validation and runs a
// sequence of user-configured validators per-bitmask. It is possible to
// adjust various concurrency parameters, such as the number of
// workers and the max number of simultaneous validations. The user
// can also attach inline validators that will be executed
// synchronously; this may be useful to prevent superfluous
// context-switching for lightweight tasks.
type validation struct {
	p *PubSub

	tracer *pubsubTracer

	// mx protects the validator map
	mx sync.Mutex
	// bitmaskVals tracks per bitmask validators
	bitmaskVals map[string]*validatorImpl

	// defaultVals tracks default validators applicable to all bitmasks
	defaultVals []*validatorImpl

	// validateQ is the front-end to the validation pipeline
	validateQ chan *validateReq

	// validateThrottle limits the number of active validation goroutines
	validateThrottle chan struct{}

	// this is the number of synchronous validation workers
	validateWorkers int
}

// validation requests
type validateReq struct {
	vals []*validatorImpl
	src  peer.ID
	msg  *Message
}

// representation of bitmask validators
type validatorImpl struct {
	bitmask          []byte
	validate         ValidatorEx
	validateTimeout  time.Duration
	validateThrottle chan struct{}
	validateInline   bool
}

// async request to add a bitmask validator
type addValReq struct {
	bitmask  []byte
	validate interface{}
	timeout  time.Duration
	throttle int
	inline   bool
	resp     chan error
}

// async request to remove a bitmask validator
type rmValReq struct {
	bitmask []byte
	resp    chan error
}

// newValidation creates a new validation pipeline
func newValidation() *validation {
	return &validation{
		bitmaskVals:      make(map[string]*validatorImpl),
		validateQ:        make(chan *validateReq, defaultValidateQueueSize),
		validateThrottle: make(chan struct{}, defaultValidateThrottle),
		validateWorkers:  runtime.NumCPU(),
	}
}

// Start attaches the validation pipeline to a pubsub instance and starts background
// workers
func (v *validation) Start(p *PubSub) {
	v.p = p
	v.tracer = p.tracer
	for i := 0; i < v.validateWorkers; i++ {
		go v.validateWorker()
	}
}

// AddValidator adds a new validator
func (v *validation) AddValidator(req *addValReq) {
	val, err := v.makeValidator(req)
	if err != nil {
		req.resp <- err
		return
	}

	v.mx.Lock()
	defer v.mx.Unlock()

	bitmask := val.bitmask

	_, ok := v.bitmaskVals[string(bitmask)]
	if ok {
		req.resp <- fmt.Errorf("duplicate validator for bitmask %s", bitmask)
		return
	}

	v.bitmaskVals[string(bitmask)] = val
	req.resp <- nil
}

func (v *validation) makeValidator(req *addValReq) (*validatorImpl, error) {
	makeValidatorEx := func(v Validator) ValidatorEx {
		return func(ctx context.Context, p peer.ID, msg *Message) ValidationResult {
			if v(ctx, p, msg) {
				return ValidationAccept
			} else {
				return ValidationReject
			}
		}
	}

	var validator ValidatorEx
	switch v := req.validate.(type) {
	case func(ctx context.Context, p peer.ID, msg *Message) bool:
		validator = makeValidatorEx(Validator(v))
	case Validator:
		validator = makeValidatorEx(v)

	case func(ctx context.Context, p peer.ID, msg *Message) ValidationResult:
		validator = ValidatorEx(v)
	case ValidatorEx:
		validator = v

	default:
		bitmask := req.bitmask
		if req.bitmask == nil {
			bitmask = []byte{0xff, 0xff}
		}
		return nil, fmt.Errorf("unknown validator type for bitmask %s; must be an instance of Validator or ValidatorEx", bitmask)
	}

	val := &validatorImpl{
		bitmask:          req.bitmask,
		validate:         validator,
		validateTimeout:  0,
		validateThrottle: make(chan struct{}, defaultValidateConcurrency),
		validateInline:   req.inline,
	}

	if req.timeout > 0 {
		val.validateTimeout = req.timeout
	}

	if req.throttle > 0 {
		val.validateThrottle = make(chan struct{}, req.throttle)
	}

	return val, nil
}

// RemoveValidator removes an existing validator
func (v *validation) RemoveValidator(req *rmValReq) {
	v.mx.Lock()
	defer v.mx.Unlock()

	bitmask := req.bitmask

	_, ok := v.bitmaskVals[string(bitmask)]
	if ok {
		delete(v.bitmaskVals, string(bitmask))
		req.resp <- nil
	} else {
		req.resp <- fmt.Errorf("no validator for bitmask %s", bitmask)
	}
}

// PushLocal synchronously pushes a locally published message and performs applicable
// validations.
// Returns an error if validation fails
func (v *validation) PushLocal(msg *Message) error {
	v.p.tracer.PublishMessage(msg)

	err := v.p.checkSigningPolicy(msg)
	if err != nil {
		return err
	}

	vals := v.getValidators(msg)
	return v.validate(vals, msg.ReceivedFrom, msg, true)
}

// Push pushes a message into the validation pipeline.
// It returns true if the message can be forwarded immediately without validation.
func (v *validation) Push(src peer.ID, msg *Message) bool {
	vals := v.getValidators(msg)

	if len(vals) > 0 || msg.Signature != nil {
		select {
		case v.validateQ <- &validateReq{vals, src, msg}:
		default:
			log.Debugf("message validation throttled: queue full; dropping message from %s", src)
			v.tracer.RejectMessage(msg, RejectValidationQueueFull)
		}
		return false
	}

	return true
}

// getValidators returns all validators that apply to a given message
func (v *validation) getValidators(msg *Message) []*validatorImpl {
	v.mx.Lock()
	defer v.mx.Unlock()

	var vals []*validatorImpl
	vals = append(vals, v.defaultVals...)

	bitmask := msg.GetBitmask()

	val, ok := v.bitmaskVals[string(bitmask)]
	if !ok {
		return vals
	}

	return append(vals, val)
}

// validateWorker is an active goroutine performing inline validation
func (v *validation) validateWorker() {
	for {
		select {
		case req := <-v.validateQ:
			v.validate(req.vals, req.src, req.msg, false)
		case <-v.p.ctx.Done():
			return
		}
	}
}

// validate performs validation and only sends the message if all validators succeed
func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool) error {
	// If signature verification is enabled, but signing is disabled,
	// the Signature is required to be nil upon receiving the message in PubSub.pushMsg.
	if msg.Signature != nil {
		if !v.validateSignature(msg) {
			log.Debugf("message signature validation failed; dropping message from %s", src)
			v.tracer.RejectMessage(msg, RejectInvalidSignature)
			return ValidationError{Reason: RejectInvalidSignature}
		}
	}

	// we can mark the message as seen now that we have verified the signature
	// and avoid invoking user validators more than once
	id := v.p.idGen.ID(msg)
	if !v.p.markSeen(id) {
		v.tracer.DuplicateMessage(msg)
		return nil
	} else {
		v.tracer.ValidateMessage(msg)
	}

	var inline, async []*validatorImpl
	for _, val := range vals {
		if val.validateInline || synchronous {
			inline = append(inline, val)
		} else {
			async = append(async, val)
		}
	}

	// apply inline (synchronous) validators
	result := ValidationAccept
loop:
	for _, val := range inline {
		switch val.validateMsg(v.p.ctx, src, msg) {
		case ValidationAccept:
		case ValidationReject:
			result = ValidationReject
			break loop
		case ValidationIgnore:
			result = ValidationIgnore
		}
	}

	if result == ValidationReject {
		log.Debugf("message validation failed; dropping message from %s", src)
		v.tracer.RejectMessage(msg, RejectValidationFailed)
		return ValidationError{Reason: RejectValidationFailed}
	}

	// apply async validators
	if len(async) > 0 {
		select {
		case v.validateThrottle <- struct{}{}:
			go func() {
				v.doValidateBitmask(async, src, msg, result)
				<-v.validateThrottle
			}()
		default:
			log.Debugf("message validation throttled; dropping message from %s", src)
			v.tracer.RejectMessage(msg, RejectValidationThrottled)
		}
		return nil
	}

	if result == ValidationIgnore {
		v.tracer.RejectMessage(msg, RejectValidationIgnored)
		return ValidationError{Reason: RejectValidationIgnored}
	}

	// no async validators, accepted message, send it!
	select {
	case v.p.sendMsg <- msg:
		return nil
	case <-v.p.ctx.Done():
		return v.p.ctx.Err()
	}
}

func (v *validation) validateSignature(msg *Message) bool {
	err := verifyMessageSignature(msg.Message)
	if err != nil {
		log.Debugf("signature verification error: %s", err.Error())
		return false
	}

	return true
}

func (v *validation) doValidateBitmask(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult) {
	result := v.validateBitmask(vals, src, msg)

	if result == ValidationAccept && r != ValidationAccept {
		result = r
	}

	switch result {
	case ValidationAccept:
		v.p.sendMsg <- msg
	case ValidationReject:
		log.Debugf("message validation failed; dropping message from %s", src)
		v.tracer.RejectMessage(msg, RejectValidationFailed)
		return
	case ValidationIgnore:
		log.Debugf("message validation punted; ignoring message from %s", src)
		v.tracer.RejectMessage(msg, RejectValidationIgnored)
		return
	case validationThrottled:
		log.Debugf("message validation throttled; ignoring message from %s", src)
		v.tracer.RejectMessage(msg, RejectValidationThrottled)

	default:
		// BUG: this would be an internal programming error, so a panic seems appropriate.
		panic(fmt.Errorf("unexpected validation result: %d", result))
	}
}

func (v *validation) validateBitmask(vals []*validatorImpl, src peer.ID, msg *Message) ValidationResult {
	if len(vals) == 1 {
		return v.validateSingleBitmask(vals[0], src, msg)
	}

	ctx, cancel := context.WithCancel(v.p.ctx)
	defer cancel()

	rch := make(chan ValidationResult, len(vals))
	rcount := 0

	for _, val := range vals {
		rcount++

		select {
		case val.validateThrottle <- struct{}{}:
			go func(val *validatorImpl) {
				rch <- val.validateMsg(ctx, src, msg)
				<-val.validateThrottle
			}(val)

		default:
			log.Debugf("validation throttled for bitmask %s", val.bitmask)
			rch <- validationThrottled
		}
	}

	result := ValidationAccept
loop:
	for i := 0; i < rcount; i++ {
		switch <-rch {
		case ValidationAccept:
		case ValidationReject:
			result = ValidationReject
			break loop
		case ValidationIgnore:
			// throttled validation has the same effect, but takes precedence over Ignore as it is not
			// known whether the throttled validator would have signaled rejection.
			if result != validationThrottled {
				result = ValidationIgnore
			}
		case validationThrottled:
			result = validationThrottled
		}
	}

	return result
}

// fast path for single bitmask validation that avoids the extra goroutine
func (v *validation) validateSingleBitmask(val *validatorImpl, src peer.ID, msg *Message) ValidationResult {
	select {
	case val.validateThrottle <- struct{}{}:
		res := val.validateMsg(v.p.ctx, src, msg)
		<-val.validateThrottle
		return res

	default:
		log.Debugf("validation throttled for bitmask %s", val.bitmask)
		return validationThrottled
	}
}

func (val *validatorImpl) validateMsg(ctx context.Context, src peer.ID, msg *Message) ValidationResult {
	start := time.Now()
	defer func() {
		log.Debugf("validation done; took %s", time.Since(start))
	}()

	if val.validateTimeout > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, val.validateTimeout)
		defer cancel()
	}

	r := val.validate(ctx, src, msg)
	switch r {
	case ValidationAccept:
		fallthrough
	case ValidationReject:
		fallthrough
	case ValidationIgnore:
		return r

	default:
		log.Warnf("Unexpected result from validator: %d; ignoring message", r)
		return ValidationIgnore
	}
}

/// Options

// WithDefaultValidator adds a validator that applies to all bitmasks by default; it can be used
// more than once to add multiple validators. Having a default validator does not inhibit registering
// a per bitmask validator.
func WithDefaultValidator(val interface{}, opts ...ValidatorOpt) Option {
	return func(ps *PubSub) error {
		addVal := &addValReq{
			validate: val,
		}

		for _, opt := range opts {
			err := opt(addVal)
			if err != nil {
				return err
			}
		}

		val, err := ps.val.makeValidator(addVal)
		if err != nil {
			return err
		}

		ps.val.defaultVals = append(ps.val.defaultVals, val)
		return nil
	}
}

// WithValidateQueueSize sets the buffer size of the validate queue. Defaults to 32.
// When the queue is full, validation is throttled and new messages are dropped.
func WithValidateQueueSize(n int) Option {
	return func(ps *PubSub) error {
		if n > 0 {
			ps.val.validateQ = make(chan *validateReq, n)
			return nil
		}
		return fmt.Errorf("validate queue size must be > 0")
	}
}

// WithValidateThrottle sets the upper bound on the number of active validation
// goroutines across all bitmasks. The default is 8192.
func WithValidateThrottle(n int) Option {
	return func(ps *PubSub) error {
		ps.val.validateThrottle = make(chan struct{}, n)
		return nil
	}
}

// WithValidateWorkers sets the number of synchronous validation worker goroutines.
// Defaults to NumCPU.
//
// The synchronous validation workers perform signature validation, apply inline
// user validators, and schedule asynchronous user validators.
// You can adjust this parameter to devote less CPU time to synchronous validation.
func WithValidateWorkers(n int) Option {
	return func(ps *PubSub) error {
		if n > 0 {
			ps.val.validateWorkers = n
			return nil
		}
		return fmt.Errorf("number of validation workers must be > 0")
	}
}

// WithValidatorTimeout is an option that sets a timeout for an (asynchronous) bitmask validator.
// By default there is no timeout in asynchronous validators.
func WithValidatorTimeout(timeout time.Duration) ValidatorOpt {
	return func(addVal *addValReq) error {
		addVal.timeout = timeout
		return nil
	}
}

// WithValidatorConcurrency is an option that sets the bitmask validator throttle.
// This controls the number of active validation goroutines for the bitmask; the default is 1024.
func WithValidatorConcurrency(n int) ValidatorOpt {
	return func(addVal *addValReq) error {
		addVal.throttle = n
		return nil
	}
}

// WithValidatorInline is an option that sets the validation disposition to synchronous:
// the validator will be executed inline in the validation front-end, without spawning a new goroutine.
// This is suitable for simple or CPU-bound validators that do not block.
func WithValidatorInline(inline bool) ValidatorOpt {
	return func(addVal *addValReq) error {
		addVal.inline = inline
		return nil
	}
}
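
// Illustrative sketch (not part of the original file): combining the options
// above when registering a bitmask validator. The bitmask, the psub variable
// and the validator body are example values; RegisterBitmaskValidator is the
// registration entry point exercised by the tests below.
//
//	err := psub.RegisterBitmaskValidator(
//		[]byte{0x00, 0x01},
//		func(ctx context.Context, from peer.ID, msg *Message) bool {
//			return len(msg.Data) > 0
//		},
//		WithValidatorTimeout(100*time.Millisecond), // bound each async validation
//		WithValidatorConcurrency(256),              // at most 256 concurrent validations
//	)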

101
go-libp2p-blossomsub/validation_builtin.go
Normal file
@ -0,0 +1,101 @@
package blossomsub

import (
	"context"
	"encoding/binary"
	"sync"

	"github.com/libp2p/go-libp2p/core/peer"
)

// PeerMetadataStore is an interface for storing and retrieving per peer metadata
type PeerMetadataStore interface {
	// Get retrieves the metadata associated with a peer;
	// It should return nil if there is no metadata associated with the peer and not an error.
	Get(context.Context, peer.ID) ([]byte, error)
	// Put sets the metadata associated with a peer.
	Put(context.Context, peer.ID, []byte) error
}

// BasicSeqnoValidator is a basic validator, usable as a default validator, that ignores replayed
// messages outside the seen cache window. The validator uses the message seqno as a peer-specific
// nonce to decide whether the message should be propagated, comparing to the maximal nonce stored
// in the peer metadata store. This is useful to ensure that there can be no infinitely propagating
// messages in the network regardless of the seen cache span and network diameter.
// It requires that pubsub is instantiated with a strict message signing policy and that seqnos
// are not disabled, i.e. it doesn't support anonymous mode.
//
// Warning: See https://github.com/libp2p/rust-libp2p/issues/3453
// TL;DR: rust is currently violating the spec by issuing a random seqno, which creates an
// interoperability hazard. We expect this issue to be addressed in the not so distant future,
// but keep this in mind if you are in a mixed environment with (older) rust nodes.
type BasicSeqnoValidator struct {
	mx   sync.RWMutex
	meta PeerMetadataStore
}

// NewBasicSeqnoValidator constructs a BasicSeqnoValidator using the given PeerMetadataStore.
func NewBasicSeqnoValidator(meta PeerMetadataStore) ValidatorEx {
	val := &BasicSeqnoValidator{
		meta: meta,
	}
	return val.validate
}
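
// Illustrative sketch (not part of the original file): installing the seqno
// validator as a default validator. metaStore is any PeerMetadataStore
// implementation (the tests below use an in-memory map); NewBlossomSub,
// WithMessageSigning and WithStrictSignatureVerification are assumed to
// mirror go-libp2p-pubsub's API, per the signing-policy requirement above.
//
//	ps, err := NewBlossomSub(ctx, host,
//		WithDefaultValidator(NewBasicSeqnoValidator(metaStore)),
//		WithMessageSigning(true),
//		WithStrictSignatureVerification(true),
//	)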

func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Message) ValidationResult {
	p := m.GetFrom()

	v.mx.RLock()
	nonceBytes, err := v.meta.Get(ctx, p)
	v.mx.RUnlock()

	if err != nil {
		log.Warnf("error retrieving peer nonce: %s", err)
		return ValidationIgnore
	}

	var nonce uint64
	if len(nonceBytes) > 0 {
		nonce = binary.BigEndian.Uint64(nonceBytes)
	}

	var seqno uint64
	seqnoBytes := m.GetSeqno()
	if len(seqnoBytes) > 0 {
		seqno = binary.BigEndian.Uint64(seqnoBytes)
	}

	// compare against the largest seen nonce
	if seqno <= nonce {
		return ValidationIgnore
	}

	// get the nonce and compare again with an exclusive lock before committing (cf. concurrent validation)
	v.mx.Lock()
	defer v.mx.Unlock()

	nonceBytes, err = v.meta.Get(ctx, p)
	if err != nil {
		log.Warnf("error retrieving peer nonce: %s", err)
		return ValidationIgnore
	}

	if len(nonceBytes) > 0 {
		nonce = binary.BigEndian.Uint64(nonceBytes)
	}

	if seqno <= nonce {
		return ValidationIgnore
	}

	// update the nonce
	nonceBytes = make([]byte, 8)
	binary.BigEndian.PutUint64(nonceBytes, seqno)

	err = v.meta.Put(ctx, p, nonceBytes)
	if err != nil {
		log.Warnf("error storing peer nonce: %s", err)
	}

	return ValidationAccept
}

278
go-libp2p-blossomsub/validation_builtin_test.go
Normal file
@ -0,0 +1,278 @@
package blossomsub

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	pool "github.com/libp2p/go-buffer-pool"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-msgio"
	"github.com/multiformats/go-varint"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

var rng *rand.Rand

func init() {
	rng = rand.New(rand.NewSource(314159))
}

func TestBasicSeqnoValidator1(t *testing.T) {
	testBasicSeqnoValidator(t, time.Minute)
}

func TestBasicSeqnoValidator2(t *testing.T) {
	testBasicSeqnoValidator(t, time.Nanosecond)
}

func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 20)
	psubs := getPubsubsWithOptionC(ctx, hosts,
		func(i int) Option {
			return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore()))
		},
		func(i int) Option {
			return WithSeenMessagesTTL(ttl)
		},
	)

	var msgs []*Subscription
	for _, ps := range psubs {
		subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20})
		if err != nil {
			t.Fatal(err)
		}

		msgs = append(msgs, subch)
	}

	// connectAll(t, hosts)
	sparseConnect(t, hosts)

	time.Sleep(time.Millisecond * 100)

	for i := 0; i < 100; i++ {
		msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i))

		owner := rng.Intn(len(psubs))

		psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg)

		for _, sub := range msgs {
			got, err := sub.Next(ctx)
			if err != nil {
				t.Fatal(sub.err)
			}
			if !bytes.Equal(msg, got.Data) {
				t.Fatal("got wrong message!")
			}
		}
	}
}

func TestBasicSeqnoValidatorReplay(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 20)
	psubs := getPubsubsWithOptionC(ctx, hosts[:19],
		func(i int) Option {
			return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore()))
		},
		func(i int) Option {
			return WithSeenMessagesTTL(time.Nanosecond)
		},
	)
	_ = newReplayActor(t, ctx, hosts[19])

	var msgs []*Subscription
	for _, ps := range psubs {
		subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20})
		if err != nil {
			t.Fatal(err)
		}

		msgs = append(msgs, subch)
	}

	sparseConnect(t, hosts)

	time.Sleep(time.Millisecond * 100)

	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i))

		owner := rng.Intn(len(psubs))

		psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg)

		for _, sub := range msgs {
			got, err := sub.Next(ctx)
			if err != nil {
				t.Fatal(sub.err)
			}
			if !bytes.Equal(msg, got.Data) {
				t.Fatal("got wrong message!")
			}
		}
	}

	for _, sub := range msgs {
		assertNeverReceives(t, sub, time.Second)
	}
}

type mockPeerMetadataStore struct {
	meta map[peer.ID][]byte
}

func newMockPeerMetadataStore() *mockPeerMetadataStore {
	return &mockPeerMetadataStore{
		meta: make(map[peer.ID][]byte),
	}
}

func (m *mockPeerMetadataStore) Get(ctx context.Context, p peer.ID) ([]byte, error) {
	v, ok := m.meta[p]
	if !ok {
		return nil, nil
	}
	return v, nil
}

func (m *mockPeerMetadataStore) Put(ctx context.Context, p peer.ID, v []byte) error {
	m.meta[p] = v
	return nil
}

type replayActor struct {
	t *testing.T

	ctx context.Context
	h   host.Host

	mx  sync.Mutex
	out map[peer.ID]network.Stream
}

func newReplayActor(t *testing.T, ctx context.Context, h host.Host) *replayActor {
	replay := &replayActor{t: t, ctx: ctx, h: h, out: make(map[peer.ID]network.Stream)}
	h.SetStreamHandler(FloodSubID, replay.handleStream)
	h.Network().Notify(&network.NotifyBundle{ConnectedF: replay.connected})
	return replay
}

func (r *replayActor) handleStream(s network.Stream) {
	defer s.Close()

	p := s.Conn().RemotePeer()

	rd := msgio.NewVarintReaderSize(s, 65536)
	for {
		msgbytes, err := rd.ReadMsg()
		if err != nil {
			s.Reset()
			rd.ReleaseMsg(msgbytes)
			return
		}

		rpc := new(pb.RPC)
		err = rpc.Unmarshal(msgbytes)
		rd.ReleaseMsg(msgbytes)
		if err != nil {
			s.Reset()
			return
		}

		// subscribe to the same bitmasks as our peer
		subs := rpc.GetSubscriptions()
		if len(subs) != 0 {
			go r.send(p, &pb.RPC{Subscriptions: subs})
		}

		// replay all received messages
		for _, pmsg := range rpc.GetPublish() {
			go r.replay(pmsg)
		}
	}
}

func (r *replayActor) send(p peer.ID, rpc *pb.RPC) {
	r.mx.Lock()
	defer r.mx.Unlock()

	s, ok := r.out[p]
	if !ok {
		r.t.Logf("cannot send message to %s: no stream", p)
		return
	}

	size := uint64(rpc.Size())

	buf := pool.Get(varint.UvarintSize(size) + int(size))
	defer pool.Put(buf)

	n := binary.PutUvarint(buf, size)

	_, err := rpc.MarshalTo(buf[n:])
	if err != nil {
		r.t.Logf("replay: error marshalling message: %s", err)
		return
	}

	_, err = s.Write(buf)
	if err != nil {
		r.t.Logf("replay: error sending message: %s", err)
	}
}

func (r *replayActor) replay(msg *pb.Message) {
	// replay the message 10 times to a random subset of peers
	for i := 0; i < 10; i++ {
		delay := time.Duration(1+rng.Intn(20)) * time.Millisecond
		time.Sleep(delay)

		var peers []peer.ID
		r.mx.Lock()
		for p := range r.out {
			if rng.Intn(2) > 0 {
				peers = append(peers, p)
			}
		}
		r.mx.Unlock()

		rpc := &pb.RPC{Publish: []*pb.Message{msg}}
		r.t.Logf("replaying msg to %d peers", len(peers))
		for _, p := range peers {
			r.send(p, rpc)
		}
	}
}

func (r *replayActor) handleConnected(p peer.ID) {
	s, err := r.h.NewStream(r.ctx, p, FloodSubID)
	if err != nil {
		r.t.Logf("replay: error opening stream: %s", err)
		return
	}

	r.mx.Lock()
	defer r.mx.Unlock()
	r.out[p] = s
}

func (r *replayActor) connected(_ network.Network, conn network.Conn) {
	go r.handleConnected(conn.RemotePeer())
}

334
go-libp2p-blossomsub/validation_test.go
Normal file
@ -0,0 +1,334 @@
package blossomsub

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

func TestRegisterUnregisterValidator(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 1)
	psubs := getPubsubs(ctx, hosts)

	err := psubs[0].RegisterBitmaskValidator([]byte{0xf0, 0x00}, func(context.Context, peer.ID, *Message) bool {
		return true
	})
	if err != nil {
		t.Fatal(err)
	}

	err = psubs[0].UnregisterBitmaskValidator([]byte{0xf0, 0x00})
	if err != nil {
		t.Fatal(err)
	}

	err = psubs[0].UnregisterBitmaskValidator([]byte{0xf0, 0x00})
	if err == nil {
		t.Fatal("Unregistered bogus bitmask validator")
	}
}

func TestRegisterValidatorEx(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 3)
	psubs := getPubsubs(ctx, hosts)

	err := psubs[0].RegisterBitmaskValidator([]byte{0x7e, 0x57},
		Validator(func(context.Context, peer.ID, *Message) bool {
			return true
		}))
	if err != nil {
		t.Fatal(err)
	}

	err = psubs[1].RegisterBitmaskValidator([]byte{0x7e, 0x57},
		ValidatorEx(func(context.Context, peer.ID, *Message) ValidationResult {
			return ValidationAccept
		}))
	if err != nil {
		t.Fatal(err)
	}

	err = psubs[2].RegisterBitmaskValidator([]byte{0x7e, 0x57}, "bogus")
	if err == nil {
		t.Fatal("expected error")
	}
}

func TestValidate(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 2)
	psubs := getPubsubs(ctx, hosts)

	connect(t, hosts[0], hosts[1])
	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}

	err := psubs[1].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool {
		return !bytes.Contains(msg.Data, []byte("illegal"))
	})
	if err != nil {
		t.Fatal(err)
	}

	sub, err := psubs[1].Subscribe(bitmask)
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 50)

	msgs := []struct {
		msg       []byte
		validates bool
	}{
		{msg: []byte("this is a legal message"), validates: true},
		{msg: []byte("there also is nothing controversial about this message"), validates: true},
		{msg: []byte("openly illegal content will be censored"), validates: false},
		{msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true},
	}

	for _, tc := range msgs {
		err := psubs[0].Publish(bitmask, tc.msg)
		if err != nil {
			t.Fatal(err)
		}

		select {
		case msg := <-sub.ch:
			if !tc.validates {
				t.Log(msg)
				t.Error("expected message validation to filter out the message")
			}
		case <-time.After(333 * time.Millisecond):
			if tc.validates {
				t.Error("expected message validation to accept the message")
			}
		}
	}
}

func TestValidate2(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 1)
	psubs := getPubsubs(ctx, hosts)

	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}

	err := psubs[0].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool {
		return !bytes.Contains(msg.Data, []byte("illegal"))
	})
	if err != nil {
		t.Fatal(err)
	}

	msgs := []struct {
		msg       []byte
		validates bool
	}{
		{msg: []byte("this is a legal message"), validates: true},
		{msg: []byte("there also is nothing controversial about this message"), validates: true},
		{msg: []byte("openly illegal content will be censored"), validates: false},
		{msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true},
	}

	for _, tc := range msgs {
		err := psubs[0].Publish(bitmask, tc.msg)
		if tc.validates {
			if err != nil {
				t.Fatal(err)
			}
		} else {
			if err == nil {
				t.Fatal("expected validation to fail for this message")
			}
		}
	}
}

func TestValidateOverload(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	type msg struct {
		msg       []byte
		validates bool
	}

	tcs := []struct {
		msgs []msg

		maxConcurrency int
	}{
		{
			maxConcurrency: 10,
			msgs: []msg{
				{msg: []byte("this is a legal message"), validates: true},
				{msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true},
				{msg: []byte("there also is nothing controversial about this message"), validates: true},
				{msg: []byte("also fine"), validates: true},
				{msg: []byte("still, all good"), validates: true},
				{msg: []byte("this is getting boring"), validates: true},
				{msg: []byte{0xf0, 0x00}, validates: true},
				{msg: []byte{0xf0, 0x0b, 0xa1, 0x20}, validates: true},
				{msg: []byte("foofoo"), validates: true},
				{msg: []byte("barfoo"), validates: true},
				{msg: []byte("oh no!"), validates: false},
			},
		},
		{
			maxConcurrency: 2,
			msgs: []msg{
				{msg: []byte("this is a legal message"), validates: true},
				{msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true},
				{msg: []byte("oh no!"), validates: false},
			},
		},
	}

	for tci, tc := range tcs {
		t.Run(fmt.Sprintf("%d", tci), func(t *testing.T) {
			hosts := getNetHosts(t, ctx, 2)
			psubs := getPubsubs(ctx, hosts)

			connect(t, hosts[0], hosts[1])
			bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}

			block := make(chan struct{})

			err := psubs[1].RegisterBitmaskValidator(bitmask,
				func(ctx context.Context, from peer.ID, msg *Message) bool {
					<-block
					return true
				},
				WithValidatorConcurrency(tc.maxConcurrency))

			if err != nil {
				t.Fatal(err)
			}

			sub, err := psubs[1].Subscribe(bitmask)
			if err != nil {
				t.Fatal(err)
			}

			time.Sleep(time.Millisecond * 50)

			if len(tc.msgs) != tc.maxConcurrency+1 {
				t.Fatalf("expected number of messages sent to be maxConcurrency+1. Got %d, expected %d", len(tc.msgs), tc.maxConcurrency+1)
			}

			p := psubs[0]

			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				for _, tmsg := range tc.msgs {
					select {
					case msg := <-sub.ch:
						if !tmsg.validates {
							t.Log(msg)
							t.Error("expected message validation to drop the message because all validator goroutines are taken")
						}
					case <-time.After(time.Second):
						if tmsg.validates {
							t.Error("expected message validation to accept the message")
						}
					}
				}
				wg.Done()
			}()

			for _, tmsg := range tc.msgs {
				err := p.Publish(bitmask, tmsg.msg)
				if err != nil {
					t.Fatal(err)
				}
			}

			// wait a bit before unblocking the validator goroutines
			time.Sleep(500 * time.Millisecond)
			close(block)

			wg.Wait()
		})
	}
}

func TestValidateAssortedOptions(t *testing.T) {
	// this test adds coverage for various options that are not covered in other tests
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hosts := getNetHosts(t, ctx, 10)
	psubs := getPubsubs(ctx, hosts,
		WithValidateQueueSize(10),
		WithValidateThrottle(10),
		WithValidateWorkers(10))

	sparseConnect(t, hosts)

	for _, psub := range psubs {
		err := psub.RegisterBitmaskValidator([]byte{0xff, 0x00, 0x00, 0x00},
			func(context.Context, peer.ID, *Message) bool {
				return true
			},
			WithValidatorTimeout(100*time.Millisecond))
		if err != nil {
			t.Fatal(err)
		}

		err = psub.RegisterBitmaskValidator([]byte{0x00, 0xff, 0x00, 0x00},
			func(context.Context, peer.ID, *Message) bool {
				return true
			},
			WithValidatorInline(true))
		if err != nil {
			t.Fatal(err)
		}
	}

	var subs1, subs2 []*Subscription
	for _, ps := range psubs {
		sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
		if err != nil {
			t.Fatal(err)
		}
		subs1 = append(subs1, sub)

		sub, err = ps.Subscribe([]byte{0x00, 0xff, 0x00, 0x00})
		if err != nil {
			t.Fatal(err)
		}
		subs2 = append(subs2, sub)
	}

	time.Sleep(time.Second)

	for i := 0; i < 10; i++ {
		msg := []byte(fmt.Sprintf("message %d", i))

		psubs[i].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg)
		for _, sub := range subs1 {
			assertReceive(t, sub, msg)
		}

		psubs[i].Publish([]byte{0x00, 0xff, 0x00, 0x00}, msg)
		for _, sub := range subs2 {
			assertReceive(t, sub, msg)
		}
	}
}