From ffab09ae6bc961684993c0f1ac5fa6f9bad5c3a7 Mon Sep 17 00:00:00 2001 From: Cassandra Heart Date: Fri, 7 Jul 2023 01:07:10 -0500 Subject: [PATCH] Add ceremony-targeted go-libp2p-blossomsub --- go-libp2p-blossomsub/.gitignore | 2 + go-libp2p-blossomsub/LICENSE | 12 + go-libp2p-blossomsub/LICENSE-APACHE | 5 + go-libp2p-blossomsub/LICENSE-MIT | 19 + go-libp2p-blossomsub/README.md | 11 + go-libp2p-blossomsub/backoff.go | 107 + go-libp2p-blossomsub/backoff_test.go | 122 + go-libp2p-blossomsub/bitmask.go | 477 ++++ go-libp2p-blossomsub/bitmask_test.go | 1064 +++++++ go-libp2p-blossomsub/blacklist.go | 58 + go-libp2p-blossomsub/blacklist_test.go | 125 + go-libp2p-blossomsub/blossomsub.go | 1984 +++++++++++++ .../blossomsub_connmgr_test.go | 172 ++ go-libp2p-blossomsub/blossomsub_feat.go | 52 + go-libp2p-blossomsub/blossomsub_feat_test.go | 109 + .../blossomsub_matchfn_test.go | 84 + go-libp2p-blossomsub/blossomsub_spam_test.go | 813 ++++++ go-libp2p-blossomsub/blossomsub_test.go | 2502 +++++++++++++++++ go-libp2p-blossomsub/comm.go | 230 ++ go-libp2p-blossomsub/discovery.go | 348 +++ go-libp2p-blossomsub/discovery_test.go | 309 ++ go-libp2p-blossomsub/floodsub.go | 108 + go-libp2p-blossomsub/floodsub_test.go | 1282 +++++++++ go-libp2p-blossomsub/go.mod | 87 + go-libp2p-blossomsub/go.sum | 443 +++ go-libp2p-blossomsub/gossip_tracer.go | 200 ++ go-libp2p-blossomsub/gossip_tracer_test.go | 103 + go-libp2p-blossomsub/mcache.go | 105 + go-libp2p-blossomsub/mcache_test.go | 167 ++ go-libp2p-blossomsub/midgen.go | 52 + go-libp2p-blossomsub/notify.go | 75 + go-libp2p-blossomsub/pb/Makefile | 11 + go-libp2p-blossomsub/pb/extensions.go | 57 + go-libp2p-blossomsub/pb/rpc.pb.go | 818 ++++++ go-libp2p-blossomsub/pb/rpc.proto | 59 + go-libp2p-blossomsub/pb/trace.pb.go | 2146 ++++++++++++++ go-libp2p-blossomsub/pb/trace.proto | 152 + go-libp2p-blossomsub/peer_gater.go | 453 +++ go-libp2p-blossomsub/peer_gater_test.go | 128 + go-libp2p-blossomsub/pubsub.go | 1422 ++++++++++ go-libp2p-blossomsub/pubsub_test.go | 49 + go-libp2p-blossomsub/randomsub.go | 168 ++ go-libp2p-blossomsub/randomsub_test.go | 192 ++ go-libp2p-blossomsub/score.go | 1081 +++++++ go-libp2p-blossomsub/score_params.go | 423 +++ go-libp2p-blossomsub/score_params_test.go | 739 +++++ go-libp2p-blossomsub/score_test.go | 1080 +++++++ go-libp2p-blossomsub/sign.go | 138 + go-libp2p-blossomsub/sign_test.go | 43 + go-libp2p-blossomsub/subscription.go | 51 + go-libp2p-blossomsub/subscription_filter.go | 125 + .../subscription_filter_test.go | 177 ++ go-libp2p-blossomsub/tag_tracer.go | 259 ++ go-libp2p-blossomsub/tag_tracer_test.go | 260 ++ .../timecache/first_seen_cache.go | 56 + .../timecache/first_seen_cache_test.go | 46 + .../timecache/last_seen_cache.go | 58 + .../timecache/last_seen_cache_test.go | 92 + go-libp2p-blossomsub/timecache/time_cache.go | 52 + go-libp2p-blossomsub/timecache/util.go | 35 + go-libp2p-blossomsub/trace.go | 530 ++++ go-libp2p-blossomsub/trace_test.go | 323 +++ go-libp2p-blossomsub/tracer.go | 310 ++ go-libp2p-blossomsub/validation.go | 590 ++++ go-libp2p-blossomsub/validation_builtin.go | 101 + .../validation_builtin_test.go | 278 ++ go-libp2p-blossomsub/validation_test.go | 334 +++ 67 files changed, 24063 insertions(+) create mode 100644 go-libp2p-blossomsub/.gitignore create mode 100644 go-libp2p-blossomsub/LICENSE create mode 100644 go-libp2p-blossomsub/LICENSE-APACHE create mode 100644 go-libp2p-blossomsub/LICENSE-MIT create mode 100644 go-libp2p-blossomsub/README.md create mode 100644 go-libp2p-blossomsub/backoff.go 
create mode 100644 go-libp2p-blossomsub/backoff_test.go create mode 100644 go-libp2p-blossomsub/bitmask.go create mode 100644 go-libp2p-blossomsub/bitmask_test.go create mode 100644 go-libp2p-blossomsub/blacklist.go create mode 100644 go-libp2p-blossomsub/blacklist_test.go create mode 100644 go-libp2p-blossomsub/blossomsub.go create mode 100644 go-libp2p-blossomsub/blossomsub_connmgr_test.go create mode 100644 go-libp2p-blossomsub/blossomsub_feat.go create mode 100644 go-libp2p-blossomsub/blossomsub_feat_test.go create mode 100644 go-libp2p-blossomsub/blossomsub_matchfn_test.go create mode 100644 go-libp2p-blossomsub/blossomsub_spam_test.go create mode 100644 go-libp2p-blossomsub/blossomsub_test.go create mode 100644 go-libp2p-blossomsub/comm.go create mode 100644 go-libp2p-blossomsub/discovery.go create mode 100644 go-libp2p-blossomsub/discovery_test.go create mode 100644 go-libp2p-blossomsub/floodsub.go create mode 100644 go-libp2p-blossomsub/floodsub_test.go create mode 100644 go-libp2p-blossomsub/go.mod create mode 100644 go-libp2p-blossomsub/go.sum create mode 100644 go-libp2p-blossomsub/gossip_tracer.go create mode 100644 go-libp2p-blossomsub/gossip_tracer_test.go create mode 100644 go-libp2p-blossomsub/mcache.go create mode 100644 go-libp2p-blossomsub/mcache_test.go create mode 100644 go-libp2p-blossomsub/midgen.go create mode 100644 go-libp2p-blossomsub/notify.go create mode 100644 go-libp2p-blossomsub/pb/Makefile create mode 100644 go-libp2p-blossomsub/pb/extensions.go create mode 100644 go-libp2p-blossomsub/pb/rpc.pb.go create mode 100644 go-libp2p-blossomsub/pb/rpc.proto create mode 100644 go-libp2p-blossomsub/pb/trace.pb.go create mode 100644 go-libp2p-blossomsub/pb/trace.proto create mode 100644 go-libp2p-blossomsub/peer_gater.go create mode 100644 go-libp2p-blossomsub/peer_gater_test.go create mode 100644 go-libp2p-blossomsub/pubsub.go create mode 100644 go-libp2p-blossomsub/pubsub_test.go create mode 100644 go-libp2p-blossomsub/randomsub.go create mode 100644 go-libp2p-blossomsub/randomsub_test.go create mode 100644 go-libp2p-blossomsub/score.go create mode 100644 go-libp2p-blossomsub/score_params.go create mode 100644 go-libp2p-blossomsub/score_params_test.go create mode 100644 go-libp2p-blossomsub/score_test.go create mode 100644 go-libp2p-blossomsub/sign.go create mode 100644 go-libp2p-blossomsub/sign_test.go create mode 100644 go-libp2p-blossomsub/subscription.go create mode 100644 go-libp2p-blossomsub/subscription_filter.go create mode 100644 go-libp2p-blossomsub/subscription_filter_test.go create mode 100644 go-libp2p-blossomsub/tag_tracer.go create mode 100644 go-libp2p-blossomsub/tag_tracer_test.go create mode 100644 go-libp2p-blossomsub/timecache/first_seen_cache.go create mode 100644 go-libp2p-blossomsub/timecache/first_seen_cache_test.go create mode 100644 go-libp2p-blossomsub/timecache/last_seen_cache.go create mode 100644 go-libp2p-blossomsub/timecache/last_seen_cache_test.go create mode 100644 go-libp2p-blossomsub/timecache/time_cache.go create mode 100644 go-libp2p-blossomsub/timecache/util.go create mode 100644 go-libp2p-blossomsub/trace.go create mode 100644 go-libp2p-blossomsub/trace_test.go create mode 100644 go-libp2p-blossomsub/tracer.go create mode 100644 go-libp2p-blossomsub/validation.go create mode 100644 go-libp2p-blossomsub/validation_builtin.go create mode 100644 go-libp2p-blossomsub/validation_builtin_test.go create mode 100644 go-libp2p-blossomsub/validation_test.go diff --git a/go-libp2p-blossomsub/.gitignore b/go-libp2p-blossomsub/.gitignore 
new file mode 100644 index 0000000..66f8fb5 --- /dev/null +++ b/go-libp2p-blossomsub/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.vscode/ diff --git a/go-libp2p-blossomsub/LICENSE b/go-libp2p-blossomsub/LICENSE new file mode 100644 index 0000000..ddae663 --- /dev/null +++ b/go-libp2p-blossomsub/LICENSE @@ -0,0 +1,12 @@ +This project heavily borrows code from go-libp2p-pubsub's codebase and therefore must +be subject to the license the pubsub repo utilizes. This has been kept verbatim for +reference: + +This project is transitioning from an MIT-only license to a dual MIT/Apache-2.0 license. +Unless otherwise noted, all code contributed prior to 2019-05-06 and not contributed by +a user listed in [this signoff issue](https://github.com/ipfs/go-ipfs/issues/6302) is +licensed under MIT-only. All new contributions (and past contributions since 2019-05-06) +are licensed under a dual MIT/Apache-2.0 license. + +MIT: https://www.opensource.org/licenses/mit +Apache-2.0: https://www.apache.org/licenses/license-2.0 \ No newline at end of file diff --git a/go-libp2p-blossomsub/LICENSE-APACHE b/go-libp2p-blossomsub/LICENSE-APACHE new file mode 100644 index 0000000..4c83a28 --- /dev/null +++ b/go-libp2p-blossomsub/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. \ No newline at end of file diff --git a/go-libp2p-blossomsub/LICENSE-MIT b/go-libp2p-blossomsub/LICENSE-MIT new file mode 100644 index 0000000..749aa1e --- /dev/null +++ b/go-libp2p-blossomsub/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/go-libp2p-blossomsub/README.md b/go-libp2p-blossomsub/README.md new file mode 100644 index 0000000..e34dfb9 --- /dev/null +++ b/go-libp2p-blossomsub/README.md @@ -0,0 +1,11 @@ +# go-libp2p-blossomsub + +First-pass of blossomsub, rudimentary fork of gossipsub – it does not merge subscriptions, bloom filtering needs to +happen at the publish level. This will be updated post-ceremony with the full bloom filter version. 
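A minimal usage sketch may help orient readers until the full bloom-filter version lands. It relies only on the API visible in this patch (PubSub.Join, Bitmask.Subscribe, Bitmask.Publish, Subscription.Next); the NewBlossomSub constructor name and the libp2p host setup are assumptions not confirmed by this excerpt.

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	// Assumption: a local libp2p host and a NewBlossomSub-style constructor.
	// Only Join/Subscribe/Publish/Next are confirmed by the code in this patch.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	ps, err := blossomsub.NewBlossomSub(ctx, h)
	if err != nil {
		panic(err)
	}

	// Bitmasks play the role of gossipsub's string topics but are byte slices,
	// e.g. []byte{0xf0, 0x0b, 0xa1, 0x20} as used in the tests below.
	bitmask, err := ps.Join([]byte{0xf0, 0x0b, 0xa1, 0x20})
	if err != nil {
		panic(err)
	}

	sub, err := bitmask.Subscribe()
	if err != nil {
		panic(err)
	}

	if err := bitmask.Publish(ctx, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.GetData()))
}
```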
+ +## License + +The go-libp2p-blossomsub project being forked from pubsub inherits the dual-license under Apache 2.0 and MIT terms: + +- Apache License, Version 2.0, ([LICENSE-APACHE](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](./LICENSE-MIT) or http://opensource.org/licenses/MIT) diff --git a/go-libp2p-blossomsub/backoff.go b/go-libp2p-blossomsub/backoff.go new file mode 100644 index 0000000..00cdc37 --- /dev/null +++ b/go-libp2p-blossomsub/backoff.go @@ -0,0 +1,107 @@ +package blossomsub + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + MinBackoffDelay = 100 * time.Millisecond + MaxBackoffDelay = 10 * time.Second + TimeToLive = 10 * time.Minute + BackoffCleanupInterval = 1 * time.Minute + BackoffMultiplier = 2 + MaxBackoffJitterCoff = 100 + MaxBackoffAttempts = 4 +) + +type backoffHistory struct { + duration time.Duration + lastTried time.Time + attempts int +} + +type backoff struct { + mu sync.Mutex + info map[peer.ID]*backoffHistory + ct int // size threshold that kicks off the cleaner + ci time.Duration // cleanup intervals + maxAttempts int // maximum backoff attempts prior to ejection +} + +func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Duration, maxAttempts int) *backoff { + b := &backoff{ + mu: sync.Mutex{}, + ct: sizeThreshold, + ci: cleanupInterval, + maxAttempts: maxAttempts, + info: make(map[peer.ID]*backoffHistory), + } + + rand.Seed(time.Now().UnixNano()) // used for jitter + go b.cleanupLoop(ctx) + + return b +} + +func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) { + b.mu.Lock() + defer b.mu.Unlock() + + h, ok := b.info[id] + switch { + case !ok || time.Since(h.lastTried) > TimeToLive: + // first request goes immediately. 
+ h = &backoffHistory{ + duration: time.Duration(0), + attempts: 0, + } + case h.attempts >= b.maxAttempts: + return 0, fmt.Errorf("peer %s has reached its maximum backoff attempts", id) + + case h.duration < MinBackoffDelay: + h.duration = MinBackoffDelay + + case h.duration < MaxBackoffDelay: + jitter := rand.Intn(MaxBackoffJitterCoff) + h.duration = (BackoffMultiplier * h.duration) + time.Duration(jitter)*time.Millisecond + if h.duration > MaxBackoffDelay || h.duration < 0 { + h.duration = MaxBackoffDelay + } + } + + h.attempts += 1 + h.lastTried = time.Now() + b.info[id] = h + return h.duration, nil +} + +func (b *backoff) cleanup() { + b.mu.Lock() + defer b.mu.Unlock() + + for id, h := range b.info { + if time.Since(h.lastTried) > TimeToLive { + delete(b.info, id) + } + } +} + +func (b *backoff) cleanupLoop(ctx context.Context) { + ticker := time.NewTicker(b.ci) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return // pubsub shutting down + case <-ticker.C: + b.cleanup() + } + } +} diff --git a/go-libp2p-blossomsub/backoff_test.go b/go-libp2p-blossomsub/backoff_test.go new file mode 100644 index 0000000..1941a81 --- /dev/null +++ b/go-libp2p-blossomsub/backoff_test.go @@ -0,0 +1,122 @@ +package blossomsub + +import ( + "context" + "fmt" + "math" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestBackoff_Update(t *testing.T) { + id1 := peer.ID("peer-1") + id2 := peer.ID("peer-2") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + size := 10 + cleanupInterval := 5 * time.Second + maxBackoffAttempts := 10 + + b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts) + + if len(b.info) > 0 { + t.Fatal("non-empty info map for backoff") + } + + if d, err := b.updateAndGet(id1); d != time.Duration(0) || err != nil { + t.Fatalf("invalid initialization: %v, \t, %s", d, err) + } + if d, err := b.updateAndGet(id2); d != time.Duration(0) || err != nil { + t.Fatalf("invalid initialization: %v, \t, %s", d, err) + } + + for i := 0; i < maxBackoffAttempts-1; i++ { + got, err := b.updateAndGet(id1) + if err != nil { + t.Fatalf("unexpected error post update: %s", err) + } + + expected := time.Duration(math.Pow(BackoffMultiplier, float64(i)) * + float64(MinBackoffDelay+MaxBackoffJitterCoff*time.Millisecond)) + if expected > MaxBackoffDelay { + expected = MaxBackoffDelay + } + + if expected < got { // considering jitter, expected backoff must always be greater than or equal to actual. + t.Fatalf("invalid backoff result, expected: %v, got: %v", expected, got) + } + } + + // trying once more beyond the threshold, hence expecting exceeding threshold + if _, err := b.updateAndGet(id1); err == nil { + t.Fatalf("expected an error for going beyond threshold but got nil") + } + + got, err := b.updateAndGet(id2) + if err != nil { + t.Fatalf("unexpected error post update: %s", err) + } + if got != MinBackoffDelay { + t.Fatalf("invalid backoff result, expected: %v, got: %v", MinBackoffDelay, got) + } + + // sets last tried of id2 to long ago that it resets back upon next try. + // update attempts on id2 are below threshold, hence peer should never go beyond backoff attempt threshold. 
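For context, the helper above gives each peer an exponentially growing delay with jitter: the first updateAndGet call returns zero, the next returns MinBackoffDelay, and later calls roughly double the previous delay (plus up to MaxBackoffJitterCoff milliseconds of jitter) up to MaxBackoffDelay, returning an error once maxAttempts is exhausted; entries expire after TimeToLive. A package-internal sketch of the intended call pattern follows (the retried operation op is hypothetical and not part of this patch):

```go
// Package-internal sketch: backoff, newBackoff and updateAndGet are unexported,
// so this only compiles inside package blossomsub (which already imports
// context, time and github.com/libp2p/go-libp2p/core/peer).
func retryWithBackoff(ctx context.Context, b *backoff, pid peer.ID,
	op func(context.Context, peer.ID) error) error {
	for {
		delay, err := b.updateAndGet(pid)
		if err != nil {
			// MaxBackoffAttempts reached within TimeToLive; give up on this peer.
			return err
		}
		// delay is 0 on the first attempt, then MinBackoffDelay, then roughly
		// doubles (plus jitter) per attempt, capped at MaxBackoffDelay.
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return ctx.Err()
		}
		if err := op(ctx, pid); err == nil {
			return nil
		}
	}
}
```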
+ b.info[id2].lastTried = time.Now().Add(-TimeToLive) + got, err = b.updateAndGet(id2) + if err != nil { + t.Fatalf("unexpected error post update: %s", err) + } + if got != time.Duration(0) { + t.Fatalf("invalid ttl expiration, expected: %v, got: %v", time.Duration(0), got) + } + + if len(b.info) != 2 { + t.Fatalf("pre-invalidation attempt, info map size mismatch, expected: %d, got: %d", 2, len(b.info)) + } + +} + +func TestBackoff_Clean(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + size := 10 + cleanupInterval := 2 * time.Second + maxBackoffAttempts := 100 // setting attempts to a high number hence testing cleanup logic. + b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts) + + for i := 0; i < size; i++ { + id := peer.ID(fmt.Sprintf("peer-%d", i)) + _, err := b.updateAndGet(id) + if err != nil { + t.Fatalf("unexpected error post update: %s", err) + } + b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry + } + + if len(b.info) != size { + t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info)) + } + + // waits for a cleanup loop to kick-in + time.Sleep(2 * cleanupInterval) + + // next update should trigger cleanup + got, err := b.updateAndGet(peer.ID("some-new-peer")) + if err != nil { + t.Fatalf("unexpected error post update: %s", err) + } + if got != time.Duration(0) { + t.Fatalf("invalid backoff result, expected: %v, got: %v", time.Duration(0), got) + } + + // except "some-new-peer" every other records must be cleaned up + if len(b.info) != 1 { + t.Fatalf("info map size mismatch, expected: %d, got: %d", 1, len(b.info)) + } +} diff --git a/go-libp2p-blossomsub/bitmask.go b/go-libp2p-blossomsub/bitmask.go new file mode 100644 index 0000000..4752cd6 --- /dev/null +++ b/go-libp2p-blossomsub/bitmask.go @@ -0,0 +1,477 @@ +package blossomsub + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// ErrBitmaskClosed is returned if a Bitmask is utilized after it has been closed +var ErrBitmaskClosed = errors.New("this Bitmask is closed, try opening a new one") + +// ErrNilSignKey is returned if a nil private key was provided +var ErrNilSignKey = errors.New("nil sign key") + +// ErrEmptyPeerID is returned if an empty peer ID was provided +var ErrEmptyPeerID = errors.New("empty peer ID") + +// Bitmask is the handle for a pubsub bitmask +type Bitmask struct { + p *PubSub + bitmask []byte + + evtHandlerMux sync.RWMutex + evtHandlers map[*BitmaskEventHandler]struct{} + + mux sync.RWMutex + closed bool +} + +// String returns the bitmask associated with t +func (t *Bitmask) Bitmask() []byte { + return t.bitmask +} + +// SetScoreParams sets the bitmask score parameters if the pubsub router supports peer +// scoring +func (t *Bitmask) SetScoreParams(p *BitmaskScoreParams) error { + err := p.validate() + if err != nil { + return fmt.Errorf("invalid bitmask score parameters: %w", err) + } + + t.mux.Lock() + defer t.mux.Unlock() + + if t.closed { + return ErrBitmaskClosed + } + + result := make(chan error, 1) + update := func() { + gs, ok := t.p.rt.(*BlossomSubRouter) + if !ok { + result <- fmt.Errorf("pubsub router is not BlossomSub") + return + } + + if gs.score == nil { + result <- fmt.Errorf("peer scoring is not enabled in router") + return + } + + err := gs.score.SetBitmaskScoreParams(t.bitmask, p) + result <- err + } + + select { 
+ case t.p.eval <- update: + err = <-result + return err + + case <-t.p.ctx.Done(): + return t.p.ctx.Err() + } +} + +// EventHandler creates a handle for bitmask specific events +// Multiple event handlers may be created and will operate independently of each other +func (t *Bitmask) EventHandler(opts ...BitmaskEventHandlerOpt) (*BitmaskEventHandler, error) { + t.mux.RLock() + defer t.mux.RUnlock() + if t.closed { + return nil, ErrBitmaskClosed + } + + h := &BitmaskEventHandler{ + bitmask: t, + err: nil, + + evtLog: make(map[peer.ID]EventType), + evtLogCh: make(chan struct{}, 1), + } + + for _, opt := range opts { + err := opt(h) + if err != nil { + return nil, err + } + } + + done := make(chan struct{}, 1) + + select { + case t.p.eval <- func() { + tmap := t.p.bitmasks[string(t.bitmask)] + for p := range tmap { + h.evtLog[p] = PeerJoin + } + + t.evtHandlerMux.Lock() + t.evtHandlers[h] = struct{}{} + t.evtHandlerMux.Unlock() + done <- struct{}{} + }: + case <-t.p.ctx.Done(): + return nil, t.p.ctx.Err() + } + + <-done + + return h, nil +} + +func (t *Bitmask) sendNotification(evt PeerEvent) { + t.evtHandlerMux.RLock() + defer t.evtHandlerMux.RUnlock() + + for h := range t.evtHandlers { + h.sendNotification(evt) + } +} + +// Subscribe returns a new Subscription for the bitmask. +// Note that subscription is not an instantaneous operation. It may take some time +// before the subscription is processed by the pubsub main loop and propagated to our peers. +func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) { + t.mux.RLock() + defer t.mux.RUnlock() + if t.closed { + return nil, ErrBitmaskClosed + } + + sub := &Subscription{ + bitmask: t.bitmask, + ctx: t.p.ctx, + } + + for _, opt := range opts { + err := opt(sub) + if err != nil { + return nil, err + } + } + + if sub.ch == nil { + // apply the default size + sub.ch = make(chan *Message, 32) + } + + out := make(chan *Subscription, 1) + + t.p.disc.Discover(sub.bitmask) + + select { + case t.p.addSub <- &addSubReq{ + sub: sub, + resp: out, + }: + case <-t.p.ctx.Done(): + return nil, t.p.ctx.Err() + } + + return <-out, nil +} + +// Relay enables message relaying for the bitmask and returns a reference +// cancel function. Subsequent calls increase the reference counter. +// To completely disable the relay, all references must be cancelled. +func (t *Bitmask) Relay() (RelayCancelFunc, error) { + t.mux.RLock() + defer t.mux.RUnlock() + if t.closed { + return nil, ErrBitmaskClosed + } + + out := make(chan RelayCancelFunc, 1) + + t.p.disc.Discover(t.bitmask) + + select { + case t.p.addRelay <- &addRelayReq{ + bitmask: t.bitmask, + resp: out, + }: + case <-t.p.ctx.Done(): + return nil, t.p.ctx.Err() + } + + return <-out, nil +} + +// RouterReady is a function that decides if a router is ready to publish +type RouterReady func(rt PubSubRouter, bitmask []byte) (bool, error) + +// ProvideKey is a function that provides a private key and its associated peer ID when publishing a new message +type ProvideKey func() (crypto.PrivKey, peer.ID) + +type PublishOptions struct { + ready RouterReady + customKey ProvideKey + local bool +} + +type PubOpt func(pub *PublishOptions) error + +// Publish publishes data to bitmask. 
+func (t *Bitmask) Publish(ctx context.Context, data []byte, opts ...PubOpt) error { + t.mux.RLock() + defer t.mux.RUnlock() + if t.closed { + return ErrBitmaskClosed + } + + pid := t.p.signID + key := t.p.signKey + + pub := &PublishOptions{} + for _, opt := range opts { + err := opt(pub) + if err != nil { + return err + } + } + + if pub.customKey != nil && !pub.local { + key, pid = pub.customKey() + if key == nil { + return ErrNilSignKey + } + if len(pid) == 0 { + return ErrEmptyPeerID + } + } + + m := &pb.Message{ + Data: data, + Bitmask: t.bitmask, + From: nil, + Seqno: nil, + } + if pid != "" { + m.From = []byte(pid) + m.Seqno = t.p.nextSeqno() + } + if key != nil { + m.From = []byte(pid) + err := signMessage(pid, key, m) + if err != nil { + return err + } + } + + if pub.ready != nil { + if t.p.disc.discovery != nil { + t.p.disc.Bootstrap(ctx, t.bitmask, pub.ready) + } else { + // TODO: we could likely do better than polling every 200ms. + // For example, block this goroutine on a channel, + // and check again whenever events tell us that the number of + // peers has increased. + var ticker *time.Ticker + readyLoop: + for { + // Check if ready for publishing. + // Similar to what disc.Bootstrap does. + res := make(chan bool, 1) + select { + case t.p.eval <- func() { + done, _ := pub.ready(t.p.rt, t.bitmask) + res <- done + }: + if <-res { + break readyLoop + } + case <-t.p.ctx.Done(): + return t.p.ctx.Err() + case <-ctx.Done(): + return ctx.Err() + } + if ticker == nil { + ticker = time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return fmt.Errorf("router is not ready: %w", ctx.Err()) + } + } + } + } + + return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local}) +} + +// WithReadiness returns a publishing option for only publishing when the router is ready. +// This option is not useful unless PubSub is also using WithDiscovery +func WithReadiness(ready RouterReady) PubOpt { + return func(pub *PublishOptions) error { + pub.ready = ready + return nil + } +} + +// WithLocalPublication returns a publishing option to notify in-process subscribers only. +// It prevents message publication to mesh peers. +// Useful in edge cases where the msg needs to be only delivered to the in-process subscribers, +// e.g. not to spam the network with outdated msgs. +// Should not be used specifically for in-process pubsubing. +func WithLocalPublication(local bool) PubOpt { + return func(pub *PublishOptions) error { + pub.local = local + return nil + } +} + +// WithSecretKeyAndPeerId returns a publishing option for providing a custom private key and its corresponding peer ID +// This option is useful when we want to send messages from "virtual", never-connectable peers in the network +func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt { + return func(pub *PublishOptions) error { + pub.customKey = func() (crypto.PrivKey, peer.ID) { + return key, pid + } + + return nil + } +} + +// Close closes down the bitmask. Will return an error unless there are no active event handlers or subscriptions. +// Does not error if the bitmask is already closed. 
+func (t *Bitmask) Close() error { + t.mux.Lock() + defer t.mux.Unlock() + if t.closed { + return nil + } + + req := &rmBitmaskReq{t, make(chan error, 1)} + + select { + case t.p.rmBitmask <- req: + case <-t.p.ctx.Done(): + return t.p.ctx.Err() + } + + err := <-req.resp + + if err == nil { + t.closed = true + } + + return err +} + +// ListPeers returns a list of peers we are connected to in the given bitmask. +func (t *Bitmask) ListPeers() []peer.ID { + t.mux.RLock() + defer t.mux.RUnlock() + if t.closed { + return []peer.ID{} + } + + return t.p.ListPeers(t.bitmask) +} + +type EventType int + +const ( + PeerJoin EventType = iota + PeerLeave +) + +// BitmaskEventHandler is used to manage bitmask specific events. No Subscription is required to receive events. +type BitmaskEventHandler struct { + bitmask *Bitmask + err error + + evtLogMx sync.Mutex + evtLog map[peer.ID]EventType + evtLogCh chan struct{} +} + +type BitmaskEventHandlerOpt func(t *BitmaskEventHandler) error + +type PeerEvent struct { + Type EventType + Peer peer.ID +} + +// Cancel closes the bitmask event handler +func (t *BitmaskEventHandler) Cancel() { + bitmask := t.bitmask + t.err = fmt.Errorf("bitmask event handler cancelled by calling handler.Cancel()") + + bitmask.evtHandlerMux.Lock() + delete(bitmask.evtHandlers, t) + t.bitmask.evtHandlerMux.Unlock() +} + +func (t *BitmaskEventHandler) sendNotification(evt PeerEvent) { + t.evtLogMx.Lock() + t.addToEventLog(evt) + t.evtLogMx.Unlock() +} + +// addToEventLog assumes a lock has been taken to protect the event log +func (t *BitmaskEventHandler) addToEventLog(evt PeerEvent) { + e, ok := t.evtLog[evt.Peer] + if !ok { + t.evtLog[evt.Peer] = evt.Type + // send signal that an event has been added to the event log + select { + case t.evtLogCh <- struct{}{}: + default: + } + } else if e != evt.Type { + delete(t.evtLog, evt.Peer) + } +} + +// pullFromEventLog assumes a lock has been taken to protect the event log +func (t *BitmaskEventHandler) pullFromEventLog() (PeerEvent, bool) { + for k, v := range t.evtLog { + evt := PeerEvent{Peer: k, Type: v} + delete(t.evtLog, k) + return evt, true + } + return PeerEvent{}, false +} + +// NextPeerEvent returns the next event regarding subscribed peers +// Guarantees: Peer Join and Peer Leave events for a given peer will fire in order. +// Unless a peer both Joins and Leaves before NextPeerEvent emits either event +// all events will eventually be received from NextPeerEvent. 
+func (t *BitmaskEventHandler) NextPeerEvent(ctx context.Context) (PeerEvent, error) { + for { + t.evtLogMx.Lock() + evt, ok := t.pullFromEventLog() + if ok { + // make sure an event log signal is available if there are events in the event log + if len(t.evtLog) > 0 { + select { + case t.evtLogCh <- struct{}{}: + default: + } + } + t.evtLogMx.Unlock() + return evt, nil + } + t.evtLogMx.Unlock() + + select { + case <-t.evtLogCh: + continue + case <-ctx.Done(): + return PeerEvent{}, ctx.Err() + } + } +} diff --git a/go-libp2p-blossomsub/bitmask_test.go b/go-libp2p-blossomsub/bitmask_test.go new file mode 100644 index 0000000..6e4d165 --- /dev/null +++ b/go-libp2p-blossomsub/bitmask_test.go @@ -0,0 +1,1064 @@ +package blossomsub + +import ( + "bytes" + "context" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/peer" + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +func getBitmasks(psubs []*PubSub, bitmask []byte, opts ...BitmaskOpt) []*Bitmask { + bitmasks := make([]*Bitmask, len(psubs)) + + for i, ps := range psubs { + t, err := ps.Join(bitmask, opts...) + if err != nil { + panic(err) + } + bitmasks[i] = t + } + + return bitmasks +} + +func getBitmaskEvts(bitmasks []*Bitmask, opts ...BitmaskEventHandlerOpt) []*BitmaskEventHandler { + handlers := make([]*BitmaskEventHandler, len(bitmasks)) + + for i, t := range bitmasks { + h, err := t.EventHandler(opts...) + if err != nil { + panic(err) + } + handlers[i] = h + } + + return handlers +} + +func TestBitmaskCloseWithOpenSubscription(t *testing.T) { + var sub *Subscription + var err error + testBitmaskCloseWithOpenResource(t, + func(bitmask *Bitmask) { + sub, err = bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + }, + func() { + sub.Cancel() + }, + ) +} + +func TestBitmaskCloseWithOpenEventHandler(t *testing.T) { + var evts *BitmaskEventHandler + var err error + testBitmaskCloseWithOpenResource(t, + func(bitmask *Bitmask) { + evts, err = bitmask.EventHandler() + if err != nil { + t.Fatal(err) + } + }, + func() { + evts.Cancel() + }, + ) +} + +func TestBitmaskCloseWithOpenRelay(t *testing.T) { + var relayCancel RelayCancelFunc + var err error + testBitmaskCloseWithOpenResource(t, + func(bitmask *Bitmask) { + relayCancel, err = bitmask.Relay() + if err != nil { + t.Fatal(err) + } + }, + func() { + relayCancel() + }, + ) +} + +func testBitmaskCloseWithOpenResource(t *testing.T, openResource func(bitmask *Bitmask), closeResource func()) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numHosts = 1 + bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20} + hosts := getNetHosts(t, ctx, numHosts) + ps := getPubsub(ctx, hosts[0]) + + // Try create and cancel bitmask + bitmask, err := ps.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + if err := bitmask.Close(); err != nil { + t.Fatal(err) + } + + // Try create and cancel bitmask while there's an outstanding subscription/event handler + bitmask, err = ps.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + openResource(bitmask) + + if err := bitmask.Close(); err == nil { + t.Fatal("expected an error closing a bitmask with an open resource") + } + + // Check if the bitmask closes properly after closing the resource + closeResource() + time.Sleep(time.Millisecond * 100) + + if err := bitmask.Close(); err != nil { + t.Fatal(err) + } +} + +func TestBitmaskReuse(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + const numHosts = 2 + bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20} + hosts := getNetHosts(t, ctx, numHosts) + + sender := getPubsub(ctx, hosts[0], WithDiscovery(&dummyDiscovery{})) + receiver := getPubsub(ctx, hosts[1]) + + connectAll(t, hosts) + + // Sender creates bitmask + sendBitmask, err := sender.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + // Receiver creates and subscribes to the bitmask + receiveBitmask, err := receiver.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + sub, err := receiveBitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + firstMsg := []byte("1") + if err := sendBitmask.Publish(ctx, firstMsg, WithReadiness(MinBitmaskSize(1))); err != nil { + t.Fatal(err) + } + + msg, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(msg.GetData(), firstMsg) { + t.Fatal("received incorrect message") + } + + if err := sendBitmask.Close(); err != nil { + t.Fatal(err) + } + + // Recreate the same bitmask + newSendBitmask, err := sender.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + // Try sending data with original bitmask + illegalSend := []byte("illegal") + if err := sendBitmask.Publish(ctx, illegalSend); err != ErrBitmaskClosed { + t.Fatal(err) + } + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, time.Second*2) + defer timeoutCancel() + msg, err = sub.Next(timeoutCtx) + if err != context.DeadlineExceeded { + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(msg.GetData(), illegalSend) { + t.Fatal("received incorrect message from illegal bitmask") + } + t.Fatal("received message sent by illegal bitmask") + } + timeoutCancel() + + // Try cancelling the new bitmask by using the original bitmask + if err := sendBitmask.Close(); err != nil { + t.Fatal(err) + } + + secondMsg := []byte("2") + if err := newSendBitmask.Publish(ctx, secondMsg); err != nil { + t.Fatal(err) + } + + timeoutCtx, timeoutCancel = context.WithTimeout(ctx, time.Second*2) + defer timeoutCancel() + msg, err = sub.Next(timeoutCtx) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(msg.GetData(), secondMsg) { + t.Fatal("received incorrect message") + } +} + +func TestBitmaskEventHandlerCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numHosts = 5 + bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20} + hosts := getNetHosts(t, ctx, numHosts) + ps := getPubsub(ctx, hosts[0]) + + // Try create and cancel bitmask + bitmask, err := ps.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + evts, err := bitmask.EventHandler() + if err != nil { + t.Fatal(err) + } + evts.Cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, time.Second*2) + defer timeoutCancel() + connectAll(t, hosts) + _, err = evts.NextPeerEvent(timeoutCtx) + if err != context.DeadlineExceeded { + if err != nil { + t.Fatal(err) + } + t.Fatal("received event after cancel") + } +} + +func TestSubscriptionJoinNotification(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numLateSubscribers = 10 + const numHosts = 20 + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), []byte{0xf0, 0x0b, 0xa1, 0x20}) + evts := getBitmaskEvts(bitmasks) + + subs := make([]*Subscription, numHosts) + bitmaskPeersFound := make([]map[peer.ID]struct{}, numHosts) + + // Have some peers subscribe earlier than other peers. 
+ // This exercises whether we get subscription notifications from + // existing peers. + for i, bitmask := range bitmasks[numLateSubscribers:] { + subch, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs[i] = subch + } + + connectAll(t, hosts) + + time.Sleep(time.Millisecond * 100) + + // Have the rest subscribe + for i, bitmask := range bitmasks[:numLateSubscribers] { + subch, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs[i+numLateSubscribers] = subch + } + + wg := sync.WaitGroup{} + for i := 0; i < numHosts; i++ { + peersFound := make(map[peer.ID]struct{}) + bitmaskPeersFound[i] = peersFound + evt := evts[i] + wg.Add(1) + go func(peersFound map[peer.ID]struct{}) { + defer wg.Done() + for len(peersFound) < numHosts-1 { + event, err := evt.NextPeerEvent(ctx) + if err != nil { + panic(err) + } + if event.Type == PeerJoin { + peersFound[event.Peer] = struct{}{} + } + } + }(peersFound) + } + + wg.Wait() + for _, peersFound := range bitmaskPeersFound { + if len(peersFound) != numHosts-1 { + t.Fatal("incorrect number of peers found") + } + } +} + +func TestSubscriptionLeaveNotification(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numHosts = 20 + hosts := getNetHosts(t, ctx, numHosts) + psubs := getPubsubs(ctx, hosts) + bitmasks := getBitmasks(psubs, []byte{0xf0, 0x0b, 0xa1, 0x20}) + evts := getBitmaskEvts(bitmasks) + + subs := make([]*Subscription, numHosts) + bitmaskPeersFound := make([]map[peer.ID]struct{}, numHosts) + + // Subscribe all peers and wait until they've all been found + for i, bitmask := range bitmasks { + subch, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs[i] = subch + } + + connectAll(t, hosts) + + time.Sleep(time.Millisecond * 100) + + wg := sync.WaitGroup{} + for i := 0; i < numHosts; i++ { + peersFound := make(map[peer.ID]struct{}) + bitmaskPeersFound[i] = peersFound + evt := evts[i] + wg.Add(1) + go func(peersFound map[peer.ID]struct{}) { + defer wg.Done() + for len(peersFound) < numHosts-1 { + event, err := evt.NextPeerEvent(ctx) + if err != nil { + panic(err) + } + if event.Type == PeerJoin { + peersFound[event.Peer] = struct{}{} + } + } + }(peersFound) + } + + wg.Wait() + for _, peersFound := range bitmaskPeersFound { + if len(peersFound) != numHosts-1 { + t.Fatal("incorrect number of peers found") + } + } + + // Test removing peers and verifying that they cause events + subs[1].Cancel() + _ = hosts[2].Close() + psubs[0].BlacklistPeer(hosts[3].ID()) + + leavingPeers := make(map[peer.ID]struct{}) + for len(leavingPeers) < 3 { + event, err := evts[0].NextPeerEvent(ctx) + if err != nil { + t.Fatal(err) + } + if event.Type == PeerLeave { + leavingPeers[event.Peer] = struct{}{} + } + } + + if _, ok := leavingPeers[hosts[1].ID()]; !ok { + t.Fatal(fmt.Errorf("canceling subscription did not cause a leave event")) + } + if _, ok := leavingPeers[hosts[2].ID()]; !ok { + t.Fatal(fmt.Errorf("closing host did not cause a leave event")) + } + if _, ok := leavingPeers[hosts[3].ID()]; !ok { + t.Fatal(fmt.Errorf("blacklisting peer did not cause a leave event")) + } +} + +func TestSubscriptionManyNotifications(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + const numHosts = 33 + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + evts := getBitmaskEvts(bitmasks) + + subs := make([]*Subscription, numHosts) + 
bitmaskPeersFound := make([]map[peer.ID]struct{}, numHosts) + + // Subscribe all peers except one and wait until they've all been found + for i := 1; i < numHosts; i++ { + subch, err := bitmasks[i].Subscribe() + if err != nil { + t.Fatal(err) + } + + subs[i] = subch + } + + connectAll(t, hosts) + + time.Sleep(time.Millisecond * 100) + + wg := sync.WaitGroup{} + for i := 1; i < numHosts; i++ { + peersFound := make(map[peer.ID]struct{}) + bitmaskPeersFound[i] = peersFound + evt := evts[i] + wg.Add(1) + go func(peersFound map[peer.ID]struct{}) { + defer wg.Done() + for len(peersFound) < numHosts-2 { + event, err := evt.NextPeerEvent(ctx) + if err != nil { + panic(err) + } + if event.Type == PeerJoin { + peersFound[event.Peer] = struct{}{} + } + } + }(peersFound) + } + + wg.Wait() + for _, peersFound := range bitmaskPeersFound[1:] { + if len(peersFound) != numHosts-2 { + t.Fatalf("found %d peers, expected %d", len(peersFound), numHosts-2) + } + } + + // Wait for remaining peer to find other peers + remPeerBitmask, remPeerEvts := bitmasks[0], evts[0] + for len(remPeerBitmask.ListPeers()) < numHosts-1 { + time.Sleep(time.Millisecond * 100) + } + + // Subscribe the remaining peer and check that all the events came through + sub, err := remPeerBitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs[0] = sub + + peerState := readAllQueuedEvents(ctx, t, remPeerEvts) + + if len(peerState) != numHosts-1 { + t.Fatal("incorrect number of peers found") + } + + for _, e := range peerState { + if e != PeerJoin { + t.Fatal("non Join event occurred") + } + } + + // Unsubscribe all peers except one and check that all the events came through + for i := 1; i < numHosts; i++ { + subs[i].Cancel() + } + + // Wait for remaining peer to disconnect from the other peers + for len(bitmasks[0].ListPeers()) != 0 { + time.Sleep(time.Millisecond * 100) + } + + peerState = readAllQueuedEvents(ctx, t, remPeerEvts) + + if len(peerState) != numHosts-1 { + t.Fatal("incorrect number of peers found") + } + + for _, e := range peerState { + if e != PeerLeave { + t.Fatal("non Leave event occurred") + } + } +} + +func TestSubscriptionNotificationSubUnSub(t *testing.T) { + // Resubscribe and Unsubscribe a peers and check the state for consistency + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + const numHosts = 35 + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + + for i := 1; i < numHosts; i++ { + connect(t, hosts[0], hosts[i]) + } + time.Sleep(time.Millisecond * 100) + + notifSubThenUnSub(ctx, t, bitmasks) +} + +func TestBitmaskRelay(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + const numHosts = 5 + + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + + // [0.Rel] - [1.Rel] - [2.Sub] + // | + // [3.Rel] - [4.Sub] + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + connect(t, hosts[1], hosts[3]) + connect(t, hosts[3], hosts[4]) + + time.Sleep(time.Millisecond * 100) + + var subs []*Subscription + + for i, bitmask := range bitmasks { + if i == 2 || i == 4 { + sub, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs = append(subs, sub) + } else { + _, err := bitmask.Relay() + if err != nil { + t.Fatal(err) + } + } + } + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 100; i++ { + msg := 
[]byte("message") + + owner := rand.Intn(len(bitmasks)) + + err := bitmasks[owner].Publish(ctx, msg) + if err != nil { + t.Fatal(err) + } + + for _, sub := range subs { + received, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(msg, received.Data) { + t.Fatal("received message is other than expected") + } + } + } +} + +func TestBitmaskRelayReuse(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + const numHosts = 1 + + hosts := getNetHosts(t, ctx, numHosts) + pubsubs := getPubsubs(ctx, hosts) + bitmasks := getBitmasks(pubsubs, bitmask) + + relay1Cancel, err := bitmasks[0].Relay() + if err != nil { + t.Fatal(err) + } + + relay2Cancel, err := bitmasks[0].Relay() + if err != nil { + t.Fatal(err) + } + + relay3Cancel, err := bitmasks[0].Relay() + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + + res := make(chan bool, 1) + pubsubs[0].eval <- func() { + res <- pubsubs[0].myRelays[string(bitmask)] == 3 + } + + isCorrectNumber := <-res + if !isCorrectNumber { + t.Fatal("incorrect number of relays") + } + + // only the first invocation should take effect + relay1Cancel() + relay1Cancel() + relay1Cancel() + + pubsubs[0].eval <- func() { + res <- pubsubs[0].myRelays[string(bitmask)] == 2 + } + + isCorrectNumber = <-res + if !isCorrectNumber { + t.Fatal("incorrect number of relays") + } + + relay2Cancel() + relay3Cancel() + + time.Sleep(time.Millisecond * 100) + + pubsubs[0].eval <- func() { + res <- pubsubs[0].myRelays[string(bitmask)] == 0 + } + + isCorrectNumber = <-res + if !isCorrectNumber { + t.Fatal("incorrect number of relays") + } +} + +func TestBitmaskRelayOnClosedBitmask(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + const numHosts = 1 + + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + + err := bitmasks[0].Close() + if err != nil { + t.Fatal(err) + } + + _, err = bitmasks[0].Relay() + if err == nil { + t.Fatalf("error should be returned") + } +} + +func TestProducePanic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numHosts = 5 + bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20} + hosts := getNetHosts(t, ctx, numHosts) + ps := getPubsub(ctx, hosts[0]) + + // Create bitmask + bitmask, err := ps.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + // Create subscription we're going to cancel + s, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + // Create second subscription to keep us alive on the subscription map + // after the first one is canceled + s2, err := bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + _ = s2 + + s.Cancel() + time.Sleep(time.Second) + s.Cancel() + time.Sleep(time.Second) +} + +func notifSubThenUnSub(ctx context.Context, t *testing.T, bitmasks []*Bitmask) { + primaryBitmask := bitmasks[0] + msgs := make([]*Subscription, len(bitmasks)) + checkSize := len(bitmasks) - 1 + + // Subscribe all peers to the bitmask + var err error + for i, bitmask := range bitmasks { + msgs[i], err = bitmask.Subscribe() + if err != nil { + t.Fatal(err) + } + } + + // Wait for the primary peer to be connected to the other peers + for len(primaryBitmask.ListPeers()) < checkSize { + time.Sleep(time.Millisecond * 100) + } + + // Unsubscribe all peers except the primary + for i := 1; i < checkSize+1; i++ { + msgs[i].Cancel() + } + 
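These tests exercise the BitmaskEventHandler introduced in bitmask.go: Join and Leave events are coalesced per peer (a Join followed by a Leave that is never read cancels out) and are consumed through NextPeerEvent. A consumer sketch using only the API shown in this patch, written as if inside (or importing) the blossomsub package:

```go
// Sketch: watch mesh membership for a bitmask obtained from PubSub.Join,
// as in the tests above. Requires the context and fmt imports.
func watchBitmaskPeers(ctx context.Context, bitmask *Bitmask) error {
	evts, err := bitmask.EventHandler()
	if err != nil {
		return err
	}
	defer evts.Cancel()

	for {
		ev, err := evts.NextPeerEvent(ctx) // blocks until an event or ctx is done
		if err != nil {
			return err // typically ctx.Err() on shutdown
		}
		switch ev.Type {
		case PeerJoin:
			fmt.Println("peer joined bitmask:", ev.Peer)
		case PeerLeave:
			fmt.Println("peer left bitmask:", ev.Peer)
		}
	}
}
```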
+ // Wait for the unsubscribe messages to reach the primary peer + for len(primaryBitmask.ListPeers()) < 0 { + time.Sleep(time.Millisecond * 100) + } + + // read all available events and verify that there are no events to process + // this is because every peer that joined also left + primaryEvts, err := primaryBitmask.EventHandler() + if err != nil { + t.Fatal(err) + } + peerState := readAllQueuedEvents(ctx, t, primaryEvts) + + if len(peerState) != 0 { + for p, s := range peerState { + fmt.Println(p, s) + } + t.Fatalf("Received incorrect events. %d extra events", len(peerState)) + } +} + +func readAllQueuedEvents(ctx context.Context, t *testing.T, evt *BitmaskEventHandler) map[peer.ID]EventType { + peerState := make(map[peer.ID]EventType) + for { + ctx, cancel := context.WithTimeout(ctx, time.Millisecond*100) + event, err := evt.NextPeerEvent(ctx) + cancel() + + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatal(err) + } + + e, ok := peerState[event.Peer] + if !ok { + peerState[event.Peer] = event.Type + } else if e != event.Type { + delete(peerState, event.Peer) + } + } + return peerState +} + +func TestMinBitmaskSizeNoDiscovery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + const numHosts = 3 + bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20} + hosts := getNetHosts(t, ctx, numHosts) + + sender := getPubsub(ctx, hosts[0]) + receiver1 := getPubsub(ctx, hosts[1]) + receiver2 := getPubsub(ctx, hosts[2]) + + connectAll(t, hosts) + + // Sender creates bitmask + sendBitmask, err := sender.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + // Receiver creates and subscribes to the bitmask + receiveBitmask1, err := receiver1.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + sub1, err := receiveBitmask1.Subscribe() + if err != nil { + t.Fatal(err) + } + + oneMsg := []byte("minimum one") + if err := sendBitmask.Publish(ctx, oneMsg, WithReadiness(MinBitmaskSize(1))); err != nil { + t.Fatal(err) + } + + if msg, err := sub1.Next(ctx); err != nil { + t.Fatal(err) + } else if !bytes.Equal(msg.GetData(), oneMsg) { + t.Fatal("received incorrect message") + } + + twoMsg := []byte("minimum two") + + // Attempting to publish with a minimum bitmask size of two should fail. + { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if err := sendBitmask.Publish(ctx, twoMsg, WithReadiness(MinBitmaskSize(2))); !errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } + } + + // Subscribe the second receiver; the publish should now work. 
+ receiveBitmask2, err := receiver2.Join(bitmaskID) + if err != nil { + t.Fatal(err) + } + + sub2, err := receiveBitmask2.Subscribe() + if err != nil { + t.Fatal(err) + } + + { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if err := sendBitmask.Publish(ctx, twoMsg, WithReadiness(MinBitmaskSize(2))); err != nil { + t.Fatal(err) + } + } + + if msg, err := sub2.Next(ctx); err != nil { + t.Fatal(err) + } else if !bytes.Equal(msg.GetData(), twoMsg) { + t.Fatal("received incorrect message") + } +} + +func TestWithBitmaskMsgIdFunction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmaskA, bitmaskB := []byte{0xf0, 0x0b, 0xa1, 0x2a}, []byte{0xf0, 0x0b, 0xa1, 0x2b} + const numHosts = 2 + + hosts := getNetHosts(t, ctx, numHosts) + pubsubs := getPubsubs(ctx, hosts, WithMessageIdFn(func(pmsg *pb.Message) string { + hash := sha256.Sum256(pmsg.Data) + return string(hash[:]) + })) + connectAll(t, hosts) + + bitmasksA := getBitmasks(pubsubs, bitmaskA) // uses global msgIdFn + bitmasksB := getBitmasks(pubsubs, bitmaskB, WithBitmaskMessageIdFn(func(pmsg *pb.Message) string { // uses custom + hash := sha1.Sum(pmsg.Data) + return string(hash[:]) + })) + + payload := []byte("pubsub rocks") + + subA, err := bitmasksA[0].Subscribe() + if err != nil { + t.Fatal(err) + } + + err = bitmasksA[1].Publish(ctx, payload, WithReadiness(MinBitmaskSize(1))) + if err != nil { + t.Fatal(err) + } + + msgA, err := subA.Next(ctx) + if err != nil { + t.Fatal(err) + } + + subB, err := bitmasksB[0].Subscribe() + if err != nil { + t.Fatal(err) + } + + err = bitmasksB[1].Publish(ctx, payload, WithReadiness(MinBitmaskSize(1))) + if err != nil { + t.Fatal(err) + } + + msgB, err := subB.Next(ctx) + if err != nil { + t.Fatal(err) + } + + if msgA.ID == msgB.ID { + t.Fatal("msg ids are equal") + } +} + +func TestBitmaskPublishWithKeyInvalidParameters(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + const numHosts = 5 + + virtualPeer := tnet.RandPeerNetParamsOrFatal(t) + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + + t.Run("nil sign private key should error", func(t *testing.T) { + withVirtualKey := WithSecretKeyAndPeerId(nil, virtualPeer.ID) + err := bitmasks[0].Publish(ctx, []byte("buff"), withVirtualKey) + if err != ErrNilSignKey { + t.Fatal("error should have been of type errNilSignKey") + } + }) + t.Run("empty peer ID should error", func(t *testing.T) { + withVirtualKey := WithSecretKeyAndPeerId(virtualPeer.PrivKey, "") + err := bitmasks[0].Publish(ctx, []byte("buff"), withVirtualKey) + if err != ErrEmptyPeerID { + t.Fatal("error should have been of type errEmptyPeerID") + } + }) +} + +func TestBitmaskRelayPublishWithKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + const numHosts = 5 + + virtualPeer := tnet.RandPeerNetParamsOrFatal(t) + hosts := getNetHosts(t, ctx, numHosts) + bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask) + + // [0.Rel] - [1.Rel] - [2.Sub] + // | + // [3.Rel] - [4.Sub] + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + connect(t, hosts[1], hosts[3]) + connect(t, hosts[3], hosts[4]) + + time.Sleep(time.Millisecond * 100) + + var subs []*Subscription + + for i, bitmaskValue := range bitmasks { + if i == 2 || i == 4 { + sub, err := 
bitmaskValue.Subscribe() + if err != nil { + t.Fatal(err) + } + + subs = append(subs, sub) + } else { + _, err := bitmaskValue.Relay() + if err != nil { + t.Fatal(err) + } + } + } + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 100; i++ { + msg := []byte("message") + + owner := rand.Intn(len(bitmasks)) + + withVirtualKey := WithSecretKeyAndPeerId(virtualPeer.PrivKey, virtualPeer.ID) + err := bitmasks[owner].Publish(ctx, msg, withVirtualKey) + if err != nil { + t.Fatal(err) + } + + for _, sub := range subs { + received, errSub := sub.Next(ctx) + if errSub != nil { + t.Fatal(errSub) + } + + if !bytes.Equal(msg, received.Data) { + t.Fatal("received message is other than expected") + } + if string(received.From) != string(virtualPeer.ID) { + t.Fatal("received message is not from the virtual peer") + } + } + } +} + +func TestWithLocalPublication(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0x7e, 57} + + hosts := getNetHosts(t, ctx, 2) + pubsubs := getPubsubs(ctx, hosts) + bitmasks := getBitmasks(pubsubs, bitmask) + connectAll(t, hosts) + + payload := []byte("pubsub smashes") + + local, err := bitmasks[0].Subscribe() + if err != nil { + t.Fatal(err) + } + + remote, err := bitmasks[1].Subscribe() + if err != nil { + t.Fatal(err) + } + + err = bitmasks[0].Publish(ctx, payload, WithLocalPublication(true)) + if err != nil { + t.Fatal(err) + } + + remoteCtx, cancel := context.WithTimeout(ctx, time.Millisecond*100) + defer cancel() + + msg, err := remote.Next(remoteCtx) + if msg != nil || err == nil { + t.Fatal("unexpected msg") + } + + msg, err = local.Next(ctx) + if err != nil { + t.Fatal(err) + } + if !msg.Local || !bytes.Equal(msg.Data, payload) { + t.Fatal("wrong message") + } +} diff --git a/go-libp2p-blossomsub/blacklist.go b/go-libp2p-blossomsub/blacklist.go new file mode 100644 index 0000000..87fa42d --- /dev/null +++ b/go-libp2p-blossomsub/blacklist.go @@ -0,0 +1,58 @@ +package blossomsub + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/peer" + + "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache" +) + +// Blacklist is an interface for peer blacklisting. 
+type Blacklist interface { + Add(peer.ID) bool + Contains(peer.ID) bool +} + +// MapBlacklist is a blacklist implementation using a perfect map +type MapBlacklist map[peer.ID]struct{} + +// NewMapBlacklist creates a new MapBlacklist +func NewMapBlacklist() Blacklist { + return MapBlacklist(make(map[peer.ID]struct{})) +} + +func (b MapBlacklist) Add(p peer.ID) bool { + b[p] = struct{}{} + return true +} + +func (b MapBlacklist) Contains(p peer.ID) bool { + _, ok := b[p] + return ok +} + +// TimeCachedBlacklist is a blacklist implementation using a time cache +type TimeCachedBlacklist struct { + tc timecache.TimeCache +} + +// NewTimeCachedBlacklist creates a new TimeCachedBlacklist with the given expiry duration +func NewTimeCachedBlacklist(expiry time.Duration) (Blacklist, error) { + b := &TimeCachedBlacklist{tc: timecache.NewTimeCache(expiry)} + return b, nil +} + +// Add returns a bool saying whether Add of peer was successful +func (b *TimeCachedBlacklist) Add(p peer.ID) bool { + s := p.String() + if b.tc.Has(s) { + return false + } + b.tc.Add(s) + return true +} + +func (b *TimeCachedBlacklist) Contains(p peer.ID) bool { + return b.tc.Has(p.String()) +} diff --git a/go-libp2p-blossomsub/blacklist_test.go b/go-libp2p-blossomsub/blacklist_test.go new file mode 100644 index 0000000..42c0bdc --- /dev/null +++ b/go-libp2p-blossomsub/blacklist_test.go @@ -0,0 +1,125 @@ +package blossomsub + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestMapBlacklist(t *testing.T) { + b := NewMapBlacklist() + + p := peer.ID("test") + + b.Add(p) + if !b.Contains(p) { + t.Fatal("peer not in the blacklist") + } + +} + +func TestTimeCachedBlacklist(t *testing.T) { + b, err := NewTimeCachedBlacklist(10 * time.Minute) + if err != nil { + t.Fatal(err) + } + + p := peer.ID("test") + + b.Add(p) + if !b.Contains(p) { + t.Fatal("peer not in the blacklist") + } +} + +func TestBlacklist(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + connect(t, hosts[0], hosts[1]) + + sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + psubs[1].BlacklistPeer(hosts[0].ID()) + time.Sleep(time.Millisecond * 100) + + psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message")) + + wctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + _, err = sub.Next(wctx) + + if err == nil { + t.Fatal("got message from blacklisted peer") + } +} + +func TestBlacklist2(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + connect(t, hosts[0], hosts[1]) + + _, err := psubs[0].Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + + sub1, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + psubs[1].BlacklistPeer(hosts[0].ID()) + time.Sleep(time.Millisecond * 100) + + psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message")) + + wctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + _, err = sub1.Next(wctx) + + if err == nil { + t.Fatal("got message from blacklisted peer") + } +} + +func TestBlacklist3(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs 
:= getPubsubs(ctx, hosts) + + psubs[1].BlacklistPeer(hosts[0].ID()) + time.Sleep(time.Millisecond * 100) + connect(t, hosts[0], hosts[1]) + + sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + + psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message")) + + wctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + _, err = sub.Next(wctx) + + if err == nil { + t.Fatal("got message from blacklisted peer") + } +} diff --git a/go-libp2p-blossomsub/blossomsub.go b/go-libp2p-blossomsub/blossomsub.go new file mode 100644 index 0000000..7d40854 --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub.go @@ -0,0 +1,1984 @@ +package blossomsub + +import ( + "context" + "fmt" + "math/rand" + "sort" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/record" +) + +const ( + // BlossomSubID_v11 is the protocol ID for version 1.1.0 of the BlossomSub protocol. + // It retains versioning matching to GossipSub 1.1.0 to indicate point of fork + BlossomSubID_v11 = protocol.ID("/blossomsub/1.1.0") +) + +// Defines the default BlossomSub parameters. +var ( + BlossomSubD = 6 + BlossomSubDlo = 5 + BlossomSubDhi = 12 + BlossomSubDscore = 4 + BlossomSubDout = 2 + BlossomSubHistoryLength = 5 + BlossomSubHistoryGossip = 3 + BlossomSubDlazy = 6 + BlossomSubGossipFactor = 0.25 + BlossomSubGossipRetransmission = 3 + BlossomSubHeartbeatInitialDelay = 100 * time.Millisecond + BlossomSubHeartbeatInterval = 1 * time.Second + BlossomSubFanoutTTL = 60 * time.Second + BlossomSubPrunePeers = 16 + BlossomSubPruneBackoff = time.Minute + BlossomSubUnsubscribeBackoff = 10 * time.Second + BlossomSubConnectors = 8 + BlossomSubMaxPendingConnections = 128 + BlossomSubConnectionTimeout = 30 * time.Second + BlossomSubDirectConnectTicks uint64 = 300 + BlossomSubDirectConnectInitialDelay = time.Second + BlossomSubOpportunisticGraftTicks uint64 = 60 + BlossomSubOpportunisticGraftPeers = 2 + BlossomSubGraftFloodThreshold = 10 * time.Second + BlossomSubMaxIHaveLength = 5000 + BlossomSubMaxIHaveMessages = 10 + BlossomSubIWantFollowupTime = 3 * time.Second +) + +// BlossomSubParams defines all the BlossomSub specific parameters. +type BlossomSubParams struct { + // overlay parameters. + + // D sets the optimal degree for a BlossomSub bitmask mesh. For example, if D == 6, + // each peer will want to have about six peers in their mesh for each bitmask they're subscribed to. + // D should be set somewhere between Dlo and Dhi. + D int + + // Dlo sets the lower bound on the number of peers we keep in a BlossomSub bitmask mesh. + // If we have fewer than Dlo peers, we will attempt to graft some more into the mesh at + // the next heartbeat. + Dlo int + + // Dhi sets the upper bound on the number of peers we keep in a BlossomSub bitmask mesh. + // If we have more than Dhi peers, we will select some to prune from the mesh at the next heartbeat. + Dhi int + + // Dscore affects how peers are selected when pruning a mesh due to over subscription. + // At least Dscore of the retained peers will be high-scoring, while the remainder are + // chosen randomly. 
+	Dscore int
+
+	// Dout sets the quota for the number of outbound connections to maintain in a bitmask mesh.
+	// When the mesh is pruned due to over subscription, we make sure that we have outbound connections
+	// to at least Dout of the survivor peers. This prevents sybil attackers from overwhelming
+	// our mesh with incoming connections.
+	//
+	// Dout must be set below Dlo, and must not exceed D / 2.
+	Dout int
+
+	// gossip parameters
+
+	// HistoryLength controls the size of the message cache used for gossip.
+	// The message cache will remember messages for HistoryLength heartbeats.
+	HistoryLength int
+
+	// HistoryGossip controls how many cached message ids we will advertise in
+	// IHAVE gossip messages. When asked for our seen message IDs, we will return
+	// only those from the most recent HistoryGossip heartbeats. The slack between
+	// HistoryGossip and HistoryLength allows us to avoid advertising messages
+	// that will be expired by the time they're requested.
+	//
+	// HistoryGossip must be less than or equal to HistoryLength to
+	// avoid a runtime panic.
+	HistoryGossip int
+
+	// Dlazy affects how many peers we will emit gossip to at each heartbeat.
+	// We will send gossip to at least Dlazy peers outside our mesh. The actual
+	// number may be more, depending on GossipFactor and how many peers we're
+	// connected to.
+	Dlazy int
+
+	// GossipFactor affects how many peers we will emit gossip to at each heartbeat.
+	// We will send gossip to GossipFactor * (total number of non-mesh peers), or
+	// Dlazy, whichever is greater.
+	GossipFactor float64
+
+	// GossipRetransmission controls how many times we will allow a peer to request
+	// the same message id through IWANT gossip before we start ignoring them. This is designed
+	// to prevent peers from spamming us with requests and wasting our resources.
+	GossipRetransmission int
+
+	// heartbeat interval
+
+	// HeartbeatInitialDelay is the short delay before the heartbeat timer begins
+	// after the router is initialized.
+	HeartbeatInitialDelay time.Duration
+
+	// HeartbeatInterval controls the time between heartbeats.
+	HeartbeatInterval time.Duration
+
+	// SlowHeartbeatWarning is the duration threshold for heartbeat processing before emitting
+	// a warning; this would be indicative of an overloaded peer.
+	SlowHeartbeatWarning float64
+
+	// FanoutTTL controls how long we keep track of the fanout state. If it's been
+	// FanoutTTL since we've published to a bitmask that we're not subscribed to,
+	// we'll delete the fanout map for that bitmask.
+	FanoutTTL time.Duration
+
+	// PrunePeers controls the number of peers to include in prune Peer eXchange.
+	// When we prune a peer that's eligible for PX (has a good score, etc), we will try to
+	// send them signed peer records for up to PrunePeers other peers that we
+	// know of.
+	PrunePeers int
+
+	// PruneBackoff controls the backoff time for pruned peers. This is how long
+	// a peer must wait before attempting to graft into our mesh again after being pruned.
+	// When pruning a peer, we send them our value of PruneBackoff so they know
+	// the minimum time to wait. Peers running older versions may not send a backoff time,
+	// so if we receive a prune message without one, we will wait at least PruneBackoff
+	// before attempting to re-graft.
+	PruneBackoff time.Duration
+
+	// UnsubscribeBackoff controls the backoff time to use when unsubscribing
+	// from a bitmask. A peer should not resubscribe to this bitmask before this
+	// duration.
+	UnsubscribeBackoff time.Duration
+
+	// Connectors controls the number of active connection attempts for peers obtained through PX.
+	Connectors int
+
+	// MaxPendingConnections sets the maximum number of pending connections for peers attempted through PX.
+	MaxPendingConnections int
+
+	// ConnectionTimeout controls the timeout for connection attempts.
+	ConnectionTimeout time.Duration
+
+	// DirectConnectTicks is the number of heartbeat ticks for attempting to reconnect direct peers
+	// that are not currently connected.
+	DirectConnectTicks uint64
+
+	// DirectConnectInitialDelay is the initial delay before opening connections to direct peers.
+	DirectConnectInitialDelay time.Duration
+
+	// OpportunisticGraftTicks is the number of heartbeat ticks for attempting to improve the mesh
+	// with opportunistic grafting. Every OpportunisticGraftTicks we will attempt to select some
+	// high-scoring mesh peers to replace lower-scoring ones, if the median score of our mesh peers falls
+	// below a threshold (see https://godoc.org/source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub#PeerScoreThresholds).
+	OpportunisticGraftTicks uint64
+
+	// OpportunisticGraftPeers is the number of peers to opportunistically graft.
+	OpportunisticGraftPeers int
+
+	// If a GRAFT comes before GraftFloodThreshold has elapsed since the last PRUNE,
+	// then there is an extra score penalty applied to the peer through P7.
+	GraftFloodThreshold time.Duration
+
+	// MaxIHaveLength is the maximum number of messages to include in an IHAVE message.
+	// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a
+	// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the
+	// default if your system is pushing more than 5000 messages in HistoryGossip heartbeats;
+	// with the defaults this is 1666 messages/s.
+	MaxIHaveLength int
+
+	// MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat.
+	MaxIHaveMessages int
+
+	// IWantFollowupTime is the time to wait for a message requested through IWANT following an IHAVE advertisement.
+	// If the message is not received within this window, a broken promise is declared and
+	// the router may apply behavioural penalties.
+	IWantFollowupTime time.Duration
+}
+
+// NewBlossomSub returns a new PubSub object using the default BlossomSubRouter as the router.
+// BlossomSub is not intended to be used directly in V1; you will need to employ a bloom filter
+// mapping per message corresponding to your own namespacing strategy.
+func NewBlossomSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
+	rt := DefaultBlossomSubRouter(h)
+	opts = append(opts, WithRawTracer(rt.tagTracer))
+	return NewBlossomSubWithRouter(ctx, h, rt, opts...)
+}
+
+// NewBlossomSubWithRouter returns a new PubSub object using the given router.
+func NewBlossomSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option) (*PubSub, error) {
+	return NewPubSub(ctx, h, rt, opts...)
+}
+
+// NewBlossomSubRouter returns a new BlossomSubRouter with custom parameters.
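+//
+// A minimal usage sketch (assuming an existing libp2p host h and context ctx; the
+// parameter overrides shown are illustrative, not recommendations):
+//
+//	params := DefaultBlossomSubParams()
+//	params.D, params.Dlo, params.Dhi = 8, 6, 12
+//	rt := NewBlossomSubRouter(h, params)
+//	ps, err := NewBlossomSubWithRouter(ctx, h, rt)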
+func NewBlossomSubRouter(h host.Host, params BlossomSubParams) *BlossomSubRouter { + return &BlossomSubRouter{ + peers: make(map[peer.ID]protocol.ID), + mesh: make(map[string]map[peer.ID]struct{}), + fanout: make(map[string]map[peer.ID]struct{}), + lastpub: make(map[string]int64), + gossip: make(map[peer.ID][]*pb.ControlIHave), + control: make(map[peer.ID]*pb.ControlMessage), + backoff: make(map[string]map[peer.ID]time.Time), + peerhave: make(map[peer.ID]int), + iasked: make(map[peer.ID]int), + outbound: make(map[peer.ID]bool), + connect: make(chan connectInfo, params.MaxPendingConnections), + mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), + protos: BlossomSubDefaultProtocols, + feature: BlossomSubDefaultFeatures, + tagTracer: newTagTracer(h.ConnManager()), + params: params, + } +} + +// DefaultBlossomSubRouter returns a new BlossomSubRouter with default parameters. +func DefaultBlossomSubRouter(h host.Host) *BlossomSubRouter { + params := DefaultBlossomSubParams() + return &BlossomSubRouter{ + peers: make(map[peer.ID]protocol.ID), + mesh: make(map[string]map[peer.ID]struct{}), + fanout: make(map[string]map[peer.ID]struct{}), + lastpub: make(map[string]int64), + gossip: make(map[peer.ID][]*pb.ControlIHave), + control: make(map[peer.ID]*pb.ControlMessage), + backoff: make(map[string]map[peer.ID]time.Time), + peerhave: make(map[peer.ID]int), + iasked: make(map[peer.ID]int), + outbound: make(map[peer.ID]bool), + connect: make(chan connectInfo, params.MaxPendingConnections), + mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), + protos: BlossomSubDefaultProtocols, + feature: BlossomSubDefaultFeatures, + tagTracer: newTagTracer(h.ConnManager()), + params: params, + } +} + +// DefaultBlossomSubParams returns the default blossom sub parameters +// as a config. +func DefaultBlossomSubParams() BlossomSubParams { + return BlossomSubParams{ + D: BlossomSubD, + Dlo: BlossomSubDlo, + Dhi: BlossomSubDhi, + Dscore: BlossomSubDscore, + Dout: BlossomSubDout, + HistoryLength: BlossomSubHistoryLength, + HistoryGossip: BlossomSubHistoryGossip, + Dlazy: BlossomSubDlazy, + GossipFactor: BlossomSubGossipFactor, + GossipRetransmission: BlossomSubGossipRetransmission, + HeartbeatInitialDelay: BlossomSubHeartbeatInitialDelay, + HeartbeatInterval: BlossomSubHeartbeatInterval, + FanoutTTL: BlossomSubFanoutTTL, + PrunePeers: BlossomSubPrunePeers, + PruneBackoff: BlossomSubPruneBackoff, + UnsubscribeBackoff: BlossomSubUnsubscribeBackoff, + Connectors: BlossomSubConnectors, + MaxPendingConnections: BlossomSubMaxPendingConnections, + ConnectionTimeout: BlossomSubConnectionTimeout, + DirectConnectTicks: BlossomSubDirectConnectTicks, + DirectConnectInitialDelay: BlossomSubDirectConnectInitialDelay, + OpportunisticGraftTicks: BlossomSubOpportunisticGraftTicks, + OpportunisticGraftPeers: BlossomSubOpportunisticGraftPeers, + GraftFloodThreshold: BlossomSubGraftFloodThreshold, + MaxIHaveLength: BlossomSubMaxIHaveLength, + MaxIHaveMessages: BlossomSubMaxIHaveMessages, + IWantFollowupTime: BlossomSubIWantFollowupTime, + SlowHeartbeatWarning: 0.1, + } +} + +// WithPeerScore is a BlossomSub router option that enables peer scoring. 
+func WithPeerScore(params *PeerScoreParams, thresholds *PeerScoreThresholds) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + // sanity check: validate the score parameters + err := params.validate() + if err != nil { + return err + } + + // sanity check: validate the threshold values + err = thresholds.validate() + if err != nil { + return err + } + + gs.score = newPeerScore(params) + gs.gossipThreshold = thresholds.GossipThreshold + gs.publishThreshold = thresholds.PublishThreshold + gs.graylistThreshold = thresholds.GraylistThreshold + gs.acceptPXThreshold = thresholds.AcceptPXThreshold + gs.opportunisticGraftThreshold = thresholds.OpportunisticGraftThreshold + + gs.gossipTracer = newGossipTracer() + + // hook the tracer + if ps.tracer != nil { + ps.tracer.raw = append(ps.tracer.raw, gs.score, gs.gossipTracer) + } else { + ps.tracer = &pubsubTracer{ + raw: []RawTracer{gs.score, gs.gossipTracer}, + pid: ps.host.ID(), + idGen: ps.idGen, + } + } + + return nil + } +} + +// WithFloodPublish is a BlossomSub router option that enables flood publishing. +// When this is enabled, published messages are forwarded to all peers with score >= +// to publishThreshold +func WithFloodPublish(floodPublish bool) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + gs.floodPublish = floodPublish + + return nil + } +} + +// WithPeerExchange is a BlossomSub router option that enables Peer eXchange on PRUNE. +// This should generally be enabled in bootstrappers and well connected/trusted nodes +// used for bootstrapping. +func WithPeerExchange(doPX bool) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + gs.doPX = doPX + + return nil + } +} + +// WithDirectPeers is a BlossomSub router option that specifies peers with direct +// peering agreements. These peers are connected outside of the mesh, with all (valid) +// message unconditionally forwarded to them. The router will maintain open connections +// to these peers. Note that the peering agreement should be reciprocal with direct peers +// symmetrically configured at both ends. +func WithDirectPeers(pis []peer.AddrInfo) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + direct := make(map[peer.ID]struct{}) + for _, pi := range pis { + direct[pi.ID] = struct{}{} + ps.host.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.PermanentAddrTTL) + } + + gs.direct = direct + + if gs.tagTracer != nil { + gs.tagTracer.direct = direct + } + + return nil + } +} + +// WithDirectConnectTicks is a BlossomSub router option that sets the number of +// heartbeat ticks between attempting to reconnect direct peers that are not +// currently connected. A "tick" is based on the heartbeat interval, which is +// 1s by default. The default value for direct connect ticks is 300. +func WithDirectConnectTicks(t uint64) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + gs.params.DirectConnectTicks = t + return nil + } +} + +// WithBlossomSubParams is a blossom sub router option that allows a custom +// config to be set when instantiating the BlossomSub router. 
+func WithBlossomSubParams(cfg BlossomSubParams) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + // Overwrite current config and associated variables in the router. + gs.params = cfg + gs.connect = make(chan connectInfo, cfg.MaxPendingConnections) + gs.mcache = NewMessageCache(cfg.HistoryGossip, cfg.HistoryLength) + + return nil + } +} + +// BlossomSubRouter is a router that implements the BlossomSub protocol. +// For each bitmask we have joined, we maintain an overlay through which +// messages flow; this is the mesh map. +// For each bitmask we publish to without joining, we maintain a list of peers +// to use for injecting our messages in the overlay with stable routes; this +// is the fanout map. Fanout peer lists are expired if we don't publish any +// messages to their bitmask for BlossomSubFanoutTTL. +type BlossomSubRouter struct { + p *PubSub + peers map[peer.ID]protocol.ID // peer protocols + direct map[peer.ID]struct{} // direct peers + mesh map[string]map[peer.ID]struct{} // bitmask meshes + fanout map[string]map[peer.ID]struct{} // bitmask fanout + lastpub map[string]int64 // last publish time for fanout bitmasks + gossip map[peer.ID][]*pb.ControlIHave // pending gossip + control map[peer.ID]*pb.ControlMessage // pending control messages + peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat + iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat + outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections + backoff map[string]map[peer.ID]time.Time // prune backoff + connect chan connectInfo // px connection requests + + protos []protocol.ID + feature BlossomSubFeatureTest + + mcache *MessageCache + tracer *pubsubTracer + score *peerScore + gossipTracer *gossipTracer + tagTracer *tagTracer + gate *peerGater + + // config for BlossomSub parameters + params BlossomSubParams + + // whether PX is enabled; this should be enabled in bootstrappers and other well connected/trusted + // nodes. + doPX bool + + // threshold for accepting PX from a peer; this should be positive and limited to scores + // attainable by bootstrappers and trusted nodes + acceptPXThreshold float64 + + // threshold for peer score to emit/accept gossip + // If the peer score is below this threshold, we won't emit or accept gossip from the peer. + // When there is no score, this value is 0. + gossipThreshold float64 + + // flood publish score threshold; we only publish to peers with score >= to the threshold + // when using flood publishing or the peer is a fanout or floodsub peer. + publishThreshold float64 + + // threshold for peer score before we graylist the peer and silently ignore its RPCs + graylistThreshold float64 + + // threshold for median peer score before triggering opportunistic grafting + opportunisticGraftThreshold float64 + + // whether to use flood publishing + floodPublish bool + + // number of heartbeats since the beginning of time; this allows us to amortize some resource + // clean up -- eg backoff clean up. 
+ heartbeatTicks uint64 +} + +type connectInfo struct { + p peer.ID + spr *record.Envelope +} + +func (gs *BlossomSubRouter) Protocols() []protocol.ID { + return gs.protos +} + +func (gs *BlossomSubRouter) Attach(p *PubSub) { + gs.p = p + gs.tracer = p.tracer + + // start the scoring + gs.score.Start(gs) + + // and the gossip tracing + gs.gossipTracer.Start(gs) + + // and the tracer for connmgr tags + gs.tagTracer.Start(gs) + + // start using the same msg ID function as PubSub for caching messages. + gs.mcache.SetMsgIdFn(p.idGen.ID) + + // start the heartbeat + go gs.heartbeatTimer() + + // start the PX connectors + for i := 0; i < gs.params.Connectors; i++ { + go gs.connector() + } + + // connect to direct peers + if len(gs.direct) > 0 { + go func() { + if gs.params.DirectConnectInitialDelay > 0 { + time.Sleep(gs.params.DirectConnectInitialDelay) + } + for p := range gs.direct { + gs.connect <- connectInfo{p: p} + } + }() + } +} + +func (gs *BlossomSubRouter) AddPeer(p peer.ID, proto protocol.ID) { + log.Debugf("PEERUP: Add new peer %s using %s", p, proto) + gs.tracer.AddPeer(p, proto) + gs.peers[p] = proto + + // track the connection direction + outbound := false + conns := gs.p.host.Network().ConnsToPeer(p) +loop: + for _, c := range conns { + stat := c.Stat() + + if stat.Transient { + continue + } + + if stat.Direction == network.DirOutbound { + // only count the connection if it has a pubsub stream + for _, s := range c.GetStreams() { + if s.Protocol() == proto { + outbound = true + break loop + } + } + } + } + gs.outbound[p] = outbound +} + +func (gs *BlossomSubRouter) RemovePeer(p peer.ID) { + log.Debugf("PEERDOWN: Remove disconnected peer %s", p) + gs.tracer.RemovePeer(p) + delete(gs.peers, p) + for _, peers := range gs.mesh { + delete(peers, p) + } + for _, peers := range gs.fanout { + delete(peers, p) + } + delete(gs.gossip, p) + delete(gs.control, p) + delete(gs.outbound, p) +} + +func (gs *BlossomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool { + // check all peers in the bitmask + tmap, ok := gs.p.bitmasks[string(bitmask)] + if !ok { + return false + } + + fsPeers, gsPeers := 0, 0 + // floodsub peers + for p := range tmap { + if !gs.feature(BlossomSubFeatureMesh, gs.peers[p]) { + fsPeers++ + } + } + + // BlossomSub peers + gsPeers = len(gs.mesh[string(bitmask)]) + + if suggested == 0 { + suggested = gs.params.Dlo + } + + if fsPeers+gsPeers >= suggested || gsPeers >= gs.params.Dhi { + return true + } + + return false +} + +func (gs *BlossomSubRouter) AcceptFrom(p peer.ID) AcceptStatus { + _, direct := gs.direct[p] + if direct { + return AcceptAll + } + + if gs.score.Score(p) < gs.graylistThreshold { + return AcceptNone + } + + return gs.gate.AcceptFrom(p) +} + +func (gs *BlossomSubRouter) HandleRPC(rpc *RPC) { + ctl := rpc.GetControl() + if ctl == nil { + return + } + + iwant := gs.handleIHave(rpc.from, ctl) + ihave := gs.handleIWant(rpc.from, ctl) + prune := gs.handleGraft(rpc.from, ctl) + gs.handlePrune(rpc.from, ctl) + + if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 { + return + } + + out := rpcWithControl(ihave, nil, iwant, nil, prune) + gs.sendRPC(rpc.from, out) +} + +func (gs *BlossomSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant { + // we ignore IHAVE gossip from any peer whose score is below the gossip threshold + score := gs.score.Score(p) + if score < gs.gossipThreshold { + log.Debugf("IHAVE: ignoring peer %s with score below threshold [score = %f]", p, score) + return nil + } + + // IHAVE flood protection + 
gs.peerhave[p]++ + if gs.peerhave[p] > gs.params.MaxIHaveMessages { + log.Debugf("IHAVE: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring", p, gs.peerhave[p]) + return nil + } + + if gs.iasked[p] >= gs.params.MaxIHaveLength { + log.Debugf("IHAVE: peer %s has already advertised too many messages (%d); ignoring", p, gs.iasked[p]) + return nil + } + + iwant := make(map[string]struct{}) + for _, ihave := range ctl.GetIhave() { + bitmask := ihave.GetBitmask() + _, ok := gs.mesh[string(bitmask)] + if !ok { + continue + } + + if !gs.p.peerFilter(p, bitmask) { + continue + } + + for _, mid := range ihave.GetMessageIDs() { + if gs.p.seenMessage(mid) { + continue + } + iwant[mid] = struct{}{} + } + } + + if len(iwant) == 0 { + return nil + } + + iask := len(iwant) + if iask+gs.iasked[p] > gs.params.MaxIHaveLength { + iask = gs.params.MaxIHaveLength - gs.iasked[p] + } + + log.Debugf("IHAVE: Asking for %d out of %d messages from %s", iask, len(iwant), p) + + iwantlst := make([]string, 0, len(iwant)) + for mid := range iwant { + iwantlst = append(iwantlst, mid) + } + + // ask in random order + shuffleStrings(iwantlst) + + // truncate to the messages we are actually asking for and update the iasked counter + iwantlst = iwantlst[:iask] + gs.iasked[p] += iask + + gs.gossipTracer.AddPromise(p, iwantlst) + + return []*pb.ControlIWant{{MessageIDs: iwantlst}} +} + +func (gs *BlossomSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb.Message { + // we don't respond to IWANT requests from any peer whose score is below the gossip threshold + score := gs.score.Score(p) + if score < gs.gossipThreshold { + log.Debugf("IWANT: ignoring peer %s with score below threshold [score = %f]", p, score) + return nil + } + + ihave := make(map[string]*pb.Message) + for _, iwant := range ctl.GetIwant() { + for _, mid := range iwant.GetMessageIDs() { + msg, count, ok := gs.mcache.GetForPeer(mid, p) + if !ok { + continue + } + + if !gs.p.peerFilter(p, msg.GetBitmask()) { + continue + } + + if count > gs.params.GossipRetransmission { + log.Debugf("IWANT: Peer %s has asked for message %s too many times; ignoring request", p, mid) + continue + } + + ihave[mid] = msg.Message + } + } + + if len(ihave) == 0 { + return nil + } + + log.Debugf("IWANT: Sending %d messages to %s", len(ihave), p) + + msgs := make([]*pb.Message, 0, len(ihave)) + for _, msg := range ihave { + msgs = append(msgs, msg) + } + + return msgs +} + +func (gs *BlossomSubRouter) handleGraft(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlPrune { + var prune [][]byte + + doPX := gs.doPX + score := gs.score.Score(p) + now := time.Now() + + for _, graft := range ctl.GetGraft() { + bitmask := graft.GetBitmask() + + if !gs.p.peerFilter(p, bitmask) { + continue + } + + peers, ok := gs.mesh[string(bitmask)] + if !ok { + // don't do PX when there is an unknown bitmask to avoid leaking our peers + doPX = false + // spam hardening: ignore GRAFTs for unknown bitmasks + continue + } + + // check if it is already in the mesh; if so do nothing (we might have concurrent grafting) + _, inMesh := peers[p] + if inMesh { + continue + } + + // we don't GRAFT to/from direct peers; complain loudly if this happens + _, direct := gs.direct[p] + if direct { + log.Warnf("GRAFT: ignoring request from direct peer %s", p) + // this is possibly a bug from non-reciprocal configuration; send a PRUNE + prune = append(prune, bitmask) + // but don't PX + doPX = false + continue + } + + // make sure we are not backing off that peer + expire, backoff := 
gs.backoff[string(bitmask)][p] + if backoff && now.Before(expire) { + log.Debugf("GRAFT: ignoring backed off peer %s", p) + // add behavioural penalty + gs.score.AddPenalty(p, 1) + // no PX + doPX = false + // check the flood cutoff -- is the GRAFT coming too fast? + floodCutoff := expire.Add(gs.params.GraftFloodThreshold - gs.params.PruneBackoff) + if now.Before(floodCutoff) { + // extra penalty + gs.score.AddPenalty(p, 1) + } + // refresh the backoff + gs.addBackoff(p, bitmask, false) + prune = append(prune, bitmask) + continue + } + + // check the score + if score < 0 { + // we don't GRAFT peers with negative score + log.Debugf("GRAFT: ignoring peer %s with negative score [score = %f, bitmask = %s]", p, score, bitmask) + // we do send them PRUNE however, because it's a matter of protocol correctness + prune = append(prune, bitmask) + // but we won't PX to them + doPX = false + // add/refresh backoff so that we don't reGRAFT too early even if the score decays back up + gs.addBackoff(p, bitmask, false) + continue + } + + // check the number of mesh peers; if it is at (or over) Dhi, we only accept grafts + // from peers with outbound connections; this is a defensive check to restrict potential + // mesh takeover attacks combined with love bombing + if len(peers) >= gs.params.Dhi && !gs.outbound[p] { + prune = append(prune, bitmask) + gs.addBackoff(p, bitmask, false) + continue + } + + log.Debugf("GRAFT: add mesh link from %s in %s", p, bitmask) + gs.tracer.Graft(p, bitmask) + peers[p] = struct{}{} + } + + if len(prune) == 0 { + return nil + } + + cprune := make([]*pb.ControlPrune, 0, len(prune)) + for _, bitmask := range prune { + cprune = append(cprune, gs.makePrune(p, bitmask, doPX, false)) + } + + return cprune +} + +func (gs *BlossomSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) { + score := gs.score.Score(p) + + for _, prune := range ctl.GetPrune() { + bitmask := prune.GetBitmask() + peers, ok := gs.mesh[string(bitmask)] + if !ok { + continue + } + + log.Debugf("PRUNE: Remove mesh link to %s in %s", p, bitmask) + gs.tracer.Prune(p, bitmask) + delete(peers, p) + // is there a backoff specified by the peer? if so obey it. 
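+	// The wire value is expressed in whole seconds; zero (or absent) means the peer did
+	// not specify a backoff, in which case we fall back to our own configured backoff.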
+ backoff := prune.GetBackoff() + if backoff > 0 { + gs.doAddBackoff(p, bitmask, time.Duration(backoff)*time.Second) + } else { + gs.addBackoff(p, bitmask, false) + } + + px := prune.GetPeers() + if len(px) > 0 { + // we ignore PX from peers with insufficient score + if score < gs.acceptPXThreshold { + log.Debugf("PRUNE: ignoring PX from peer %s with insufficient score [score = %f, bitmask = %s]", p, score, bitmask) + continue + } + + gs.pxConnect(px) + } + } +} + +func (gs *BlossomSubRouter) addBackoff(p peer.ID, bitmask []byte, isUnsubscribe bool) { + backoff := gs.params.PruneBackoff + if isUnsubscribe { + backoff = gs.params.UnsubscribeBackoff + } + gs.doAddBackoff(p, bitmask, backoff) +} + +func (gs *BlossomSubRouter) doAddBackoff(p peer.ID, bitmask []byte, interval time.Duration) { + backoff, ok := gs.backoff[string(bitmask)] + if !ok { + backoff = make(map[peer.ID]time.Time) + gs.backoff[string(bitmask)] = backoff + } + expire := time.Now().Add(interval) + if backoff[p].Before(expire) { + backoff[p] = expire + } +} + +func (gs *BlossomSubRouter) pxConnect(peers []*pb.PeerInfo) { + if len(peers) > gs.params.PrunePeers { + shufflePeerInfo(peers) + peers = peers[:gs.params.PrunePeers] + } + + toconnect := make([]connectInfo, 0, len(peers)) + + for _, pi := range peers { + p := peer.ID(pi.PeerID) + + _, connected := gs.peers[p] + if connected { + continue + } + + var spr *record.Envelope + if pi.SignedPeerRecord != nil { + // the peer sent us a signed record; ensure that it is valid + envelope, r, err := record.ConsumeEnvelope(pi.SignedPeerRecord, peer.PeerRecordEnvelopeDomain) + if err != nil { + log.Warnf("error unmarshalling peer record obtained through px: %s", err) + continue + } + rec, ok := r.(*peer.PeerRecord) + if !ok { + log.Warnf("bogus peer record obtained through px: envelope payload is not PeerRecord") + continue + } + if rec.PeerID != p { + log.Warnf("bogus peer record obtained through px: peer ID %s doesn't match expected peer %s", rec.PeerID, p) + continue + } + spr = envelope + } + + toconnect = append(toconnect, connectInfo{p, spr}) + } + + if len(toconnect) == 0 { + return + } + + for _, ci := range toconnect { + select { + case gs.connect <- ci: + default: + log.Debugf("ignoring peer connection attempt; too many pending connections") + } + } +} + +func (gs *BlossomSubRouter) connector() { + for { + select { + case ci := <-gs.connect: + if gs.p.host.Network().Connectedness(ci.p) == network.Connected { + continue + } + + log.Debugf("connecting to %s", ci.p) + cab, ok := peerstore.GetCertifiedAddrBook(gs.p.host.Peerstore()) + if ok && ci.spr != nil { + _, err := cab.ConsumePeerRecord(ci.spr, peerstore.TempAddrTTL) + if err != nil { + log.Debugf("error processing peer record: %s", err) + } + } + + ctx, cancel := context.WithTimeout(gs.p.ctx, gs.params.ConnectionTimeout) + err := gs.p.host.Connect(ctx, peer.AddrInfo{ID: ci.p}) + cancel() + if err != nil { + log.Debugf("error connecting to %s: %s", ci.p, err) + } + + case <-gs.p.ctx.Done(): + return + } + } +} + +func (gs *BlossomSubRouter) Publish(msg *Message) { + gs.mcache.Put(msg) + + from := msg.ReceivedFrom + bitmask := msg.GetBitmask() + + tosend := make(map[peer.ID]struct{}) + + // any peers in the bitmask? 
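+	// Candidate selection below: with flood publishing, our own messages go to direct peers
+	// and to every bitmask peer at or above the publish threshold; otherwise we forward to
+	// direct peers, floodsub peers, and either our mesh peers or, if we have not joined the
+	// bitmask, a fanout set of up to D peers above the publish threshold.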
+ tmap, ok := gs.p.bitmasks[string(bitmask)] + if !ok { + return + } + + if gs.floodPublish && from == gs.p.host.ID() { + for p := range tmap { + _, direct := gs.direct[p] + if direct || gs.score.Score(p) >= gs.publishThreshold { + tosend[p] = struct{}{} + } + } + } else { + // direct peers + for p := range gs.direct { + _, inBitmask := tmap[p] + if inBitmask { + tosend[p] = struct{}{} + } + } + + // floodsub peers + for p := range tmap { + if !gs.feature(BlossomSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.publishThreshold { + tosend[p] = struct{}{} + } + } + + // BlossomSub peers + gmap, ok := gs.mesh[string(bitmask)] + if !ok { + // we are not in the mesh for bitmask, use fanout peers + gmap, ok = gs.fanout[string(bitmask)] + if !ok || len(gmap) == 0 { + // we don't have any, pick some with score above the publish threshold + peers := gs.getPeers(bitmask, gs.params.D, func(p peer.ID) bool { + _, direct := gs.direct[p] + return !direct && gs.score.Score(p) >= gs.publishThreshold + }) + + if len(peers) > 0 { + gmap = peerListToMap(peers) + gs.fanout[string(bitmask)] = gmap + } + } + gs.lastpub[string(bitmask)] = time.Now().UnixNano() + } + + for p := range gmap { + tosend[p] = struct{}{} + } + } + + out := rpcWithMessages(msg.Message) + for pid := range tosend { + if pid == from || pid == peer.ID(msg.GetFrom()) { + continue + } + + gs.sendRPC(pid, out) + } +} + +func (gs *BlossomSubRouter) Join(bitmask []byte) { + gmap, ok := gs.mesh[string(bitmask)] + if ok { + return + } + + log.Debugf("JOIN %s", bitmask) + gs.tracer.Join(bitmask) + + gmap, ok = gs.fanout[string(bitmask)] + if ok { + backoff := gs.backoff[string(bitmask)] + // these peers have a score above the publish threshold, which may be negative + // so drop the ones with a negative score + for p := range gmap { + _, doBackOff := backoff[p] + if gs.score.Score(p) < 0 || doBackOff { + delete(gmap, p) + } + } + + if len(gmap) < gs.params.D { + // we need more peers; eager, as this would get fixed in the next heartbeat + more := gs.getPeers(bitmask, gs.params.D-len(gmap), func(p peer.ID) bool { + // filter our current peers, direct peers, peers we are backing off, and + // peers with negative scores + _, inMesh := gmap[p] + _, direct := gs.direct[p] + _, doBackOff := backoff[p] + return !inMesh && !direct && !doBackOff && gs.score.Score(p) >= 0 + }) + for _, p := range more { + gmap[p] = struct{}{} + } + } + gs.mesh[string(bitmask)] = gmap + delete(gs.fanout, string(bitmask)) + delete(gs.lastpub, string(bitmask)) + } else { + backoff := gs.backoff[string(bitmask)] + peers := gs.getPeers(bitmask, gs.params.D, func(p peer.ID) bool { + // filter direct peers, peers we are backing off and peers with negative score + _, direct := gs.direct[p] + _, doBackOff := backoff[p] + return !direct && !doBackOff && gs.score.Score(p) >= 0 + }) + gmap = peerListToMap(peers) + gs.mesh[string(bitmask)] = gmap + } + + for p := range gmap { + log.Debugf("JOIN: Add mesh link to %s in %s", p, bitmask) + gs.tracer.Graft(p, bitmask) + gs.sendGraft(p, bitmask) + } +} + +func (gs *BlossomSubRouter) Leave(bitmask []byte) { + gmap, ok := gs.mesh[string(bitmask)] + if !ok { + return + } + + log.Debugf("LEAVE %s", bitmask) + gs.tracer.Leave(bitmask) + + delete(gs.mesh, string(bitmask)) + + for p := range gmap { + log.Debugf("LEAVE: Remove mesh link to %s in %s", p, bitmask) + gs.tracer.Prune(p, bitmask) + gs.sendPrune(p, bitmask, true) + // Add a backoff to this peer to prevent us from eagerly + // re-grafting this peer into our mesh if we rejoin this + 
// bitmask before the backoff period ends. + gs.addBackoff(p, bitmask, true) + } +} + +func (gs *BlossomSubRouter) sendGraft(p peer.ID, bitmask []byte) { + graft := []*pb.ControlGraft{{Bitmask: bitmask}} + out := rpcWithControl(nil, nil, nil, graft, nil) + gs.sendRPC(p, out) +} + +func (gs *BlossomSubRouter) sendPrune(p peer.ID, bitmask []byte, isUnsubscribe bool) { + prune := []*pb.ControlPrune{gs.makePrune(p, bitmask, gs.doPX, isUnsubscribe)} + out := rpcWithControl(nil, nil, nil, nil, prune) + gs.sendRPC(p, out) +} + +func (gs *BlossomSubRouter) sendRPC(p peer.ID, out *RPC) { + // do we own the RPC? + own := false + + // piggyback control message retries + ctl, ok := gs.control[p] + if ok { + out = copyRPC(out) + own = true + gs.piggybackControl(p, out, ctl) + delete(gs.control, p) + } + + // piggyback gossip + ihave, ok := gs.gossip[p] + if ok { + if !own { + out = copyRPC(out) + own = true + } + gs.piggybackGossip(p, out, ihave) + delete(gs.gossip, p) + } + + mch, ok := gs.p.peers[p] + if !ok { + return + } + + // If we're below the max message size, go ahead and send + if out.Size() < gs.p.maxMessageSize { + gs.doSendRPC(out, p, mch) + return + } + + // If we're too big, fragment into multiple RPCs and send each sequentially + outRPCs, err := fragmentRPC(out, gs.p.maxMessageSize) + if err != nil { + gs.doDropRPC(out, p, fmt.Sprintf("unable to fragment RPC: %s", err)) + return + } + + for _, rpc := range outRPCs { + gs.doSendRPC(rpc, p, mch) + } +} + +func (gs *BlossomSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { + log.Debugf("dropping message to peer %s: %s", p.Pretty(), reason) + gs.tracer.DropRPC(rpc, p) + // push control messages that need to be retried + ctl := rpc.GetControl() + if ctl != nil { + gs.pushControl(p, ctl) + } +} + +func (gs *BlossomSubRouter) doSendRPC(rpc *RPC, p peer.ID, mch chan *RPC) { + select { + case mch <- rpc: + gs.tracer.SendRPC(rpc, p) + default: + gs.doDropRPC(rpc, p, "queue full") + } +} + +func fragmentRPC(rpc *RPC, limit int) ([]*RPC, error) { + if rpc.Size() < limit { + return []*RPC{rpc}, nil + } + + c := (rpc.Size() / limit) + 1 + rpcs := make([]*RPC, 1, c) + rpcs[0] = &RPC{RPC: pb.RPC{}, from: rpc.from} + + // outRPC returns the current RPC message if it will fit sizeToAdd more bytes + // otherwise, it will create a new RPC message and add it to the list. + // if withCtl is true, the returned message will have a non-nil empty Control message. 
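+	// Content is packed in order: publish messages first, then subscriptions, with control
+	// messages last; oversized IHAVE/IWANT message-ID lists are split into buckets that fit
+	// within the limit.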
+ outRPC := func(sizeToAdd int, withCtl bool) *RPC { + current := rpcs[len(rpcs)-1] + // check if we can fit the new data, plus an extra byte for the protobuf field tag + if current.Size()+sizeToAdd+1 < limit { + if withCtl && current.Control == nil { + current.Control = &pb.ControlMessage{} + } + return current + } + var ctl *pb.ControlMessage + if withCtl { + ctl = &pb.ControlMessage{} + } + next := &RPC{RPC: pb.RPC{Control: ctl}, from: rpc.from} + rpcs = append(rpcs, next) + return next + } + + for _, msg := range rpc.GetPublish() { + s := msg.Size() + // if an individual message is too large, we can't fragment it and have to fail entirely + if s > limit { + return nil, fmt.Errorf("message with len=%d exceeds limit %d", s, limit) + } + out := outRPC(s, false) + out.Publish = append(out.Publish, msg) + } + + for _, sub := range rpc.GetSubscriptions() { + out := outRPC(sub.Size(), false) + out.Subscriptions = append(out.Subscriptions, sub) + } + + ctl := rpc.GetControl() + if ctl == nil { + // if there were no control messages, we're done + return rpcs, nil + } + // if all the control messages fit into one RPC, we just add it to the end and return + ctlOut := &RPC{RPC: pb.RPC{Control: ctl}, from: rpc.from} + if ctlOut.Size() < limit { + rpcs = append(rpcs, ctlOut) + return rpcs, nil + } + + // we need to split up the control messages into multiple RPCs + for _, graft := range ctl.Graft { + out := outRPC(graft.Size(), true) + out.Control.Graft = append(out.Control.Graft, graft) + } + for _, prune := range ctl.Prune { + out := outRPC(prune.Size(), true) + out.Control.Prune = append(out.Control.Prune, prune) + } + + // An individual IWANT or IHAVE message could be larger than the limit if we have + // a lot of message IDs. fragmentMessageIds will split them into buckets that + // fit within the limit, with some overhead for the control messages themselves + for _, iwant := range ctl.Iwant { + const protobufOverhead = 6 + idBuckets := fragmentMessageIds(iwant.MessageIDs, limit-protobufOverhead) + for _, ids := range idBuckets { + iwant := &pb.ControlIWant{MessageIDs: ids} + out := outRPC(iwant.Size(), true) + out.Control.Iwant = append(out.Control.Iwant, iwant) + } + } + for _, ihave := range ctl.Ihave { + const protobufOverhead = 6 + idBuckets := fragmentMessageIds(ihave.MessageIDs, limit-protobufOverhead) + for _, ids := range idBuckets { + ihave := &pb.ControlIHave{MessageIDs: ids} + out := outRPC(ihave.Size(), true) + out.Control.Ihave = append(out.Control.Ihave, ihave) + } + } + return rpcs, nil +} + +func fragmentMessageIds(msgIds []string, limit int) [][]string { + // account for two bytes of protobuf overhead per array element + const protobufOverhead = 2 + + out := [][]string{{}} + var currentBucket int + var bucketLen int + for i := 0; i < len(msgIds); i++ { + size := len(msgIds[i]) + protobufOverhead + if size > limit { + // pathological case where a single message ID exceeds the limit. 
+ log.Warnf("message ID length %d exceeds limit %d, removing from outgoing gossip", size, limit) + continue + } + bucketLen += size + if bucketLen > limit { + out = append(out, []string{}) + currentBucket++ + bucketLen = size + } + out[currentBucket] = append(out[currentBucket], msgIds[i]) + } + return out +} + +func (gs *BlossomSubRouter) heartbeatTimer() { + time.Sleep(gs.params.HeartbeatInitialDelay) + select { + case gs.p.eval <- gs.heartbeat: + case <-gs.p.ctx.Done(): + return + } + + ticker := time.NewTicker(gs.params.HeartbeatInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + select { + case gs.p.eval <- gs.heartbeat: + case <-gs.p.ctx.Done(): + return + } + case <-gs.p.ctx.Done(): + return + } + } +} + +func (gs *BlossomSubRouter) heartbeat() { + start := time.Now() + defer func() { + if gs.params.SlowHeartbeatWarning > 0 { + slowWarning := time.Duration(gs.params.SlowHeartbeatWarning * float64(gs.params.HeartbeatInterval)) + if dt := time.Since(start); dt > slowWarning { + log.Warnw("slow heartbeat", "took", dt) + } + } + }() + + gs.heartbeatTicks++ + + tograft := make(map[peer.ID][][]byte) + toprune := make(map[peer.ID][][]byte) + noPX := make(map[peer.ID]bool) + + // clean up expired backoffs + gs.clearBackoff() + + // clean up iasked counters + gs.clearIHaveCounters() + + // apply IWANT request penalties + gs.applyIwantPenalties() + + // ensure direct peers are connected + gs.directConnect() + + // cache scores throughout the heartbeat + scores := make(map[peer.ID]float64) + score := func(p peer.ID) float64 { + s, ok := scores[p] + if !ok { + s = gs.score.Score(p) + scores[p] = s + } + return s + } + + // maintain the mesh for bitmasks we have joined + for bitmask, peers := range gs.mesh { + bitmask := []byte(bitmask) + prunePeer := func(p peer.ID) { + gs.tracer.Prune(p, bitmask) + delete(peers, p) + gs.addBackoff(p, bitmask, false) + bitmasks := toprune[p] + toprune[p] = append(bitmasks, bitmask) + } + + graftPeer := func(p peer.ID) { + log.Debugf("HEARTBEAT: Add mesh link to %s in %s", p, bitmask) + gs.tracer.Graft(p, bitmask) + peers[p] = struct{}{} + bitmasks := tograft[p] + tograft[p] = append(bitmasks, bitmask) + } + + // drop all peers with negative score, without PX + for p := range peers { + if score(p) < 0 { + log.Debugf("HEARTBEAT: Prune peer %s with negative score [score = %f, bitmask = %s]", p, score(p), bitmask) + prunePeer(p) + noPX[p] = true + } + } + + // do we have enough peers? + if l := len(peers); l < gs.params.Dlo { + backoff := gs.backoff[string(bitmask)] + ineed := gs.params.D - l + plst := gs.getPeers(bitmask, ineed, func(p peer.ID) bool { + // filter our current and direct peers, peers we are backing off, and peers with negative score + _, inMesh := peers[p] + _, doBackoff := backoff[p] + _, direct := gs.direct[p] + return !inMesh && !doBackoff && !direct && score(p) >= 0 + }) + + for _, p := range plst { + graftPeer(p) + } + } + + // do we have too many peers? 
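+	// (With the default parameters this triggers once a mesh grows beyond Dhi=12; we prune
+	// back down to D=6, retaining the Dscore=4 best-scoring peers and, when available, at
+	// least Dout=2 outbound peers.)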
+ if len(peers) > gs.params.Dhi { + plst := peerMapToList(peers) + + // sort by score (but shuffle first for the case we don't use the score) + shufflePeers(plst) + sort.Slice(plst, func(i, j int) bool { + return score(plst[i]) > score(plst[j]) + }) + + // We keep the first D_score peers by score and the remaining up to D randomly + // under the constraint that we keep D_out peers in the mesh (if we have that many) + shufflePeers(plst[gs.params.Dscore:]) + + // count the outbound peers we are keeping + outbound := 0 + for _, p := range plst[:gs.params.D] { + if gs.outbound[p] { + outbound++ + } + } + + // if it's less than D_out, bubble up some outbound peers from the random selection + if outbound < gs.params.Dout { + rotate := func(i int) { + // rotate the plst to the right and put the ith peer in the front + p := plst[i] + for j := i; j > 0; j-- { + plst[j] = plst[j-1] + } + plst[0] = p + } + + // first bubble up all outbound peers already in the selection to the front + if outbound > 0 { + ihave := outbound + for i := 1; i < gs.params.D && ihave > 0; i++ { + p := plst[i] + if gs.outbound[p] { + rotate(i) + ihave-- + } + } + } + + // now bubble up enough outbound peers outside the selection to the front + ineed := gs.params.Dout - outbound + for i := gs.params.D; i < len(plst) && ineed > 0; i++ { + p := plst[i] + if gs.outbound[p] { + rotate(i) + ineed-- + } + } + } + + // prune the excess peers + for _, p := range plst[gs.params.D:] { + log.Debugf("HEARTBEAT: Remove mesh link to %s in %s", p, bitmask) + prunePeer(p) + } + } + + // do we have enough outboud peers? + if len(peers) >= gs.params.Dlo { + // count the outbound peers we have + outbound := 0 + for p := range peers { + if gs.outbound[p] { + outbound++ + } + } + + // if it's less than D_out, select some peers with outbound connections and graft them + if outbound < gs.params.Dout { + ineed := gs.params.Dout - outbound + backoff := gs.backoff[string(bitmask)] + plst := gs.getPeers(bitmask, ineed, func(p peer.ID) bool { + // filter our current and direct peers, peers we are backing off, and peers with negative score + _, inMesh := peers[p] + _, doBackoff := backoff[p] + _, direct := gs.direct[p] + return !inMesh && !doBackoff && !direct && gs.outbound[p] && score(p) >= 0 + }) + + for _, p := range plst { + graftPeer(p) + } + } + } + + // should we try to improve the mesh with opportunistic grafting? + if gs.heartbeatTicks%gs.params.OpportunisticGraftTicks == 0 && len(peers) > 1 { + // Opportunistic grafting works as follows: we check the median score of peers in the + // mesh; if this score is below the opportunisticGraftThreshold, we select a few peers at + // random with score over the median. + // The intention is to (slowly) improve an underperforming mesh by introducing good + // scoring peers that may have been gossiping at us. This allows us to get out of sticky + // situations where we are stuck with poor peers and also recover from churn of good peers. 
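+	// With the defaults (OpportunisticGraftTicks=60, HeartbeatInterval=1s) this check runs
+	// roughly once a minute and grafts at most OpportunisticGraftPeers=2 peers per bitmask.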
+ + // now compute the median peer score in the mesh + plst := peerMapToList(peers) + sort.Slice(plst, func(i, j int) bool { + return score(plst[i]) < score(plst[j]) + }) + medianIndex := len(peers) / 2 + medianScore := scores[plst[medianIndex]] + + // if the median score is below the threshold, select a better peer (if any) and GRAFT + if medianScore < gs.opportunisticGraftThreshold { + backoff := gs.backoff[string(bitmask)] + plst = gs.getPeers(bitmask, gs.params.OpportunisticGraftPeers, func(p peer.ID) bool { + _, inMesh := peers[p] + _, doBackoff := backoff[p] + _, direct := gs.direct[p] + return !inMesh && !doBackoff && !direct && score(p) > medianScore + }) + + for _, p := range plst { + log.Debugf("HEARTBEAT: Opportunistically graft peer %s on bitmask %s", p, bitmask) + graftPeer(p) + } + } + } + + // 2nd arg are mesh peers excluded from gossip. We already push + // messages to them, so its redundant to gossip IHAVEs. + gs.emitGossip(bitmask, peers) + } + + // expire fanout for bitmasks we haven't published to in a while + now := time.Now().UnixNano() + for bitmask, lastpub := range gs.lastpub { + if lastpub+int64(gs.params.FanoutTTL) < now { + delete(gs.fanout, bitmask) + delete(gs.lastpub, bitmask) + } + } + + // maintain our fanout for bitmasks we are publishing but we have not joined + for bitmask, peers := range gs.fanout { + bitmask := []byte(bitmask) + // check whether our peers are still in the bitmask and have a score above the publish threshold + for p := range peers { + _, ok := gs.p.bitmasks[string(bitmask)][p] + if !ok || score(p) < gs.publishThreshold { + delete(peers, p) + } + } + + // do we need more peers? + if len(peers) < gs.params.D { + ineed := gs.params.D - len(peers) + plst := gs.getPeers(bitmask, ineed, func(p peer.ID) bool { + // filter our current and direct peers and peers with score above the publish threshold + _, inFanout := peers[p] + _, direct := gs.direct[p] + return !inFanout && !direct && score(p) >= gs.publishThreshold + }) + + for _, p := range plst { + peers[p] = struct{}{} + } + } + + // 2nd arg are fanout peers excluded from gossip. We already push + // messages to them, so its redundant to gossip IHAVEs. 
+ gs.emitGossip(bitmask, peers) + } + + // send coalesced GRAFT/PRUNE messages (will piggyback gossip) + gs.sendGraftPrune(tograft, toprune, noPX) + + // flush all pending gossip that wasn't piggybacked above + gs.flush() + + // advance the message history window + gs.mcache.Shift() +} + +func (gs *BlossomSubRouter) clearIHaveCounters() { + if len(gs.peerhave) > 0 { + // throw away the old map and make a new one + gs.peerhave = make(map[peer.ID]int) + } + + if len(gs.iasked) > 0 { + // throw away the old map and make a new one + gs.iasked = make(map[peer.ID]int) + } +} + +func (gs *BlossomSubRouter) applyIwantPenalties() { + for p, count := range gs.gossipTracer.GetBrokenPromises() { + log.Infof("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) + gs.score.AddPenalty(p, count) + } +} + +func (gs *BlossomSubRouter) clearBackoff() { + // we only clear once every 15 ticks to avoid iterating over the map(s) too much + if gs.heartbeatTicks%15 != 0 { + return + } + + now := time.Now() + for bitmask, backoff := range gs.backoff { + for p, expire := range backoff { + // add some slack time to the expiration + // https://github.com/libp2p/specs/pull/289 + if expire.Add(2 * BlossomSubHeartbeatInterval).Before(now) { + delete(backoff, p) + } + } + if len(backoff) == 0 { + delete(gs.backoff, bitmask) + } + } +} + +func (gs *BlossomSubRouter) directConnect() { + // we donly do this every some ticks to allow pending connections to complete and account + // for restarts/downtime + if gs.heartbeatTicks%gs.params.DirectConnectTicks != 0 { + return + } + + var toconnect []peer.ID + for p := range gs.direct { + _, connected := gs.peers[p] + if !connected { + toconnect = append(toconnect, p) + } + } + + if len(toconnect) > 0 { + go func() { + for _, p := range toconnect { + gs.connect <- connectInfo{p: p} + } + }() + } +} + +func (gs *BlossomSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][][]byte, noPX map[peer.ID]bool) { + for p, bitmasks := range tograft { + graft := make([]*pb.ControlGraft, 0, len(bitmasks)) + for _, bitmask := range bitmasks { + // copy bitmask []byte here since + // the reference to the string + // bitmask here changes with every + // iteration of the slice. + copiedID := bitmask + graft = append(graft, &pb.ControlGraft{Bitmask: copiedID}) + } + + var prune []*pb.ControlPrune + pruning, ok := toprune[p] + if ok { + delete(toprune, p) + prune = make([]*pb.ControlPrune, 0, len(pruning)) + for _, bitmask := range pruning { + prune = append(prune, gs.makePrune(p, bitmask, gs.doPX && !noPX[p], false)) + } + } + + out := rpcWithControl(nil, nil, nil, graft, prune) + gs.sendRPC(p, out) + } + + for p, bitmasks := range toprune { + prune := make([]*pb.ControlPrune, 0, len(bitmasks)) + for _, bitmask := range bitmasks { + prune = append(prune, gs.makePrune(p, bitmask, gs.doPX && !noPX[p], false)) + } + + out := rpcWithControl(nil, nil, nil, nil, prune) + gs.sendRPC(p, out) + } +} + +// emitGossip emits IHAVE gossip advertising items in the message cache window +// of this bitmask. 
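+//
+// Gossip targets GossipFactor of the eligible peers outside the mesh (or fanout), with a
+// floor of Dlazy; with the defaults (Dlazy=6, GossipFactor=0.25), a bitmask with 100
+// eligible peers receives IHAVE from us at 25 of them each heartbeat, and each IHAVE list
+// is truncated to at most MaxIHaveLength message ids per peer.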
+func (gs *BlossomSubRouter) emitGossip(bitmask []byte, exclude map[peer.ID]struct{}) { + mids := gs.mcache.GetGossipIDs(bitmask) + if len(mids) == 0 { + return + } + + // shuffle to emit in random order + shuffleStrings(mids) + + // if we are emitting more than BlossomSubMaxIHaveLength mids, truncate the list + if len(mids) > gs.params.MaxIHaveLength { + // we do the truncation (with shuffling) per peer below + log.Debugf("too many messages for gossip; will truncate IHAVE list (%d messages)", len(mids)) + } + + // Send gossip to GossipFactor peers above threshold, with a minimum of D_lazy. + // First we collect the peers above gossipThreshold that are not in the exclude set + // and then randomly select from that set. + // We also exclude direct peers, as there is no reason to emit gossip to them. + peers := make([]peer.ID, 0, len(gs.p.bitmasks[string(bitmask)])) + for p := range gs.p.bitmasks[string(bitmask)] { + _, inExclude := exclude[p] + _, direct := gs.direct[p] + if !inExclude && !direct && gs.feature(BlossomSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.gossipThreshold { + peers = append(peers, p) + } + } + + target := gs.params.Dlazy + factor := int(gs.params.GossipFactor * float64(len(peers))) + if factor > target { + target = factor + } + + if target > len(peers) { + target = len(peers) + } else { + shufflePeers(peers) + } + peers = peers[:target] + + // Emit the IHAVE gossip to the selected peers. + for _, p := range peers { + peerMids := mids + if len(mids) > gs.params.MaxIHaveLength { + // we do this per peer so that we emit a different set for each peer. + // we have enough redundancy in the system that this will significantly increase the message + // coverage when we do truncate. + peerMids = make([]string, gs.params.MaxIHaveLength) + shuffleStrings(mids) + copy(peerMids, mids) + } + gs.enqueueGossip(p, &pb.ControlIHave{Bitmask: bitmask, MessageIDs: peerMids}) + } +} + +func (gs *BlossomSubRouter) flush() { + // send gossip first, which will also piggyback pending control + for p, ihave := range gs.gossip { + delete(gs.gossip, p) + out := rpcWithControl(nil, ihave, nil, nil, nil) + gs.sendRPC(p, out) + } + + // send the remaining control messages that wasn't merged with gossip + for p, ctl := range gs.control { + delete(gs.control, p) + out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune) + gs.sendRPC(p, out) + } +} + +func (gs *BlossomSubRouter) enqueueGossip(p peer.ID, ihave *pb.ControlIHave) { + gossip := gs.gossip[p] + gossip = append(gossip, ihave) + gs.gossip[p] = gossip +} + +func (gs *BlossomSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.ControlIHave) { + ctl := out.GetControl() + if ctl == nil { + ctl = &pb.ControlMessage{} + out.Control = ctl + } + + ctl.Ihave = ihave +} + +func (gs *BlossomSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) { + // remove IHAVE/IWANT from control message, gossip is not retried + ctl.Ihave = nil + ctl.Iwant = nil + if ctl.Graft != nil || ctl.Prune != nil { + gs.control[p] = ctl + } +} + +func (gs *BlossomSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.ControlMessage) { + // check control message for staleness first + var tograft []*pb.ControlGraft + var toprune []*pb.ControlPrune + + for _, graft := range ctl.GetGraft() { + bitmask := graft.GetBitmask() + peers, ok := gs.mesh[string(bitmask)] + if !ok { + continue + } + _, ok = peers[p] + if ok { + tograft = append(tograft, graft) + } + } + + for _, prune := range ctl.GetPrune() { + bitmask := prune.GetBitmask() + peers, ok := 
gs.mesh[string(bitmask)] + if !ok { + toprune = append(toprune, prune) + continue + } + _, ok = peers[p] + if !ok { + toprune = append(toprune, prune) + } + } + + if len(tograft) == 0 && len(toprune) == 0 { + return + } + + xctl := out.Control + if xctl == nil { + xctl = &pb.ControlMessage{} + out.Control = xctl + } + + if len(tograft) > 0 { + xctl.Graft = append(xctl.Graft, tograft...) + } + if len(toprune) > 0 { + xctl.Prune = append(xctl.Prune, toprune...) + } +} + +func (gs *BlossomSubRouter) makePrune(p peer.ID, bitmask []byte, doPX bool, isUnsubscribe bool) *pb.ControlPrune { + if !gs.feature(BlossomSubFeaturePX, gs.peers[p]) { + // BlossomSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway + return &pb.ControlPrune{Bitmask: bitmask} + } + + backoff := uint64(gs.params.PruneBackoff / time.Second) + if isUnsubscribe { + backoff = uint64(gs.params.UnsubscribeBackoff / time.Second) + } + + var px []*pb.PeerInfo + if doPX { + // select peers for Peer eXchange + peers := gs.getPeers(bitmask, gs.params.PrunePeers, func(xp peer.ID) bool { + return p != xp && gs.score.Score(xp) >= 0 + }) + + cab, ok := peerstore.GetCertifiedAddrBook(gs.p.host.Peerstore()) + px = make([]*pb.PeerInfo, 0, len(peers)) + for _, p := range peers { + // see if we have a signed peer record to send back; if we don't, just send + // the peer ID and let the pruned peer find them in the DHT -- we can't trust + // unsigned address records through px anyway. + var recordBytes []byte + if ok { + spr := cab.GetPeerRecord(p) + var err error + if spr != nil { + recordBytes, err = spr.Marshal() + if err != nil { + log.Warnf("error marshaling signed peer record for %s: %s", p, err) + } + } + } + px = append(px, &pb.PeerInfo{PeerID: []byte(p), SignedPeerRecord: recordBytes}) + } + } + + return &pb.ControlPrune{Bitmask: bitmask, Peers: px, Backoff: backoff} +} + +func (gs *BlossomSubRouter) getPeers(bitmask []byte, count int, filter func(peer.ID) bool) []peer.ID { + tmap, ok := gs.p.bitmasks[string(bitmask)] + if !ok { + return nil + } + + peers := make([]peer.ID, 0, len(tmap)) + for p := range tmap { + if gs.feature(BlossomSubFeatureMesh, gs.peers[p]) && filter(p) && gs.p.peerFilter(p, bitmask) { + peers = append(peers, p) + } + } + + shufflePeers(peers) + + if count > 0 && len(peers) > count { + peers = peers[:count] + } + + return peers +} + +// WithDefaultTagTracer returns the tag tracer of the BlossomSubRouter as a PubSub option. +// This is useful for cases where the BlossomSubRouter is instantiated externally, and is +// injected into the BlossomSub constructor as a dependency. This allows the tag tracer to be +// also injected into the BlossomSub constructor as a PubSub option dependency. 
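+//
+// For example (a sketch, assuming a host h and context ctx):
+//
+//	rt := DefaultBlossomSubRouter(h)
+//	ps, err := NewBlossomSubWithRouter(ctx, h, rt, rt.WithDefaultTagTracer())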
+func (gs *BlossomSubRouter) WithDefaultTagTracer() Option { + return WithRawTracer(gs.tagTracer) +} + +func peerListToMap(peers []peer.ID) map[peer.ID]struct{} { + pmap := make(map[peer.ID]struct{}) + for _, p := range peers { + pmap[p] = struct{}{} + } + return pmap +} + +func peerMapToList(peers map[peer.ID]struct{}) []peer.ID { + plst := make([]peer.ID, 0, len(peers)) + for p := range peers { + plst = append(plst, p) + } + return plst +} + +func shufflePeers(peers []peer.ID) { + for i := range peers { + j := rand.Intn(i + 1) + peers[i], peers[j] = peers[j], peers[i] + } +} + +func shufflePeerInfo(peers []*pb.PeerInfo) { + for i := range peers { + j := rand.Intn(i + 1) + peers[i], peers[j] = peers[j], peers[i] + } +} + +func shuffleStrings(lst []string) { + for i := range lst { + j := rand.Intn(i + 1) + lst[i], lst[j] = lst[j], lst[i] + } +} diff --git a/go-libp2p-blossomsub/blossomsub_connmgr_test.go b/go-libp2p-blossomsub/blossomsub_connmgr_test.go new file mode 100644 index 0000000..f3bb81e --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub_connmgr_test.go @@ -0,0 +1,172 @@ +package blossomsub + +import ( + "context" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/host" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + + "github.com/libp2p/go-libp2p/core/peer" + bhost "github.com/libp2p/go-libp2p/p2p/host/blank" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" +) + +func TestBlossomSubConnTagMessageDeliveries(t *testing.T) { + t.Skip("Test disabled with go-libp2p v0.22.0") // TODO: reenable test when updating to v0.23.0 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + oldBlossomSubD := BlossomSubD + oldBlossomSubDlo := BlossomSubDlo + oldBlossomSubDHi := BlossomSubDhi + oldBlossomSubConnTagDecayInterval := BlossomSubConnTagDecayInterval + oldBlossomSubConnTagMessageDeliveryCap := BlossomSubConnTagMessageDeliveryCap + + // set the BlossomSub D parameters low, so that we have some peers outside the mesh + BlossomSubDlo = 3 + BlossomSubD = 3 + BlossomSubDhi = 3 + // also set the tag decay interval so we don't have to wait forever for tests + BlossomSubConnTagDecayInterval = time.Second + + // set the cap for deliveries above BlossomSubConnTagValueMeshPeer, so the sybils + // will be forced out even if they end up in someone's mesh + BlossomSubConnTagMessageDeliveryCap = 50 + + // reset globals after test + defer func() { + BlossomSubD = oldBlossomSubD + BlossomSubDlo = oldBlossomSubDlo + BlossomSubDhi = oldBlossomSubDHi + BlossomSubConnTagDecayInterval = oldBlossomSubConnTagDecayInterval + BlossomSubConnTagMessageDeliveryCap = oldBlossomSubConnTagMessageDeliveryCap + }() + + decayClock := clock.NewMock() + decayCfg := connmgr.DecayerCfg{ + Resolution: time.Second, + Clock: decayClock, + } + + nHonest := 5 + nSquatter := 10 + connLimit := 10 + + connmgrs := make([]*connmgr.BasicConnMgr, nHonest) + honestHosts := make([]host.Host, nHonest) + honestPeers := make(map[peer.ID]struct{}) + + for i := 0; i < nHonest; i++ { + var err error + connmgrs[i], err = connmgr.NewConnManager(nHonest, connLimit, + connmgr.WithGracePeriod(0), + connmgr.WithSilencePeriod(time.Millisecond), + connmgr.DecayerConfig(&decayCfg), + ) + if err != nil { + t.Fatal(err) + } + + netw := swarmt.GenSwarm(t) + defer netw.Close() + h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i])) + honestHosts[i] = h + honestPeers[h.ID()] = struct{}{} + } + + // use flood publishing, so non-mesh peers will still be delivering 
messages + // to everyone + psubs := getBlossomSubs(ctx, honestHosts, + WithFloodPublish(true)) + + // sybil squatters to be connected later + sybilHosts := getNetHosts(t, ctx, nSquatter) + for _, h := range sybilHosts { + squatter := &sybilSquatter{h: h} + h.SetStreamHandler(BlossomSubID_v11, squatter.handleStream) + } + + // connect the honest hosts + connectAll(t, honestHosts) + + for _, h := range honestHosts { + if len(h.Network().Conns()) != nHonest-1 { + t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns())) + } + } + + // subscribe everyone to the bitmask + bitmask := []byte{0xff, 0x00, 0x00, 0x00} + for _, ps := range psubs { + _, err := ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + } + + // sleep to allow meshes to form + time.Sleep(2 * time.Second) + + // have all the hosts publish enough messages to ensure that they get some delivery credit + nMessages := BlossomSubConnTagMessageDeliveryCap * 2 + for _, ps := range psubs { + for i := 0; i < nMessages; i++ { + ps.Publish(bitmask, []byte("hello")) + } + } + + // advance the fake time for the tag decay + decayClock.Add(time.Second) + + // verify that they've given each other delivery connection tags + tag := "pubsub-deliveries:test" + for _, h := range honestHosts { + for _, h2 := range honestHosts { + if h.ID() == h2.ID() { + continue + } + val := getTagValue(h.ConnManager(), h2.ID(), tag) + if val == 0 { + t.Errorf("Expected non-zero delivery tag value for peer %s", h2.ID()) + } + } + } + + // now connect the sybils to put pressure on the real hosts' connection managers + allHosts := append(honestHosts, sybilHosts...) + connectAll(t, allHosts) + + // verify that we have a bunch of connections + for _, h := range honestHosts { + if len(h.Network().Conns()) != nHonest+nSquatter-1 { + t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns())) + } + } + + // force the connection managers to trim, so we don't need to muck about with timing as much + for _, cm := range connmgrs { + cm.TrimOpenConns(ctx) + } + + // we should still have conns to all the honest peers, but not the sybils + for _, h := range honestHosts { + nHonestConns := 0 + nDishonestConns := 0 + for _, conn := range h.Network().Conns() { + if _, ok := honestPeers[conn.RemotePeer()]; !ok { + nDishonestConns++ + } else { + nHonestConns++ + } + } + if nDishonestConns > connLimit-nHonest { + t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns) + } + if nHonestConns != nHonest-1 { + t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns) + } + } +} diff --git a/go-libp2p-blossomsub/blossomsub_feat.go b/go-libp2p-blossomsub/blossomsub_feat.go new file mode 100644 index 0000000..d9c7b2d --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub_feat.go @@ -0,0 +1,52 @@ +package blossomsub + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/protocol" +) + +// BlossomSubFeatureTest is a feature test function; it takes a feature and a protocol ID and +// should return true if the feature is supported by the protocol +type BlossomSubFeatureTest = func(BlossomSubFeature, protocol.ID) bool + +// BlossomSubFeature is a feature discriminant enum +type BlossomSubFeature int + +const ( + // Protocol supports basic BlossomSub Mesh -- BlossomSub-v1.1 compatible + BlossomSubFeatureMesh = iota + // Protocol supports Peer eXchange on prune -- BlossomSub-v1.1 compatible + BlossomSubFeaturePX +) + +// BlossomSubDefaultProtocols is the default BlossomSub router protocol 
list
+var BlossomSubDefaultProtocols = []protocol.ID{BlossomSubID_v11, FloodSubID}
+
+// BlossomSubDefaultFeatures is the feature test function for the default BlossomSub protocols
+func BlossomSubDefaultFeatures(feat BlossomSubFeature, proto protocol.ID) bool {
+	switch feat {
+	case BlossomSubFeatureMesh:
+		return proto == BlossomSubID_v11
+	case BlossomSubFeaturePX:
+		return proto == BlossomSubID_v11
+	default:
+		return false
+	}
+}
+
+// WithBlossomSubProtocols is a BlossomSub router option that configures a custom protocol list
+// and feature test function
+func WithBlossomSubProtocols(protos []protocol.ID, feature BlossomSubFeatureTest) Option {
+	return func(ps *PubSub) error {
+		gs, ok := ps.rt.(*BlossomSubRouter)
+		if !ok {
+			return fmt.Errorf("pubsub router is not BlossomSub")
+		}
+
+		gs.protos = protos
+		gs.feature = feature
+
+		return nil
+	}
+}
diff --git a/go-libp2p-blossomsub/blossomsub_feat_test.go b/go-libp2p-blossomsub/blossomsub_feat_test.go
new file mode 100644
index 0000000..86064a8
--- /dev/null
+++ b/go-libp2p-blossomsub/blossomsub_feat_test.go
@@ -0,0 +1,109 @@
+package blossomsub
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+func TestDefaultBlossomSubFeatures(t *testing.T) {
+	if BlossomSubDefaultFeatures(BlossomSubFeatureMesh, FloodSubID) {
+		t.Fatal("floodsub should not support Mesh")
+	}
+	if !BlossomSubDefaultFeatures(BlossomSubFeatureMesh, BlossomSubID_v11) {
+		t.Fatal("BlossomSub-v1.1 should support Mesh")
+	}
+
+	if BlossomSubDefaultFeatures(BlossomSubFeaturePX, FloodSubID) {
+		t.Fatal("floodsub should not support PX")
+	}
+	if !BlossomSubDefaultFeatures(BlossomSubFeaturePX, BlossomSubID_v11) {
+		t.Fatal("BlossomSub-v1.1 should support PX")
+	}
+}
+
+func TestBlossomSubCustomProtocols(t *testing.T) {
+	customsub := protocol.ID("customsub/1.0.0")
+	protos := []protocol.ID{customsub, FloodSubID}
+	features := func(feat BlossomSubFeature, proto protocol.ID) bool {
+		return proto == customsub
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getNetHosts(t, ctx, 3)
+
+	gsubs := getBlossomSubs(ctx, hosts[:2], WithBlossomSubProtocols(protos, features))
+	fsub := getPubsub(ctx, hosts[2])
+	psubs := append(gsubs, fsub)
+
+	connectAll(t, hosts)
+
+	bitmask := []byte{0xff, 0x00, 0x00, 0x00}
+	var subs []*Subscription
+	for _, ps := range psubs {
+		subch, err := ps.Subscribe(bitmask)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		subs = append(subs, subch)
+	}
+
+	// wait for heartbeats to build mesh
+	time.Sleep(time.Second * 2)
+
+	// check the meshes of the gsubs; the BlossomSub meshes should include each other but not the
+	// floodsub peer
+	gsubs[0].eval <- func() {
+		gs := gsubs[0].rt.(*BlossomSubRouter)
+
+		_, ok := gs.mesh[string(bitmask)][hosts[1].ID()]
+		if !ok {
+			t.Fatal("expected gs0 to have gs1 in its mesh")
+		}
+
+		_, ok = gs.mesh[string(bitmask)][hosts[2].ID()]
+		if ok {
+			t.Fatal("expected gs0 to not have fs in its mesh")
+		}
+	}
+
+	gsubs[1].eval <- func() {
+		gs := gsubs[1].rt.(*BlossomSubRouter)
+
+		_, ok := gs.mesh[string(bitmask)][hosts[0].ID()]
+		if !ok {
+			t.Fatal("expected gs1 to have gs0 in its mesh")
+		}
+
+		_, ok = gs.mesh[string(bitmask)][hosts[2].ID()]
+		if ok {
+			t.Fatal("expected gs1 to not have fs in its mesh")
+		}
+	}
+
+	// send some messages
+	for i := 0; i < 10; i++ {
+		msg := []byte(fmt.Sprintf("%d it's not quite a floooooood %d", i, i))
+
+		owner := rand.Intn(len(psubs))
+
+		psubs[owner].Publish(bitmask, msg)
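// Editor's note -- illustrative sketch, not part of the patch. The test above
// pairs a custom protocol list with a feature-test function via
// WithBlossomSubProtocols. A deployment speaking a private protocol next to the
// stock v1.1 wire protocol could use something like the hypothetical
// customFeatures below; the module import path is an assumption based on the pb
// import used elsewhere in this patch.
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/protocol"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

const customsub = protocol.ID("/customsub/1.0.0")

// customFeatures reports Mesh and PX support for both the custom protocol and
// BlossomSub v1.1, and no features for anything else (e.g. floodsub).
func customFeatures(feat blossomsub.BlossomSubFeature, proto protocol.ID) bool {
	switch feat {
	case blossomsub.BlossomSubFeatureMesh, blossomsub.BlossomSubFeaturePX:
		return proto == customsub || proto == blossomsub.BlossomSubID_v11
	default:
		return false
	}
}

func main() {
	fmt.Println(customFeatures(blossomsub.BlossomSubFeatureMesh, customsub))           // true
	fmt.Println(customFeatures(blossomsub.BlossomSubFeaturePX, blossomsub.FloodSubID)) // false
	// The pair would then be wired in with:
	//   blossomsub.WithBlossomSubProtocols([]protocol.ID{customsub, blossomsub.BlossomSubID_v11}, customFeatures)
}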
+ + for _, sub := range subs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} diff --git a/go-libp2p-blossomsub/blossomsub_matchfn_test.go b/go-libp2p-blossomsub/blossomsub_matchfn_test.go new file mode 100644 index 0000000..c45598b --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub_matchfn_test.go @@ -0,0 +1,84 @@ +package blossomsub + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/protocol" +) + +func TestBlossomSubMatchingFn(t *testing.T) { + customsubA100 := protocol.ID("/customsub_a/1.0.0") + customsubA101Beta := protocol.ID("/customsub_a/1.0.1-beta") + customsubB100 := protocol.ID("/customsub_b/1.0.0") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 4) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA100, BlossomSubID_v11}, BlossomSubDefaultFeatures)), + getBlossomSub(ctx, h[1], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA101Beta}, BlossomSubDefaultFeatures)), + getBlossomSub(ctx, h[2], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{BlossomSubID_v11}, BlossomSubDefaultFeatures)), + getBlossomSub(ctx, h[3], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubB100}, BlossomSubDefaultFeatures)), + } + + connect(t, h[0], h[1]) + connect(t, h[0], h[2]) + connect(t, h[0], h[3]) + + // verify that the peers are connected + time.Sleep(2 * time.Second) + for i := 1; i < len(h); i++ { + if len(h[0].Network().ConnsToPeer(h[i].ID())) == 0 { + t.Fatal("expected a connection between peers") + } + } + + // build the mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + // publish a message + msg := []byte("message") + psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg) + + assertReceive(t, subs[0], msg) + assertReceive(t, subs[1], msg) // Should match via semver over CustomSub name, ignoring the version + assertReceive(t, subs[2], msg) // Should match via BlossomSubID_v11 + + // No message should be received because customsubA and customsubB have different names + ctxTimeout, timeoutCancel := context.WithTimeout(context.Background(), 1*time.Second) + defer timeoutCancel() + received := false + for { + msg, err := subs[3].Next(ctxTimeout) + if err != nil { + break + } + if msg != nil { + received = true + } + } + if received { + t.Fatal("Should not have received a message") + } +} + +func protocolNameMatch(base protocol.ID) func(protocol.ID) bool { + return func(check protocol.ID) bool { + baseName := strings.Split(string(base), "/")[1] + checkName := strings.Split(string(check), "/")[1] + return baseName == checkName + } +} diff --git a/go-libp2p-blossomsub/blossomsub_spam_test.go b/go-libp2p-blossomsub/blossomsub_spam_test.go new file mode 100644 index 0000000..070e35e --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub_spam_test.go @@ -0,0 +1,813 @@ +package blossomsub + +import ( + "context" + "math/rand" + "strconv" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + pb 
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-msgio/protoio" +) + +// Test that when BlossomSub receives too many IWANT messages from a peer +// for the same message ID, it cuts off the peer +func TestBlossomSubAttackSpamIWANT(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create legitimate and attacker hosts + hosts := getNetHosts(t, ctx, 2) + legit := hosts[0] + attacker := hosts[1] + + // Set up BlossomSub on the legit host + ps, err := NewBlossomSub(ctx, legit) + if err != nil { + t.Fatal(err) + } + + // Subscribe to mybitmask on the legit host + mybitmask := []byte{0xff, 0x00, 0x00} + _, err = ps.Subscribe(mybitmask) + if err != nil { + t.Fatal(err) + } + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 16) + rand.Read(data) + + if err = ps.Publish(mybitmask, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we got the + // right number of messages + msgWaitMax := time.Second + msgCount := 0 + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received the right number of messages + checkMsgCount := func() { + // After the original message from the legit host, we keep sending + // IWANT until it stops replying. So the number of messages is + // + BlossomSubGossipRetransmission + exp := 1 + BlossomSubGossipRetransmission + if msgCount != exp { + t.Fatalf("Expected %d messages, got %d", exp, msgCount) + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgCount() + cancel() + return + case <-ctx.Done(): + checkMsgCount() + } + }() + + newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the legit host connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the bitmask and grafting to the peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}}, + }) + + go func() { + // Wait for a short interval to make sure the legit host + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Publish a message from the legit host + publishMsg() + }() + } + } + + // Each time the legit host sends a message + for _, msg := range irpc.GetPublish() { + // Increment the number of messages and reset the timer + msgCount++ + msgTimer.Reset(msgWaitMax) + + // Shouldn't get more than the expected number of messages + exp := 1 + BlossomSubGossipRetransmission + if msgCount > exp { + cancel() + t.Fatal("Received too many responses") + } + + // Send an IWANT with the message ID, causing the legit host + // to send another message (until it cuts off the attacker for + // being spammy) + iwantlst := []string{DefaultMsgIdFn(msg)} + iwant := []*pb.ControlIWant{{MessageIDs: iwantlst}} + orpc := rpcWithControl(nil, nil, iwant, nil, nil) + writeMsg(&orpc.RPC) + } + }) + + connect(t, hosts[0], hosts[1]) + + <-ctx.Done() +} + +// Test that BlossomSub only responds to IHAVE with IWANT once per heartbeat +func TestBlossomSubAttackSpamIHAVE(t *testing.T) { + originalBlossomSubIWantFollowupTime := BlossomSubIWantFollowupTime + BlossomSubIWantFollowupTime = 10 * time.Second + defer func() { + BlossomSubIWantFollowupTime = originalBlossomSubIWantFollowupTime + }() + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + // Create legitimate and attacker hosts + hosts := getNetHosts(t, ctx, 2) + legit := hosts[0] + attacker := hosts[1] + + // Set up BlossomSub on the legit host + ps, err := NewBlossomSub(ctx, legit, + WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + BehaviourPenaltyWeight: -1, + BehaviourPenaltyDecay: ScoreParameterDecay(time.Minute), + DecayInterval: DefaultDecayInterval, + DecayToZero: DefaultDecayToZero, + }, + &PeerScoreThresholds{ + GossipThreshold: -100, + PublishThreshold: -500, + GraylistThreshold: -1000, + })) + if err != nil { + t.Fatal(err) + } + + // Subscribe to mybitmask on the legit host + mybitmask := []byte{0xff, 0x00, 0x00} + _, err = ps.Subscribe(mybitmask) + if err != nil { + t.Fatal(err) + } + + iWantCount := 0 + iWantCountMx := sync.Mutex{} + getIWantCount := func() int { + iWantCountMx.Lock() + defer iWantCountMx.Unlock() + return iWantCount + } + addIWantCount := func(i int) { + iWantCountMx.Lock() + defer iWantCountMx.Unlock() + iWantCount += i + } + + newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the legit host connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the bitmask and grafting to the peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}}, + }) + + go func() { + defer cancel() + + // Wait for a short interval to make sure the legit host + // received and processed the subscribe + graft + time.Sleep(20 * time.Millisecond) + + // Send a bunch of IHAVEs + for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ { + ihavelst := []string{"someid" + strconv.Itoa(i)} + ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}} + orpc := rpcWithControl(nil, ihave, nil, nil, nil) + writeMsg(&orpc.RPC) + } + + select { + case <-ctx.Done(): + return + case <-time.After(BlossomSubHeartbeatInterval): + } + + // Should have hit the maximum number of IWANTs per peer + // per heartbeat + iwc := getIWantCount() + if iwc > BlossomSubMaxIHaveLength { + t.Errorf("Expecting max %d IWANTs per heartbeat but received %d", BlossomSubMaxIHaveLength, iwc) + return // cannot call t.Fatalf in a non-test goroutine + } + firstBatchCount := iwc + + // the score should still be 0 because we haven't broken any promises yet + score := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + if score != 0 { + t.Errorf("Expected 0 score, but got %f", score) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Send a bunch of IHAVEs + for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ { + ihavelst := []string{"someid" + strconv.Itoa(i+100)} + ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}} + orpc := rpcWithControl(nil, ihave, nil, nil, nil) + writeMsg(&orpc.RPC) + } + + select { + case <-ctx.Done(): + return + case <-time.After(BlossomSubHeartbeatInterval): + } + + // Should have sent more IWANTs after the heartbeat + iwc = getIWantCount() + if iwc == firstBatchCount { + t.Error("Expecting to receive more IWANTs after heartbeat but did not") + return // cannot call t.Fatalf in a non-test goroutine + } + // Should not be more than the maximum per heartbeat + if iwc-firstBatchCount > BlossomSubMaxIHaveLength { + t.Errorf("Expecting max %d IWANTs per heartbeat but received %d", 
BlossomSubMaxIHaveLength, iwc-firstBatchCount) + return // cannot call t.Fatalf in a non-test goroutine + } + + select { + case <-ctx.Done(): + return + case <-time.After(BlossomSubIWantFollowupTime): + } + + // The score should now be negative because of broken promises + score = ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + if score >= 0 { + t.Errorf("Expected negative score, but got %f", score) + return // cannot call t.Fatalf in a non-test goroutine + } + }() + } + } + + // Record the count of received IWANT messages + if ctl := irpc.GetControl(); ctl != nil { + addIWantCount(len(ctl.GetIwant())) + } + }) + + connect(t, hosts[0], hosts[1]) + + <-ctx.Done() +} + +// Test that when BlossomSub receives GRAFT for an unknown bitmask, it ignores +// the request +func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create legitimate and attacker hosts + hosts := getNetHosts(t, ctx, 2) + legit := hosts[0] + attacker := hosts[1] + + // Set up BlossomSub on the legit host + ps, err := NewBlossomSub(ctx, legit) + if err != nil { + t.Fatal(err) + } + + // Subscribe to mybitmask on the legit host + mybitmask := []byte{0xff, 0x00, 0x00} + _, err = ps.Subscribe(mybitmask) + if err != nil { + t.Fatal(err) + } + + // Checks that we haven't received any PRUNE message + pruneCount := 0 + checkForPrune := func() { + // We send a GRAFT for a non-existent bitmask so we shouldn't + // receive a PRUNE in response + if pruneCount != 0 { + t.Fatalf("Got %d unexpected PRUNE messages", pruneCount) + } + } + + newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the legit host connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the bitmask and grafting to the peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}}, + }) + + // Graft to the peer on a non-existent bitmask + nonExistentBitmask := []byte{0xff, 0x00, 0x00, 0xff, 0xff, 0xff} + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: nonExistentBitmask}}}, + }) + + go func() { + // Wait for a short interval to make sure the legit host + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // We shouldn't get any prune messages becaue the bitmask + // doesn't exist + checkForPrune() + cancel() + }() + } + } + + // Record the count of received PRUNE messages + if ctl := irpc.GetControl(); ctl != nil { + pruneCount += len(ctl.GetPrune()) + } + }) + + connect(t, hosts[0], hosts[1]) + + <-ctx.Done() +} + +// Test that when BlossomSub receives GRAFT for a peer that has been PRUNED, +// it penalizes through P7 and eventually graylists and ignores the requests if the +// GRAFTs are coming too fast +func TestBlossomSubAttackGRAFTDuringBackoff(t *testing.T) { + originalBlossomSubPruneBackoff := BlossomSubPruneBackoff + BlossomSubPruneBackoff = 200 * time.Millisecond + originalBlossomSubGraftFloodThreshold := BlossomSubGraftFloodThreshold + BlossomSubGraftFloodThreshold = 100 * time.Millisecond + defer func() { + BlossomSubPruneBackoff = originalBlossomSubPruneBackoff + BlossomSubGraftFloodThreshold = originalBlossomSubGraftFloodThreshold + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create legitimate and 
attacker hosts + hosts := getNetHosts(t, ctx, 2) + legit := hosts[0] + attacker := hosts[1] + + // Set up BlossomSub on the legit host + ps, err := NewBlossomSub(ctx, legit, + WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + BehaviourPenaltyWeight: -100, + BehaviourPenaltyDecay: ScoreParameterDecay(time.Minute), + DecayInterval: DefaultDecayInterval, + DecayToZero: DefaultDecayToZero, + }, + &PeerScoreThresholds{ + GossipThreshold: -100, + PublishThreshold: -500, + GraylistThreshold: -1000, + })) + if err != nil { + t.Fatal(err) + } + + // Subscribe to mybitmask on the legit host + mybitmask := []byte{0xff, 0x00, 0x00} + _, err = ps.Subscribe(mybitmask) + if err != nil { + t.Fatal(err) + } + + pruneCount := 0 + pruneCountMx := sync.Mutex{} + getPruneCount := func() int { + pruneCountMx.Lock() + defer pruneCountMx.Unlock() + return pruneCount + } + addPruneCount := func(i int) { + pruneCountMx.Lock() + defer pruneCountMx.Unlock() + pruneCount += i + } + + newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the legit host connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the bitmask and grafting to the peer + graft := []*pb.ControlGraft{{Bitmask: sub.Bitmask}} + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}}, + Control: &pb.ControlMessage{Graft: graft}, + }) + + go func() { + defer cancel() + + // Wait for a short interval to make sure the legit host + // received and processed the subscribe + graft + time.Sleep(20 * time.Millisecond) + + // No PRUNE should have been sent at this stage + pc := getPruneCount() + if pc != 0 { + t.Errorf("Expected %d PRUNE messages but got %d", 0, pc) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Send a PRUNE to remove the attacker node from the legit + // host's mesh + var prune []*pb.ControlPrune + prune = append(prune, &pb.ControlPrune{Bitmask: sub.Bitmask}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Prune: prune}, + }) + + select { + case <-ctx.Done(): + return + case <-time.After(20 * time.Millisecond): + } + + // No PRUNE should have been sent at this stage + pc = getPruneCount() + if pc != 0 { + t.Errorf("Expected %d PRUNE messages but got %d", 0, pc) + return // cannot call t.Fatalf in a non-test goroutine + + } + + // wait for the BlossomSubGraftFloodThreshold to pass before attempting another graft + time.Sleep(BlossomSubGraftFloodThreshold + time.Millisecond) + + // Send a GRAFT to attempt to rejoin the mesh + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Graft: graft}, + }) + + select { + case <-ctx.Done(): + return + case <-time.After(20 * time.Millisecond): + } + + // We should have been peanalized by the peer for sending before the backoff has expired + // but should still receive a PRUNE because we haven't dropped below GraylistThreshold + // yet. 
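// Editor's note -- illustrative sketch, not part of the patch. The assertions in
// this test depend on the behaviour penalty growing quadratically: each GRAFT
// sent while the prune backoff is active bumps a per-peer counter (possibly by
// more than one unit when it also violates the flood threshold), and the score
// contribution is weight * counter^2. A simplified stdlib model of that
// accounting, using the test's weight of -100 and graylist threshold of -1000:
package main

import "fmt"

const (
	behaviourPenaltyWeight = -100.0
	graylistThreshold      = -1000.0
)

// behaviourScore returns the (simplified) score contribution of n penalty units.
func behaviourScore(n int) float64 {
	c := float64(n)
	return behaviourPenaltyWeight * c * c
}

func main() {
	for n := 1; n <= 4; n++ {
		s := behaviourScore(n)
		fmt.Printf("penalty units=%d score=%.0f graylisted=%v\n", n, s, s < graylistThreshold)
	}
	// 1 -> -100, 2 -> -400, 3 -> -900, 4 -> -1600 (now below the graylist threshold)
}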
+ pc = getPruneCount() + if pc != 1 { + t.Errorf("Expected %d PRUNE messages but got %d", 1, pc) + return // cannot call t.Fatalf in a non-test goroutine + } + + score1 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + if score1 >= 0 { + t.Errorf("Expected negative score, but got %f", score1) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Send a GRAFT again to attempt to rejoin the mesh + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Graft: graft}, + }) + + select { + case <-ctx.Done(): + return + case <-time.After(20 * time.Millisecond): + } + + // we are before the flood threshold so we should be penalized twice, but still get + // a PRUNE because we are before the flood threshold + pc = getPruneCount() + if pc != 2 { + t.Errorf("Expected %d PRUNE messages but got %d", 2, pc) + return // cannot call t.Fatalf in a non-test goroutine + } + + score2 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + if score2 >= score1 { + t.Errorf("Expected score below %f, but got %f", score1, score2) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Send another GRAFT; this should get us a PRUNE, but penalize us below the graylist threshold + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Graft: graft}, + }) + + select { + case <-ctx.Done(): + return + case <-time.After(20 * time.Millisecond): + } + + pc = getPruneCount() + if pc != 3 { + t.Errorf("Expected %d PRUNE messages but got %d", 3, pc) + return // cannot call t.Fatalf in a non-test goroutine + } + + score3 := ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + if score3 >= score2 { + t.Errorf("Expected score below %f, but got %f", score2, score3) + return // cannot call t.Fatalf in a non-test goroutine + } + if score3 >= -1000 { + t.Errorf("Expected score below %f, but got %f", -1000.0, score3) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Wait for the PRUNE backoff to expire and try again; this time we should fail + // because we are below the graylist threshold, so our RPC should be ignored and + // we should get no PRUNE back + select { + case <-ctx.Done(): + return + case <-time.After(BlossomSubPruneBackoff + time.Millisecond): + } + + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Graft: graft}, + }) + + select { + case <-ctx.Done(): + return + case <-time.After(20 * time.Millisecond): + } + + pc = getPruneCount() + if pc != 3 { + t.Errorf("Expected %d PRUNE messages but got %d", 3, pc) + return // cannot call t.Fatalf in a non-test goroutine + } + + // make sure we are _not_ in the mesh + res := make(chan bool) + ps.eval <- func() { + mesh := ps.rt.(*BlossomSubRouter).mesh[string(mybitmask)] + _, inMesh := mesh[attacker.ID()] + res <- inMesh + } + + inMesh := <-res + if inMesh { + t.Error("Expected to not be in the mesh of the legitimate host") + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + + if ctl := irpc.GetControl(); ctl != nil { + addPruneCount(len(ctl.GetPrune())) + } + }) + + connect(t, hosts[0], hosts[1]) + + <-ctx.Done() +} + +type gsAttackInvalidMsgTracer struct { + rejectCount int +} + +func (t *gsAttackInvalidMsgTracer) Trace(evt *pb.TraceEvent) { + // fmt.Printf(" %s %s\n", evt.Type, evt) + if evt.GetType() == pb.TraceEvent_REJECT_MESSAGE { + t.rejectCount++ + } +} + +// Test that when BlossomSub receives a lot of invalid messages from +// a peer it should graylist the peer +func TestBlossomSubAttackInvalidMessageSpam(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // 
Create legitimate and attacker hosts + hosts := getNetHosts(t, ctx, 2) + legit := hosts[0] + attacker := hosts[1] + + mybitmask := []byte{0xff, 0x00, 0x00} + + // Create parameters with reasonable default values + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + IPColocationFactorWeight: 0, + IPColocationFactorThreshold: 1, + DecayInterval: 5 * time.Second, + DecayToZero: 0.01, + RetainScore: 10 * time.Second, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + params.Bitmasks[string(mybitmask)] = &BitmaskScoreParams{ + BitmaskWeight: 0.25, + TimeInMeshWeight: 0.0027, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 3600, + FirstMessageDeliveriesWeight: 0.664, + FirstMessageDeliveriesDecay: 0.9916, + FirstMessageDeliveriesCap: 1500, + MeshMessageDeliveriesWeight: -0.25, + MeshMessageDeliveriesDecay: 0.97, + MeshMessageDeliveriesCap: 400, + MeshMessageDeliveriesThreshold: 100, + MeshMessageDeliveriesActivation: 30 * time.Second, + MeshMessageDeliveriesWindow: 5 * time.Minute, + MeshFailurePenaltyWeight: -0.25, + MeshFailurePenaltyDecay: 0.997, + InvalidMessageDeliveriesWeight: -99, + InvalidMessageDeliveriesDecay: 0.9994, + } + thresholds := &PeerScoreThresholds{ + GossipThreshold: -100, + PublishThreshold: -200, + GraylistThreshold: -300, + AcceptPXThreshold: 0, + } + + // Set up BlossomSub on the legit host + tracer := &gsAttackInvalidMsgTracer{} + ps, err := NewBlossomSub(ctx, legit, + WithEventTracer(tracer), + WithPeerScore(params, thresholds), + ) + if err != nil { + t.Fatal(err) + } + + attackerScore := func() float64 { + return ps.rt.(*BlossomSubRouter).score.Score(attacker.ID()) + } + + // Subscribe to mybitmask on the legit host + _, err = ps.Subscribe(mybitmask) + if err != nil { + t.Fatal(err) + } + + pruneCount := 0 + pruneCountMx := sync.Mutex{} + getPruneCount := func() int { + pruneCountMx.Lock() + defer pruneCountMx.Unlock() + return pruneCount + } + addPruneCount := func(i int) { + pruneCountMx.Lock() + defer pruneCountMx.Unlock() + pruneCount += i + } + + newMockGS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the legit host connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the bitmask and grafting to the peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Bitmask: sub.Bitmask}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: sub.Bitmask}}}, + }) + + go func() { + defer cancel() + + // Attacker score should start at zero + if attackerScore() != 0 { + t.Errorf("Expected attacker score to be zero but it's %f", attackerScore()) + return // cannot call t.Fatalf in a non-test goroutine + } + + // Send a bunch of messages with no signature (these will + // fail validation and reduce the attacker's score) + for i := 0; i < 100; i++ { + msg := &pb.Message{ + Data: []byte("some data" + strconv.Itoa(i)), + Bitmask: mybitmask, + From: []byte(attacker.ID()), + Seqno: []byte{byte(i + 1)}, + } + writeMsg(&pb.RPC{ + Publish: []*pb.Message{msg}, + }) + } + + // Wait for the initial heartbeat, plus a bit of padding + select { + case <-ctx.Done(): + return + case <-time.After(100*time.Millisecond + BlossomSubHeartbeatInitialDelay): + } + + // The attackers score should now have fallen below zero + if attackerScore() >= 0 { + t.Errorf("Expected attacker score to be less than zero but it's %f", attackerScore()) + return // cannot call t.Fatalf in a non-test goroutine + } + // 
There should be several rejected messages (because the signature was invalid) + if tracer.rejectCount == 0 { + t.Error("Expected message rejection but got none") + return // cannot call t.Fatal in a non-test goroutine + } + // The legit node should have sent a PRUNE message + pc := getPruneCount() + if pc == 0 { + t.Error("Expected attacker node to be PRUNED when score drops low enough") + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + + if ctl := irpc.GetControl(); ctl != nil { + addPruneCount(len(ctl.GetPrune())) + } + }) + + connect(t, hosts[0], hosts[1]) + + <-ctx.Done() +} + +type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC) + +func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) { + // Listen on the BlossomSub protocol + const BlossomSubID = protocol.ID("/meshsub/1.0.0") + const maxMessageSize = 1024 * 1024 + attacker.SetStreamHandler(BlossomSubID, func(stream network.Stream) { + // When an incoming stream is opened, set up an outgoing stream + p := stream.Conn().RemotePeer() + ostream, err := attacker.NewStream(ctx, p, BlossomSubID) + if err != nil { + t.Fatal(err) + } + + r := protoio.NewDelimitedReader(stream, maxMessageSize) + w := protoio.NewDelimitedWriter(ostream) + + var irpc pb.RPC + + writeMsg := func(rpc *pb.RPC) { + if err = w.WriteMsg(rpc); err != nil { + t.Fatalf("error writing RPC: %s", err) + } + } + + // Keep reading messages and responding + for { + // Bail out when the test finishes + if ctx.Err() != nil { + return + } + + irpc.Reset() + + err := r.ReadMsg(&irpc) + + // Bail out when the test finishes + if ctx.Err() != nil { + return + } + + if err != nil { + t.Fatal(err) + } + + onReadMsg(writeMsg, &irpc) + } + }) +} diff --git a/go-libp2p-blossomsub/blossomsub_test.go b/go-libp2p-blossomsub/blossomsub_test.go new file mode 100644 index 0000000..e8dc5c3 --- /dev/null +++ b/go-libp2p-blossomsub/blossomsub_test.go @@ -0,0 +1,2502 @@ +package blossomsub + +import ( + "bytes" + "context" + "fmt" + "io" + "math/rand" + "sync" + "sync/atomic" + "testing" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/record" + + bhost "github.com/libp2p/go-libp2p/p2p/host/blank" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + + "github.com/libp2p/go-msgio/protoio" +) + +func getBlossomSub(ctx context.Context, h host.Host, opts ...Option) *PubSub { + ps, err := NewBlossomSub(ctx, h, opts...) 
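// Editor's note -- illustrative sketch, not part of the patch. The getBlossomSub
// helpers here wrap NewBlossomSub for the tests; outside of tests, a minimal
// end-to-end use of the same API looks roughly like this. The blossomsub import
// path is assumed from the pb import above, and error handling is abbreviated.
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	// Start a libp2p host with default options.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// Attach a BlossomSub router to the host.
	ps, err := blossomsub.NewBlossomSub(ctx, h)
	if err != nil {
		panic(err)
	}

	// Bitmasks play the role gossipsub gives to string topics.
	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
	sub, err := ps.Subscribe(bitmask)
	if err != nil {
		panic(err)
	}

	// Publishing delivers to local subscribers as well as to mesh peers.
	if err := ps.Publish(bitmask, []byte("hello blossom")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received: %s\n", msg.Data)
}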
+ if err != nil { + panic(err) + } + return ps +} + +func getBlossomSubs(ctx context.Context, hs []host.Host, opts ...Option) []*PubSub { + var psubs []*PubSub + for _, h := range hs { + psubs = append(psubs, getBlossomSub(ctx, h, opts...)) + } + return psubs +} + +func TestSparseBlossomSub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + sparseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestDenseBlossomSub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubFanout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs[1:] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } + + // subscribe the owner + subch, err := psubs[0].Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, subch) + + // wait for a heartbeat + time.Sleep(time.Second * 1) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubFanoutMaintenance(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, 
ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs[1:] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } + + // unsubscribe all peers to exercise fanout maintenance + for _, sub := range msgs { + sub.Cancel() + } + msgs = nil + + // wait for heartbeats + time.Sleep(time.Second * 2) + + // resubscribe and repeat + for _, ps := range psubs[1:] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubFanoutExpiry(t *testing.T) { + BlossomSubFanoutTTL = 1 * time.Second + defer func() { + BlossomSubFanoutTTL = 60 * time.Second + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 10) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs[1:] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 5; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } + + psubs[0].eval <- func() { + if len(psubs[0].rt.(*BlossomSubRouter).fanout) == 0 { + t.Fatal("owner has no fanout") + } + } + + // wait for TTL to expire fanout peers in owner + time.Sleep(time.Second * 2) + + psubs[0].eval <- func() { + if len(psubs[0].rt.(*BlossomSubRouter).fanout) > 0 { + t.Fatal("fanout hasn't expired") + } + } + + // wait for it to run in the event loop + time.Sleep(10 * time.Millisecond) +} + +func TestBlossomSubGossip(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := 
sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + + // wait a bit to have some gossip interleaved + time.Sleep(time.Millisecond * 100) + } + + // and wait for some gossip flushing + time.Sleep(time.Second * 2) +} + +func TestBlossomSubGossipPiggyback(t *testing.T) { + t.Skip("test no longer relevant; gossip propagation has become eager") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + var xmsgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xba, 0x2c, 0x12, 0x08}) + if err != nil { + t.Fatal(err) + } + + xmsgs = append(xmsgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + psubs[owner].Publish([]byte{0xba, 0x2c, 0x12, 0x08}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + + for _, sub := range xmsgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + + // wait a bit to have some gossip interleaved + time.Sleep(time.Millisecond * 100) + } + + // and wait for some gossip flushing + time.Sleep(time.Second * 2) +} + +func TestBlossomSubGossipPropagation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts) + + hosts1 := hosts[:BlossomSubD+1] + hosts2 := append(hosts[BlossomSubD+1:], hosts[0]) + + denseConnect(t, hosts1) + denseConnect(t, hosts2) + + var msgs1 []*Subscription + for _, ps := range psubs[1 : BlossomSubD+1] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs1 = append(msgs1, subch) + } + + time.Sleep(time.Second * 1) + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 0 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs1 { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } + + time.Sleep(time.Millisecond * 100) + + var msgs2 []*Subscription + for _, ps := range psubs[BlossomSubD+1:] { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs2 = append(msgs2, subch) + } + + var collect [][]byte + for i := 0; i < 10; i++ { + for _, sub := range msgs2 { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + collect = append(collect, got.Data) + } + } + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + gotit := false + for j := 0; j < len(collect); j++ { + if bytes.Equal(msg, collect[j]) { + gotit = true + break + } + } + if !gotit { + t.Fatalf("Didn't get message %s", string(msg)) + } + } +} + +func TestBlossomSubPrune(t *testing.T) { 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + // disconnect some peers from the mesh to get some PRUNEs + for _, sub := range msgs[:5] { + sub.Cancel() + } + + // wait a bit to take effect + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs[5:] { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubPruneBackoffTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 10) + + // App specific score that we'll change later. + currentScoreForHost0 := int32(0) + + params := DefaultBlossomSubParams() + params.HeartbeatInitialDelay = time.Millisecond * 10 + params.HeartbeatInterval = time.Millisecond * 100 + + psubs := getBlossomSubs(ctx, hosts, WithBlossomSubParams(params), WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(p peer.ID) float64 { + if p == hosts[0].ID() { + return float64(atomic.LoadInt32(¤tScoreForHost0)) + } else { + return 0 + } + }, + AppSpecificWeight: 1, + DecayInterval: time.Second, + DecayToZero: 0.01, + }, + &PeerScoreThresholds{ + GossipThreshold: -1, + PublishThreshold: -1, + GraylistThreshold: -1, + })) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + connectAll(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second) + + pruneTime := time.Now() + // Flip the score. Host 0 should be pruned from everyone + atomic.StoreInt32(¤tScoreForHost0, -1000) + + // wait for heartbeats to run and prune + time.Sleep(time.Second) + + wg := sync.WaitGroup{} + var missingBackoffs uint32 = 0 + for i := 1; i < 10; i++ { + wg.Add(1) + // Copy i so this func keeps the correct value in the closure. + var idx = i + // Run this check in the eval thunk so that we don't step over the heartbeat goroutine and trigger a race. + psubs[idx].rt.(*BlossomSubRouter).p.eval <- func() { + defer wg.Done() + backoff, ok := psubs[idx].rt.(*BlossomSubRouter).backoff[string([]byte{0xf0, 0x0b, 0xa1, 0x20})][hosts[0].ID()] + if !ok { + atomic.AddUint32(&missingBackoffs, 1) + } + if ok && backoff.Sub(pruneTime)-params.PruneBackoff > time.Second { + t.Errorf("backoff time should be equal to prune backoff (with some slack) was %v", backoff.Sub(pruneTime)-params.PruneBackoff) + } + } + } + wg.Wait() + + // Sometimes not all the peers will have updated their backoffs by this point. If the majority haven't we'll fail this test. + if missingBackoffs >= 5 { + t.Errorf("missing too many backoffs: %v", missingBackoffs) + } + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + // Don't publish from host 0, since everyone should have pruned it. 
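// Editor's note -- illustrative sketch, not part of the patch. The eval thunk
// above reads backoff[string(bitmask)][peerID]: prune backoff is a two-level map
// from bitmask key and peer to an expiry time. A stdlib-only model of that
// bookkeeping (type and method names hypothetical):
package main

import (
	"fmt"
	"time"
)

type backoffTracker struct {
	m map[string]map[string]time.Time // bitmask key -> peer -> expiry
}

func newBackoffTracker() *backoffTracker {
	return &backoffTracker{m: make(map[string]map[string]time.Time)}
}

// add records that peer may not be re-grafted on bitmask for duration d.
func (b *backoffTracker) add(bitmask []byte, peer string, d time.Duration) {
	k := string(bitmask)
	if b.m[k] == nil {
		b.m[k] = make(map[string]time.Time)
	}
	b.m[k][peer] = time.Now().Add(d)
}

// active reports whether the backoff for (bitmask, peer) is still in force.
func (b *backoffTracker) active(bitmask []byte, peer string) bool {
	expiry, ok := b.m[string(bitmask)][peer]
	return ok && time.Now().Before(expiry)
}

func main() {
	bt := newBackoffTracker()
	bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
	bt.add(bitmask, "host0", time.Minute)    // pruned peer: no re-graft until expiry
	fmt.Println(bt.active(bitmask, "host0")) // true
	fmt.Println(bt.active(bitmask, "host1")) // false
}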
+ owner := rand.Intn(len(psubs)-1) + 1 + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs[1:] { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubGraft(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + sparseConnect(t, hosts) + + time.Sleep(time.Second * 1) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + + // wait for announce to propagate + time.Sleep(time.Millisecond * 100) + } + + time.Sleep(time.Second * 1) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubRemovePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getBlossomSubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + denseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + // disconnect some peers to exercise RemovePeer paths + for _, host := range hosts[:5] { + host.Close() + } + + // wait a heartbeat + time.Sleep(time.Second * 1) + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := 5 + rand.Intn(len(psubs)-5) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs[5:] { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubGraftPruneRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + psubs := getBlossomSubs(ctx, hosts) + denseConnect(t, hosts) + + var bitmasks [][]byte + var msgs [][]*Subscription + for i := 0; i < 35; i++ { + bitmask := []byte{byte(i)} + bitmasks = append(bitmasks, bitmask) + + var subs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + subs = append(subs, subch) + } + msgs = append(msgs, subs) + } + + // wait for heartbeats to build meshes + time.Sleep(time.Second * 5) + + for i, bitmask := range bitmasks { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish(bitmask, msg) + + for _, sub := range msgs[i] { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubControlPiggyback(t *testing.T) { + t.Skip("travis regularly fails on this test") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + psubs := getBlossomSubs(ctx, hosts) + denseConnect(t, hosts) + + 
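// Editor's note -- illustrative sketch, not part of the patch. The control
// piggybacking that TestBlossomSubControlPiggyback exercises amounts to folding
// any pending GRAFT/PRUNE for a peer into the next RPC already headed its way,
// instead of sending a separate control packet. Plain structs stand in for the
// protobuf types here.
package main

import "fmt"

type controlMsg struct {
	graft []string // bitmask keys to graft
	prune []string // bitmask keys to prune
}

type rpc struct {
	payload string
	control *controlMsg
}

// piggyback merges the pending control state for peer into out and clears it.
func piggyback(out *rpc, pending map[string]*controlMsg, peer string) {
	ctl, ok := pending[peer]
	if !ok {
		return
	}
	delete(pending, peer)
	if out.control == nil {
		out.control = &controlMsg{}
	}
	out.control.graft = append(out.control.graft, ctl.graft...)
	out.control.prune = append(out.control.prune, ctl.prune...)
}

func main() {
	pending := map[string]*controlMsg{
		"peerA": {graft: []string{"f00ba120"}},
	}
	out := &rpc{payload: "gossip"}
	piggyback(out, pending, "peerA")
	fmt.Println(out.payload, out.control.graft, out.control.prune, len(pending))
}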
for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xff, 0xff, 0xff, 0xff}) + if err != nil { + t.Fatal(err) + } + go func(sub *Subscription) { + for { + _, err := sub.Next(ctx) + if err != nil { + break + } + } + }(subch) + } + + time.Sleep(time.Second * 1) + + // create a background flood of messages that overloads the queues + done := make(chan struct{}) + go func() { + owner := rand.Intn(len(psubs)) + for i := 0; i < 10000; i++ { + msg := []byte("background flooooood") + psubs[owner].Publish([]byte{0xff, 0xff, 0xff, 0xff}, msg) + } + done <- struct{}{} + }() + + time.Sleep(time.Millisecond * 20) + + // and subscribe to a bunch of bitmasks in the meantime -- this should + // result in some dropped control messages, with subsequent piggybacking + // in the background flood + var bitmasks [][]byte + var msgs [][]*Subscription + for i := 0; i < 5; i++ { + bitmask := []byte{byte(i)} + bitmasks = append(bitmasks, bitmask) + + var subs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + subs = append(subs, subch) + } + msgs = append(msgs, subs) + } + + // wait for the flood to stop + <-done + + // and test that we have functional overlays + for i, bitmask := range bitmasks { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish(bitmask, msg) + + for _, sub := range msgs[i] { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestMixedBlossomSub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 30) + + gsubs := getBlossomSubs(ctx, hosts[:20]) + fsubs := getPubsubs(ctx, hosts[20:]) + psubs := append(gsubs, fsubs...) 
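// Editor's note -- illustrative sketch, not part of the patch. TestMixedBlossomSub
// works because floodsub and BlossomSub peers agree on the wire format and differ
// only in forwarding policy: floodsub forwards to every subscribed peer, while
// BlossomSub forwards to its mesh for the bitmask. A toy comparison of the two
// fanouts:
package main

import "fmt"

// floodTargets counts forwarding targets under flood routing: every subscribed peer.
func floodTargets(subscribed map[string]bool) int {
	return len(subscribed)
}

// meshTargets counts forwarding targets under mesh routing: subscribed peers in the mesh.
func meshTargets(subscribed, mesh map[string]bool) int {
	n := 0
	for p := range mesh {
		if subscribed[p] {
			n++
		}
	}
	return n
}

func main() {
	subscribed := map[string]bool{"A": true, "B": true, "C": true, "D": true}
	mesh := map[string]bool{"B": true, "D": true}
	fmt.Println("floodsub fanout:", floodTargets(subscribed))             // 4
	fmt.Println("blossomsub mesh fanout:", meshTargets(subscribed, mesh)) // 2
}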
+ + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x2b}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + sparseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x2b}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubMultihops(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 6) + + psubs := getBlossomSubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + connect(t, hosts[2], hosts[3]) + connect(t, hosts[3], hosts[4]) + connect(t, hosts[4], hosts[5]) + + var subs []*Subscription + for i := 1; i < 6; i++ { + ch, err := psubs[i].Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x2b}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, ch) + } + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + msg := []byte("i like cats") + err := psubs[0].Publish([]byte{0xf0, 0x0b, 0xa1, 0x2b}, msg) + if err != nil { + t.Fatal(err) + } + + // last node in the chain should get the message + select { + case out := <-subs[4].ch: + if !bytes.Equal(out.GetData(), msg) { + t.Fatal("got wrong data") + } + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting for message") + } +} + +func TestBlossomSubTreeTopology(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + psubs := getBlossomSubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + connect(t, hosts[1], hosts[4]) + connect(t, hosts[2], hosts[3]) + connect(t, hosts[0], hosts[5]) + connect(t, hosts[5], hosts[6]) + connect(t, hosts[5], hosts[8]) + connect(t, hosts[6], hosts[7]) + connect(t, hosts[8], hosts[9]) + + /* + [0] -> [1] -> [2] -> [3] + | L->[4] + v + [5] -> [6] -> [7] + | + v + [8] -> [9] + */ + + var chs []*Subscription + for _, ps := range psubs { + ch, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + chs = append(chs, ch) + } + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + assertPeerLists(t, hosts, psubs[0], 1, 5) + assertPeerLists(t, hosts, psubs[1], 0, 2, 4) + assertPeerLists(t, hosts, psubs[2], 1, 3) + + checkMessageRouting(t, []byte{0xf1, 0x22, 0xb0, 0x22}, []*PubSub{psubs[9], psubs[3]}, chs) +} + +// this tests overlay bootstrapping through px in BlossomSub v1.1 +// we start with a star topology and rely on px through prune to build the mesh +func TestBlossomSubStarTopology(t *testing.T) { + originalBlossomSubD := BlossomSubD + BlossomSubD = 4 + originalBlossomSubDhi := BlossomSubDhi + BlossomSubDhi = BlossomSubD + 1 + originalBlossomSubDlo := BlossomSubDlo + BlossomSubDlo = BlossomSubD - 1 + originalBlossomSubDscore := BlossomSubDscore + BlossomSubDscore = BlossomSubDlo + defer func() { + BlossomSubD = originalBlossomSubD + BlossomSubDhi = originalBlossomSubDhi + BlossomSubDlo = originalBlossomSubDlo + BlossomSubDscore = originalBlossomSubDscore + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := 
getBlossomSubs(ctx, hosts, WithPeerExchange(true), WithFloodPublish(true)) + + // configure the center of the star with a very low D + psubs[0].eval <- func() { + gs := psubs[0].rt.(*BlossomSubRouter) + gs.params.D = 0 + gs.params.Dlo = 0 + gs.params.Dhi = 0 + gs.params.Dscore = 0 + } + + // add all peer addresses to the peerstores + // this is necessary because we can't have signed address records witout identify + // pushing them + for i := range hosts { + for j := range hosts { + if i == j { + continue + } + hosts[i].Peerstore().AddAddrs(hosts[j].ID(), hosts[j].Addrs(), peerstore.PermanentAddrTTL) + } + } + + // build the star + for i := 1; i < 20; i++ { + connect(t, hosts[0], hosts[i]) + } + + time.Sleep(time.Second) + + // build the mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + // wait a bit for the mesh to build + time.Sleep(10 * time.Second) + + // check that all peers have > 1 connection + for i, h := range hosts { + if len(h.Network().Conns()) == 1 { + t.Errorf("peer %d has ony a single connection", i) + } + } + + // send a message from each peer and assert it was propagated + for i := 0; i < 20; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } +} + +// this tests overlay bootstrapping through px in BlossomSub v1.1, with addresses +// exchanged in signed peer records. +// we start with a star topology and rely on px through prune to build the mesh +func TestBlossomSubStarTopologyWithSignedPeerRecords(t *testing.T) { + originalBlossomSubD := BlossomSubD + BlossomSubD = 4 + originalBlossomSubDhi := BlossomSubDhi + BlossomSubDhi = BlossomSubD + 1 + originalBlossomSubDlo := BlossomSubDlo + BlossomSubDlo = BlossomSubD - 1 + originalBlossomSubDscore := BlossomSubDscore + BlossomSubDscore = BlossomSubDlo + defer func() { + BlossomSubD = originalBlossomSubD + BlossomSubDhi = originalBlossomSubDhi + BlossomSubDlo = originalBlossomSubDlo + BlossomSubDscore = originalBlossomSubDscore + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts, WithPeerExchange(true), WithFloodPublish(true)) + + // configure the center of the star with a very low D + psubs[0].eval <- func() { + gs := psubs[0].rt.(*BlossomSubRouter) + gs.params.D = 0 + gs.params.Dlo = 0 + gs.params.Dhi = 0 + gs.params.Dscore = 0 + } + + // manually create signed peer records for each host and add them to the + // peerstore of the center of the star, which is doing the bootstrapping + for i := range hosts[1:] { + privKey := hosts[i].Peerstore().PrivKey(hosts[i].ID()) + if privKey == nil { + t.Fatalf("unable to get private key for host %s", hosts[i].ID().Pretty()) + } + ai := host.InfoFromHost(hosts[i]) + rec := peer.PeerRecordFromAddrInfo(*ai) + signedRec, err := record.Seal(rec, privKey) + if err != nil { + t.Fatalf("error creating signed peer record: %s", err) + } + + cab, ok := peerstore.GetCertifiedAddrBook(hosts[0].Peerstore()) + if !ok { + t.Fatal("peerstore does not implement CertifiedAddrBook") + } + _, err = cab.ConsumePeerRecord(signedRec, peerstore.PermanentAddrTTL) + if err != nil { + t.Fatalf("error adding signed peer record: %s", err) + } + } + + // build the star + for i := 1; i < 20; i++ { + connect(t, hosts[0], hosts[i]) + } + + 
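+ // from here the flow mirrors TestBlossomSubStarTopology: the leaf peers can
+ // only learn about each other through PX on PRUNE, with addresses carried in
+ // the signed peer records seeded above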
time.Sleep(time.Second) + + // build the mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + // wait a bit for the mesh to build + time.Sleep(10 * time.Second) + + // check that all peers have > 1 connection + for i, h := range hosts { + if len(h.Network().Conns()) == 1 { + t.Errorf("peer %d has ony a single connection", i) + } + } + + // send a message from each peer and assert it was propagated + for i := 0; i < 20; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } +} + +func TestBlossomSubDirectPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 3) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0], WithDirectConnectTicks(2)), + getBlossomSub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}}), WithDirectConnectTicks(2)), + getBlossomSub(ctx, h[2], WithDirectPeers([]peer.AddrInfo{{ID: h[1].ID(), Addrs: h[1].Addrs()}}), WithDirectConnectTicks(2)), + } + + connect(t, h[0], h[1]) + connect(t, h[0], h[2]) + + // verify that the direct peers connected + time.Sleep(2 * time.Second) + if len(h[1].Network().ConnsToPeer(h[2].ID())) == 0 { + t.Fatal("expected a connection between direct peers") + } + + // build the mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + // publish some messages + for i := 0; i < 3; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } + + // disconnect the direct peers to test reconnection + for _, c := range h[1].Network().ConnsToPeer(h[2].ID()) { + c.Close() + } + + time.Sleep(5 * time.Second) + + if len(h[1].Network().ConnsToPeer(h[2].ID())) == 0 { + t.Fatal("expected a connection between direct peers") + } + + // publish some messages + for i := 0; i < 3; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } +} + +func TestBlossomSubPeerFilter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 3) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0], WithPeerFilter(func(pid peer.ID, bitmask []byte) bool { + return pid == h[1].ID() + })), + getBlossomSub(ctx, h[1], WithPeerFilter(func(pid peer.ID, bitmask []byte) bool { + return pid == h[0].ID() + })), + getBlossomSub(ctx, h[2]), + } + + connect(t, h[0], h[1]) + connect(t, h[0], h[2]) + + // Join all peers + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + msg := []byte("message") + + psubs[0].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + assertReceive(t, subs[1], msg) + assertNeverReceives(t, subs[2], time.Second) + + psubs[1].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + assertReceive(t, subs[0], msg) + assertNeverReceives(t, subs[2], time.Second) +} + +func TestBlossomSubDirectPeersFanout(t *testing.T) { + // regression 
test for #371 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 3) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0]), + getBlossomSub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}})), + getBlossomSub(ctx, h[2], WithDirectPeers([]peer.AddrInfo{{ID: h[1].ID(), Addrs: h[1].Addrs()}})), + } + + connect(t, h[0], h[1]) + connect(t, h[0], h[2]) + + // Join all peers except h2 + var subs []*Subscription + for _, ps := range psubs[:2] { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + // h2 publishes some messages to build a fanout + for i := 0; i < 3; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[2].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } + + // verify that h0 is in the fanout of h2, but not h1 who is a direct peer + result := make(chan bool, 2) + psubs[2].eval <- func() { + rt := psubs[2].rt.(*BlossomSubRouter) + fanout := rt.fanout[string([]byte{0xf1, 0x22, 0xb0, 0x22})] + _, ok := fanout[h[0].ID()] + result <- ok + _, ok = fanout[h[1].ID()] + result <- ok + } + + inFanout := <-result + if !inFanout { + t.Fatal("expected peer 0 to be in fanout") + } + + inFanout = <-result + if inFanout { + t.Fatal("expected peer 1 to not be in fanout") + } + + // now subscribe h2 too and verify tht h0 is in the mesh but not h1 + _, err := psubs[2].Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(2 * time.Second) + + psubs[2].eval <- func() { + rt := psubs[2].rt.(*BlossomSubRouter) + mesh := rt.mesh[string([]byte{0xf1, 0x22, 0xb0, 0x22})] + _, ok := mesh[h[0].ID()] + result <- ok + _, ok = mesh[h[1].ID()] + result <- ok + } + + inMesh := <-result + if !inMesh { + t.Fatal("expected peer 0 to be in mesh") + } + + inMesh = <-result + if inMesh { + t.Fatal("expected peer 1 to not be in mesh") + } +} + +func TestBlossomSubFloodPublish(t *testing.T) { + // uses a star topology without PX and publishes from the star to verify that all + // messages get received + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts, WithFloodPublish(true)) + + // build the star + for i := 1; i < 20; i++ { + connect(t, hosts[0], hosts[i]) + } + + // build the (partial, unstable) mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + // send a message from the star and assert it was received + for i := 0; i < 20; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[0].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } +} + +func TestBlossomSubEnoughPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts) + + for _, ps := range psubs { + _, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + } + + // at this point we have no connections and no mesh, so EnoughPeers should return false + res := make(chan bool, 1) + psubs[0].eval <- func() { + res <- psubs[0].rt.EnoughPeers([]byte{0xf1, 0x22, 0xb0, 0x22}, 0) + } + enough := <-res + if enough { + 
t.Fatal("should not have enough peers") + } + + // connect them densly to build up the mesh + denseConnect(t, hosts) + + time.Sleep(3 * time.Second) + + psubs[0].eval <- func() { + res <- psubs[0].rt.EnoughPeers([]byte{0xf1, 0x22, 0xb0, 0x22}, 0) + } + enough = <-res + if !enough { + t.Fatal("should have enough peers") + } +} + +func TestBlossomSubCustomParams(t *testing.T) { + // in this test we score sinkhole a peer to exercise code paths relative to negative scores + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + params := DefaultBlossomSubParams() + + wantedFollowTime := 1 * time.Second + params.IWantFollowupTime = wantedFollowTime + + customGossipFactor := 0.12 + params.GossipFactor = customGossipFactor + + wantedMaxPendingConns := 23 + params.MaxPendingConnections = wantedMaxPendingConns + hosts := getNetHosts(t, ctx, 1) + psubs := getBlossomSubs(ctx, hosts, + WithBlossomSubParams(params)) + + if len(psubs) != 1 { + t.Fatalf("incorrect number of pusbub objects received: wanted %d but got %d", 1, len(psubs)) + } + + rt, ok := psubs[0].rt.(*BlossomSubRouter) + if !ok { + t.Fatal("Did not get gossip sub router from pub sub object") + } + + if rt.params.IWantFollowupTime != wantedFollowTime { + t.Errorf("Wanted %d of param BlossomSubIWantFollowupTime but got %d", wantedFollowTime, rt.params.IWantFollowupTime) + } + if rt.params.GossipFactor != customGossipFactor { + t.Errorf("Wanted %f of param BlossomSubGossipFactor but got %f", customGossipFactor, rt.params.GossipFactor) + } + if rt.params.MaxPendingConnections != wantedMaxPendingConns { + t.Errorf("Wanted %d of param BlossomSubMaxPendingConnections but got %d", wantedMaxPendingConns, rt.params.MaxPendingConnections) + } +} + +func TestBlossomSubNegativeScore(t *testing.T) { + // in this test we score sinkhole a peer to exercise code paths relative to negative scores + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts, + WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(p peer.ID) float64 { + if p == hosts[0].ID() { + return -1000 + } else { + return 0 + } + }, + AppSpecificWeight: 1, + DecayInterval: time.Second, + DecayToZero: 0.01, + }, + &PeerScoreThresholds{ + GossipThreshold: -10, + PublishThreshold: -100, + GraylistThreshold: -10000, + })) + + denseConnect(t, hosts) + + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(3 * time.Second) + + for i := 0; i < 20; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i%20].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + time.Sleep(20 * time.Millisecond) + } + + // let the sinkholed peer try to emit gossip as well + time.Sleep(2 * time.Second) + + // checks: + // 1. peer 0 should only receive its own message + // 2. 
peers 1-20 should not receive a message from peer 0, because it's not part of the mesh + // and its gossip is rejected + collectAll := func(sub *Subscription) []*Message { + var res []*Message + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + for { + msg, err := sub.Next(ctx) + if err != nil { + break + } + + res = append(res, msg) + } + + return res + } + + count := len(collectAll(subs[0])) + if count != 1 { + t.Fatalf("expected 1 message but got %d instead", count) + } + + for _, sub := range subs[1:] { + all := collectAll(sub) + for _, m := range all { + if m.ReceivedFrom == hosts[0].ID() { + t.Fatal("received message from sinkholed peer") + } + } + } +} + +func TestBlossomSubScoreValidatorEx(t *testing.T) { + // this is a test that of the two message drop responses from a validator + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 3) + psubs := getBlossomSubs(ctx, hosts, + WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(p peer.ID) float64 { return 0 }, + DecayInterval: time.Second, + DecayToZero: 0.01, + Bitmasks: map[string]*BitmaskScoreParams{ + string([]byte{0xf1, 0x22, 0xb0, 0x22}): { + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.9999, + }, + }, + }, + &PeerScoreThresholds{ + GossipThreshold: -10, + PublishThreshold: -100, + GraylistThreshold: -10000, + })) + + connectAll(t, hosts) + + err := psubs[0].RegisterBitmaskValidator([]byte{0xf1, 0x22, 0xb0, 0x22}, func(ctx context.Context, p peer.ID, msg *Message) ValidationResult { + // we ignore host1 and reject host2 + if p == hosts[1].ID() { + return ValidationIgnore + } + if p == hosts[2].ID() { + return ValidationReject + } + + return ValidationAccept + }) + if err != nil { + t.Fatal(err) + } + + sub, err := psubs[0].Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(100 * time.Millisecond) + + expectNoMessage := func(sub *Subscription) { + ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + m, err := sub.Next(ctx) + if err == nil { + t.Fatal("expected no message, but got ", string(m.Data)) + } + } + + psubs[1].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, []byte("i am not a walrus")) + psubs[2].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, []byte("i am not a walrus either")) + + // assert no messages + expectNoMessage(sub) + + // assert that peer1's score is still 0 (its message was ignored) while peer2 should have + // a negative score (its message got rejected) + res := make(chan float64, 1) + psubs[0].eval <- func() { + res <- psubs[0].rt.(*BlossomSubRouter).score.Score(hosts[1].ID()) + } + score := <-res + if score != 0 { + t.Fatalf("expected 0 score for peer1, but got %f", score) + } + + psubs[0].eval <- func() { + res <- psubs[0].rt.(*BlossomSubRouter).score.Score(hosts[2].ID()) + } + score = <-res + if score >= 0 { + t.Fatalf("expected negative score for peer2, but got %f", score) + } +} + +func TestBlossomSubPiggybackControl(t *testing.T) { + // this is a direct test of the piggybackControl function as we can't reliably + // trigger it on travis + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := bhost.NewBlankHost(swarmt.GenSwarm(t)) + defer h.Close() + ps := getBlossomSub(ctx, h) + + blah := peer.ID("bogotr0n") + + res := make(chan *RPC, 1) + ps.eval <- func() { + gs := ps.rt.(*BlossomSubRouter) + test1 := []byte{0xff, 0x00, 0x00, 
0x00} + test2 := []byte{0x00, 0xff, 0x00, 0x00} + test3 := []byte{0x00, 0x00, 0xff, 0x00} + gs.mesh[string(test1)] = make(map[peer.ID]struct{}) + gs.mesh[string(test2)] = make(map[peer.ID]struct{}) + gs.mesh[string(test1)][blah] = struct{}{} + + rpc := &RPC{RPC: pb.RPC{}} + gs.piggybackControl(blah, rpc, &pb.ControlMessage{ + Graft: []*pb.ControlGraft{{Bitmask: test1}, {Bitmask: test2}, {Bitmask: test3}}, + Prune: []*pb.ControlPrune{{Bitmask: test1}, {Bitmask: test2}, {Bitmask: test3}}, + }) + res <- rpc + } + + rpc := <-res + if rpc.Control == nil { + t.Fatal("expected non-nil control message") + } + if len(rpc.Control.Graft) != 1 { + t.Fatal("expected 1 GRAFT") + } + if !bytes.Equal(rpc.Control.Graft[0].GetBitmask(), []byte{0xff, 0x00, 0x00, 0x00}) { + t.Fatal("expected test1 as graft bitmask ID") + } + if len(rpc.Control.Prune) != 2 { + t.Fatal("expected 2 PRUNEs") + } + if !bytes.Equal(rpc.Control.Prune[0].GetBitmask(), []byte{0x00, 0xff, 0x00, 0x00}) { + t.Fatal("expected test2 as prune bitmask ID") + } + if !bytes.Equal(rpc.Control.Prune[1].GetBitmask(), []byte{0x00, 0x00, 0xff, 0x00}) { + t.Fatal("expected test3 as prune bitmask ID") + } +} + +func TestBlossomSubMultipleGraftBitmasks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getBlossomSubs(ctx, hosts) + sparseConnect(t, hosts) + + time.Sleep(time.Second * 1) + + firstBitmask := []byte{0xff, 0x00, 0x00, 0x00} + secondBitmask := []byte{0x00, 0xff, 0x00, 0x00} + thirdBitmask := []byte{0x00, 0x00, 0xff, 0x00} + + firstPeer := hosts[0].ID() + secondPeer := hosts[1].ID() + + p2Sub := psubs[1] + p1Router := psubs[0].rt.(*BlossomSubRouter) + p2Router := psubs[1].rt.(*BlossomSubRouter) + + finChan := make(chan struct{}) + + p2Sub.eval <- func() { + // Add bitmasks to second peer + p2Router.mesh[string(firstBitmask)] = map[peer.ID]struct{}{} + p2Router.mesh[string(secondBitmask)] = map[peer.ID]struct{}{} + p2Router.mesh[string(thirdBitmask)] = map[peer.ID]struct{}{} + + finChan <- struct{}{} + } + <-finChan + + // Send multiple GRAFT messages to second peer from + // 1st peer + p1Router.sendGraftPrune(map[peer.ID][][]byte{ + secondPeer: {firstBitmask, secondBitmask, thirdBitmask}, + }, map[peer.ID][][]byte{}, map[peer.ID]bool{}) + + time.Sleep(time.Second * 1) + + p2Sub.eval <- func() { + if _, ok := p2Router.mesh[string(firstBitmask)][firstPeer]; !ok { + t.Errorf("First peer wasnt added to mesh of the second peer for the bitmask %s", firstBitmask) + } + if _, ok := p2Router.mesh[string(secondBitmask)][firstPeer]; !ok { + t.Errorf("First peer wasnt added to mesh of the second peer for the bitmask %s", secondBitmask) + } + if _, ok := p2Router.mesh[string(thirdBitmask)][firstPeer]; !ok { + t.Errorf("First peer wasnt added to mesh of the second peer for the bitmask %s", thirdBitmask) + } + finChan <- struct{}{} + } + <-finChan +} + +func TestBlossomSubOpportunisticGrafting(t *testing.T) { + originalBlossomSubPruneBackoff := BlossomSubPruneBackoff + BlossomSubPruneBackoff = 500 * time.Millisecond + originalBlossomSubGraftFloodThreshold := BlossomSubGraftFloodThreshold + BlossomSubGraftFloodThreshold = 100 * time.Millisecond + originalBlossomSubOpportunisticGraftTicks := BlossomSubOpportunisticGraftTicks + BlossomSubOpportunisticGraftTicks = 2 + defer func() { + BlossomSubPruneBackoff = originalBlossomSubPruneBackoff + BlossomSubGraftFloodThreshold = originalBlossomSubGraftFloodThreshold + BlossomSubOpportunisticGraftTicks = 
originalBlossomSubOpportunisticGraftTicks + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 50) + // pubsubs for the first 10 hosts + psubs := getBlossomSubs(ctx, hosts[:10], + WithFloodPublish(true), + WithPeerScore( + &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + AppSpecificWeight: 0, + DecayInterval: time.Second, + DecayToZero: 0.01, + Bitmasks: map[string]*BitmaskScoreParams{ + string([]byte{0xf1, 0x22, 0xb0, 0x22}): { + BitmaskWeight: 1, + TimeInMeshWeight: 0.0002777, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 3600, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.9997, + FirstMessageDeliveriesCap: 100, + InvalidMessageDeliveriesDecay: 0.99997, + }, + }, + }, + &PeerScoreThresholds{ + GossipThreshold: -10, + PublishThreshold: -100, + GraylistThreshold: -10000, + OpportunisticGraftThreshold: 1, + })) + + // connect the real hosts with degree 5 + connectSome(t, hosts[:10], 5) + + // sybil squatters for the remaining 40 hosts + for _, h := range hosts[10:] { + squatter := &sybilSquatter{h: h} + h.SetStreamHandler(BlossomSubID_v11, squatter.handleStream) + } + + // connect all squatters to every real host + for _, squatter := range hosts[10:] { + for _, real := range hosts[:10] { + connect(t, squatter, real) + } + } + + // wait a bit for the connections to propagate events to the pubsubs + time.Sleep(time.Second) + + // ask the real pubsus to join the bitmask + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + // consume the messages + go func(sub *Subscription) { + for { + _, err := sub.Next(ctx) + if err != nil { + return + } + } + }(sub) + } + + // publish a bunch of messages from the real hosts + for i := 0; i < 1000; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i%10].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + time.Sleep(20 * time.Millisecond) + } + + // now wait a few of oppgraft cycles + time.Sleep(7 * time.Second) + + // check the honest peer meshes, they should have at least 3 honest peers each + res := make(chan int, 1) + for _, ps := range psubs { + ps.eval <- func() { + gs := ps.rt.(*BlossomSubRouter) + count := 0 + for _, h := range hosts[:10] { + _, ok := gs.mesh[string([]byte{0xf1, 0x22, 0xb0, 0x22})][h.ID()] + if ok { + count++ + } + } + res <- count + } + + count := <-res + if count < 3 { + t.Fatalf("expected at least 3 honest peers, got %d", count) + } + } +} +func TestBlossomSubLeaveBitmask(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 2) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0]), + getBlossomSub(ctx, h[1]), + } + + connect(t, h[0], h[1]) + + // Join all peers + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + leaveTime := time.Now() + done := make(chan struct{}) + + psubs[0].rt.(*BlossomSubRouter).p.eval <- func() { + defer close(done) + psubs[0].rt.Leave([]byte{0xf1, 0x22, 0xb0, 0x22}) + time.Sleep(time.Second) + peerMap := psubs[0].rt.(*BlossomSubRouter).backoff[string([]byte{0xf1, 0x22, 0xb0, 0x22})] + if len(peerMap) != 1 { + t.Fatalf("No peer is populated in the backoff map for peer 0") + } + _, ok := peerMap[h[1].ID()] + if !ok { + t.Errorf("Expected peer does not exist in the backoff map") + } + + backoffTime 
:= peerMap[h[1].ID()].Sub(leaveTime) + // Check that the backoff time is roughly the unsubscribebackoff time (with a slack of 1s) + if backoffTime-BlossomSubUnsubscribeBackoff > time.Second { + t.Error("Backoff time should be set to BlossomSubUnsubscribeBackoff.") + } + } + <-done + + done = make(chan struct{}) + // Ensure that remote peer 1 also applies the backoff appropriately + // for peer 0. + psubs[1].rt.(*BlossomSubRouter).p.eval <- func() { + defer close(done) + peerMap2 := psubs[1].rt.(*BlossomSubRouter).backoff[string([]byte{0xf1, 0x22, 0xb0, 0x22})] + if len(peerMap2) != 1 { + t.Fatalf("No peer is populated in the backoff map for peer 1") + } + _, ok := peerMap2[h[0].ID()] + if !ok { + t.Errorf("Expected peer does not exist in the backoff map") + } + + backoffTime := peerMap2[h[0].ID()].Sub(leaveTime) + // Check that the backoff time is roughly the unsubscribebackoff time (with a slack of 1s) + if backoffTime-BlossomSubUnsubscribeBackoff > time.Second { + t.Error("Backoff time should be set to BlossomSubUnsubscribeBackoff.") + } + } + <-done +} + +func TestBlossomSubJoinBitmask(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := getNetHosts(t, ctx, 3) + psubs := []*PubSub{ + getBlossomSub(ctx, h[0]), + getBlossomSub(ctx, h[1]), + getBlossomSub(ctx, h[2]), + } + + connect(t, h[0], h[1]) + connect(t, h[0], h[2]) + + router0 := psubs[0].rt.(*BlossomSubRouter) + + // Add in backoff for peer. + peerMap := make(map[peer.ID]time.Time) + peerMap[h[1].ID()] = time.Now().Add(router0.params.UnsubscribeBackoff) + + router0.backoff[string([]byte{0xf1, 0x22, 0xb0, 0x22})] = peerMap + + // Join all peers + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + meshMap := router0.mesh[string([]byte{0xf1, 0x22, 0xb0, 0x22})] + if len(meshMap) != 1 { + t.Fatalf("Unexpect peer included in the mesh") + } + + _, ok := meshMap[h[1].ID()] + if ok { + t.Fatalf("Peer that was to be backed off is included in the mesh") + } +} + +type sybilSquatter struct { + h host.Host +} + +func (sq *sybilSquatter) handleStream(s network.Stream) { + defer s.Close() + + os, err := sq.h.NewStream(context.Background(), s.Conn().RemotePeer(), BlossomSubID_v11) + if err != nil { + panic(err) + } + + // send a subscription for test in the output stream to become candidate for GRAFT + // and then just read and ignore the incoming RPCs + r := protoio.NewDelimitedReader(s, 1<<20) + w := protoio.NewDelimitedWriter(os) + truth := true + bitmask := []byte{0xf1, 0x22, 0xb0, 0x22} + err = w.WriteMsg(&pb.RPC{Subscriptions: []*pb.RPC_SubOpts{{Subscribe: truth, Bitmask: bitmask}}}) + if err != nil { + panic(err) + } + + var rpc pb.RPC + for { + rpc.Reset() + err = r.ReadMsg(&rpc) + if err != nil { + if err != io.EOF { + s.Reset() + } + return + } + } +} + +func TestBlossomSubPeerScoreInspect(t *testing.T) { + // this test exercises the code path sof peer score inspection + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + + inspector := &mockPeerScoreInspector{} + psub1 := getBlossomSub(ctx, hosts[0], + WithPeerScore( + &PeerScoreParams{ + Bitmasks: map[string]*BitmaskScoreParams{ + string([]byte{0xf1, 0x22, 0xb0, 0x22}): { + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.999, + 
FirstMessageDeliveriesCap: 100, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.9999, + }, + }, + AppSpecificScore: func(peer.ID) float64 { return 0 }, + DecayInterval: time.Second, + DecayToZero: 0.01, + }, + &PeerScoreThresholds{ + GossipThreshold: -1, + PublishThreshold: -10, + GraylistThreshold: -1000, + }), + WithPeerScoreInspect(inspector.inspect, time.Second)) + psub2 := getBlossomSub(ctx, hosts[1]) + psubs := []*PubSub{psub1, psub2} + + connect(t, hosts[0], hosts[1]) + + for _, ps := range psubs { + _, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + } + + time.Sleep(time.Second) + + for i := 0; i < 20; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i%2].Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + time.Sleep(20 * time.Millisecond) + } + + time.Sleep(time.Second + 200*time.Millisecond) + + score2 := inspector.score(hosts[1].ID()) + if score2 < 9 { + t.Fatalf("expected score to be at least 9, instead got %f", score2) + } +} + +func TestBlossomSubPeerScoreResetBitmaskParams(t *testing.T) { + // this test exercises the code path sof peer score inspection + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 1) + + ps := getBlossomSub(ctx, hosts[0], + WithPeerScore( + &PeerScoreParams{ + Bitmasks: map[string]*BitmaskScoreParams{ + string([]byte{0xf1, 0x22, 0xb0, 0x22}): { + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.999, + FirstMessageDeliveriesCap: 100, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.9999, + }, + }, + AppSpecificScore: func(peer.ID) float64 { return 0 }, + DecayInterval: time.Second, + DecayToZero: 0.01, + }, + &PeerScoreThresholds{ + GossipThreshold: -1, + PublishThreshold: -10, + GraylistThreshold: -1000, + })) + + bitmask, err := ps.Join([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + err = bitmask.SetScoreParams( + &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.999, + FirstMessageDeliveriesCap: 200, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.9999, + }) + if err != nil { + t.Fatal(err) + } +} + +type mockPeerScoreInspector struct { + mx sync.Mutex + scores map[peer.ID]float64 +} + +func (ps *mockPeerScoreInspector) inspect(scores map[peer.ID]float64) { + ps.mx.Lock() + defer ps.mx.Unlock() + ps.scores = scores +} + +func (ps *mockPeerScoreInspector) score(p peer.ID) float64 { + ps.mx.Lock() + defer ps.mx.Unlock() + return ps.scores[p] +} + +func TestBlossomSubRPCFragmentation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + ps := getBlossomSub(ctx, hosts[0]) + + // make a fake peer that requests everything through IWANT gossip + iwe := iwantEverything{h: hosts[1]} + iwe.h.SetStreamHandler(BlossomSubID_v11, iwe.handleStream) + + connect(t, hosts[0], hosts[1]) + + // have the real pubsub join the test bitmask + _, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + // wait for the real pubsub to connect and try to graft to the faker + time.Sleep(time.Second) + + // publish a bunch of fairly large messages from the real host + nMessages := 1000 + msgSize := 20000 + for i := 0; i < nMessages; i++ { + msg := make([]byte, msgSize) + rand.Read(msg) + 
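+ // 1000 messages of ~20kB each add up to far more than ps.maxMessageSize,
+ // so the IWANT responses to the fake peer must be split across many RPCs
+ // (asserted below via minExpectedRPCS)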
ps.Publish([]byte{0xf1, 0x22, 0xb0, 0x22}, msg) + time.Sleep(20 * time.Millisecond) + } + + // wait a bit for them to be received via gossip by the fake peer + time.Sleep(5 * time.Second) + iwe.lk.Lock() + defer iwe.lk.Unlock() + + // we should have received all the messages + if iwe.msgsReceived != nMessages { + t.Fatalf("expected fake BlossomSub peer to receive all messages, got %d / %d", iwe.msgsReceived, nMessages) + } + + // and we should have seen an IHAVE message for each of them + if iwe.ihavesReceived != nMessages { + t.Fatalf("expected to get IHAVEs for every message, got %d / %d", iwe.ihavesReceived, nMessages) + } + + // If everything were fragmented with maximum efficiency, we would expect to get + // (nMessages * msgSize) / ps.maxMessageSize total RPCs containing the messages we sent IWANTs for. + // The actual number will probably be larger, since there's some overhead for the RPC itself, and + // we probably aren't packing each RPC to it's maximum size + minExpectedRPCS := (nMessages * msgSize) / ps.maxMessageSize + if iwe.rpcsWithMessages < minExpectedRPCS { + t.Fatalf("expected to receive at least %d RPCs containing messages, got %d", minExpectedRPCS, iwe.rpcsWithMessages) + } +} + +// iwantEverything is a simple BlossomSub client that never grafts onto a mesh, +// instead requesting everything through IWANT gossip messages. It is used to +// test that large responses to IWANT requests are fragmented into multiple RPCs. +type iwantEverything struct { + h host.Host + lk sync.Mutex + rpcsWithMessages int + msgsReceived int + ihavesReceived int +} + +func (iwe *iwantEverything) handleStream(s network.Stream) { + defer s.Close() + + os, err := iwe.h.NewStream(context.Background(), s.Conn().RemotePeer(), BlossomSubID_v11) + if err != nil { + panic(err) + } + + msgIdsReceived := make(map[string]struct{}) + gossipMsgIdsReceived := make(map[string]struct{}) + + // send a subscription for test in the output stream to become candidate for gossip + r := protoio.NewDelimitedReader(s, 1<<20) + w := protoio.NewDelimitedWriter(os) + truth := true + bitmask := []byte{0xf1, 0x22, 0xb0, 0x22} + err = w.WriteMsg(&pb.RPC{Subscriptions: []*pb.RPC_SubOpts{{Subscribe: truth, Bitmask: bitmask}}}) + if err != nil { + panic(err) + } + + var rpc pb.RPC + for { + rpc.Reset() + err = r.ReadMsg(&rpc) + if err != nil { + if err != io.EOF { + s.Reset() + } + return + } + + iwe.lk.Lock() + if len(rpc.Publish) != 0 { + iwe.rpcsWithMessages++ + } + // keep track of unique message ids received + for _, msg := range rpc.Publish { + id := string(msg.Seqno) + if _, seen := msgIdsReceived[id]; !seen { + iwe.msgsReceived++ + } + msgIdsReceived[id] = struct{}{} + } + + if rpc.Control != nil { + // send a PRUNE for all grafts, so we don't get direct message deliveries + var prunes []*pb.ControlPrune + for _, graft := range rpc.Control.Graft { + prunes = append(prunes, &pb.ControlPrune{Bitmask: graft.Bitmask}) + } + + var iwants []*pb.ControlIWant + for _, ihave := range rpc.Control.Ihave { + iwants = append(iwants, &pb.ControlIWant{MessageIDs: ihave.MessageIDs}) + for _, msgId := range ihave.MessageIDs { + if _, seen := gossipMsgIdsReceived[msgId]; !seen { + iwe.ihavesReceived++ + } + gossipMsgIdsReceived[msgId] = struct{}{} + } + } + + out := rpcWithControl(nil, nil, iwants, nil, prunes) + err = w.WriteMsg(out) + if err != nil { + panic(err) + } + } + iwe.lk.Unlock() + } +} + +func TestFragmentRPCFunction(t *testing.T) { + p := peer.ID("some-peer") + bitmask := []byte{0xf1, 0x22, 0xb0, 0x22} + rpc := &RPC{from: 
p} + limit := 1024 + + mkMsg := func(size int) *pb.Message { + msg := &pb.Message{} + msg.Data = make([]byte, size-4) // subtract the protobuf overhead, so msg.Size() returns requested size + rand.Read(msg.Data) + return msg + } + + ensureBelowLimit := func(rpcs []*RPC) { + for _, r := range rpcs { + if r.Size() > limit { + t.Fatalf("expected fragmented RPC to be below %d bytes, was %d", limit, r.Size()) + } + } + } + + // it should not fragment if everything fits in one RPC + rpc.Publish = []*pb.Message{} + rpc.Publish = []*pb.Message{mkMsg(10), mkMsg(10)} + results, err := fragmentRPC(rpc, limit) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected single RPC if input is < limit, got %d", len(results)) + } + + // if there's a message larger than the limit, we should fail + rpc.Publish = []*pb.Message{mkMsg(10), mkMsg(limit * 2)} + results, err = fragmentRPC(rpc, limit) + if err == nil { + t.Fatalf("expected an error if a message exceeds limit, got %d RPCs instead", len(results)) + } + + // if the individual messages are below the limit, but the RPC as a whole is larger, we should fragment + nMessages := 100 + msgSize := 200 + truth := true + rpc.Subscriptions = []*pb.RPC_SubOpts{ + { + Subscribe: truth, + Bitmask: bitmask, + }, + } + rpc.Publish = make([]*pb.Message, nMessages) + for i := 0; i < nMessages; i++ { + rpc.Publish[i] = mkMsg(msgSize) + } + results, err = fragmentRPC(rpc, limit) + if err != nil { + t.Fatal(err) + } + ensureBelowLimit(results) + msgsPerRPC := limit / msgSize + expectedRPCs := nMessages / msgsPerRPC + if len(results) != expectedRPCs { + t.Fatalf("expected %d RPC messages in output, got %d", expectedRPCs, len(results)) + } + var nMessagesFragmented int + var nSubscriptions int + for _, r := range results { + nMessagesFragmented += len(r.Publish) + nSubscriptions += len(r.Subscriptions) + } + if nMessagesFragmented != nMessages { + t.Fatalf("expected fragemented RPCs to contain same number of messages as input, got %d / %d", nMessagesFragmented, nMessages) + } + if nSubscriptions != 1 { + t.Fatal("expected subscription to be present in one of the fragmented messages, but not found") + } + + // if we're fragmenting, and the input RPC has control messages, + // the control messages should be in a separate RPC at the end + // reuse RPC from prev test, but add a control message + rpc.Control = &pb.ControlMessage{ + Graft: []*pb.ControlGraft{{Bitmask: bitmask}}, + Prune: []*pb.ControlPrune{{Bitmask: bitmask}}, + Ihave: []*pb.ControlIHave{{MessageIDs: []string{"foo"}}}, + Iwant: []*pb.ControlIWant{{MessageIDs: []string{"bar"}}}, + } + results, err = fragmentRPC(rpc, limit) + if err != nil { + t.Fatal(err) + } + ensureBelowLimit(results) + // we expect one more RPC than last time, with the final one containing the control messages + expectedCtrl := 1 + expectedRPCs = (nMessages / msgsPerRPC) + expectedCtrl + if len(results) != expectedRPCs { + t.Fatalf("expected %d RPC messages in output, got %d", expectedRPCs, len(results)) + } + ctl := results[len(results)-1].Control + if ctl == nil { + t.Fatal("expected final fragmented RPC to contain control messages, but .Control was nil") + } + // since it was not altered, the original control message should be identical to the output control message + originalBytes, err := rpc.Control.Marshal() + if err != nil { + t.Fatal(err) + } + receivedBytes, err := ctl.Marshal() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(originalBytes, receivedBytes) { + t.Fatal("expected control message to be 
unaltered if it fits within one RPC message") + } + + // if the control message is too large to fit into a single RPC, it should be split into multiple RPCs + nBitmasks := 5 // pretend we're subscribed to multiple bitmasks and sending IHAVE / IWANTs for each + messageIdSize := 32 + msgsPerBitmask := 100 // enough that a single IHAVE or IWANT will exceed the limit + rpc.Control.Ihave = make([]*pb.ControlIHave, nBitmasks) + rpc.Control.Iwant = make([]*pb.ControlIWant, nBitmasks) + for i := 0; i < nBitmasks; i++ { + messageIds := make([]string, msgsPerBitmask) + for m := 0; m < msgsPerBitmask; m++ { + mid := make([]byte, messageIdSize) + rand.Read(mid) + messageIds[m] = string(mid) + } + rpc.Control.Ihave[i] = &pb.ControlIHave{MessageIDs: messageIds} + rpc.Control.Iwant[i] = &pb.ControlIWant{MessageIDs: messageIds} + } + results, err = fragmentRPC(rpc, limit) + if err != nil { + t.Fatal(err) + } + ensureBelowLimit(results) + minExpectedCtl := rpc.Control.Size() / limit + minExpectedRPCs := (nMessages / msgsPerRPC) + minExpectedCtl + if len(results) < minExpectedRPCs { + t.Fatalf("expected at least %d total RPCs (at least %d with control messages), got %d total", expectedRPCs, expectedCtrl, len(results)) + } + + // Test the pathological case where a single gossip message ID exceeds the limit. + // It should not be present in the fragmented messages, but smaller IDs should be + rpc.Reset() + giantIdBytes := make([]byte, limit*2) + rand.Read(giantIdBytes) + rpc.Control = &pb.ControlMessage{ + Iwant: []*pb.ControlIWant{ + {MessageIDs: []string{"hello", string(giantIdBytes)}}, + }, + } + results, err = fragmentRPC(rpc, limit) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 RPC, got %d", len(results)) + } + if len(results[0].Control.Iwant) != 1 { + t.Fatalf("expected 1 IWANT, got %d", len(results[0].Control.Iwant)) + } + if results[0].Control.Iwant[0].MessageIDs[0] != "hello" { + t.Fatalf("expected small message ID to be included unaltered, got %s instead", + results[0].Control.Iwant[0].MessageIDs[0]) + } +} diff --git a/go-libp2p-blossomsub/comm.go b/go-libp2p-blossomsub/comm.go new file mode 100644 index 0000000..fa24ebd --- /dev/null +++ b/go-libp2p-blossomsub/comm.go @@ -0,0 +1,230 @@ +package blossomsub + +import ( + "context" + "encoding/binary" + "io" + "time" + + pool "github.com/libp2p/go-buffer-pool" + "github.com/multiformats/go-varint" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-msgio" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +// get the initial RPC containing all of our subscriptions to send to new peers +func (p *PubSub) getHelloPacket() *RPC { + var rpc RPC + + subscriptions := make(map[string]bool) + + for t := range p.mySubs { + subscriptions[t] = true + } + + for t := range p.myRelays { + subscriptions[t] = true + } + + for t := range subscriptions { + as := &pb.RPC_SubOpts{ + Bitmask: []byte(t), + Subscribe: true, + } + rpc.Subscriptions = append(rpc.Subscriptions, as) + } + return &rpc +} + +func (p *PubSub) handleNewStream(s network.Stream) { + peer := s.Conn().RemotePeer() + + p.inboundStreamsMx.Lock() + other, dup := p.inboundStreams[peer] + if dup { + log.Debugf("duplicate inbound stream from %s; resetting other stream", peer) + other.Reset() + } + p.inboundStreams[peer] = s + p.inboundStreamsMx.Unlock() + + defer func() { + p.inboundStreamsMx.Lock() + if p.inboundStreams[peer] == s { + delete(p.inboundStreams, peer) + } + 
p.inboundStreamsMx.Unlock() + }() + + r := msgio.NewVarintReaderSize(s, p.maxMessageSize) + for { + msgbytes, err := r.ReadMsg() + if err != nil { + r.ReleaseMsg(msgbytes) + if err != io.EOF { + s.Reset() + log.Debugf("error reading rpc from %s: %s", s.Conn().RemotePeer(), err) + } else { + // Just be nice. They probably won't read this + // but it doesn't hurt to send it. + s.Close() + } + + return + } + + rpc := new(RPC) + err = rpc.Unmarshal(msgbytes) + r.ReleaseMsg(msgbytes) + if err != nil { + s.Reset() + log.Warnf("bogus rpc from %s: %s", s.Conn().RemotePeer(), err) + return + } + + rpc.from = peer + select { + case p.incoming <- rpc: + case <-p.ctx.Done(): + // Close is useless because the other side isn't reading. + s.Reset() + return + } + } +} + +func (p *PubSub) notifyPeerDead(pid peer.ID) { + p.peerDeadPrioLk.RLock() + p.peerDeadMx.Lock() + p.peerDeadPend[pid] = struct{}{} + p.peerDeadMx.Unlock() + p.peerDeadPrioLk.RUnlock() + + select { + case p.peerDead <- struct{}{}: + default: + } +} + +func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) { + s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...) + if err != nil { + log.Debug("opening new stream to peer: ", err, pid) + + select { + case p.newPeerError <- pid: + case <-ctx.Done(): + } + + return + } + + go p.handleSendingMessages(ctx, s, outgoing) + go p.handlePeerDead(s) + select { + case p.newPeerStream <- s: + case <-ctx.Done(): + } +} + +func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) { + select { + case <-time.After(backoff): + p.handleNewPeer(ctx, pid, outgoing) + case <-ctx.Done(): + return + } +} + +func (p *PubSub) handlePeerDead(s network.Stream) { + pid := s.Conn().RemotePeer() + + _, err := s.Read([]byte{0}) + if err == nil { + log.Debugf("unexpected message from %s", pid) + } + + s.Reset() + p.notifyPeerDead(pid) +} + +func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) { + writeRpc := func(rpc *RPC) error { + size := uint64(rpc.Size()) + + buf := pool.Get(varint.UvarintSize(size) + int(size)) + defer pool.Put(buf) + + n := binary.PutUvarint(buf, size) + _, err := rpc.MarshalTo(buf[n:]) + if err != nil { + return err + } + + _, err = s.Write(buf) + return err + } + + defer s.Close() + for { + select { + case rpc, ok := <-outgoing: + if !ok { + return + } + + err := writeRpc(rpc) + if err != nil { + s.Reset() + log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) + return + } + case <-ctx.Done(): + return + } + } +} + +func rpcWithSubs(subs ...*pb.RPC_SubOpts) *RPC { + return &RPC{ + RPC: pb.RPC{ + Subscriptions: subs, + }, + } +} + +func rpcWithMessages(msgs ...*pb.Message) *RPC { + return &RPC{RPC: pb.RPC{Publish: msgs}} +} + +func rpcWithControl(msgs []*pb.Message, + ihave []*pb.ControlIHave, + iwant []*pb.ControlIWant, + graft []*pb.ControlGraft, + prune []*pb.ControlPrune) *RPC { + return &RPC{ + RPC: pb.RPC{ + Publish: msgs, + Control: &pb.ControlMessage{ + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + }, + }, + } +} + +func copyRPC(rpc *RPC) *RPC { + res := new(RPC) + *res = *rpc + if rpc.Control != nil { + res.Control = new(pb.ControlMessage) + *res.Control = *rpc.Control + } + return res +} diff --git a/go-libp2p-blossomsub/discovery.go b/go-libp2p-blossomsub/discovery.go new file mode 100644 index 0000000..9644c9a --- /dev/null +++ b/go-libp2p-blossomsub/discovery.go @@ -0,0 +1,348 @@ +package blossomsub + +import 
( + "context" + "math/rand" + "time" + + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + discimpl "github.com/libp2p/go-libp2p/p2p/discovery/backoff" +) + +var ( + // poll interval + + // DiscoveryPollInitialDelay is how long the discovery system waits after it first starts before polling + DiscoveryPollInitialDelay = 0 * time.Millisecond + // DiscoveryPollInterval is approximately how long the discovery system waits in between checks for whether the + // more peers are needed for any bitmask + DiscoveryPollInterval = 1 * time.Second +) + +// interval at which to retry advertisements when they fail. +const discoveryAdvertiseRetryInterval = 2 * time.Minute + +type DiscoverOpt func(*discoverOptions) error + +type discoverOptions struct { + connFactory BackoffConnectorFactory + opts []discovery.Option +} + +func defaultDiscoverOptions() *discoverOptions { + rngSrc := rand.NewSource(rand.Int63()) + minBackoff, maxBackoff := time.Second*10, time.Hour + cacheSize := 100 + dialTimeout := time.Minute * 2 + discoverOpts := &discoverOptions{ + connFactory: func(host host.Host) (*discimpl.BackoffConnector, error) { + backoff := discimpl.NewExponentialBackoff(minBackoff, maxBackoff, discimpl.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc)) + return discimpl.NewBackoffConnector(host, cacheSize, dialTimeout, backoff) + }, + } + + return discoverOpts +} + +// discover represents the discovery pipeline. +// The discovery pipeline handles advertising and discovery of peers +type discover struct { + p *PubSub + + // discovery assists in discovering and advertising peers for a bitmask + discovery discovery.Discovery + + // advertising tracks which bitmasks are being advertised + advertising map[string]context.CancelFunc + + // discoverQ handles continuing peer discovery + discoverQ chan *discoverReq + + // ongoing tracks ongoing discovery requests + ongoing map[string]struct{} + + // done handles completion of a discovery request + done chan string + + // connector handles connecting to new peers found via discovery + connector *discimpl.BackoffConnector + + // options are the set of options to be used to complete struct construction in Start + options *discoverOptions +} + +// MinBitmaskSize returns a function that checks if a router is ready for publishing based on the bitmask size. +// The router ultimately decides the whether it is ready or not, the given size is just a suggestion. Note +// that the bitmask size does not include the router in the count. 
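+//
+// A minimal usage sketch, mirroring how the discovery tests gate a publish on
+// mesh size (the names ps, bitmask, ctx and data here are illustrative):
+//
+//	b, _ := ps.Join(bitmask)
+//	err := b.Publish(ctx, data, WithReadiness(MinBitmaskSize(4)))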
+func MinBitmaskSize(size int) RouterReady { + return func(rt PubSubRouter, bitmask []byte) (bool, error) { + return rt.EnoughPeers(bitmask, size), nil + } +} + +// Start attaches the discovery pipeline to a pubsub instance, initializes discovery and starts event loop +func (d *discover) Start(p *PubSub, opts ...DiscoverOpt) error { + if d.discovery == nil || p == nil { + return nil + } + + d.p = p + d.advertising = make(map[string]context.CancelFunc) + d.discoverQ = make(chan *discoverReq, 32) + d.ongoing = make(map[string]struct{}) + d.done = make(chan string) + + conn, err := d.options.connFactory(p.host) + if err != nil { + return err + } + d.connector = conn + + go d.discoverLoop() + go d.pollTimer() + + return nil +} + +func (d *discover) pollTimer() { + select { + case <-time.After(DiscoveryPollInitialDelay): + case <-d.p.ctx.Done(): + return + } + + select { + case d.p.eval <- d.requestDiscovery: + case <-d.p.ctx.Done(): + return + } + + ticker := time.NewTicker(DiscoveryPollInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + select { + case d.p.eval <- d.requestDiscovery: + case <-d.p.ctx.Done(): + return + } + case <-d.p.ctx.Done(): + return + } + } +} + +func (d *discover) requestDiscovery() { + for _, b := range d.p.myBitmasks { + if !d.p.rt.EnoughPeers(b.bitmask, 0) { + d.discoverQ <- &discoverReq{bitmask: b.bitmask, done: make(chan struct{}, 1)} + } + } +} + +func (d *discover) discoverLoop() { + for { + select { + case discover := <-d.discoverQ: + bitmask := discover.bitmask + + if _, ok := d.ongoing[string(bitmask)]; ok { + discover.done <- struct{}{} + continue + } + + d.ongoing[string(bitmask)] = struct{}{} + + go func() { + d.handleDiscovery(d.p.ctx, bitmask, discover.opts) + select { + case d.done <- string(bitmask): + case <-d.p.ctx.Done(): + } + discover.done <- struct{}{} + }() + case bitmask := <-d.done: + delete(d.ongoing, bitmask) + case <-d.p.ctx.Done(): + return + } + } +} + +// Advertise advertises this node's interest in a bitmask to a discovery service. Advertise is not thread-safe. +func (d *discover) Advertise(bitmask []byte) { + if d.discovery == nil { + return + } + + advertisingCtx, cancel := context.WithCancel(d.p.ctx) + + if _, ok := d.advertising[string(bitmask)]; ok { + cancel() + return + } + d.advertising[string(bitmask)] = cancel + + go func() { + next, err := d.discovery.Advertise(advertisingCtx, string(bitmask)) + if err != nil { + log.Warnf("bootstrap: error providing rendezvous for %s: %s", bitmask, err.Error()) + if next == 0 { + next = discoveryAdvertiseRetryInterval + } + } + + t := time.NewTimer(next) + defer t.Stop() + + for advertisingCtx.Err() == nil { + select { + case <-t.C: + next, err = d.discovery.Advertise(advertisingCtx, string(bitmask)) + if err != nil { + log.Warnf("bootstrap: error providing rendezvous for %s: %s", bitmask, err.Error()) + if next == 0 { + next = discoveryAdvertiseRetryInterval + } + } + t.Reset(next) + case <-advertisingCtx.Done(): + return + } + } + }() +} + +// StopAdvertise stops advertising this node's interest in a bitmask. StopAdvertise is not thread-safe. 
+func (d *discover) StopAdvertise(bitmask []byte) { + if d.discovery == nil { + return + } + + if advertiseCancel, ok := d.advertising[string(bitmask)]; ok { + advertiseCancel() + delete(d.advertising, string(bitmask)) + } +} + +// Discover searches for additional peers interested in a given bitmask +func (d *discover) Discover(bitmask []byte, opts ...discovery.Option) { + if d.discovery == nil { + return + } + + d.discoverQ <- &discoverReq{bitmask, opts, make(chan struct{}, 1)} +} + +// Bootstrap attempts to bootstrap to a given bitmask. Returns true if bootstrapped successfully, false otherwise. +func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterReady, opts ...discovery.Option) bool { + if d.discovery == nil { + return true + } + + t := time.NewTimer(time.Hour) + if !t.Stop() { + <-t.C + } + defer t.Stop() + + for { + // Check if ready for publishing + bootstrapped := make(chan bool, 1) + select { + case d.p.eval <- func() { + done, _ := ready(d.p.rt, bitmask) + bootstrapped <- done + }: + if <-bootstrapped { + return true + } + case <-d.p.ctx.Done(): + return false + case <-ctx.Done(): + return false + } + + // If not ready discover more peers + disc := &discoverReq{bitmask, opts, make(chan struct{}, 1)} + select { + case d.discoverQ <- disc: + case <-d.p.ctx.Done(): + return false + case <-ctx.Done(): + return false + } + + select { + case <-disc.done: + case <-d.p.ctx.Done(): + return false + case <-ctx.Done(): + return false + } + + t.Reset(time.Millisecond * 100) + select { + case <-t.C: + case <-d.p.ctx.Done(): + return false + case <-ctx.Done(): + return false + } + } +} + +func (d *discover) handleDiscovery(ctx context.Context, bitmask []byte, opts []discovery.Option) { + discoverCtx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + peerCh, err := d.discovery.FindPeers(discoverCtx, string(bitmask), opts...) + if err != nil { + log.Debugf("error finding peers for bitmask %s: %v", bitmask, err) + return + } + + d.connector.Connect(ctx, peerCh) +} + +type discoverReq struct { + bitmask []byte + opts []discovery.Option + done chan struct{} +} + +type pubSubDiscovery struct { + discovery.Discovery + opts []discovery.Option +} + +func (d *pubSubDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { + return d.Discovery.Advertise(ctx, "floodsub:"+ns, append(opts, d.opts...)...) +} + +func (d *pubSubDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { + return d.Discovery.FindPeers(ctx, "floodsub:"+ns, append(opts, d.opts...)...) 
+} + +// WithDiscoveryOpts passes libp2p Discovery options into the PubSub discovery subsystem +func WithDiscoveryOpts(opts ...discovery.Option) DiscoverOpt { + return func(d *discoverOptions) error { + d.opts = opts + return nil + } +} + +// BackoffConnectorFactory creates a BackoffConnector that is attached to a given host +type BackoffConnectorFactory func(host host.Host) (*discimpl.BackoffConnector, error) + +// WithDiscoverConnector adds a custom connector that deals with how the discovery subsystem connects to peers +func WithDiscoverConnector(connFactory BackoffConnectorFactory) DiscoverOpt { + return func(d *discoverOptions) error { + d.connFactory = connFactory + return nil + } +} diff --git a/go-libp2p-blossomsub/discovery_test.go b/go-libp2p-blossomsub/discovery_test.go new file mode 100644 index 0000000..dc392fb --- /dev/null +++ b/go-libp2p-blossomsub/discovery_test.go @@ -0,0 +1,309 @@ +package blossomsub + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" +) + +type mockDiscoveryServer struct { + mx sync.Mutex + db map[string]map[peer.ID]*discoveryRegistration +} + +type discoveryRegistration struct { + info peer.AddrInfo + ttl time.Duration +} + +func newDiscoveryServer() *mockDiscoveryServer { + return &mockDiscoveryServer{ + db: make(map[string]map[peer.ID]*discoveryRegistration), + } +} + +func (s *mockDiscoveryServer) Advertise(ns string, info peer.AddrInfo, ttl time.Duration) (time.Duration, error) { + s.mx.Lock() + defer s.mx.Unlock() + + peers, ok := s.db[ns] + if !ok { + peers = make(map[peer.ID]*discoveryRegistration) + s.db[ns] = peers + } + peers[info.ID] = &discoveryRegistration{info, ttl} + return ttl, nil +} + +func (s *mockDiscoveryServer) FindPeers(ns string, limit int) (<-chan peer.AddrInfo, error) { + s.mx.Lock() + defer s.mx.Unlock() + + peers, ok := s.db[ns] + if !ok || len(peers) == 0 { + emptyCh := make(chan peer.AddrInfo) + close(emptyCh) + return emptyCh, nil + } + + count := len(peers) + if count > limit { + count = limit + } + ch := make(chan peer.AddrInfo, count) + numSent := 0 + for _, reg := range peers { + if numSent == count { + break + } + numSent++ + ch <- reg.info + } + close(ch) + + return ch, nil +} + +func (s *mockDiscoveryServer) hasPeerRecord(ns string, pid peer.ID) bool { + s.mx.Lock() + defer s.mx.Unlock() + + if peers, ok := s.db[ns]; ok { + _, ok := peers[pid] + return ok + } + return false +} + +type mockDiscoveryClient struct { + host host.Host + server *mockDiscoveryServer +} + +func (d *mockDiscoveryClient) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { + var options discovery.Options + err := options.Apply(opts...) + if err != nil { + return 0, err + } + + return d.server.Advertise(ns, *host.InfoFromHost(d.host), options.Ttl) +} + +func (d *mockDiscoveryClient) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { + var options discovery.Options + err := options.Apply(opts...) 
+ if err != nil { + return nil, err + } + + return d.server.FindPeers(ns, options.Limit) +} + +type dummyDiscovery struct{} + +func (d *dummyDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { + return time.Hour, nil +} + +func (d *dummyDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { + retCh := make(chan peer.AddrInfo) + go func() { + time.Sleep(time.Second) + close(retCh) + }() + return retCh, nil +} + +func TestSimpleDiscovery(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Setup Discovery server and pubsub clients + const numHosts = 20 + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + server := newDiscoveryServer() + discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(1 * time.Minute)} + + hosts := getNetHosts(t, ctx, numHosts) + psubs := make([]*PubSub, numHosts) + bitmaskHandlers := make([]*Bitmask, numHosts) + + for i, h := range hosts { + disc := &mockDiscoveryClient{h, server} + ps := getPubsub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...))) + psubs[i] = ps + bitmaskHandlers[i], _ = ps.Join(bitmask) + } + + // Subscribe with all but one pubsub instance + msgs := make([]*Subscription, numHosts) + for i, th := range bitmaskHandlers[1:] { + subch, err := th.Subscribe() + if err != nil { + t.Fatal(err) + } + + msgs[i+1] = subch + } + + // Wait for the advertisements to go through then check that they did + for { + server.mx.Lock() + numPeers := len(server.db["floodsub:"+string(bitmask)]) + server.mx.Unlock() + if numPeers == numHosts-1 { + break + } else { + time.Sleep(time.Millisecond * 100) + } + } + + for i, h := range hosts[1:] { + if !server.hasPeerRecord("floodsub:"+string(bitmask), h.ID()) { + t.Fatalf("Server did not register host %d with ID: %s", i+1, h.ID().Pretty()) + } + } + + // Try subscribing followed by publishing a single message + subch, err := bitmaskHandlers[0].Subscribe() + if err != nil { + t.Fatal(err) + } + msgs[0] = subch + + msg := []byte("first message") + if err := bitmaskHandlers[0].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil { + t.Fatal(err) + } + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + + // Try random peers sending messages and make sure they are received + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(1))); err != nil { + t.Fatal(err) + } + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) { + t.Skip("flaky test disabled") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Setup Discovery server and pubsub clients + partitionSize := BlossomSubDlo - 1 + numHosts := partitionSize * 2 + const ttl = 1 * time.Minute + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + server1, server2 := newDiscoveryServer(), newDiscoveryServer() + discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(ttl)} + + // Put the pubsub clients into two partitions + hosts := getNetHosts(t, ctx, numHosts) + psubs := make([]*PubSub, numHosts) +
bitmaskHandlers := make([]*Bitmask, numHosts) + + for i, h := range hosts { + s := server1 + if i >= partitionSize { + s = server2 + } + disc := &mockDiscoveryClient{h, s} + ps := getBlossomSub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...))) + psubs[i] = ps + bitmaskHandlers[i], _ = ps.Join(bitmask) + } + + msgs := make([]*Subscription, numHosts) + for i, th := range bitmaskHandlers { + subch, err := th.Subscribe() + if err != nil { + t.Fatal(err) + } + + msgs[i] = subch + } + + // Wait for network to finish forming then join the partitions via discovery + for _, ps := range psubs { + waitUntilBlossomSubMeshCount(ps, bitmask, partitionSize-1) + } + + for i := 0; i < partitionSize; i++ { + if _, err := server1.Advertise("floodsub:"+string(bitmask), *host.InfoFromHost(hosts[i+partitionSize]), ttl); err != nil { + t.Fatal(err) + } + } + + // test the mesh + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + + owner := rand.Intn(numHosts) + + if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil { + t.Fatal(err) + } + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +//lint:ignore U1000 used only by skipped tests at present +func waitUntilBlossomSubMeshCount(ps *PubSub, bitmask []byte, count int) { + done := false + doneCh := make(chan bool, 1) + rt := ps.rt.(*BlossomSubRouter) + for !done { + ps.eval <- func() { + doneCh <- len(rt.mesh[string(bitmask)]) == count + } + done = <-doneCh + if !done { + time.Sleep(100 * time.Millisecond) + } + } +} diff --git a/go-libp2p-blossomsub/floodsub.go b/go-libp2p-blossomsub/floodsub.go new file mode 100644 index 0000000..7eb3919 --- /dev/null +++ b/go-libp2p-blossomsub/floodsub.go @@ -0,0 +1,108 @@ +package blossomsub + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +const ( + FloodSubID = protocol.ID("/floodsub/1.0.0") + FloodSubBitmaskSearchSize = 5 +) + +// NewFloodsubWithProtocols returns a new floodsub-enabled PubSub objecting using the protocols specified in ps. +func NewFloodsubWithProtocols(ctx context.Context, h host.Host, ps []protocol.ID, opts ...Option) (*PubSub, error) { + rt := &FloodSubRouter{ + protocols: ps, + } + return NewPubSub(ctx, h, rt, opts...) +} + +// NewFloodSub returns a new PubSub object using the FloodSubRouter. +func NewFloodSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) { + return NewFloodsubWithProtocols(ctx, h, []protocol.ID{FloodSubID}, opts...) 
+} + +type FloodSubRouter struct { + p *PubSub + protocols []protocol.ID + tracer *pubsubTracer +} + +func (fs *FloodSubRouter) Protocols() []protocol.ID { + return fs.protocols +} + +func (fs *FloodSubRouter) Attach(p *PubSub) { + fs.p = p + fs.tracer = p.tracer +} + +func (fs *FloodSubRouter) AddPeer(p peer.ID, proto protocol.ID) { + fs.tracer.AddPeer(p, proto) +} + +func (fs *FloodSubRouter) RemovePeer(p peer.ID) { + fs.tracer.RemovePeer(p) +} + +func (fs *FloodSubRouter) EnoughPeers(bitmask []byte, suggested int) bool { + // check all peers in the bitmask + tmap, ok := fs.p.bitmasks[string(bitmask)] + if !ok { + return false + } + + if suggested == 0 { + suggested = FloodSubBitmaskSearchSize + } + + if len(tmap) >= suggested { + return true + } + + return false +} + +func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { + return AcceptAll +} + +func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} + +func (fs *FloodSubRouter) Publish(msg *Message) { + from := msg.ReceivedFrom + bitmask := msg.GetBitmask() + + out := rpcWithMessages(msg.Message) + for pid := range fs.p.bitmasks[string(bitmask)] { + if pid == from || pid == peer.ID(msg.GetFrom()) { + continue + } + + mch, ok := fs.p.peers[pid] + if !ok { + continue + } + + select { + case mch <- out: + fs.tracer.SendRPC(out, pid) + default: + log.Infof("dropping message to peer %s: queue full", pid) + fs.tracer.DropRPC(out, pid) + // Drop it. The peer is too slow. + } + } +} + +func (fs *FloodSubRouter) Join(bitmask []byte) { + fs.tracer.Join(bitmask) +} + +func (fs *FloodSubRouter) Leave(bitmask []byte) { + fs.tracer.Leave(bitmask) +} diff --git a/go-libp2p-blossomsub/floodsub_test.go b/go-libp2p-blossomsub/floodsub_test.go new file mode 100644 index 0000000..54e2d4a --- /dev/null +++ b/go-libp2p-blossomsub/floodsub_test.go @@ -0,0 +1,1282 @@ +package blossomsub + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" + "math/rand" + "sort" + "sync" + "testing" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + bhost "github.com/libp2p/go-libp2p/p2p/host/blank" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + + "github.com/libp2p/go-msgio/protoio" +) + +func checkMessageRouting(t *testing.T, bitmask []byte, pubs []*PubSub, subs []*Subscription) { + data := make([]byte, 16) + rand.Read(data) + + for _, p := range pubs { + err := p.Publish(bitmask, data) + if err != nil { + t.Fatal(err) + } + + for _, s := range subs { + assertReceive(t, s, data) + } + } +} + +func getNetHosts(t *testing.T, ctx context.Context, n int) []host.Host { + var out []host.Host + + for i := 0; i < n; i++ { + netw := swarmt.GenSwarm(t) + h := bhost.NewBlankHost(netw) + t.Cleanup(func() { h.Close() }) + out = append(out, h) + } + + return out +} + +func connect(t *testing.T, a, b host.Host) { + pinfo := a.Peerstore().PeerInfo(a.ID()) + err := b.Connect(context.Background(), pinfo) + if err != nil { + t.Fatal(err) + } +} + +func sparseConnect(t *testing.T, hosts []host.Host) { + connectSome(t, hosts, 3) +} + +func denseConnect(t *testing.T, hosts []host.Host) { + connectSome(t, hosts, 10) +} + +func connectSome(t *testing.T, hosts []host.Host, d int) { + for i, a := range hosts { + for j := 0; j < d; j++ { + n := rand.Intn(len(hosts)) + if n == i { + j-- + continue + } + + b := hosts[n] + + connect(t, a, b) + } 
+ } +} + +func connectAll(t *testing.T, hosts []host.Host) { + for i, a := range hosts { + for j, b := range hosts { + if i == j { + continue + } + + connect(t, a, b) + } + } +} + +func getPubsub(ctx context.Context, h host.Host, opts ...Option) *PubSub { + ps, err := NewFloodSub(ctx, h, opts...) + if err != nil { + panic(err) + } + return ps +} + +func getPubsubs(ctx context.Context, hs []host.Host, opts ...Option) []*PubSub { + var psubs []*PubSub + for _, h := range hs { + psubs = append(psubs, getPubsub(ctx, h, opts...)) + } + return psubs +} + +func getPubsubsWithOptionC(ctx context.Context, hs []host.Host, cons ...func(int) Option) []*PubSub { + var psubs []*PubSub + for _, h := range hs { + var opts []Option + for i, c := range cons { + opts = append(opts, c(i)) + } + psubs = append(psubs, getPubsub(ctx, h, opts...)) + } + return psubs +} + +func assertReceive(t *testing.T, ch *Subscription, exp []byte) { + select { + case msg := <-ch.ch: + if !bytes.Equal(msg.GetData(), exp) { + t.Fatalf("got wrong message, expected %s but got %s", string(exp), string(msg.GetData())) + } + case <-time.After(time.Second * 5): + t.Logf("%#v\n", ch) + t.Fatal("timed out waiting for message of: ", string(exp)) + } +} + +func assertNeverReceives(t *testing.T, ch *Subscription, timeout time.Duration) { + select { + case msg := <-ch.ch: + t.Logf("%#v\n", ch) + t.Fatal("got unexpected message: ", string(msg.GetData())) + case <-time.After(timeout): + } +} + +func TestBasicFloodsub(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getNetHosts(t, ctx, 20) + + psubs := getPubsubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + // connectAll(t, hosts) + sparseConnect(t, hosts) + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) + + owner := rand.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestMultihops(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 6) + + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + connect(t, hosts[2], hosts[3]) + connect(t, hosts[3], hosts[4]) + connect(t, hosts[4], hosts[5]) + + var subs []*Subscription + for i := 1; i < 6; i++ { + ch, err := psubs[i].Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, ch) + } + + time.Sleep(time.Millisecond * 100) + + msg := []byte("i like cats") + err := psubs[0].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + if err != nil { + t.Fatal(err) + } + + // last node in the chain should get the message + select { + case out := <-subs[4].ch: + if !bytes.Equal(out.GetData(), msg) { + t.Fatal("got wrong data") + } + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting for message") + } +} + +func TestReconnects(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 3) + + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[0], hosts[2]) + + A, err := 
psubs[1].Subscribe([]byte{0xca, 0x75}) + if err != nil { + t.Fatal(err) + } + + B, err := psubs[2].Subscribe([]byte{0xca, 0x75}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + + msg := []byte("apples and oranges") + err = psubs[0].Publish([]byte{0xca, 0x75}, msg) + if err != nil { + t.Fatal(err) + } + + assertReceive(t, A, msg) + assertReceive(t, B, msg) + + B.Cancel() + + time.Sleep(time.Millisecond * 50) + + msg2 := []byte("potato") + err = psubs[0].Publish([]byte{0xca, 0x75}, msg2) + if err != nil { + t.Fatal(err) + } + + assertReceive(t, A, msg2) + select { + case _, ok := <-B.ch: + if ok { + t.Fatal("shouldnt have gotten data on this channel") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for B chan to be closed") + } + + nSubs := len(psubs[2].mySubs[string([]byte{0xca, 0x75})]) + if nSubs > 0 { + t.Fatal(`B should have 0 subscribers for channel []byte{0xca,0x75}, has`, nSubs) + } + + ch2, err := psubs[2].Subscribe([]byte{0xca, 0x75}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + + nextmsg := []byte("ifps is kul") + err = psubs[0].Publish([]byte{0xca, 0x75}, nextmsg) + if err != nil { + t.Fatal(err) + } + + assertReceive(t, ch2, nextmsg) +} + +// make sure messages arent routed between nodes who arent subscribed +func TestNoConnection(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + + psubs := getPubsubs(ctx, hosts) + + ch, err := psubs[5].Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + err = psubs[0].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, []byte("TESTING")) + if err != nil { + t.Fatal(err) + } + + select { + case <-ch.ch: + t.Fatal("shouldnt have gotten a message") + case <-time.After(time.Millisecond * 200): + } +} + +func TestSelfReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + host := getNetHosts(t, ctx, 1)[0] + + psub, err := NewFloodSub(ctx, host) + if err != nil { + t.Fatal(err) + } + + msg := []byte("hello world") + + err = psub.Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + + ch, err := psub.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msg2 := []byte("goodbye world") + err = psub.Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg2) + if err != nil { + t.Fatal(err) + } + + assertReceive(t, ch, msg2) +} + +func TestOneToOne(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + + sub, err := psubs[1].Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 50) + + checkMessageRouting(t, []byte{0xf0, 0x0b, 0xa1, 0x20}, psubs, []*Subscription{sub}) +} + +func assertPeerLists(t *testing.T, hosts []host.Host, ps *PubSub, has ...int) { + peers := ps.ListPeers([]byte{}) + set := make(map[peer.ID]struct{}) + for _, p := range peers { + set[p] = struct{}{} + } + + for _, h := range has { + if _, ok := set[hosts[h].ID()]; !ok { + t.Fatal("expected to have connection to peer: ", h) + } + } +} + +func TestTreeTopology(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], 
hosts[2]) + connect(t, hosts[1], hosts[4]) + connect(t, hosts[2], hosts[3]) + connect(t, hosts[0], hosts[5]) + connect(t, hosts[5], hosts[6]) + connect(t, hosts[5], hosts[8]) + connect(t, hosts[6], hosts[7]) + connect(t, hosts[8], hosts[9]) + + /* + [0] -> [1] -> [2] -> [3] + | L->[4] + v + [5] -> [6] -> [7] + | + v + [8] -> [9] + */ + + var chs []*Subscription + for _, ps := range psubs { + ch, err := ps.Subscribe([]byte{0xf1, 0x22, 0xb0, 0x22}) + if err != nil { + t.Fatal(err) + } + + chs = append(chs, ch) + } + + time.Sleep(time.Millisecond * 50) + + assertPeerLists(t, hosts, psubs[0], 1, 5) + assertPeerLists(t, hosts, psubs[1], 0, 2, 4) + assertPeerLists(t, hosts, psubs[2], 1, 3) + + checkMessageRouting(t, []byte{0xf1, 0x22, 0xb0, 0x22}, []*PubSub{psubs[9], psubs[3]}, chs) +} + +func assertHasBitmasks(t *testing.T, ps *PubSub, expbitmasks ...string) { + bitmasks := ps.GetBitmasks() + sort.Strings(bitmasks) + sort.Strings(expbitmasks) + + if len(bitmasks) != len(expbitmasks) { + t.Fatalf("expected to have %v, but got %v", expbitmasks, bitmasks) + } + + for i, v := range expbitmasks { + if bitmasks[i] != v { + t.Fatalf("expected %s but have %s", v, bitmasks[i]) + } + } +} + +func TestFloodSubPluggableProtocol(t *testing.T) { + t.Run("multi-procol router acts like a hub", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 3) + + psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub", "/lsr/floodsub") + psubB := mustCreatePubSub(ctx, t, hosts[1], "/esh/floodsub") + psubC := mustCreatePubSub(ctx, t, hosts[2], "/lsr/floodsub") + + subA := mustSubscribe(t, psubA, []byte{0xf0, 0x0b, 0xa1, 0x20}) + defer subA.Cancel() + + subB := mustSubscribe(t, psubB, []byte{0xf0, 0x0b, 0xa1, 0x20}) + defer subB.Cancel() + + subC := mustSubscribe(t, psubC, []byte{0xf0, 0x0b, 0xa1, 0x20}) + defer subC.Cancel() + + // B --> A, C --> A + connect(t, hosts[1], hosts[0]) + connect(t, hosts[2], hosts[0]) + + time.Sleep(time.Millisecond * 100) + + psubC.Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, []byte([]byte{0xba, 0x12})) + + assertReceive(t, subA, []byte([]byte{0xba, 0x12})) + assertReceive(t, subB, []byte([]byte{0xba, 0x12})) + assertReceive(t, subC, []byte([]byte{0xba, 0x12})) + }) + + t.Run("won't talk to routers with no protocol overlap", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + + psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub") + psubB := mustCreatePubSub(ctx, t, hosts[1], "/lsr/floodsub") + + subA := mustSubscribe(t, psubA, []byte{0xf0, 0x0b, 0xa1, 0x20}) + defer subA.Cancel() + + subB := mustSubscribe(t, psubB, []byte{0xf0, 0x0b, 0xa1, 0x20}) + defer subB.Cancel() + + connect(t, hosts[1], hosts[0]) + + time.Sleep(time.Millisecond * 100) + + psubA.Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, []byte([]byte{0xba, 0x12})) + + assertReceive(t, subA, []byte([]byte{0xba, 0x12})) + + pass := false + select { + case <-subB.ch: + t.Fatal("different protocols: should not have received message") + case <-time.After(time.Second * 1): + pass = true + } + + if !pass { + t.Fatal("should have timed out waiting for message") + } + }) +} + +func mustCreatePubSub(ctx context.Context, t *testing.T, h host.Host, ps ...protocol.ID) *PubSub { + psub, err := NewFloodsubWithProtocols(ctx, h, ps) + if err != nil { + t.Fatal(err) + } + + return psub +} + +func mustSubscribe(t *testing.T, ps *PubSub, bitmask []byte) *Subscription { + sub, err := 
ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + return sub +} + +func TestSubReporting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + host := getNetHosts(t, ctx, 1)[0] + psub, err := NewFloodSub(ctx, host) + if err != nil { + t.Fatal(err) + } + + fooSub, err := psub.Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + + barSub, err := psub.Subscribe([]byte{0xba, 0x12}) + if err != nil { + t.Fatal(err) + } + + assertHasBitmasks(t, psub, string([]byte{0xf0, 0x00}), string([]byte{0xba, 0x12})) + + _, err = psub.Subscribe([]byte{0xba, 0x20}) + if err != nil { + t.Fatal(err) + } + + assertHasBitmasks(t, psub, string([]byte{0xf0, 0x00}), string([]byte{0xba, 0x12}), string([]byte{0xba, 0x20})) + + barSub.Cancel() + assertHasBitmasks(t, psub, string([]byte{0xf0, 0x00}), string([]byte{0xba, 0x20})) + fooSub.Cancel() + assertHasBitmasks(t, psub, string([]byte{0xba, 0x20})) + + _, err = psub.Subscribe([]byte{0xf1, 0x24}) + if err != nil { + t.Fatal(err) + } + + assertHasBitmasks(t, psub, string([]byte{0xba, 0x20}), string([]byte{0xf1, 0x24})) +} + +func TestPeerBitmaskReporting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 4) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[0], hosts[2]) + connect(t, hosts[0], hosts[3]) + + _, err := psubs[1].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + _, err = psubs[1].Subscribe([]byte{0xba, 0x12}) + if err != nil { + t.Fatal(err) + } + _, err = psubs[1].Subscribe([]byte{0xba, 0x20}) + if err != nil { + t.Fatal(err) + } + + _, err = psubs[2].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + _, err = psubs[2].Subscribe([]byte{0x01, 0xdf}) + if err != nil { + t.Fatal(err) + } + + _, err = psubs[3].Subscribe([]byte{0xba, 0x20}) + if err != nil { + t.Fatal(err) + } + _, err = psubs[3].Subscribe([]byte{0x01, 0xdf}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 200) + + peers := psubs[0].ListPeers([]byte{0x01, 0xdf}) + assertPeerList(t, peers, hosts[2].ID(), hosts[3].ID()) + + peers = psubs[0].ListPeers([]byte{0xf0, 0x00}) + assertPeerList(t, peers, hosts[1].ID(), hosts[2].ID()) + + peers = psubs[0].ListPeers([]byte{0xba, 0x20}) + assertPeerList(t, peers, hosts[1].ID(), hosts[3].ID()) + + peers = psubs[0].ListPeers([]byte{0xba, 0x12}) + assertPeerList(t, peers, hosts[1].ID()) +} + +func TestSubscribeMultipleTimes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + + sub1, err := psubs[0].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + sub2, err := psubs[0].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + + // make sure subscribing is finished by the time we publish + time.Sleep(10 * time.Millisecond) + + psubs[1].Publish([]byte{0xf0, 0x00}, []byte([]byte{0xba, 0x12})) + + msg, err := sub1.Next(ctx) + if err != nil { + t.Fatalf("unexpected error: %v.", err) + } + + data := msg.GetData() + + if !bytes.Equal(data, []byte{0xba, 0x12}) { + t.Fatalf("data is %s, expected %s.", data, []byte{0xba, 0x12}) + } + + msg, err = sub2.Next(ctx) + if err != nil { + t.Fatalf("unexpected error: %v.", err) + } + data = msg.GetData() + + if !bytes.Equal(data, []byte{0xba, 0x12}) { + t.Fatalf("data is %s, expected %s.", data, []byte{0xba, 0x12}) + } 
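+ + // Both subscriptions were made on the same bitmask by the same PubSub instance; each one independently received its own copy of the published message.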
+} + +func TestPeerDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + + _, err := psubs[0].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + + _, err = psubs[1].Subscribe([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 300) + + peers := psubs[0].ListPeers([]byte{0xf0, 0x00}) + assertPeerList(t, peers, hosts[1].ID()) + for _, c := range hosts[1].Network().ConnsToPeer(hosts[0].ID()) { + c.Close() + } + + time.Sleep(time.Millisecond * 300) + + peers = psubs[0].ListPeers([]byte{0xf0, 0x00}) + assertPeerList(t, peers) +} + +func assertPeerList(t *testing.T, peers []peer.ID, expected ...peer.ID) { + sort.Sort(peer.IDSlice(peers)) + sort.Sort(peer.IDSlice(expected)) + + if len(peers) != len(expected) { + t.Fatalf("mismatch: %s != %s", peers, expected) + } + + for i, p := range peers { + if expected[i] != p { + t.Fatalf("mismatch: %s != %s", peers, expected) + } + } +} + +func TestWithNoSigning(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts, WithNoAuthor(), WithMessageIdFn(func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + })) + + connect(t, hosts[0], hosts[1]) + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + data := []byte("this is a message") + + sub, err := psubs[1].Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + + err = psubs[0].Publish(bitmask, data) + if err != nil { + t.Fatal(err) + } + + msg, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + if msg.Signature != nil { + t.Fatal("signature in message") + } + if msg.From != nil { + t.Fatal("from in message") + } + if msg.Seqno != nil { + t.Fatal("seqno in message") + } + if string(msg.Data) != string(data) { + t.Fatalf("unexpected data: %s", string(msg.Data)) + } +} + +func TestWithSigning(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts, WithStrictSignatureVerification(true)) + + connect(t, hosts[0], hosts[1]) + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + data := []byte("this is a message") + + sub, err := psubs[1].Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + + err = psubs[0].Publish(bitmask, data) + if err != nil { + t.Fatal(err) + } + + msg, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + if msg.Signature == nil { + t.Fatal("no signature in message") + } + if msg.From == nil { + t.Fatal("from not in message") + } + if msg.Seqno == nil { + t.Fatal("seqno not in message") + } + if string(msg.Data) != string(data) { + t.Fatalf("unexpected data: %s", string(msg.Data)) + } +} + +func TestImproperlySignedMessageRejected(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + adversary := hosts[0] + honestPeer := hosts[1] + + // The adversary enables signing, but disables verification to let through + // an incorrectly signed message. 
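+ // The honest peer, by contrast, keeps strict verification enabled, so the forged message published below should be dropped by its validation pipeline and never reach its subscription.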
+ adversaryPubSub := getPubsub( + ctx, + adversary, + WithMessageSigning(true), + WithStrictSignatureVerification(false), + ) + honestPubSub := getPubsub( + ctx, + honestPeer, + WithStrictSignatureVerification(true), + ) + + connect(t, adversary, honestPeer) + + var ( + bitmask = []byte{0xf0, 0x0b, 0xa1, 0x20} + correctMessage = []byte("this is a correct message") + incorrectMessage = []byte("this is the incorrect message") + ) + + adversarySubscription, err := adversaryPubSub.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + honestPeerSubscription, err := honestPubSub.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + time.Sleep(time.Millisecond * 50) + + // First the adversary sends the correct message. + err = adversaryPubSub.Publish(bitmask, correctMessage) + if err != nil { + t.Fatal(err) + } + + // Change the sign key for the adversarial peer, and send the second, + // incorrectly signed, message. + adversaryPubSub.signID = honestPubSub.signID + adversaryPubSub.signKey = honestPubSub.host.Peerstore().PrivKey(honestPubSub.signID) + err = adversaryPubSub.Publish(bitmask, incorrectMessage) + if err != nil { + t.Fatal(err) + } + + var adversaryMessages []*Message + adversaryContext, adversaryCancel := context.WithCancel(ctx) + go func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + default: + msg, err := adversarySubscription.Next(ctx) + if err != nil { + return + } + adversaryMessages = append(adversaryMessages, msg) + } + } + }(adversaryContext) + + <-time.After(1 * time.Second) + adversaryCancel() + + // Ensure the adversary successfully publishes the incorrectly signed + // message. If the adversary "sees" this, we successfully got through + // their local validation. + if len(adversaryMessages) != 2 { + t.Fatalf("got %d messages, expected 2", len(adversaryMessages)) + } + + // the honest peer's validation process will drop the message; + // next will never furnish the incorrect message. 
+ var honestPeerMessages []*Message + honestPeerContext, honestPeerCancel := context.WithCancel(ctx) + go func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + default: + msg, err := honestPeerSubscription.Next(ctx) + if err != nil { + return + } + honestPeerMessages = append(honestPeerMessages, msg) + } + } + }(honestPeerContext) + + <-time.After(1 * time.Second) + honestPeerCancel() + + if len(honestPeerMessages) != 1 { + t.Fatalf("got %d messages, expected 1", len(honestPeerMessages)) + } + if string(honestPeerMessages[0].GetData()) != string(correctMessage) { + t.Fatalf( + "got %s, expected message %s", + honestPeerMessages[0].GetData(), + correctMessage, + ) + } +} + +func TestMessageSender(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + hosts := getNetHosts(t, ctx, 3) + psubs := getPubsubs(ctx, hosts) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 3; i++ { + for j := 0; j < 100; j++ { + msg := []byte(fmt.Sprintf("%d sent %d", i, j)) + + psubs[i].Publish(bitmask, msg) + + for k, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + + var expectedHost int + if i == k { + expectedHost = i + } else if k != 1 { + expectedHost = 1 + } else { + expectedHost = i + } + + if got.ReceivedFrom != hosts[expectedHost].ID() { + t.Fatal("got wrong message sender") + } + } + } + } +} + +func TestConfigurableMaxMessageSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + + // use a 4mb limit; default is 1mb; we'll test with a 2mb payload. + psubs := getPubsubs(ctx, hosts, WithMaxMessageSize(1<<22)) + + sparseConnect(t, hosts) + time.Sleep(time.Millisecond * 100) + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + var subs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + subs = append(subs, subch) + } + + // 2mb payload. + msg := make([]byte, 1<<21) + rand.Read(msg) + err := psubs[0].Publish(bitmask, msg) + if err != nil { + t.Fatal(err) + } + + // make sure that all peers received the message. 
+ for _, sub := range subs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + +} + +func TestAnnounceRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + ps := getPubsub(ctx, hosts[0]) + watcher := &announceWatcher{} + hosts[1].SetStreamHandler(FloodSubID, watcher.handleStream) + + _, err := ps.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + + // connect the watcher to the pubsub + connect(t, hosts[0], hosts[1]) + + // wait a bit for the first subscription to be emitted and trigger announce retry + time.Sleep(100 * time.Millisecond) + go ps.announceRetry(hosts[1].ID(), []byte{0x7e, 0x57}, true) + + // wait a bit for the subscription to propagate and ensure it was received twice + time.Sleep(time.Second + 100*time.Millisecond) + count := watcher.countSubs() + if count != 2 { + t.Fatalf("expected 2 subscription messages, but got %d", count) + } +} + +type announceWatcher struct { + mx sync.Mutex + subs int +} + +func (aw *announceWatcher) handleStream(s network.Stream) { + defer s.Close() + + r := protoio.NewDelimitedReader(s, 1<<20) + + var rpc pb.RPC + for { + rpc.Reset() + err := r.ReadMsg(&rpc) + if err != nil { + if err != io.EOF { + s.Reset() + } + return + } + + for _, sub := range rpc.GetSubscriptions() { + if sub.GetSubscribe() && bytes.Equal(sub.GetBitmask(), []byte{0x7e, 0x57}) { + aw.mx.Lock() + aw.subs++ + aw.mx.Unlock() + } + } + } +} + +func (aw *announceWatcher) countSubs() int { + aw.mx.Lock() + defer aw.mx.Unlock() + return aw.subs +} + +func TestPubsubWithAssortedOptions(t *testing.T) { + // this test uses assorted options that are not covered in other tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hashMsgID := func(m *pb.Message) string { + hash := sha256.Sum256(m.Data) + return string(hash[:]) + } + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts, + WithMessageIdFn(hashMsgID), + WithPeerOutboundQueueSize(10), + WithMessageAuthor(""), + WithBlacklist(NewMapBlacklist())) + + connect(t, hosts[0], hosts[1]) + + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + for i := 0; i < 2; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0x7e, 0x57}, msg) + + for _, sub := range subs { + assertReceive(t, sub, msg) + } + } +} + +func TestWithInvalidMessageAuthor(t *testing.T) { + // this test exercises the failure path in the WithMessageAuthor option + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h := bhost.NewBlankHost(swarmt.GenSwarm(t)) + defer h.Close() + _, err := NewFloodSub(ctx, h, WithMessageAuthor("bogotr0n")) + if err == nil { + t.Fatal("expected error") + } +} + +func TestPreconnectedNodes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // If this test fails it may hang so set a timeout + ctx, cancel = context.WithTimeout(ctx, time.Second*10) + defer cancel() + + // Create hosts + h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + defer h1.Close() + defer h2.Close() + + opts := []Option{WithDiscovery(&dummyDiscovery{})} + // Setup first PubSub + p1, err := NewFloodSub(ctx, h1, opts...) 
+ if err != nil { + t.Fatal(err) + } + + // Connect the two hosts together + connect(t, h2, h1) + + // Setup the second PubSub + p2, err := NewFloodSub(ctx, h2, opts...) + if err != nil { + t.Fatal(err) + } + + // See if it works + p2Bitmask, err := p2.Join([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + + p1Bitmask, err := p1.Join([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + + testPublish := func(publisher, receiver *Bitmask, msg []byte) { + receiverSub, err := receiver.Subscribe() + if err != nil { + t.Fatal(err) + } + + if err := publisher.Publish(ctx, msg, WithReadiness(MinBitmaskSize(1))); err != nil { + t.Fatal(err) + } + + m, err := receiverSub.Next(ctx) + if err != nil { + t.Fatal(err) + } + + if receivedData := m.GetData(); !bytes.Equal(receivedData, msg) { + t.Fatalf("expected message %v, got %v", msg, receivedData) + } + } + + // Test both directions since PubSub uses one directional streams + testPublish(p1Bitmask, p2Bitmask, []byte("test1-to-2")) + testPublish(p2Bitmask, p1Bitmask, []byte("test2-to-1")) +} + +func TestDedupInboundStreams(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + defer h1.Close() + defer h2.Close() + + _, err := NewFloodSub(ctx, h1) + if err != nil { + t.Fatal(err) + } + + // Connect the two hosts together + connect(t, h2, h1) + + // open a few streams and make sure all but the last one get reset + s1, err := h2.NewStream(ctx, h1.ID(), FloodSubID) + if err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + + s2, err := h2.NewStream(ctx, h1.ID(), FloodSubID) + if err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + + s3, err := h2.NewStream(ctx, h1.ID(), FloodSubID) + if err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + + // check that s1 and s2 have been reset + _, err = s1.Read([]byte{0}) + if err == nil { + t.Fatal("expected s1 to be reset") + } + + _, err = s2.Read([]byte{0}) + if err == nil { + t.Fatal("expected s2 to be reset") + } + + // check that s3 is readable and simply times out + s3.SetReadDeadline(time.Now().Add(time.Millisecond)) + _, err = s3.Read([]byte{0}) + err2, ok := err.(interface{ Timeout() bool }) + if !ok || !err2.Timeout() { + t.Fatal(err) + } +} diff --git a/go-libp2p-blossomsub/go.mod b/go-libp2p-blossomsub/go.mod new file mode 100644 index 0000000..aec4283 --- /dev/null +++ b/go-libp2p-blossomsub/go.mod @@ -0,0 +1,87 @@ +module source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub + +go 1.18 + +require ( + github.com/benbjohnson/clock v1.3.0 + github.com/gogo/protobuf v1.3.2 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/libp2p/go-buffer-pool v0.1.0 + github.com/libp2p/go-libp2p v0.25.0 + github.com/libp2p/go-libp2p-testing v0.12.0 + github.com/libp2p/go-msgio v0.3.0 + github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-varint v0.0.7 + google.golang.org/protobuf v1.28.1 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect +
github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect + github.com/ipfs/go-cid v0.3.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/klauspost/cpuid/v2 v2.2.1 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multicodec v0.7.0 // indirect + github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-multistream v0.4.0 // indirect + github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/quic-go/qtls-go1-18 v0.2.0 // indirect + github.com/quic-go/qtls-go1-19 v0.2.0 // indirect + github.com/quic-go/qtls-go1-20 v0.1.0 // indirect + github.com/quic-go/quic-go v0.32.0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/testify v1.8.1 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.4.0 // indirect + golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/tools v0.7.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) diff --git a/go-libp2p-blossomsub/go.sum b/go-libp2p-blossomsub/go.sum new file mode 100644 index 0000000..5dab271 --- /dev/null +++ b/go-libp2p-blossomsub/go.sum @@ -0,0 +1,443 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go 
v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= 
+github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= +github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod 
h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI= +github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.25.0 h1:ND6Hc6ZYCzC8S++C4mOD7LdPnLXRkNbr12/8FXgUfIo= +github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= +github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ= +github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multihash v0.0.8/go.mod 
h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.4.0 h1:5i4JbawClkbuaX+mIVXiHQYVPxUW+zjv6w7jtSRukxc= +github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= +github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U= +github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= +github.com/quic-go/qtls-go1-19 v0.2.0 
h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk= +github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV54oAI= +github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA= +github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= +github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= 
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= 
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.7.0 
h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/go-libp2p-blossomsub/gossip_tracer.go b/go-libp2p-blossomsub/gossip_tracer.go new file mode 100644 index 0000000..0c47fdf --- /dev/null +++ b/go-libp2p-blossomsub/gossip_tracer.go @@ -0,0 +1,200 @@ +package blossomsub + +import ( + "math/rand" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +// gossipTracer is an internal tracer that tracks IWANT requests in order to penalize +// peers who don't follow up on IWANT requests after an IHAVE advertisement. +// The tracking of promises is probabilistic to avoid using too much memory. +type gossipTracer struct { + sync.Mutex + + idGen *msgIDGenerator + + followUpTime time.Duration + + // promises for messages by message ID; for each message tracked, we track the promise + // expiration time for each peer. + promises map[string]map[peer.ID]time.Time + // promises for each peer; for each peer, we track the promised message IDs. + // this index allows us to quickly void promises when a peer is throttled. + peerPromises map[peer.ID]map[string]struct{} +} + +func newGossipTracer() *gossipTracer { + return &gossipTracer{ + idGen: newMsgIdGenerator(), + promises: make(map[string]map[peer.ID]time.Time), + peerPromises: make(map[peer.ID]map[string]struct{}), + } +} + +func (gt *gossipTracer) Start(gs *BlossomSubRouter) { + if gt == nil { + return + } + + gt.idGen = gs.p.idGen + gt.followUpTime = gs.params.IWantFollowupTime +} + +// track a promise to deliver a message from a list of msgIDs we are requesting +func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) { + if gt == nil { + return + } + + idx := rand.Intn(len(msgIDs)) + mid := msgIDs[idx] + + gt.Lock() + defer gt.Unlock() + + promises, ok := gt.promises[mid] + if !ok { + promises = make(map[peer.ID]time.Time) + gt.promises[mid] = promises + } + + _, ok = promises[p] + if !ok { + promises[p] = time.Now().Add(gt.followUpTime) + peerPromises, ok := gt.peerPromises[p] + if !ok { + peerPromises = make(map[string]struct{}) + gt.peerPromises[p] = peerPromises + } + peerPromises[mid] = struct{}{} + } +} + +// returns the number of broken promises for each peer who didn't follow up +// on an IWANT request. 
+func (gt *gossipTracer) GetBrokenPromises() map[peer.ID]int {
+	if gt == nil {
+		return nil
+	}
+
+	gt.Lock()
+	defer gt.Unlock()
+
+	var res map[peer.ID]int
+	now := time.Now()
+
+	// find broken promises from peers
+	for mid, promises := range gt.promises {
+		for p, expire := range promises {
+			if expire.Before(now) {
+				if res == nil {
+					res = make(map[peer.ID]int)
+				}
+				res[p]++
+
+				delete(promises, p)
+
+				peerPromises := gt.peerPromises[p]
+				delete(peerPromises, mid)
+				if len(peerPromises) == 0 {
+					delete(gt.peerPromises, p)
+				}
+			}
+		}
+
+		if len(promises) == 0 {
+			delete(gt.promises, mid)
+		}
+	}
+
+	return res
+}
+
+var _ RawTracer = (*gossipTracer)(nil)
+
+func (gt *gossipTracer) fulfillPromise(msg *Message) {
+	mid := gt.idGen.ID(msg)
+
+	gt.Lock()
+	defer gt.Unlock()
+
+	promises, ok := gt.promises[mid]
+	if !ok {
+		return
+	}
+	delete(gt.promises, mid)
+
+	// delete the promise for all peers that promised it, as they have no way to fulfill it.
+	for p := range promises {
+		peerPromises, ok := gt.peerPromises[p]
+		if ok {
+			delete(peerPromises, mid)
+			if len(peerPromises) == 0 {
+				delete(gt.peerPromises, p)
+			}
+		}
+	}
+}
+
+func (gt *gossipTracer) DeliverMessage(msg *Message) {
+	// someone delivered a message, fulfill promises for it
+	gt.fulfillPromise(msg)
+}
+
+func (gt *gossipTracer) RejectMessage(msg *Message, reason string) {
+	// A message got rejected, so we can fulfill promises and let the score penalty apply
+	// from invalid message delivery.
+	// We do take exception and apply promise penalty regardless in the following cases, where
+	// the peer delivered an obviously invalid message.
+	switch reason {
+	case RejectMissingSignature:
+		return
+	case RejectInvalidSignature:
+		return
+	}
+
+	gt.fulfillPromise(msg)
+}
+
+func (gt *gossipTracer) ValidateMessage(msg *Message) {
+	// we consider the promise fulfilled as soon as the message begins validation;
+	// if it were a signature issue it would have been rejected immediately
+	// without triggering the Validate trace
+	gt.fulfillPromise(msg)
+}
+
+func (gt *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {}
+func (gt *gossipTracer) RemovePeer(p peer.ID) {}
+func (gt *gossipTracer) Join(bitmask []byte) {}
+func (gt *gossipTracer) Leave(bitmask []byte) {}
+func (gt *gossipTracer) Graft(p peer.ID, bitmask []byte) {}
+func (gt *gossipTracer) Prune(p peer.ID, bitmask []byte) {}
+func (gt *gossipTracer) DuplicateMessage(msg *Message) {}
+func (gt *gossipTracer) RecvRPC(rpc *RPC) {}
+func (gt *gossipTracer) SendRPC(rpc *RPC, p peer.ID) {}
+func (gt *gossipTracer) DropRPC(rpc *RPC, p peer.ID) {}
+func (gt *gossipTracer) UndeliverableMessage(msg *Message) {}
+
+func (gt *gossipTracer) ThrottlePeer(p peer.ID) {
+	gt.Lock()
+	defer gt.Unlock()
+
+	peerPromises, ok := gt.peerPromises[p]
+	if !ok {
+		return
+	}
+
+	for mid := range peerPromises {
+		promises := gt.promises[mid]
+		delete(promises, p)
+		if len(promises) == 0 {
+			delete(gt.promises, mid)
+		}
+	}
+
+	delete(gt.peerPromises, p)
+}
diff --git a/go-libp2p-blossomsub/gossip_tracer_test.go b/go-libp2p-blossomsub/gossip_tracer_test.go
new file mode 100644
index 0000000..1fe62fe
--- /dev/null
+++ b/go-libp2p-blossomsub/gossip_tracer_test.go
@@ -0,0 +1,103 @@
+package blossomsub
+
+import (
+	"testing"
+	"time"
+
+	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+func TestBrokenPromises(t *testing.T) {
+	// tests that unfulfilled promises are tracked correctly
+	gt := newGossipTracer()
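+	// use a short follow-up window so promises expire quickly in this test;
+	// in production Start() sets followUpTime from the router's IWantFollowupTime parameter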
+	gt.followUpTime = 100 * time.Millisecond
+
+	peerA := peer.ID("A")
+	peerB := peer.ID("B")
+	peerC := peer.ID("C")
+
+	var mids []string
+	for i := 0; i < 100; i++ {
+		m := makeTestMessage(i)
+		m.From = []byte(peerA)
+		mid := DefaultMsgIdFn(m)
+		mids = append(mids, mid)
+	}
+
+	gt.AddPromise(peerA, mids)
+	gt.AddPromise(peerB, mids)
+	gt.AddPromise(peerC, mids)
+
+	// no broken promises yet
+	brokenPromises := gt.GetBrokenPromises()
+	if brokenPromises != nil {
+		t.Fatal("expected no broken promises")
+	}
+
+	// throttle one of the peers to save its promises
+	gt.ThrottlePeer(peerC)
+
+	// make promises break
+	time.Sleep(gt.followUpTime + time.Millisecond)
+
+	brokenPromises = gt.GetBrokenPromises()
+	if len(brokenPromises) != 2 {
+		t.Fatalf("expected 2 broken promises, got %d", len(brokenPromises))
+	}
+
+	brokenPromisesA := brokenPromises[peerA]
+	if brokenPromisesA != 1 {
+		t.Fatalf("expected 1 broken promise from A, got %d", brokenPromisesA)
+	}
+
+	brokenPromisesB := brokenPromises[peerB]
+	if brokenPromisesB != 1 {
+		t.Fatalf("expected 1 broken promise from B, got %d", brokenPromisesB)
+	}
+
+	// verify that the peerPromises map has been vacated
+	if len(gt.peerPromises) != 0 {
+		t.Fatal("expected empty peerPromises map")
+	}
+}
+
+func TestNoBrokenPromises(t *testing.T) {
+	// like above, but this time we deliver messages to fulfill the promises
+	gt := newGossipTracer()
+	gt.followUpTime = 100 * time.Millisecond
+
+	peerA := peer.ID("A")
+	peerB := peer.ID("B")
+
+	var msgs []*pb.Message
+	var mids []string
+	for i := 0; i < 100; i++ {
+		m := makeTestMessage(i)
+		m.From = []byte(peerA)
+		msgs = append(msgs, m)
+		mid := DefaultMsgIdFn(m)
+		mids = append(mids, mid)
+	}
+
+	gt.AddPromise(peerA, mids)
+	gt.AddPromise(peerB, mids)
+
+	for _, m := range msgs {
+		gt.DeliverMessage(&Message{Message: m})
+	}
+
+	time.Sleep(gt.followUpTime + time.Millisecond)
+
+	// there should be no broken promises
+	brokenPromises := gt.GetBrokenPromises()
+	if brokenPromises != nil {
+		t.Fatal("expected no broken promises")
+	}
+
+	// verify that the peerPromises map has been vacated
+	if len(gt.peerPromises) != 0 {
+		t.Fatal("expected empty peerPromises map")
+	}
+}
diff --git a/go-libp2p-blossomsub/mcache.go b/go-libp2p-blossomsub/mcache.go
new file mode 100644
index 0000000..d4a7479
--- /dev/null
+++ b/go-libp2p-blossomsub/mcache.go
@@ -0,0 +1,105 @@
+package blossomsub
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// NewMessageCache creates a sliding window cache that remembers messages for as
+// long as `history` slots.
+//
+// When queried for messages to advertise, the cache only returns messages in
+// the last `gossip` slots.
+//
+// The `gossip` parameter must be smaller than or equal to `history`, or this
+// function will panic.
+//
+// The slack between `gossip` and `history` accounts for the reaction time
+// between when a message is advertised via IHAVE gossip, and the peer pulls it
+// via an IWANT command.
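+//
+// For example, NewMessageCache(3, 5) keeps a message retrievable via Get for
+// five Shift windows, while GetGossipIDs only advertises IDs from the three
+// most recent windows.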
+func NewMessageCache(gossip, history int) *MessageCache { + if gossip > history { + err := fmt.Errorf("invalid parameters for message cache; gossip slots (%d) cannot be larger than history slots (%d)", + gossip, history) + panic(err) + } + return &MessageCache{ + msgs: make(map[string]*Message), + peertx: make(map[string]map[peer.ID]int), + history: make([][]CacheEntry, history), + gossip: gossip, + msgID: func(msg *Message) string { + return DefaultMsgIdFn(msg.Message) + }, + } +} + +type MessageCache struct { + msgs map[string]*Message + peertx map[string]map[peer.ID]int + history [][]CacheEntry + gossip int + msgID func(*Message) string +} + +func (mc *MessageCache) SetMsgIdFn(msgID func(*Message) string) { + mc.msgID = msgID +} + +type CacheEntry struct { + mid string + bitmask []byte +} + +func (mc *MessageCache) Put(msg *Message) { + mid := mc.msgID(msg) + mc.msgs[mid] = msg + mc.history[0] = append(mc.history[0], CacheEntry{mid: mid, bitmask: msg.GetBitmask()}) +} + +func (mc *MessageCache) Get(mid string) (*Message, bool) { + m, ok := mc.msgs[mid] + return m, ok +} + +func (mc *MessageCache) GetForPeer(mid string, p peer.ID) (*Message, int, bool) { + m, ok := mc.msgs[mid] + if !ok { + return nil, 0, false + } + + tx, ok := mc.peertx[mid] + if !ok { + tx = make(map[peer.ID]int) + mc.peertx[mid] = tx + } + tx[p]++ + + return m, tx[p], true +} + +func (mc *MessageCache) GetGossipIDs(bitmask []byte) []string { + var mids []string + for _, entries := range mc.history[:mc.gossip] { + for _, entry := range entries { + if bytes.Equal(entry.bitmask, bitmask) { + mids = append(mids, entry.mid) + } + } + } + return mids +} + +func (mc *MessageCache) Shift() { + last := mc.history[len(mc.history)-1] + for _, entry := range last { + delete(mc.msgs, entry.mid) + delete(mc.peertx, entry.mid) + } + for i := len(mc.history) - 2; i >= 0; i-- { + mc.history[i+1] = mc.history[i] + } + mc.history[0] = nil +} diff --git a/go-libp2p-blossomsub/mcache_test.go b/go-libp2p-blossomsub/mcache_test.go new file mode 100644 index 0000000..9aae23a --- /dev/null +++ b/go-libp2p-blossomsub/mcache_test.go @@ -0,0 +1,167 @@ +package blossomsub + +import ( + "encoding/binary" + "fmt" + "testing" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +func TestMessageCache(t *testing.T) { + mcache := NewMessageCache(3, 5) + msgID := DefaultMsgIdFn + + msgs := make([]*pb.Message, 60) + for i := range msgs { + msgs[i] = makeTestMessage(i) + } + + for i := 0; i < 10; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + for i := 0; i < 10; i++ { + mid := msgID(msgs[i]) + m, ok := mcache.Get(mid) + if !ok { + t.Fatalf("Message %d not in cache", i) + } + + if m.Message != msgs[i] { + t.Fatalf("Message %d does not match cache", i) + } + } + + gids := mcache.GetGossipIDs([]byte{0x7e, 0x57}) + if len(gids) != 10 { + t.Fatalf("Expected 10 gossip IDs; got %d", len(gids)) + } + + for i := 0; i < 10; i++ { + mid := msgID(msgs[i]) + if mid != gids[i] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + + mcache.Shift() + for i := 10; i < 20; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + for i := 0; i < 20; i++ { + mid := msgID(msgs[i]) + m, ok := mcache.Get(mid) + if !ok { + t.Fatalf("Message %d not in cache", i) + } + + if m.Message != msgs[i] { + t.Fatalf("Message %d does not match cache", i) + } + } + + gids = mcache.GetGossipIDs([]byte{0x7e, 0x57}) + if len(gids) != 20 { + t.Fatalf("Expected 20 gossip IDs; got %d", len(gids)) + } + + for i := 0; i < 10; i++ { + mid := msgID(msgs[i]) 
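+		// after the Shift, history[0] holds messages 10-19, so the IDs of the
+		// first ten messages follow them at offset 10 in the gossip ID list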
+ if mid != gids[10+i] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + + for i := 10; i < 20; i++ { + mid := msgID(msgs[i]) + if mid != gids[i-10] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + + mcache.Shift() + for i := 20; i < 30; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + mcache.Shift() + for i := 30; i < 40; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + mcache.Shift() + for i := 40; i < 50; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + mcache.Shift() + for i := 50; i < 60; i++ { + mcache.Put(&Message{Message: msgs[i]}) + } + + if len(mcache.msgs) != 50 { + t.Fatalf("Expected 50 messages in the cache; got %d", len(mcache.msgs)) + } + + for i := 0; i < 10; i++ { + mid := msgID(msgs[i]) + _, ok := mcache.Get(mid) + if ok { + t.Fatalf("Message %d still in cache", i) + } + } + + for i := 10; i < 60; i++ { + mid := msgID(msgs[i]) + m, ok := mcache.Get(mid) + if !ok { + t.Fatalf("Message %d not in cache", i) + } + + if m.Message != msgs[i] { + t.Fatalf("Message %d does not match cache", i) + } + } + + gids = mcache.GetGossipIDs([]byte{0x7e, 0x57}) + if len(gids) != 30 { + t.Fatalf("Expected 30 gossip IDs; got %d", len(gids)) + } + + for i := 0; i < 10; i++ { + mid := msgID(msgs[50+i]) + if mid != gids[i] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + + for i := 10; i < 20; i++ { + mid := msgID(msgs[30+i]) + if mid != gids[i] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + + for i := 20; i < 30; i++ { + mid := msgID(msgs[10+i]) + if mid != gids[i] { + t.Fatalf("GossipID mismatch for message %d", i) + } + } + +} + +func makeTestMessage(n int) *pb.Message { + seqno := make([]byte, 8) + binary.BigEndian.PutUint64(seqno, uint64(n)) + data := []byte(fmt.Sprintf("%d", n)) + bitmask := []byte{0x7e, 0x57} + return &pb.Message{ + Data: data, + Bitmask: bitmask, + From: []byte([]byte{0x7e, 0x57}), + Seqno: seqno, + } +} diff --git a/go-libp2p-blossomsub/midgen.go b/go-libp2p-blossomsub/midgen.go new file mode 100644 index 0000000..4510669 --- /dev/null +++ b/go-libp2p-blossomsub/midgen.go @@ -0,0 +1,52 @@ +package blossomsub + +import ( + "sync" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +// msgIDGenerator handles computing IDs for msgs +// It allows setting custom generators(MsgIdFunction) per bitmask +type msgIDGenerator struct { + Default MsgIdFunction + + bitmaskGensLk sync.RWMutex + bitmaskGens map[string]MsgIdFunction +} + +func newMsgIdGenerator() *msgIDGenerator { + return &msgIDGenerator{ + Default: DefaultMsgIdFn, + bitmaskGens: make(map[string]MsgIdFunction), + } +} + +// Set sets custom id generator(MsgIdFunction) for bitmask. +func (m *msgIDGenerator) Set(bitmask []byte, gen MsgIdFunction) { + m.bitmaskGensLk.Lock() + m.bitmaskGens[string(bitmask)] = gen + m.bitmaskGensLk.Unlock() +} + +// ID computes ID for the msg or short-circuits with the cached value. +func (m *msgIDGenerator) ID(msg *Message) string { + if msg.ID != "" { + return msg.ID + } + + msg.ID = m.RawID(msg.Message) + return msg.ID +} + +// RawID computes ID for the proto 'msg'. 
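+// It uses the MsgIdFunction registered for the message's bitmask via Set,
+// falling back to Default when no per-bitmask generator has been set.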
+func (m *msgIDGenerator) RawID(msg *pb.Message) string { + m.bitmaskGensLk.RLock() + gen, ok := m.bitmaskGens[string(msg.GetBitmask())] + m.bitmaskGensLk.RUnlock() + if !ok { + gen = m.Default + } + + return gen(msg) +} diff --git a/go-libp2p-blossomsub/notify.go b/go-libp2p-blossomsub/notify.go new file mode 100644 index 0000000..4d9ef0f --- /dev/null +++ b/go-libp2p-blossomsub/notify.go @@ -0,0 +1,75 @@ +package blossomsub + +import ( + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +var _ network.Notifiee = (*PubSubNotif)(nil) + +type PubSubNotif PubSub + +func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) { +} + +func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) { +} + +func (p *PubSubNotif) Connected(n network.Network, c network.Conn) { + // ignore transient connections + if c.Stat().Transient { + return + } + + go func() { + p.newPeersPrioLk.RLock() + p.newPeersMx.Lock() + p.newPeersPend[c.RemotePeer()] = struct{}{} + p.newPeersMx.Unlock() + p.newPeersPrioLk.RUnlock() + + select { + case p.newPeers <- struct{}{}: + default: + } + }() +} + +func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) { +} + +func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) { +} + +func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) { +} + +func (p *PubSubNotif) Initialize() { + isTransient := func(pid peer.ID) bool { + for _, c := range p.host.Network().ConnsToPeer(pid) { + if !c.Stat().Transient { + return false + } + } + + return true + } + + p.newPeersPrioLk.RLock() + p.newPeersMx.Lock() + for _, pid := range p.host.Network().Peers() { + if isTransient(pid) { + continue + } + + p.newPeersPend[pid] = struct{}{} + } + p.newPeersMx.Unlock() + p.newPeersPrioLk.RUnlock() + + select { + case p.newPeers <- struct{}{}: + default: + } +} diff --git a/go-libp2p-blossomsub/pb/Makefile b/go-libp2p-blossomsub/pb/Makefile new file mode 100644 index 0000000..b2b90ab --- /dev/null +++ b/go-libp2p-blossomsub/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --go_out=paths=source_relative:. 
$< + +clean: + rm -f *.pb.go + rm -f *.go \ No newline at end of file diff --git a/go-libp2p-blossomsub/pb/extensions.go b/go-libp2p-blossomsub/pb/extensions.go new file mode 100644 index 0000000..add1086 --- /dev/null +++ b/go-libp2p-blossomsub/pb/extensions.go @@ -0,0 +1,57 @@ +package pb + +import "google.golang.org/protobuf/proto" + +func (r *RPC) Size() int { + return proto.Size(r) +} + +func (r *RPC_SubOpts) Size() int { + return proto.Size(r) +} + +func (i *ControlGraft) Size() int { + return proto.Size(i) +} + +func (i *ControlIHave) Size() int { + return proto.Size(i) +} + +func (i *ControlIWant) Size() int { + return proto.Size(i) +} + +func (i *ControlMessage) Size() int { + return proto.Size(i) +} + +func (i *ControlPrune) Size() int { + return proto.Size(i) +} + +func (m *Message) Size() int { + return proto.Size(m) +} + +func (c *ControlMessage) Marshal() ([]byte, error) { + return proto.Marshal(c) +} + +func (r *RPC) MarshalTo(buf []byte) (int, error) { + data, err := proto.Marshal(r) + if err != nil { + return 0, err + } + + n := copy(buf, data) + return n, nil +} + +func (r *RPC) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, r) +} + +func (m *Message) Marshal() ([]byte, error) { + return proto.Marshal(m) +} diff --git a/go-libp2p-blossomsub/pb/rpc.pb.go b/go-libp2p-blossomsub/pb/rpc.pb.go new file mode 100644 index 0000000..3adad29 --- /dev/null +++ b/go-libp2p-blossomsub/pb/rpc.pb.go @@ -0,0 +1,818 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: rpc.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subscriptions []*RPC_SubOpts `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + Publish []*Message `protobuf:"bytes,2,rep,name=publish,proto3" json:"publish,omitempty"` + Control *ControlMessage `protobuf:"bytes,3,opt,name=control,proto3" json:"control,omitempty"` +} + +func (x *RPC) Reset() { + *x = RPC{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RPC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RPC) ProtoMessage() {} + +func (x *RPC) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RPC.ProtoReflect.Descriptor instead. 
+func (*RPC) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{0} +} + +func (x *RPC) GetSubscriptions() []*RPC_SubOpts { + if x != nil { + return x.Subscriptions + } + return nil +} + +func (x *RPC) GetPublish() []*Message { + if x != nil { + return x.Publish + } + return nil +} + +func (x *RPC) GetControl() *ControlMessage { + if x != nil { + return x.Control + } + return nil +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + From []byte `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Seqno []byte `protobuf:"bytes,3,opt,name=seqno,proto3" json:"seqno,omitempty"` + Bitmask []byte `protobuf:"bytes,4,opt,name=bitmask,proto3" json:"bitmask,omitempty"` + Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{1} +} + +func (x *Message) GetFrom() []byte { + if x != nil { + return x.From + } + return nil +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *Message) GetSeqno() []byte { + if x != nil { + return x.Seqno + } + return nil +} + +func (x *Message) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *Message) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +type ControlMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave,proto3" json:"ihave,omitempty"` + Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant,proto3" json:"iwant,omitempty"` + Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft,proto3" json:"graft,omitempty"` + Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune,proto3" json:"prune,omitempty"` +} + +func (x *ControlMessage) Reset() { + *x = ControlMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlMessage) ProtoMessage() {} + +func (x *ControlMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use ControlMessage.ProtoReflect.Descriptor instead. +func (*ControlMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{2} +} + +func (x *ControlMessage) GetIhave() []*ControlIHave { + if x != nil { + return x.Ihave + } + return nil +} + +func (x *ControlMessage) GetIwant() []*ControlIWant { + if x != nil { + return x.Iwant + } + return nil +} + +func (x *ControlMessage) GetGraft() []*ControlGraft { + if x != nil { + return x.Graft + } + return nil +} + +func (x *ControlMessage) GetPrune() []*ControlPrune { + if x != nil { + return x.Prune + } + return nil +} + +type ControlIHave struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"` + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + MessageIDs []string `protobuf:"bytes,2,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"` +} + +func (x *ControlIHave) Reset() { + *x = ControlIHave{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlIHave) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlIHave) ProtoMessage() {} + +func (x *ControlIHave) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlIHave.ProtoReflect.Descriptor instead. +func (*ControlIHave) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{3} +} + +func (x *ControlIHave) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *ControlIHave) GetMessageIDs() []string { + if x != nil { + return x.MessageIDs + } + return nil +} + +type ControlIWant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"` +} + +func (x *ControlIWant) Reset() { + *x = ControlIWant{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlIWant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlIWant) ProtoMessage() {} + +func (x *ControlIWant) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlIWant.ProtoReflect.Descriptor instead. 
+func (*ControlIWant) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{4} +} + +func (x *ControlIWant) GetMessageIDs() []string { + if x != nil { + return x.MessageIDs + } + return nil +} + +type ControlGraft struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"` +} + +func (x *ControlGraft) Reset() { + *x = ControlGraft{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlGraft) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlGraft) ProtoMessage() {} + +func (x *ControlGraft) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlGraft.ProtoReflect.Descriptor instead. +func (*ControlGraft) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{5} +} + +func (x *ControlGraft) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type ControlPrune struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"` + Peers []*PeerInfo `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"` + Backoff uint64 `protobuf:"varint,3,opt,name=backoff,proto3" json:"backoff,omitempty"` +} + +func (x *ControlPrune) Reset() { + *x = ControlPrune{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlPrune) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlPrune) ProtoMessage() {} + +func (x *ControlPrune) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlPrune.ProtoReflect.Descriptor instead. 
+func (*ControlPrune) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{6} +} + +func (x *ControlPrune) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *ControlPrune) GetPeers() []*PeerInfo { + if x != nil { + return x.Peers + } + return nil +} + +func (x *ControlPrune) GetBackoff() uint64 { + if x != nil { + return x.Backoff + } + return 0 +} + +type PeerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` + SignedPeerRecord []byte `protobuf:"bytes,2,opt,name=signedPeerRecord,proto3,oneof" json:"signedPeerRecord,omitempty"` +} + +func (x *PeerInfo) Reset() { + *x = PeerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerInfo) ProtoMessage() {} + +func (x *PeerInfo) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerInfo.ProtoReflect.Descriptor instead. +func (*PeerInfo) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{7} +} + +func (x *PeerInfo) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +func (x *PeerInfo) GetSignedPeerRecord() []byte { + if x != nil { + return x.SignedPeerRecord + } + return nil +} + +type RPC_SubOpts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subscribe bool `protobuf:"varint,1,opt,name=subscribe,proto3" json:"subscribe,omitempty"` // subscribe or unsubscribe + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3" json:"bitmask,omitempty"` +} + +func (x *RPC_SubOpts) Reset() { + *x = RPC_SubOpts{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RPC_SubOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RPC_SubOpts) ProtoMessage() {} + +func (x *RPC_SubOpts) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RPC_SubOpts.ProtoReflect.Descriptor instead.
+func (*RPC_SubOpts) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RPC_SubOpts) GetSubscribe() bool { + if x != nil { + return x.Subscribe + } + return false +} + +func (x *RPC_SubOpts) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +var File_rpc_proto protoreflect.FileDescriptor + +var file_rpc_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x62, 0x6c, 0x6f, + 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x22, 0xf5, 0x01, 0x0a, 0x03, 0x52, + 0x50, 0x43, 0x12, 0x40, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, + 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x50, 0x43, 0x2e, 0x53, 0x75, + 0x62, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, + 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, + 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, + 0x41, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, + 0x73, 0x6b, 0x22, 0x91, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x66, 0x72, + 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f, 0x12, 0x18, 0x0a, 0x07, + 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, + 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x68, 0x61, + 0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, + 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x49, 0x48, 0x61, 0x76, 0x65, 0x52, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, 0x12, 0x31, 0x0a, 0x05, + 0x69, 0x77, 0x61, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c, + 0x6f, 0x73, 
0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x69, 0x77, 0x61, 0x6e, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x66, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61, + 0x66, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x52, 0x05, + 0x70, 0x72, 0x75, 0x6e, 0x65, 0x22, 0x48, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x49, 0x48, 0x61, 0x76, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x12, + 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22, + 0x2e, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x12, + 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22, + 0x28, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, + 0x61, 0x73, 0x6b, 0x12, 0x2d, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, + 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x70, 0x65, 0x65, + 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x78, 0x0a, 0x08, + 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, + 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x01, 0x52, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x44, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x42, 0x43, 0x5a, 0x41, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, + 0x65, 0x70, 0x6f, 0x2f, 0x67, 0x6f, 
0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2d, 0x62, 0x6c, + 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_rpc_proto_rawDescOnce sync.Once + file_rpc_proto_rawDescData = file_rpc_proto_rawDesc +) + +func file_rpc_proto_rawDescGZIP() []byte { + file_rpc_proto_rawDescOnce.Do(func() { + file_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpc_proto_rawDescData) + }) + return file_rpc_proto_rawDescData +} + +var file_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_rpc_proto_goTypes = []interface{}{ + (*RPC)(nil), // 0: blossomsub.pb.RPC + (*Message)(nil), // 1: blossomsub.pb.Message + (*ControlMessage)(nil), // 2: blossomsub.pb.ControlMessage + (*ControlIHave)(nil), // 3: blossomsub.pb.ControlIHave + (*ControlIWant)(nil), // 4: blossomsub.pb.ControlIWant + (*ControlGraft)(nil), // 5: blossomsub.pb.ControlGraft + (*ControlPrune)(nil), // 6: blossomsub.pb.ControlPrune + (*PeerInfo)(nil), // 7: blossomsub.pb.PeerInfo + (*RPC_SubOpts)(nil), // 8: blossomsub.pb.RPC.SubOpts +} +var file_rpc_proto_depIdxs = []int32{ + 8, // 0: blossomsub.pb.RPC.subscriptions:type_name -> blossomsub.pb.RPC.SubOpts + 1, // 1: blossomsub.pb.RPC.publish:type_name -> blossomsub.pb.Message + 2, // 2: blossomsub.pb.RPC.control:type_name -> blossomsub.pb.ControlMessage + 3, // 3: blossomsub.pb.ControlMessage.ihave:type_name -> blossomsub.pb.ControlIHave + 4, // 4: blossomsub.pb.ControlMessage.iwant:type_name -> blossomsub.pb.ControlIWant + 5, // 5: blossomsub.pb.ControlMessage.graft:type_name -> blossomsub.pb.ControlGraft + 6, // 6: blossomsub.pb.ControlMessage.prune:type_name -> blossomsub.pb.ControlPrune + 7, // 7: blossomsub.pb.ControlPrune.peers:type_name -> blossomsub.pb.PeerInfo + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_rpc_proto_init() } +func file_rpc_proto_init() { + if File_rpc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlIHave); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlIWant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlGraft); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlPrune); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RPC_SubOpts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_rpc_proto_msgTypes[7].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_rpc_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rpc_proto_goTypes, + DependencyIndexes: file_rpc_proto_depIdxs, + MessageInfos: file_rpc_proto_msgTypes, + }.Build() + File_rpc_proto = out.File + file_rpc_proto_rawDesc = nil + file_rpc_proto_goTypes = nil + file_rpc_proto_depIdxs = nil +} diff --git a/go-libp2p-blossomsub/pb/rpc.proto b/go-libp2p-blossomsub/pb/rpc.proto new file mode 100644 index 0000000..0de6ba1 --- /dev/null +++ b/go-libp2p-blossomsub/pb/rpc.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package blossomsub.pb; + +option go_package = "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"; + +message RPC { + repeated SubOpts subscriptions = 1; + repeated Message publish = 2; + + message SubOpts { + bool subscribe = 1; // subscribe or unsubscribe + bytes bitmask = 2; + } + + ControlMessage control = 3; +} + +message Message { + bytes from = 1; + bytes data = 2; + bytes seqno = 3; + bytes bitmask = 4; + bytes signature = 5; + bytes key = 6; +} + +message ControlMessage { + repeated ControlIHave ihave = 1; + repeated ControlIWant iwant = 2; + repeated ControlGraft graft = 3; + repeated ControlPrune prune = 4; +} + +message ControlIHave { + bytes bitmask = 1; + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + repeated string messageIDs = 2; +} + +message ControlIWant { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + repeated string messageIDs = 1; +} + +message ControlGraft { + bytes bitmask = 1; +} + +message ControlPrune { + bytes bitmask = 1; + repeated PeerInfo peers = 2; + uint64 backoff = 3; +} + +message PeerInfo { + optional bytes peerID = 1; + optional bytes signedPeerRecord = 2; +} \ No newline at end of file diff --git a/go-libp2p-blossomsub/pb/trace.pb.go b/go-libp2p-blossomsub/pb/trace.pb.go new file mode 100644 index 0000000..50f76a4 --- /dev/null +++ b/go-libp2p-blossomsub/pb/trace.pb.go @@ -0,0 +1,2146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: trace.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TraceEvent_Type int32 + +const ( + TraceEvent_PUBLISH_MESSAGE TraceEvent_Type = 0 + TraceEvent_REJECT_MESSAGE TraceEvent_Type = 1 + TraceEvent_DUPLICATE_MESSAGE TraceEvent_Type = 2 + TraceEvent_DELIVER_MESSAGE TraceEvent_Type = 3 + TraceEvent_ADD_PEER TraceEvent_Type = 4 + TraceEvent_REMOVE_PEER TraceEvent_Type = 5 + TraceEvent_RECV_RPC TraceEvent_Type = 6 + TraceEvent_SEND_RPC TraceEvent_Type = 7 + TraceEvent_DROP_RPC TraceEvent_Type = 8 + TraceEvent_JOIN TraceEvent_Type = 9 + TraceEvent_LEAVE TraceEvent_Type = 10 + TraceEvent_GRAFT TraceEvent_Type = 11 + TraceEvent_PRUNE TraceEvent_Type = 12 +) + +// Enum value maps for TraceEvent_Type. +var ( + TraceEvent_Type_name = map[int32]string{ + 0: "PUBLISH_MESSAGE", + 1: "REJECT_MESSAGE", + 2: "DUPLICATE_MESSAGE", + 3: "DELIVER_MESSAGE", + 4: "ADD_PEER", + 5: "REMOVE_PEER", + 6: "RECV_RPC", + 7: "SEND_RPC", + 8: "DROP_RPC", + 9: "JOIN", + 10: "LEAVE", + 11: "GRAFT", + 12: "PRUNE", + } + TraceEvent_Type_value = map[string]int32{ + "PUBLISH_MESSAGE": 0, + "REJECT_MESSAGE": 1, + "DUPLICATE_MESSAGE": 2, + "DELIVER_MESSAGE": 3, + "ADD_PEER": 4, + "REMOVE_PEER": 5, + "RECV_RPC": 6, + "SEND_RPC": 7, + "DROP_RPC": 8, + "JOIN": 9, + "LEAVE": 10, + "GRAFT": 11, + "PRUNE": 12, + } +) + +func (x TraceEvent_Type) Enum() *TraceEvent_Type { + p := new(TraceEvent_Type) + *p = x + return p +} + +func (x TraceEvent_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TraceEvent_Type) Descriptor() protoreflect.EnumDescriptor { + return file_trace_proto_enumTypes[0].Descriptor() +} + +func (TraceEvent_Type) Type() protoreflect.EnumType { + return &file_trace_proto_enumTypes[0] +} + +func (x TraceEvent_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TraceEvent_Type.Descriptor instead. 
+func (TraceEvent_Type) EnumDescriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 0} +} + +type TraceEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *TraceEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=blossomsub.pb.TraceEvent_Type,oneof" json:"type,omitempty"` + PeerID []byte `protobuf:"bytes,2,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` + Timestamp *int64 `protobuf:"varint,3,opt,name=timestamp,proto3,oneof" json:"timestamp,omitempty"` + PublishMessage *TraceEvent_PublishMessage `protobuf:"bytes,4,opt,name=publishMessage,proto3,oneof" json:"publishMessage,omitempty"` + RejectMessage *TraceEvent_RejectMessage `protobuf:"bytes,5,opt,name=rejectMessage,proto3,oneof" json:"rejectMessage,omitempty"` + DuplicateMessage *TraceEvent_DuplicateMessage `protobuf:"bytes,6,opt,name=duplicateMessage,proto3,oneof" json:"duplicateMessage,omitempty"` + DeliverMessage *TraceEvent_DeliverMessage `protobuf:"bytes,7,opt,name=deliverMessage,proto3,oneof" json:"deliverMessage,omitempty"` + AddPeer *TraceEvent_AddPeer `protobuf:"bytes,8,opt,name=addPeer,proto3,oneof" json:"addPeer,omitempty"` + RemovePeer *TraceEvent_RemovePeer `protobuf:"bytes,9,opt,name=removePeer,proto3,oneof" json:"removePeer,omitempty"` + RecvRPC *TraceEvent_RecvRPC `protobuf:"bytes,10,opt,name=recvRPC,proto3,oneof" json:"recvRPC,omitempty"` + SendRPC *TraceEvent_SendRPC `protobuf:"bytes,11,opt,name=sendRPC,proto3,oneof" json:"sendRPC,omitempty"` + DropRPC *TraceEvent_DropRPC `protobuf:"bytes,12,opt,name=dropRPC,proto3,oneof" json:"dropRPC,omitempty"` + Join *TraceEvent_Join `protobuf:"bytes,13,opt,name=join,proto3,oneof" json:"join,omitempty"` + Leave *TraceEvent_Leave `protobuf:"bytes,14,opt,name=leave,proto3,oneof" json:"leave,omitempty"` + Graft *TraceEvent_Graft `protobuf:"bytes,15,opt,name=graft,proto3,oneof" json:"graft,omitempty"` + Prune *TraceEvent_Prune `protobuf:"bytes,16,opt,name=prune,proto3,oneof" json:"prune,omitempty"` +} + +func (x *TraceEvent) Reset() { + *x = TraceEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent) ProtoMessage() {} + +func (x *TraceEvent) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent.ProtoReflect.Descriptor instead. 
+func (*TraceEvent) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TraceEvent) GetType() TraceEvent_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return TraceEvent_PUBLISH_MESSAGE +} + +func (x *TraceEvent) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +func (x *TraceEvent) GetTimestamp() int64 { + if x != nil && x.Timestamp != nil { + return *x.Timestamp + } + return 0 +} + +func (x *TraceEvent) GetPublishMessage() *TraceEvent_PublishMessage { + if x != nil { + return x.PublishMessage + } + return nil +} + +func (x *TraceEvent) GetRejectMessage() *TraceEvent_RejectMessage { + if x != nil { + return x.RejectMessage + } + return nil +} + +func (x *TraceEvent) GetDuplicateMessage() *TraceEvent_DuplicateMessage { + if x != nil { + return x.DuplicateMessage + } + return nil +} + +func (x *TraceEvent) GetDeliverMessage() *TraceEvent_DeliverMessage { + if x != nil { + return x.DeliverMessage + } + return nil +} + +func (x *TraceEvent) GetAddPeer() *TraceEvent_AddPeer { + if x != nil { + return x.AddPeer + } + return nil +} + +func (x *TraceEvent) GetRemovePeer() *TraceEvent_RemovePeer { + if x != nil { + return x.RemovePeer + } + return nil +} + +func (x *TraceEvent) GetRecvRPC() *TraceEvent_RecvRPC { + if x != nil { + return x.RecvRPC + } + return nil +} + +func (x *TraceEvent) GetSendRPC() *TraceEvent_SendRPC { + if x != nil { + return x.SendRPC + } + return nil +} + +func (x *TraceEvent) GetDropRPC() *TraceEvent_DropRPC { + if x != nil { + return x.DropRPC + } + return nil +} + +func (x *TraceEvent) GetJoin() *TraceEvent_Join { + if x != nil { + return x.Join + } + return nil +} + +func (x *TraceEvent) GetLeave() *TraceEvent_Leave { + if x != nil { + return x.Leave + } + return nil +} + +func (x *TraceEvent) GetGraft() *TraceEvent_Graft { + if x != nil { + return x.Graft + } + return nil +} + +func (x *TraceEvent) GetPrune() *TraceEvent_Prune { + if x != nil { + return x.Prune + } + return nil +} + +type TraceEventBatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Batch []*TraceEvent `protobuf:"bytes,1,rep,name=batch,proto3" json:"batch,omitempty"` +} + +func (x *TraceEventBatch) Reset() { + *x = TraceEventBatch{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEventBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEventBatch) ProtoMessage() {} + +func (x *TraceEventBatch) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEventBatch.ProtoReflect.Descriptor instead. 
+func (*TraceEventBatch) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{1} +} + +func (x *TraceEventBatch) GetBatch() []*TraceEvent { + if x != nil { + return x.Batch + } + return nil +} + +type TraceEvent_PublishMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_PublishMessage) Reset() { + *x = TraceEvent_PublishMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_PublishMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_PublishMessage) ProtoMessage() {} + +func (x *TraceEvent_PublishMessage) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_PublishMessage.ProtoReflect.Descriptor instead. +func (*TraceEvent_PublishMessage) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *TraceEvent_PublishMessage) GetMessageID() []byte { + if x != nil { + return x.MessageID + } + return nil +} + +func (x *TraceEvent_PublishMessage) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_RejectMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"` + ReceivedFrom []byte `protobuf:"bytes,2,opt,name=receivedFrom,proto3,oneof" json:"receivedFrom,omitempty"` + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + Bitmask []byte `protobuf:"bytes,4,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_RejectMessage) Reset() { + *x = TraceEvent_RejectMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_RejectMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_RejectMessage) ProtoMessage() {} + +func (x *TraceEvent_RejectMessage) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_RejectMessage.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_RejectMessage) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *TraceEvent_RejectMessage) GetMessageID() []byte { + if x != nil { + return x.MessageID + } + return nil +} + +func (x *TraceEvent_RejectMessage) GetReceivedFrom() []byte { + if x != nil { + return x.ReceivedFrom + } + return nil +} + +func (x *TraceEvent_RejectMessage) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *TraceEvent_RejectMessage) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_DuplicateMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"` + ReceivedFrom []byte `protobuf:"bytes,2,opt,name=receivedFrom,proto3,oneof" json:"receivedFrom,omitempty"` + Bitmask []byte `protobuf:"bytes,3,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_DuplicateMessage) Reset() { + *x = TraceEvent_DuplicateMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_DuplicateMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_DuplicateMessage) ProtoMessage() {} + +func (x *TraceEvent_DuplicateMessage) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_DuplicateMessage.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_DuplicateMessage) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *TraceEvent_DuplicateMessage) GetMessageID() []byte { + if x != nil { + return x.MessageID + } + return nil +} + +func (x *TraceEvent_DuplicateMessage) GetReceivedFrom() []byte { + if x != nil { + return x.ReceivedFrom + } + return nil +} + +func (x *TraceEvent_DuplicateMessage) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_DeliverMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` + ReceivedFrom []byte `protobuf:"bytes,3,opt,name=receivedFrom,proto3,oneof" json:"receivedFrom,omitempty"` +} + +func (x *TraceEvent_DeliverMessage) Reset() { + *x = TraceEvent_DeliverMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_DeliverMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_DeliverMessage) ProtoMessage() {} + +func (x *TraceEvent_DeliverMessage) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_DeliverMessage.ProtoReflect.Descriptor instead. +func (*TraceEvent_DeliverMessage) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *TraceEvent_DeliverMessage) GetMessageID() []byte { + if x != nil { + return x.MessageID + } + return nil +} + +func (x *TraceEvent_DeliverMessage) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *TraceEvent_DeliverMessage) GetReceivedFrom() []byte { + if x != nil { + return x.ReceivedFrom + } + return nil +} + +type TraceEvent_AddPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` + Proto *string `protobuf:"bytes,2,opt,name=proto,proto3,oneof" json:"proto,omitempty"` +} + +func (x *TraceEvent_AddPeer) Reset() { + *x = TraceEvent_AddPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_AddPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_AddPeer) ProtoMessage() {} + +func (x *TraceEvent_AddPeer) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_AddPeer.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_AddPeer) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *TraceEvent_AddPeer) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +func (x *TraceEvent_AddPeer) GetProto() string { + if x != nil && x.Proto != nil { + return *x.Proto + } + return "" +} + +type TraceEvent_RemovePeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` +} + +func (x *TraceEvent_RemovePeer) Reset() { + *x = TraceEvent_RemovePeer{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_RemovePeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_RemovePeer) ProtoMessage() {} + +func (x *TraceEvent_RemovePeer) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_RemovePeer.ProtoReflect.Descriptor instead. +func (*TraceEvent_RemovePeer) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *TraceEvent_RemovePeer) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +type TraceEvent_RecvRPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReceivedFrom []byte `protobuf:"bytes,1,opt,name=receivedFrom,proto3,oneof" json:"receivedFrom,omitempty"` + Meta *TraceEvent_RPCMeta `protobuf:"bytes,2,opt,name=meta,proto3,oneof" json:"meta,omitempty"` +} + +func (x *TraceEvent_RecvRPC) Reset() { + *x = TraceEvent_RecvRPC{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_RecvRPC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_RecvRPC) ProtoMessage() {} + +func (x *TraceEvent_RecvRPC) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_RecvRPC.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_RecvRPC) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *TraceEvent_RecvRPC) GetReceivedFrom() []byte { + if x != nil { + return x.ReceivedFrom + } + return nil +} + +func (x *TraceEvent_RecvRPC) GetMeta() *TraceEvent_RPCMeta { + if x != nil { + return x.Meta + } + return nil +} + +type TraceEvent_SendRPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SendTo []byte `protobuf:"bytes,1,opt,name=sendTo,proto3,oneof" json:"sendTo,omitempty"` + Meta *TraceEvent_RPCMeta `protobuf:"bytes,2,opt,name=meta,proto3,oneof" json:"meta,omitempty"` +} + +func (x *TraceEvent_SendRPC) Reset() { + *x = TraceEvent_SendRPC{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_SendRPC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_SendRPC) ProtoMessage() {} + +func (x *TraceEvent_SendRPC) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_SendRPC.ProtoReflect.Descriptor instead. +func (*TraceEvent_SendRPC) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *TraceEvent_SendRPC) GetSendTo() []byte { + if x != nil { + return x.SendTo + } + return nil +} + +func (x *TraceEvent_SendRPC) GetMeta() *TraceEvent_RPCMeta { + if x != nil { + return x.Meta + } + return nil +} + +type TraceEvent_DropRPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SendTo []byte `protobuf:"bytes,1,opt,name=sendTo,proto3,oneof" json:"sendTo,omitempty"` + Meta *TraceEvent_RPCMeta `protobuf:"bytes,2,opt,name=meta,proto3,oneof" json:"meta,omitempty"` +} + +func (x *TraceEvent_DropRPC) Reset() { + *x = TraceEvent_DropRPC{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_DropRPC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_DropRPC) ProtoMessage() {} + +func (x *TraceEvent_DropRPC) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_DropRPC.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_DropRPC) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *TraceEvent_DropRPC) GetSendTo() []byte { + if x != nil { + return x.SendTo + } + return nil +} + +func (x *TraceEvent_DropRPC) GetMeta() *TraceEvent_RPCMeta { + if x != nil { + return x.Meta + } + return nil +} + +type TraceEvent_Join struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_Join) Reset() { + *x = TraceEvent_Join{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_Join) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_Join) ProtoMessage() {} + +func (x *TraceEvent_Join) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_Join.ProtoReflect.Descriptor instead. +func (*TraceEvent_Join) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *TraceEvent_Join) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_Leave struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_Leave) Reset() { + *x = TraceEvent_Leave{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_Leave) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_Leave) ProtoMessage() {} + +func (x *TraceEvent_Leave) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_Leave.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_Leave) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *TraceEvent_Leave) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_Graft struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_Graft) Reset() { + *x = TraceEvent_Graft{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_Graft) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_Graft) ProtoMessage() {} + +func (x *TraceEvent_Graft) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_Graft.ProtoReflect.Descriptor instead. +func (*TraceEvent_Graft) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *TraceEvent_Graft) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +func (x *TraceEvent_Graft) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_Prune struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_Prune) Reset() { + *x = TraceEvent_Prune{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_Prune) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_Prune) ProtoMessage() {} + +func (x *TraceEvent_Prune) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_Prune.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_Prune) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 12} +} + +func (x *TraceEvent_Prune) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil +} + +func (x *TraceEvent_Prune) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_RPCMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Messages []*TraceEvent_MessageMeta `protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"` + Subscription []*TraceEvent_SubMeta `protobuf:"bytes,2,rep,name=subscription,proto3" json:"subscription,omitempty"` + Control *TraceEvent_ControlMeta `protobuf:"bytes,3,opt,name=control,proto3,oneof" json:"control,omitempty"` +} + +func (x *TraceEvent_RPCMeta) Reset() { + *x = TraceEvent_RPCMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_RPCMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_RPCMeta) ProtoMessage() {} + +func (x *TraceEvent_RPCMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_RPCMeta.ProtoReflect.Descriptor instead. +func (*TraceEvent_RPCMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 13} +} + +func (x *TraceEvent_RPCMeta) GetMessages() []*TraceEvent_MessageMeta { + if x != nil { + return x.Messages + } + return nil +} + +func (x *TraceEvent_RPCMeta) GetSubscription() []*TraceEvent_SubMeta { + if x != nil { + return x.Subscription + } + return nil +} + +func (x *TraceEvent_RPCMeta) GetControl() *TraceEvent_ControlMeta { + if x != nil { + return x.Control + } + return nil +} + +type TraceEvent_MessageMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_MessageMeta) Reset() { + *x = TraceEvent_MessageMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_MessageMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_MessageMeta) ProtoMessage() {} + +func (x *TraceEvent_MessageMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_MessageMeta.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_MessageMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 14} +} + +func (x *TraceEvent_MessageMeta) GetMessageID() []byte { + if x != nil { + return x.MessageID + } + return nil +} + +func (x *TraceEvent_MessageMeta) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_SubMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Subscribe *bool `protobuf:"varint,1,opt,name=subscribe,proto3,oneof" json:"subscribe,omitempty"` + Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_SubMeta) Reset() { + *x = TraceEvent_SubMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_SubMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_SubMeta) ProtoMessage() {} + +func (x *TraceEvent_SubMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_SubMeta.ProtoReflect.Descriptor instead. +func (*TraceEvent_SubMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 15} +} + +func (x *TraceEvent_SubMeta) GetSubscribe() bool { + if x != nil && x.Subscribe != nil { + return *x.Subscribe + } + return false +} + +func (x *TraceEvent_SubMeta) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_ControlMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave,proto3" json:"ihave,omitempty"` + Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant,proto3" json:"iwant,omitempty"` + Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft,proto3" json:"graft,omitempty"` + Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune,proto3" json:"prune,omitempty"` +} + +func (x *TraceEvent_ControlMeta) Reset() { + *x = TraceEvent_ControlMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_ControlMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_ControlMeta) ProtoMessage() {} + +func (x *TraceEvent_ControlMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_ControlMeta.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_ControlMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 16} +} + +func (x *TraceEvent_ControlMeta) GetIhave() []*TraceEvent_ControlIHaveMeta { + if x != nil { + return x.Ihave + } + return nil +} + +func (x *TraceEvent_ControlMeta) GetIwant() []*TraceEvent_ControlIWantMeta { + if x != nil { + return x.Iwant + } + return nil +} + +func (x *TraceEvent_ControlMeta) GetGraft() []*TraceEvent_ControlGraftMeta { + if x != nil { + return x.Graft + } + return nil +} + +func (x *TraceEvent_ControlMeta) GetPrune() []*TraceEvent_ControlPruneMeta { + if x != nil { + return x.Prune + } + return nil +} + +type TraceEvent_ControlIHaveMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` + MessageIDs [][]byte `protobuf:"bytes,2,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"` +} + +func (x *TraceEvent_ControlIHaveMeta) Reset() { + *x = TraceEvent_ControlIHaveMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_ControlIHaveMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_ControlIHaveMeta) ProtoMessage() {} + +func (x *TraceEvent_ControlIHaveMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_ControlIHaveMeta.ProtoReflect.Descriptor instead. +func (*TraceEvent_ControlIHaveMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 17} +} + +func (x *TraceEvent_ControlIHaveMeta) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *TraceEvent_ControlIHaveMeta) GetMessageIDs() [][]byte { + if x != nil { + return x.MessageIDs + } + return nil +} + +type TraceEvent_ControlIWantMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageIDs [][]byte `protobuf:"bytes,1,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"` +} + +func (x *TraceEvent_ControlIWantMeta) Reset() { + *x = TraceEvent_ControlIWantMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_ControlIWantMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_ControlIWantMeta) ProtoMessage() {} + +func (x *TraceEvent_ControlIWantMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_ControlIWantMeta.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_ControlIWantMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 18} +} + +func (x *TraceEvent_ControlIWantMeta) GetMessageIDs() [][]byte { + if x != nil { + return x.MessageIDs + } + return nil +} + +type TraceEvent_ControlGraftMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` +} + +func (x *TraceEvent_ControlGraftMeta) Reset() { + *x = TraceEvent_ControlGraftMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_ControlGraftMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_ControlGraftMeta) ProtoMessage() {} + +func (x *TraceEvent_ControlGraftMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_ControlGraftMeta.ProtoReflect.Descriptor instead. +func (*TraceEvent_ControlGraftMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 19} +} + +func (x *TraceEvent_ControlGraftMeta) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +type TraceEvent_ControlPruneMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"` + Peers [][]byte `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"` +} + +func (x *TraceEvent_ControlPruneMeta) Reset() { + *x = TraceEvent_ControlPruneMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_trace_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceEvent_ControlPruneMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceEvent_ControlPruneMeta) ProtoMessage() {} + +func (x *TraceEvent_ControlPruneMeta) ProtoReflect() protoreflect.Message { + mi := &file_trace_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceEvent_ControlPruneMeta.ProtoReflect.Descriptor instead. 
+func (*TraceEvent_ControlPruneMeta) Descriptor() ([]byte, []int) { + return file_trace_proto_rawDescGZIP(), []int{0, 20} +} + +func (x *TraceEvent_ControlPruneMeta) GetBitmask() []byte { + if x != nil { + return x.Bitmask + } + return nil +} + +func (x *TraceEvent_ControlPruneMeta) GetPeers() [][]byte { + if x != nil { + return x.Peers + } + return nil +} + +var File_trace_proto protoreflect.FileDescriptor + +var file_trace_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x62, + 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x22, 0xfe, 0x1e, 0x0a, + 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x62, 0x6c, 0x6f, 0x73, + 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, + 0x01, 0x12, 0x21, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x88, 0x01, 0x01, 0x12, 0x55, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, + 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x03, 0x52, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x52, 0x0a, 0x0d, 0x72, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, + 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x04, 0x52, 0x0d, 0x72, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x5b, 0x0a, 0x10, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, + 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x05, 0x52, 0x10, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x55, 0x0a, 0x0e, + 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, + 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x06, + 0x52, 0x0e, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x4d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, + 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x48, 0x07, 0x52, 0x07, 0x61, 0x64, 0x64, 0x50, 0x65, + 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x49, 0x0a, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x6c, 0x6f, 0x73, + 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x48, + 0x08, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, + 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, + 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x63, + 0x76, 0x52, 0x50, 0x43, 0x48, 0x09, 0x52, 0x07, 0x72, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x88, + 0x01, 0x01, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, + 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x48, 0x0a, 0x52, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x50, + 0x43, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x07, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, + 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x48, 0x0b, 0x52, 0x07, 0x64, 0x72, 0x6f, 0x70, + 0x52, 0x50, 0x43, 0x88, 0x01, 0x01, 0x12, 0x37, 0x0a, 0x04, 0x6a, 0x6f, 0x69, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, + 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x4a, 0x6f, 0x69, 0x6e, 0x48, 0x0c, 0x52, 0x04, 0x6a, 0x6f, 0x69, 0x6e, 0x88, 0x01, 0x01, 0x12, + 0x3a, 0x0a, 0x05, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x48, + 0x0d, 0x52, 0x05, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x05, 0x67, + 0x72, 0x61, 0x66, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, 0x6c, 0x6f, + 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x72, 0x61, 0x66, 0x74, 0x48, 0x0e, 0x52, 0x05, 0x67, + 0x72, 0x61, 0x66, 0x74, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, + 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x48, 0x0f, 0x52, 0x05, 0x70, 0x72, 0x75, 
0x6e, 0x65, + 0x88, 0x01, 0x01, 0x1a, 0x6c, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, + 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, + 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x1a, 0xcd, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12, + 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x02, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, + 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x03, 0x52, + 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x1a, 0xa8, 0x01, 0x0a, 0x10, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x01, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, + 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, + 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa6, 0x01, 0x0a, + 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, + 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x12, 
0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, + 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, + 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, + 0x6d, 0x61, 0x73, 0x6b, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x1a, 0x56, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, + 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x05, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x44, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x0a, + 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x44, 0x1a, 0x88, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x12, + 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, + 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x76, + 0x0a, 0x07, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x6e, + 0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6e, + 0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, + 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88, + 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07, 0x0a, + 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x76, 0x0a, 0x07, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x50, + 0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, + 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 
0x61, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, + 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x31, + 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, + 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x1a, 0x32, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, + 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, + 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, + 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x5a, 0x0a, 0x05, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12, 0x1b, + 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, + 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, + 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, + 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x1a, 0x5a, 0x0a, 0x05, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xe5, 0x01, + 0x0a, 0x07, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, + 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0c, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, + 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, + 0x62, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, + 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x00, 0x52, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x69, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 
0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, + 0x1a, 0x65, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1d, + 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x95, 0x02, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, + 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x52, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x77, 0x61, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, + 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x69, 0x77, 0x61, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x67, + 0x72, 0x61, 0x66, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, + 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, + 0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x67, 0x72, 0x61, 0x66, 0x74, 0x12, 0x40, 0x0a, + 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, + 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, + 0x72, 0x75, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x1a, + 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, + 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, + 0x44, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x32, + 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 
0x44, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, + 0x44, 0x73, 0x1a, 0x3d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, + 0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, + 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x1a, 0x53, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, + 0x6b, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, + 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xcf, 0x01, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4d, + 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x55, 0x50, 0x4c, + 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x44, 0x44, 0x5f, 0x50, 0x45, 0x45, 0x52, + 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x5f, 0x50, 0x45, 0x45, + 0x52, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x43, 0x56, 0x5f, 0x52, 0x50, 0x43, 0x10, + 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x45, 0x4e, 0x44, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x07, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x08, 0x12, 0x08, 0x0a, + 0x04, 0x4a, 0x4f, 0x49, 0x4e, 0x10, 0x09, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x45, 0x41, 0x56, 0x45, + 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x41, 0x46, 0x54, 0x10, 0x0b, 0x12, 0x09, 0x0a, + 0x05, 0x50, 0x52, 0x55, 0x4e, 0x45, 0x10, 0x0c, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x10, 0x0a, + 0x0e, 0x5f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x72, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x42, 0x0a, + 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x64, + 0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6a, 0x6f, 0x69, 0x6e, 0x42, + 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x67, 0x72, + 0x61, 0x66, 
0x74, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x22, 0x42, 0x0a, + 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x2f, 0x0a, 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x62, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x43, 0x5a, 0x41, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, + 0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, + 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x67, + 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2d, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, + 0x73, 0x75, 0x62, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_trace_proto_rawDescOnce sync.Once + file_trace_proto_rawDescData = file_trace_proto_rawDesc +) + +func file_trace_proto_rawDescGZIP() []byte { + file_trace_proto_rawDescOnce.Do(func() { + file_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_trace_proto_rawDescData) + }) + return file_trace_proto_rawDescData +} + +var file_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_trace_proto_goTypes = []interface{}{ + (TraceEvent_Type)(0), // 0: blossomsub.pb.TraceEvent.Type + (*TraceEvent)(nil), // 1: blossomsub.pb.TraceEvent + (*TraceEventBatch)(nil), // 2: blossomsub.pb.TraceEventBatch + (*TraceEvent_PublishMessage)(nil), // 3: blossomsub.pb.TraceEvent.PublishMessage + (*TraceEvent_RejectMessage)(nil), // 4: blossomsub.pb.TraceEvent.RejectMessage + (*TraceEvent_DuplicateMessage)(nil), // 5: blossomsub.pb.TraceEvent.DuplicateMessage + (*TraceEvent_DeliverMessage)(nil), // 6: blossomsub.pb.TraceEvent.DeliverMessage + (*TraceEvent_AddPeer)(nil), // 7: blossomsub.pb.TraceEvent.AddPeer + (*TraceEvent_RemovePeer)(nil), // 8: blossomsub.pb.TraceEvent.RemovePeer + (*TraceEvent_RecvRPC)(nil), // 9: blossomsub.pb.TraceEvent.RecvRPC + (*TraceEvent_SendRPC)(nil), // 10: blossomsub.pb.TraceEvent.SendRPC + (*TraceEvent_DropRPC)(nil), // 11: blossomsub.pb.TraceEvent.DropRPC + (*TraceEvent_Join)(nil), // 12: blossomsub.pb.TraceEvent.Join + (*TraceEvent_Leave)(nil), // 13: blossomsub.pb.TraceEvent.Leave + (*TraceEvent_Graft)(nil), // 14: blossomsub.pb.TraceEvent.Graft + (*TraceEvent_Prune)(nil), // 15: blossomsub.pb.TraceEvent.Prune + (*TraceEvent_RPCMeta)(nil), // 16: blossomsub.pb.TraceEvent.RPCMeta + (*TraceEvent_MessageMeta)(nil), // 17: blossomsub.pb.TraceEvent.MessageMeta + (*TraceEvent_SubMeta)(nil), // 18: blossomsub.pb.TraceEvent.SubMeta + (*TraceEvent_ControlMeta)(nil), // 19: blossomsub.pb.TraceEvent.ControlMeta + (*TraceEvent_ControlIHaveMeta)(nil), // 20: blossomsub.pb.TraceEvent.ControlIHaveMeta + (*TraceEvent_ControlIWantMeta)(nil), // 21: blossomsub.pb.TraceEvent.ControlIWantMeta + (*TraceEvent_ControlGraftMeta)(nil), // 22: blossomsub.pb.TraceEvent.ControlGraftMeta + (*TraceEvent_ControlPruneMeta)(nil), // 23: blossomsub.pb.TraceEvent.ControlPruneMeta +} +var file_trace_proto_depIdxs = []int32{ + 0, // 0: blossomsub.pb.TraceEvent.type:type_name -> blossomsub.pb.TraceEvent.Type + 3, // 1: blossomsub.pb.TraceEvent.publishMessage:type_name -> blossomsub.pb.TraceEvent.PublishMessage + 4, // 2: blossomsub.pb.TraceEvent.rejectMessage:type_name -> 
blossomsub.pb.TraceEvent.RejectMessage + 5, // 3: blossomsub.pb.TraceEvent.duplicateMessage:type_name -> blossomsub.pb.TraceEvent.DuplicateMessage + 6, // 4: blossomsub.pb.TraceEvent.deliverMessage:type_name -> blossomsub.pb.TraceEvent.DeliverMessage + 7, // 5: blossomsub.pb.TraceEvent.addPeer:type_name -> blossomsub.pb.TraceEvent.AddPeer + 8, // 6: blossomsub.pb.TraceEvent.removePeer:type_name -> blossomsub.pb.TraceEvent.RemovePeer + 9, // 7: blossomsub.pb.TraceEvent.recvRPC:type_name -> blossomsub.pb.TraceEvent.RecvRPC + 10, // 8: blossomsub.pb.TraceEvent.sendRPC:type_name -> blossomsub.pb.TraceEvent.SendRPC + 11, // 9: blossomsub.pb.TraceEvent.dropRPC:type_name -> blossomsub.pb.TraceEvent.DropRPC + 12, // 10: blossomsub.pb.TraceEvent.join:type_name -> blossomsub.pb.TraceEvent.Join + 13, // 11: blossomsub.pb.TraceEvent.leave:type_name -> blossomsub.pb.TraceEvent.Leave + 14, // 12: blossomsub.pb.TraceEvent.graft:type_name -> blossomsub.pb.TraceEvent.Graft + 15, // 13: blossomsub.pb.TraceEvent.prune:type_name -> blossomsub.pb.TraceEvent.Prune + 1, // 14: blossomsub.pb.TraceEventBatch.batch:type_name -> blossomsub.pb.TraceEvent + 16, // 15: blossomsub.pb.TraceEvent.RecvRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta + 16, // 16: blossomsub.pb.TraceEvent.SendRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta + 16, // 17: blossomsub.pb.TraceEvent.DropRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta + 17, // 18: blossomsub.pb.TraceEvent.RPCMeta.messages:type_name -> blossomsub.pb.TraceEvent.MessageMeta + 18, // 19: blossomsub.pb.TraceEvent.RPCMeta.subscription:type_name -> blossomsub.pb.TraceEvent.SubMeta + 19, // 20: blossomsub.pb.TraceEvent.RPCMeta.control:type_name -> blossomsub.pb.TraceEvent.ControlMeta + 20, // 21: blossomsub.pb.TraceEvent.ControlMeta.ihave:type_name -> blossomsub.pb.TraceEvent.ControlIHaveMeta + 21, // 22: blossomsub.pb.TraceEvent.ControlMeta.iwant:type_name -> blossomsub.pb.TraceEvent.ControlIWantMeta + 22, // 23: blossomsub.pb.TraceEvent.ControlMeta.graft:type_name -> blossomsub.pb.TraceEvent.ControlGraftMeta + 23, // 24: blossomsub.pb.TraceEvent.ControlMeta.prune:type_name -> blossomsub.pb.TraceEvent.ControlPruneMeta + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name +} + +func init() { file_trace_proto_init() } +func file_trace_proto_init() { + if File_trace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEventBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_PublishMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_RejectMessage); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_DuplicateMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_DeliverMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_AddPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_RemovePeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_RecvRPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_SendRPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_DropRPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_Join); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_Leave); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_Graft); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_Prune); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_RPCMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_MessageMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_SubMeta); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_ControlMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_ControlIHaveMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_ControlIWantMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_ControlGraftMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_trace_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceEvent_ControlPruneMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_trace_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[5].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[7].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[9].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[10].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[11].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[12].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[14].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[19].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[21].OneofWrappers = []interface{}{} + file_trace_proto_msgTypes[22].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_trace_proto_rawDesc, + NumEnums: 1, + NumMessages: 23, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_trace_proto_goTypes, + DependencyIndexes: file_trace_proto_depIdxs, + EnumInfos: file_trace_proto_enumTypes, + MessageInfos: file_trace_proto_msgTypes, + }.Build() + File_trace_proto = out.File + file_trace_proto_rawDesc = nil + file_trace_proto_goTypes = nil + file_trace_proto_depIdxs = nil +} diff --git a/go-libp2p-blossomsub/pb/trace.proto b/go-libp2p-blossomsub/pb/trace.proto new file mode 100644 index 0000000..6c8b3f7 --- /dev/null +++ b/go-libp2p-blossomsub/pb/trace.proto @@ -0,0 +1,152 @@ +syntax = "proto3"; + +package blossomsub.pb; + +option go_package = 
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"; + +message TraceEvent { + optional Type type = 1; + optional bytes peerID = 2; + optional int64 timestamp = 3; + + optional PublishMessage publishMessage = 4; + optional RejectMessage rejectMessage = 5; + optional DuplicateMessage duplicateMessage = 6; + optional DeliverMessage deliverMessage = 7; + optional AddPeer addPeer = 8; + optional RemovePeer removePeer = 9; + optional RecvRPC recvRPC = 10; + optional SendRPC sendRPC = 11; + optional DropRPC dropRPC = 12; + optional Join join = 13; + optional Leave leave = 14; + optional Graft graft = 15; + optional Prune prune = 16; + + enum Type { + PUBLISH_MESSAGE = 0; + REJECT_MESSAGE = 1; + DUPLICATE_MESSAGE = 2; + DELIVER_MESSAGE = 3; + ADD_PEER = 4; + REMOVE_PEER = 5; + RECV_RPC = 6; + SEND_RPC = 7; + DROP_RPC = 8; + JOIN = 9; + LEAVE = 10; + GRAFT = 11; + PRUNE = 12; + } + + message PublishMessage { + optional bytes messageID = 1; + optional bytes bitmask = 2; + } + + message RejectMessage { + optional bytes messageID = 1; + optional bytes receivedFrom = 2; + optional string reason = 3; + optional bytes bitmask = 4; + } + + message DuplicateMessage { + optional bytes messageID = 1; + optional bytes receivedFrom = 2; + optional bytes bitmask = 3; + } + + message DeliverMessage { + optional bytes messageID = 1; + optional bytes bitmask = 2; + optional bytes receivedFrom = 3; + } + + message AddPeer { + optional bytes peerID = 1; + optional string proto = 2; + } + + message RemovePeer { + optional bytes peerID = 1; + } + + message RecvRPC { + optional bytes receivedFrom = 1; + optional RPCMeta meta = 2; + } + + message SendRPC { + optional bytes sendTo = 1; + optional RPCMeta meta = 2; + } + + message DropRPC { + optional bytes sendTo = 1; + optional RPCMeta meta = 2; + } + + message Join { + optional bytes bitmask = 1; + } + + message Leave { + optional bytes bitmask = 2; + } + + message Graft { + optional bytes peerID = 1; + optional bytes bitmask = 2; + } + + message Prune { + optional bytes peerID = 1; + optional bytes bitmask = 2; + } + + message RPCMeta { + repeated MessageMeta messages = 1; + repeated SubMeta subscription = 2; + optional ControlMeta control = 3; + } + + message MessageMeta { + optional bytes messageID = 1; + optional bytes bitmask = 2; + } + + message SubMeta { + optional bool subscribe = 1; + optional bytes bitmask = 2; + } + + message ControlMeta { + repeated ControlIHaveMeta ihave = 1; + repeated ControlIWantMeta iwant = 2; + repeated ControlGraftMeta graft = 3; + repeated ControlPruneMeta prune = 4; + } + + message ControlIHaveMeta { + optional bytes bitmask = 1; + repeated bytes messageIDs = 2; + } + + message ControlIWantMeta { + repeated bytes messageIDs = 1; + } + + message ControlGraftMeta { + optional bytes bitmask = 1; + } + + message ControlPruneMeta { + optional bytes bitmask = 1; + repeated bytes peers = 2; + } +} + +message TraceEventBatch { + repeated TraceEvent batch = 1; +} \ No newline at end of file diff --git a/go-libp2p-blossomsub/peer_gater.go b/go-libp2p-blossomsub/peer_gater.go new file mode 100644 index 0000000..c4610ad --- /dev/null +++ b/go-libp2p-blossomsub/peer_gater.go @@ -0,0 +1,453 @@ +package blossomsub + +import ( + "context" + "fmt" + "math/rand" + "sort" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + manet "github.com/multiformats/go-multiaddr/net" +) + +var ( + 
DefaultPeerGaterRetainStats = 6 * time.Hour + DefaultPeerGaterQuiet = time.Minute + DefaultPeerGaterDuplicateWeight = 0.125 + DefaultPeerGaterIgnoreWeight = 1.0 + DefaultPeerGaterRejectWeight = 16.0 + DefaultPeerGaterThreshold = 0.33 + DefaultPeerGaterGlobalDecay = ScoreParameterDecay(2 * time.Minute) + DefaultPeerGaterSourceDecay = ScoreParameterDecay(time.Hour) +) + +// PeerGaterParams groups together parameters that control the operation of the peer gater +type PeerGaterParams struct { + // when the ratio of throttled/validated messages exceeds this threshold, the gater turns on + Threshold float64 + // (linear) decay parameter for gater counters + GlobalDecay float64 // global counter decay + SourceDecay float64 // per IP counter decay + // decay interval + DecayInterval time.Duration + // counter zeroing threshold + DecayToZero float64 + // how long to retain stats + RetainStats time.Duration + // quiet interval before turning off the gater; if there are no validation throttle events + // for this interval, the gater turns off + Quiet time.Duration + // weight of duplicate message deliveries + DuplicateWeight float64 + // weight of ignored messages + IgnoreWeight float64 + // weight of rejected messages + RejectWeight float64 + + // priority bitmask delivery weights + BitmaskDeliveryWeights map[string]float64 +} + +func (p *PeerGaterParams) validate() error { + if p.Threshold <= 0 { + return fmt.Errorf("invalid Threshold; must be > 0") + } + if p.GlobalDecay <= 0 || p.GlobalDecay >= 1 { + return fmt.Errorf("invalid GlobalDecay; must be between 0 and 1") + } + if p.SourceDecay <= 0 || p.SourceDecay >= 1 { + return fmt.Errorf("invalid SourceDecay; must be between 0 and 1") + } + if p.DecayInterval < time.Second { + return fmt.Errorf("invalid DecayInterval; must be at least 1s") + } + if p.DecayToZero <= 0 || p.DecayToZero >= 1 { + return fmt.Errorf("invalid DecayToZero; must be between 0 and 1") + } + // no need to check stats retention; a value of 0 means we don't retain stats + if p.Quiet < time.Second { + return fmt.Errorf("invalud Quiet interval; must be at least 1s") + } + if p.DuplicateWeight <= 0 { + return fmt.Errorf("invalid DuplicateWeight; must be > 0") + } + if p.IgnoreWeight < 1 { + return fmt.Errorf("invalid IgnoreWeight; must be >= 1") + } + if p.RejectWeight < 1 { + return fmt.Errorf("invalud RejectWeight; must be >= 1") + } + + return nil +} + +// WithBitmaskDeliveryWeights is a fluid setter for the priority bitmask delivery weights +func (p *PeerGaterParams) WithBitmaskDeliveryWeights(w map[string]float64) *PeerGaterParams { + p.BitmaskDeliveryWeights = w + return p +} + +// NewPeerGaterParams creates a new PeerGaterParams struct, using the specified threshold and decay +// parameters and default values for all other parameters. 
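+//
+// A minimal usage sketch (hedged; it simply mirrors the defaults declared above,
+// and ScoreParameterDecay is the same decay helper already used for those defaults):
+//
+//	params := NewPeerGaterParams(
+//		0.33,                               // Threshold
+//		ScoreParameterDecay(2*time.Minute), // GlobalDecay
+//		ScoreParameterDecay(time.Hour),     // SourceDecay
+//	)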
+func NewPeerGaterParams(threshold, globalDecay, sourceDecay float64) *PeerGaterParams { + return &PeerGaterParams{ + Threshold: threshold, + GlobalDecay: globalDecay, + SourceDecay: sourceDecay, + DecayToZero: DefaultDecayToZero, + DecayInterval: DefaultDecayInterval, + RetainStats: DefaultPeerGaterRetainStats, + Quiet: DefaultPeerGaterQuiet, + DuplicateWeight: DefaultPeerGaterDuplicateWeight, + IgnoreWeight: DefaultPeerGaterIgnoreWeight, + RejectWeight: DefaultPeerGaterRejectWeight, + } +} + +// DefaultPeerGaterParams creates a new PeerGaterParams struct using default values +func DefaultPeerGaterParams() *PeerGaterParams { + return NewPeerGaterParams(DefaultPeerGaterThreshold, DefaultPeerGaterGlobalDecay, DefaultPeerGaterSourceDecay) +} + +// the gater object. +type peerGater struct { + sync.Mutex + + host host.Host + + // gater parameters + params *PeerGaterParams + + // counters + validate, throttle float64 + + // time of last validation throttle + lastThrottle time.Time + + // stats per peer.ID -- multiple peer IDs may share the same stats object if they are + // colocated in the same IP + peerStats map[peer.ID]*peerGaterStats + // stats per IP + ipStats map[string]*peerGaterStats + + // for unit tests + getIP func(peer.ID) string +} + +type peerGaterStats struct { + // number of connected peer IDs mapped to this stat object + connected int + // stats expiration time -- only valid if connected = 0 + expire time.Time + + // counters + deliver, duplicate, ignore, reject float64 +} + +// WithPeerGater is a BlossomSub router option that enables reactive validation queue +// management. +// The Gater is activated if the ratio of throttled/validated messages exceeds the specified +// threshold. +// Once active, the Gater probabilistically throttles peers _before_ they enter the validation +// queue, performing Random Early Drop. +// The throttle decision is randomized, with the probability of allowing messages to enter the +// validation queue controlled by the statistical observations of the performance of all peers +// in the IP address of the gated peer. +// The Gater deactivates if there is no validation throttlinc occurring for the specified quiet +// interval. 
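+//
+// Illustrative wiring, as a hedged sketch rather than a normative example: it
+// assumes the package's BlossomSub constructor follows the usual
+// NewBlossomSub(ctx, host, opts...) form and that h is an already-running libp2p host.
+//
+//	ps, err := NewBlossomSub(ctx, h, WithPeerGater(DefaultPeerGaterParams()))
+//	if err != nil {
+//		// the option errors if the router is not BlossomSub or the params are invalid
+//	}
+//	_ = ps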
+func WithPeerGater(params *PeerGaterParams) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + err := params.validate() + if err != nil { + return err + } + + gs.gate = newPeerGater(ps.ctx, ps.host, params) + + // hook the tracer + if ps.tracer != nil { + ps.tracer.raw = append(ps.tracer.raw, gs.gate) + } else { + ps.tracer = &pubsubTracer{ + raw: []RawTracer{gs.gate}, + pid: ps.host.ID(), + idGen: ps.idGen, + } + } + + return nil + } +} + +func newPeerGater(ctx context.Context, host host.Host, params *PeerGaterParams) *peerGater { + pg := &peerGater{ + params: params, + peerStats: make(map[peer.ID]*peerGaterStats), + ipStats: make(map[string]*peerGaterStats), + host: host, + } + go pg.background(ctx) + return pg +} + +func (pg *peerGater) background(ctx context.Context) { + tick := time.NewTicker(pg.params.DecayInterval) + + defer tick.Stop() + + for { + select { + case <-tick.C: + pg.decayStats() + case <-ctx.Done(): + return + } + } +} + +func (pg *peerGater) decayStats() { + pg.Lock() + defer pg.Unlock() + + pg.validate *= pg.params.GlobalDecay + if pg.validate < pg.params.DecayToZero { + pg.validate = 0 + } + + pg.throttle *= pg.params.GlobalDecay + if pg.throttle < pg.params.DecayToZero { + pg.throttle = 0 + } + + now := time.Now() + for ip, st := range pg.ipStats { + if st.connected > 0 { + st.deliver *= pg.params.SourceDecay + if st.deliver < pg.params.DecayToZero { + st.deliver = 0 + } + + st.duplicate *= pg.params.SourceDecay + if st.duplicate < pg.params.DecayToZero { + st.duplicate = 0 + } + + st.ignore *= pg.params.SourceDecay + if st.ignore < pg.params.DecayToZero { + st.ignore = 0 + } + + st.reject *= pg.params.SourceDecay + if st.reject < pg.params.DecayToZero { + st.reject = 0 + } + } else if st.expire.Before(now) { + delete(pg.ipStats, ip) + } + } +} + +func (pg *peerGater) getPeerStats(p peer.ID) *peerGaterStats { + st, ok := pg.peerStats[p] + if !ok { + st = pg.getIPStats(p) + pg.peerStats[p] = st + } + return st +} + +func (pg *peerGater) getIPStats(p peer.ID) *peerGaterStats { + ip := pg.getPeerIP(p) + st, ok := pg.ipStats[ip] + if !ok { + st = &peerGaterStats{} + pg.ipStats[ip] = st + } + return st +} + +func (pg *peerGater) getPeerIP(p peer.ID) string { + if pg.getIP != nil { + return pg.getIP(p) + } + + connToIP := func(c network.Conn) string { + remote := c.RemoteMultiaddr() + ip, err := manet.ToIP(remote) + if err != nil { + log.Warnf("error determining IP for remote peer in %s: %s", remote, err) + return "" + } + return ip.String() + } + + conns := pg.host.Network().ConnsToPeer(p) + switch len(conns) { + case 0: + return "" + case 1: + return connToIP(conns[0]) + default: + // we have multiple connections -- order by number of streams and use the one with the + // most streams; it's a nightmare to track multiple IPs per peer, so pick the best one. + streams := make(map[string]int) + for _, c := range conns { + if c.Stat().Transient { + // ignore transient + continue + } + streams[c.ID()] = len(c.GetStreams()) + } + sort.Slice(conns, func(i, j int) bool { + return streams[conns[i].ID()] > streams[conns[j].ID()] + }) + return connToIP(conns[0]) + } +} + +// router interface +func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus { + if pg == nil { + return AcceptAll + } + + pg.Lock() + defer pg.Unlock() + + // check the quiet period; if the validation queue has not throttled for more than the Quiet + // interval, we turn off the circuit breaker and accept. 
+ if time.Since(pg.lastThrottle) > pg.params.Quiet { + return AcceptAll + } + + // no throttle events -- or they have decayed; accept. + if pg.throttle == 0 { + return AcceptAll + } + + // check the throttle/validate ration; if it is below threshold we accept. + if pg.validate != 0 && pg.throttle/pg.validate < pg.params.Threshold { + return AcceptAll + } + + st := pg.getPeerStats(p) + + // compute the goodput of the peer; the denominator is the weighted mix of message counters + total := st.deliver + pg.params.DuplicateWeight*st.duplicate + pg.params.IgnoreWeight*st.ignore + pg.params.RejectWeight*st.reject + if total == 0 { + return AcceptAll + } + + // we make a randomized decision based on the goodput of the peer. + // the probabiity is biased by adding 1 to the delivery counter so that we don't unconditionally + // throttle in the first negative event; it also ensures that a peer always has a chance of being + // accepted; this is not a sinkhole/blacklist. + threshold := (1 + st.deliver) / (1 + total) + if rand.Float64() < threshold { + return AcceptAll + } + + log.Debugf("throttling peer %s with threshold %f", p, threshold) + return AcceptControl +} + +// -- RawTracer interface methods +var _ RawTracer = (*peerGater)(nil) + +// tracer interface +func (pg *peerGater) AddPeer(p peer.ID, proto protocol.ID) { + pg.Lock() + defer pg.Unlock() + + st := pg.getPeerStats(p) + st.connected++ +} + +func (pg *peerGater) RemovePeer(p peer.ID) { + pg.Lock() + defer pg.Unlock() + + st := pg.getPeerStats(p) + st.connected-- + st.expire = time.Now().Add(pg.params.RetainStats) + + delete(pg.peerStats, p) +} + +func (pg *peerGater) Join(bitmask []byte) {} +func (pg *peerGater) Leave(bitmask []byte) {} +func (pg *peerGater) Graft(p peer.ID, bitmask []byte) {} +func (pg *peerGater) Prune(p peer.ID, bitmask []byte) {} + +func (pg *peerGater) ValidateMessage(msg *Message) { + pg.Lock() + defer pg.Unlock() + + pg.validate++ +} + +func (pg *peerGater) DeliverMessage(msg *Message) { + pg.Lock() + defer pg.Unlock() + + st := pg.getPeerStats(msg.ReceivedFrom) + + bitmask := msg.GetBitmask() + weight := pg.params.BitmaskDeliveryWeights[string(bitmask)] + + if weight == 0 { + weight = 1 + } + + st.deliver += weight +} + +func (pg *peerGater) RejectMessage(msg *Message, reason string) { + pg.Lock() + defer pg.Unlock() + + switch reason { + case RejectValidationQueueFull: + fallthrough + case RejectValidationThrottled: + pg.lastThrottle = time.Now() + pg.throttle++ + + case RejectValidationIgnored: + st := pg.getPeerStats(msg.ReceivedFrom) + st.ignore++ + + default: + st := pg.getPeerStats(msg.ReceivedFrom) + st.reject++ + } +} + +func (pg *peerGater) DuplicateMessage(msg *Message) { + pg.Lock() + defer pg.Unlock() + + st := pg.getPeerStats(msg.ReceivedFrom) + st.duplicate++ +} + +func (pg *peerGater) ThrottlePeer(p peer.ID) {} + +func (pg *peerGater) RecvRPC(rpc *RPC) {} + +func (pg *peerGater) SendRPC(rpc *RPC, p peer.ID) {} + +func (pg *peerGater) DropRPC(rpc *RPC, p peer.ID) {} + +func (pg *peerGater) UndeliverableMessage(msg *Message) {} diff --git a/go-libp2p-blossomsub/peer_gater_test.go b/go-libp2p-blossomsub/peer_gater_test.go new file mode 100644 index 0000000..0e097ab --- /dev/null +++ b/go-libp2p-blossomsub/peer_gater_test.go @@ -0,0 +1,128 @@ +package blossomsub + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestPeerGater(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + peerA := peer.ID("A") + peerAip := 
"1.2.3.4" + + params := NewPeerGaterParams(.1, .9, .999) + err := params.validate() + if err != nil { + t.Fatal(err) + } + + pg := newPeerGater(ctx, nil, params) + pg.getIP = func(p peer.ID) string { + switch p { + case peerA: + return peerAip + default: + return "" + } + } + + pg.AddPeer(peerA, "") + + status := pg.AcceptFrom(peerA) + if status != AcceptAll { + t.Fatal("expected AcceptAll") + } + + msg := &Message{ReceivedFrom: peerA} + + pg.ValidateMessage(msg) + status = pg.AcceptFrom(peerA) + if status != AcceptAll { + t.Fatal("expected AcceptAll") + } + + pg.RejectMessage(msg, RejectValidationQueueFull) + status = pg.AcceptFrom(peerA) + if status != AcceptAll { + t.Fatal("expected AcceptAll") + } + + pg.RejectMessage(msg, RejectValidationThrottled) + status = pg.AcceptFrom(peerA) + if status != AcceptAll { + t.Fatal("expected AcceptAll") + } + + for i := 0; i < 100; i++ { + pg.RejectMessage(msg, RejectValidationIgnored) + pg.RejectMessage(msg, RejectValidationFailed) + } + + accepted := false + for i := 0; !accepted && i < 1000; i++ { + status = pg.AcceptFrom(peerA) + if status == AcceptControl { + accepted = true + } + } + if !accepted { + t.Fatal("expected AcceptControl") + } + + for i := 0; i < 100; i++ { + pg.DeliverMessage(msg) + } + + accepted = false + for i := 0; !accepted && i < 1000; i++ { + status = pg.AcceptFrom(peerA) + if status == AcceptAll { + accepted = true + } + } + if !accepted { + t.Fatal("expected to accept at least once") + } + + for i := 0; i < 100; i++ { + pg.decayStats() + } + + status = pg.AcceptFrom(peerA) + if status != AcceptAll { + t.Fatal("expected AcceptAll") + } + + pg.RemovePeer(peerA) + pg.Lock() + _, ok := pg.peerStats[peerA] + pg.Unlock() + if ok { + t.Fatal("still have a stat record for peerA") + } + + pg.Lock() + _, ok = pg.ipStats[peerAip] + pg.Unlock() + if !ok { + t.Fatal("expected to still have a stat record for peerA's ip") + } + + pg.Lock() + pg.ipStats[peerAip].expire = time.Now() + pg.Unlock() + + time.Sleep(2 * time.Second) + + pg.Lock() + _, ok = pg.ipStats["1.2.3.4"] + pg.Unlock() + if ok { + t.Fatal("still have a stat record for peerA's ip") + } +} diff --git a/go-libp2p-blossomsub/pubsub.go b/go-libp2p-blossomsub/pubsub.go new file mode 100644 index 0000000..b38720d --- /dev/null +++ b/go-libp2p-blossomsub/pubsub.go @@ -0,0 +1,1422 @@ +package blossomsub + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + logging "github.com/ipfs/go-log/v2" +) + +// DefaultMaximumMessageSize is 1mb. +const DefaultMaxMessageSize = 1 << 20 + +var ( + // TimeCacheDuration specifies how long a message ID will be remembered as seen. + // Use WithSeenMessagesTTL to configure this per pubsub instance, instead of overriding the global default. + TimeCacheDuration = 120 * time.Second + + // TimeCacheStrategy specifies which type of lookup/cleanup strategy is used by the seen messages cache. + // Use WithSeenMessagesStrategy to configure this per pubsub instance, instead of overriding the global default. 
+ TimeCacheStrategy = timecache.Strategy_FirstSeen + + // ErrSubscriptionCancelled may be returned when a subscription Next() is called after the + // subscription has been cancelled. + ErrSubscriptionCancelled = errors.New("subscription cancelled") +) + +var log = logging.Logger("pubsub") + +type ProtocolMatchFn = func(protocol.ID) func(protocol.ID) bool + +// PubSub is the implementation of the pubsub system. +type PubSub struct { + // atomic counter for seqnos + // NOTE: Must be declared at the top of the struct as we perform atomic + // operations on this field. + // + // See: https://golang.org/pkg/sync/atomic/#pkg-note-BUG + counter uint64 + + host host.Host + + rt PubSubRouter + + val *validation + + disc *discover + + tracer *pubsubTracer + + peerFilter PeerFilter + + // maxMessageSize is the maximum message size; it applies globally to all + // bitmasks. + maxMessageSize int + + // size of the outbound message channel that we maintain for each peer + peerOutboundQueueSize int + + // incoming messages from other peers + incoming chan *RPC + + // addSub is a control channel for us to add and remove subscriptions + addSub chan *addSubReq + + // addRelay is a control channel for us to add and remove relays + addRelay chan *addRelayReq + + // rmRelay is a relay cancellation channel + rmRelay chan string + + // get list of bitmasks we are subscribed to + getBitmasks chan *bitmaskReq + + // get chan of peers we are connected to + getPeers chan *listPeerReq + + // send subscription here to cancel it + cancelCh chan *Subscription + + // addSub is a channel for us to add a bitmask + addBitmask chan *addBitmaskReq + + // removeBitmask is a bitmask cancellation channel + rmBitmask chan *rmBitmaskReq + + // a notification channel for new peer connections accumulated + newPeers chan struct{} + newPeersPrioLk sync.RWMutex + newPeersMx sync.Mutex + newPeersPend map[peer.ID]struct{} + + // a notification channel for new outoging peer streams + newPeerStream chan network.Stream + + // a notification channel for errors opening new peer streams + newPeerError chan peer.ID + + // a notification channel for when our peers die + peerDead chan struct{} + peerDeadPrioLk sync.RWMutex + peerDeadMx sync.Mutex + peerDeadPend map[peer.ID]struct{} + // backoff for retrying new connections to dead peers + deadPeerBackoff *backoff + + // The set of bitmasks we are subscribed to + mySubs map[string]map[*Subscription]struct{} + + // The set of bitmasks we are relaying for + myRelays map[string]int + + // The set of bitmasks we are interested in + myBitmasks map[string]*Bitmask + + // bitmasks tracks which bitmasks each of our peers are subscribed to + bitmasks map[string]map[peer.ID]struct{} + + // sendMsg handles messages that have been validated + sendMsg chan *Message + + // addVal handles validator registration requests + addVal chan *addValReq + + // rmVal handles validator unregistration requests + rmVal chan *rmValReq + + // eval thunk in event loop + eval chan func() + + // peer blacklist + blacklist Blacklist + blacklistPeer chan peer.ID + + peers map[peer.ID]chan *RPC + + inboundStreamsMx sync.Mutex + inboundStreams map[peer.ID]network.Stream + + seenMessages timecache.TimeCache + seenMsgTTL time.Duration + seenMsgStrategy timecache.Strategy + + // generator used to compute the ID for a message + idGen *msgIDGenerator + + // key for signing messages; nil when signing is disabled + signKey crypto.PrivKey + // source ID for signed messages; corresponds to signKey, empty when signing is disabled. 
+ // If empty, the author and seq-nr are completely omitted from the messages. + signID peer.ID + // strict mode rejects all unsigned messages prior to validation + signPolicy MessageSignaturePolicy + + // filter for tracking subscriptions in bitmasks of interest; if nil, then we track all subscriptions + subFilter SubscriptionFilter + + // protoMatchFunc is a matching function for protocol selection. + protoMatchFunc ProtocolMatchFn + + ctx context.Context + + // appSpecificRpcInspector is an auxiliary that may be set by the application to inspect incoming RPCs prior to + // processing them. The inspector is invoked on an accepted RPC right prior to handling it. + // The return value of the inspector function is an error indicating whether the RPC should be processed or not. + // If the error is nil, the RPC is processed as usual. If the error is non-nil, the RPC is dropped. + appSpecificRpcInspector func(peer.ID, *RPC) error +} + +// PubSubRouter is the message router component of PubSub. +type PubSubRouter interface { + // Protocols returns the list of protocols supported by the router. + Protocols() []protocol.ID + // Attach is invoked by the PubSub constructor to attach the router to a + // freshly initialized PubSub instance. + Attach(*PubSub) + // AddPeer notifies the router that a new peer has been connected. + AddPeer(peer.ID, protocol.ID) + // RemovePeer notifies the router that a peer has been disconnected. + RemovePeer(peer.ID) + // EnoughPeers returns whether the router needs more peers before it's ready to publish new records. + // Suggested (if greater than 0) is a suggested number of peers that the router should need. + EnoughPeers(bitmask []byte, suggested int) bool + // AcceptFrom is invoked on any incoming message before pushing it to the validation pipeline + // or processing control information. + // Allows routers with internal scoring to vet peers before committing any processing resources + // to the message and implement an effective graylist and react to validation queue overload. + AcceptFrom(peer.ID) AcceptStatus + // HandleRPC is invoked to process control messages in the RPC envelope. + // It is invoked after subscriptions and payload messages have been processed. + HandleRPC(*RPC) + // Publish is invoked to forward a new message that has been validated. + Publish(*Message) + // Join notifies the router that we want to receive and forward messages in a bitmask. + // It is invoked after the subscription announcement. + Join(bitmask []byte) + // Leave notifies the router that we are no longer interested in a bitmask. + // It is invoked after the unsubscription announcement. + Leave(bitmask []byte) +} + +type AcceptStatus int + +const ( + // AcceptNone signals to drop the incoming RPC + AcceptNone AcceptStatus = iota + // AcceptControl signals to accept the incoming RPC only for control message processing by + // the router. Included payload messages will _not_ be pushed to the validation queue. + AcceptControl + // AcceptAll signals to accept the incoming RPC for full processing + AcceptAll +) + +type Message struct { + *pb.Message + ID string + ReceivedFrom peer.ID + ValidatorData interface{} + Local bool +} + +func (m *Message) GetFrom() peer.ID { + return peer.ID(m.Message.GetFrom()) +} + +type RPC struct { + pb.RPC + + // unexported on purpose, not sending this over the wire + from peer.ID +} + +type Option func(*PubSub) error + +// NewPubSub returns a new PubSub management object. 
+func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option) (*PubSub, error) { + ps := &PubSub{ + host: h, + ctx: ctx, + rt: rt, + val: newValidation(), + peerFilter: DefaultPeerFilter, + disc: &discover{}, + maxMessageSize: DefaultMaxMessageSize, + peerOutboundQueueSize: 32, + signID: h.ID(), + signKey: nil, + signPolicy: StrictSign, + incoming: make(chan *RPC, 32), + newPeers: make(chan struct{}, 1), + newPeersPend: make(map[peer.ID]struct{}), + newPeerStream: make(chan network.Stream), + newPeerError: make(chan peer.ID), + peerDead: make(chan struct{}, 1), + peerDeadPend: make(map[peer.ID]struct{}), + deadPeerBackoff: newBackoff(ctx, 1000, BackoffCleanupInterval, MaxBackoffAttempts), + cancelCh: make(chan *Subscription), + getPeers: make(chan *listPeerReq), + addSub: make(chan *addSubReq), + addRelay: make(chan *addRelayReq), + rmRelay: make(chan string), + addBitmask: make(chan *addBitmaskReq), + rmBitmask: make(chan *rmBitmaskReq), + getBitmasks: make(chan *bitmaskReq), + sendMsg: make(chan *Message, 32), + addVal: make(chan *addValReq), + rmVal: make(chan *rmValReq), + eval: make(chan func()), + myBitmasks: make(map[string]*Bitmask), + mySubs: make(map[string]map[*Subscription]struct{}), + myRelays: make(map[string]int), + bitmasks: make(map[string]map[peer.ID]struct{}), + peers: make(map[peer.ID]chan *RPC), + inboundStreams: make(map[peer.ID]network.Stream), + blacklist: NewMapBlacklist(), + blacklistPeer: make(chan peer.ID), + seenMsgTTL: TimeCacheDuration, + seenMsgStrategy: TimeCacheStrategy, + idGen: newMsgIdGenerator(), + counter: uint64(time.Now().UnixNano()), + } + + for _, opt := range opts { + err := opt(ps) + if err != nil { + return nil, err + } + } + + if ps.signPolicy.mustSign() { + if ps.signID == "" { + return nil, fmt.Errorf("strict signature usage enabled but message author was disabled") + } + ps.signKey = ps.host.Peerstore().PrivKey(ps.signID) + if ps.signKey == nil { + return nil, fmt.Errorf("can't sign for peer %s: no private key", ps.signID) + } + } + + ps.seenMessages = timecache.NewTimeCacheWithStrategy(ps.seenMsgStrategy, ps.seenMsgTTL) + + if err := ps.disc.Start(ps); err != nil { + return nil, err + } + + rt.Attach(ps) + + for _, id := range rt.Protocols() { + if ps.protoMatchFunc != nil { + h.SetStreamHandlerMatch(id, ps.protoMatchFunc(id), ps.handleNewStream) + } else { + h.SetStreamHandler(id, ps.handleNewStream) + } + } + h.Network().Notify((*PubSubNotif)(ps)) + + ps.val.Start(ps) + + go ps.processLoop(ctx) + + (*PubSubNotif)(ps).Initialize() + + return ps, nil +} + +// MsgIdFunction returns a unique ID for the passed Message, and PubSub can be customized to use any +// implementation of this function by configuring it with the Option from WithMessageIdFn. +type MsgIdFunction func(pmsg *pb.Message) string + +// WithMessageIdFn is an option to customize the way a message ID is computed for a pubsub message. +// The default ID function is DefaultMsgIdFn (concatenate source and seq nr.), +// but it can be customized to e.g. the hash of the message. +func WithMessageIdFn(fn MsgIdFunction) Option { + return func(p *PubSub) error { + p.idGen.Default = fn + return nil + } +} + +// PeerFilter is used to filter pubsub peers. It should return true for peers that are accepted for +// a given bitmask. PubSub can be customized to use any implementation of this function by configuring +// it with the Option from WithPeerFilter. 
+type PeerFilter func(pid peer.ID, bitmask []byte) bool + +// WithPeerFilter is an option to set a filter for pubsub peers. +// The default peer filter is DefaultPeerFilter (which always returns true), but it can be customized +// to any custom implementation. +func WithPeerFilter(filter PeerFilter) Option { + return func(p *PubSub) error { + p.peerFilter = filter + return nil + } +} + +// WithPeerOutboundQueueSize is an option to set the buffer size for outbound messages to a peer +// We start dropping messages to a peer if the outbound queue if full +func WithPeerOutboundQueueSize(size int) Option { + return func(p *PubSub) error { + if size <= 0 { + return errors.New("outbound queue size must always be positive") + } + p.peerOutboundQueueSize = size + return nil + } +} + +// WithMessageSignaturePolicy sets the mode of operation for producing and verifying message signatures. +func WithMessageSignaturePolicy(policy MessageSignaturePolicy) Option { + return func(p *PubSub) error { + p.signPolicy = policy + return nil + } +} + +// WithMessageSigning enables or disables message signing (enabled by default). +// Deprecated: signature verification without message signing, +// or message signing without verification, are not recommended. +func WithMessageSigning(enabled bool) Option { + return func(p *PubSub) error { + if enabled { + p.signPolicy |= msgSigning + } else { + p.signPolicy &^= msgSigning + } + return nil + } +} + +// WithMessageAuthor sets the author for outbound messages to the given peer ID +// (defaults to the host's ID). If message signing is enabled, the private key +// must be available in the host's peerstore. +func WithMessageAuthor(author peer.ID) Option { + return func(p *PubSub) error { + author := author + if author == "" { + author = p.host.ID() + } + p.signID = author + return nil + } +} + +// WithNoAuthor omits the author and seq-number data of messages, and disables the use of signatures. +// Not recommended to use with the default message ID function, see WithMessageIdFn. +func WithNoAuthor() Option { + return func(p *PubSub) error { + p.signID = "" + p.signPolicy &^= msgSigning + return nil + } +} + +// WithStrictSignatureVerification is an option to enable or disable strict message signing. +// When enabled (which is the default), unsigned messages will be discarded. +// Deprecated: signature verification without message signing, +// or message signing without verification, are not recommended. 
+func WithStrictSignatureVerification(required bool) Option { + return func(p *PubSub) error { + if required { + p.signPolicy |= msgVerification + } else { + p.signPolicy &^= msgVerification + } + return nil + } +} + +// WithBlacklist provides an implementation of the blacklist; the default is a +// MapBlacklist +func WithBlacklist(b Blacklist) Option { + return func(p *PubSub) error { + p.blacklist = b + return nil + } +} + +// WithDiscovery provides a discovery mechanism used to bootstrap and provide peers into PubSub +func WithDiscovery(d discovery.Discovery, opts ...DiscoverOpt) Option { + return func(p *PubSub) error { + discoverOpts := defaultDiscoverOptions() + for _, opt := range opts { + err := opt(discoverOpts) + if err != nil { + return err + } + } + + p.disc.discovery = &pubSubDiscovery{Discovery: d, opts: discoverOpts.opts} + p.disc.options = discoverOpts + return nil + } +} + +// WithEventTracer provides a tracer for the pubsub system +func WithEventTracer(tracer EventTracer) Option { + return func(p *PubSub) error { + if p.tracer != nil { + p.tracer.tracer = tracer + } else { + p.tracer = &pubsubTracer{tracer: tracer, pid: p.host.ID(), idGen: p.idGen} + } + return nil + } +} + +// WithRawTracer adds a raw tracer to the pubsub system. +// Multiple tracers can be added using multiple invocations of the option. +func WithRawTracer(tracer RawTracer) Option { + return func(p *PubSub) error { + if p.tracer != nil { + p.tracer.raw = append(p.tracer.raw, tracer) + } else { + p.tracer = &pubsubTracer{raw: []RawTracer{tracer}, pid: p.host.ID(), idGen: p.idGen} + } + return nil + } +} + +// WithMaxMessageSize sets the global maximum message size for pubsub wire +// messages. The default value is 1MiB (DefaultMaxMessageSize). +// +// Observe the following warnings when setting this option. +// +// WARNING #1: Make sure to change the default protocol prefixes for floodsub +// (FloodSubID) and BlossomSub (BlossomSubID). This avoids accidentally joining +// the public default network, which uses the default max message size, and +// therefore will cause messages to be dropped. +// +// WARNING #2: Reducing the default max message limit is fine, if you are +// certain that your application messages will not exceed the new limit. +// However, be wary of increasing the limit, as pubsub networks are naturally +// write-amplifying, i.e. for every message we receive, we send D copies of the +// message to our peers. If those messages are large, the bandwidth requirements +// will grow linearly. Note that propagation is sent on the uplink, which +// traditionally is more constrained than the downlink. Instead, consider +// out-of-band retrieval for large messages, by sending a CID (Content-ID) or +// another type of locator, such that messages can be fetched on-demand, rather +// than being pushed proactively. Under this design, you'd use the pubsub layer +// as a signalling system, rather than a data delivery system. +func WithMaxMessageSize(maxMessageSize int) Option { + return func(ps *PubSub) error { + ps.maxMessageSize = maxMessageSize + return nil + } +} + +// WithProtocolMatchFn sets a custom matching function for protocol selection to +// be used by the protocol handler on the Host's Mux. 
Should be combined with +// WithBlossomSubProtocols feature function for checking if certain protocol features +// are supported +func WithProtocolMatchFn(m ProtocolMatchFn) Option { + return func(ps *PubSub) error { + ps.protoMatchFunc = m + return nil + } +} + +// WithSeenMessagesTTL configures when a previously seen message ID can be forgotten about +func WithSeenMessagesTTL(ttl time.Duration) Option { + return func(ps *PubSub) error { + ps.seenMsgTTL = ttl + return nil + } +} + +// WithSeenMessagesStrategy configures which type of lookup/cleanup strategy is used by the seen messages cache +func WithSeenMessagesStrategy(strategy timecache.Strategy) Option { + return func(ps *PubSub) error { + ps.seenMsgStrategy = strategy + return nil + } +} + +// WithAppSpecificRpcInspector sets a hook that inspect incomings RPCs prior to +// processing them. The inspector is invoked on an accepted RPC just before it +// is handled. If inspector's error is nil, the RPC is handled. Otherwise, it +// is dropped. +func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option { + return func(ps *PubSub) error { + ps.appSpecificRpcInspector = inspector + return nil + } +} + +// processLoop handles all inputs arriving on the channels +func (p *PubSub) processLoop(ctx context.Context) { + defer func() { + // Clean up go routines. + for _, ch := range p.peers { + close(ch) + } + p.peers = nil + p.bitmasks = nil + p.seenMessages.Done() + }() + + for { + select { + case <-p.newPeers: + p.handlePendingPeers() + + case s := <-p.newPeerStream: + pid := s.Conn().RemotePeer() + + ch, ok := p.peers[pid] + if !ok { + log.Warn("new stream for unknown peer: ", pid) + s.Reset() + continue + } + + if p.blacklist.Contains(pid) { + log.Warn("closing stream for blacklisted peer: ", pid) + close(ch) + delete(p.peers, pid) + s.Reset() + continue + } + + p.rt.AddPeer(pid, s.Protocol()) + + case pid := <-p.newPeerError: + delete(p.peers, pid) + + case <-p.peerDead: + p.handleDeadPeers() + + case treq := <-p.getBitmasks: + var out []string + for t := range p.mySubs { + out = append(out, t) + } + treq.resp <- out + case bitmask := <-p.addBitmask: + p.handleAddBitmask(bitmask) + case bitmask := <-p.rmBitmask: + p.handleRemoveBitmask(bitmask) + case sub := <-p.cancelCh: + p.handleRemoveSubscription(sub) + case sub := <-p.addSub: + p.handleAddSubscription(sub) + case relay := <-p.addRelay: + p.handleAddRelay(relay) + case bitmask := <-p.rmRelay: + p.handleRemoveRelay([]byte(bitmask)) + case preq := <-p.getPeers: + tmap, ok := p.bitmasks[string(preq.bitmask)] + if preq.bitmask != nil && !ok { + preq.resp <- nil + continue + } + var peers []peer.ID + for p := range p.peers { + if preq.bitmask != nil { + _, ok := tmap[p] + if !ok { + continue + } + } + peers = append(peers, p) + } + preq.resp <- peers + case rpc := <-p.incoming: + p.handleIncomingRPC(rpc) + + case msg := <-p.sendMsg: + p.publishMessage(msg) + + case req := <-p.addVal: + p.val.AddValidator(req) + + case req := <-p.rmVal: + p.val.RemoveValidator(req) + + case thunk := <-p.eval: + thunk() + + case pid := <-p.blacklistPeer: + log.Infof("Blacklisting peer %s", pid) + p.blacklist.Add(pid) + + ch, ok := p.peers[pid] + if ok { + close(ch) + delete(p.peers, pid) + for t, tmap := range p.bitmasks { + if _, ok := tmap[pid]; ok { + delete(tmap, pid) + p.notifyLeave([]byte(t), pid) + } + } + p.rt.RemovePeer(pid) + } + + case <-ctx.Done(): + log.Info("pubsub processloop shutting down") + return + } + } +} + +func (p *PubSub) handlePendingPeers() { + 
p.newPeersPrioLk.Lock() + + if len(p.newPeersPend) == 0 { + p.newPeersPrioLk.Unlock() + return + } + + newPeers := p.newPeersPend + p.newPeersPend = make(map[peer.ID]struct{}) + p.newPeersPrioLk.Unlock() + + for pid := range newPeers { + if p.host.Network().Connectedness(pid) != network.Connected { + continue + } + + if _, ok := p.peers[pid]; ok { + log.Debug("already have connection to peer: ", pid) + continue + } + + if p.blacklist.Contains(pid) { + log.Warn("ignoring connection from blacklisted peer: ", pid) + continue + } + + messages := make(chan *RPC, p.peerOutboundQueueSize) + messages <- p.getHelloPacket() + go p.handleNewPeer(p.ctx, pid, messages) + p.peers[pid] = messages + } +} + +func (p *PubSub) handleDeadPeers() { + p.peerDeadPrioLk.Lock() + + if len(p.peerDeadPend) == 0 { + p.peerDeadPrioLk.Unlock() + return + } + + deadPeers := p.peerDeadPend + p.peerDeadPend = make(map[peer.ID]struct{}) + p.peerDeadPrioLk.Unlock() + + for pid := range deadPeers { + ch, ok := p.peers[pid] + if !ok { + continue + } + + close(ch) + delete(p.peers, pid) + + for t, tmap := range p.bitmasks { + if _, ok := tmap[pid]; ok { + delete(tmap, pid) + p.notifyLeave([]byte(t), pid) + } + } + + p.rt.RemovePeer(pid) + + if p.host.Network().Connectedness(pid) == network.Connected { + backoffDelay, err := p.deadPeerBackoff.updateAndGet(pid) + if err != nil { + log.Debug(err) + continue + } + + // still connected, must be a duplicate connection being closed. + // we respawn the writer as we need to ensure there is a stream active + log.Debugf("peer declared dead but still connected; respawning writer: %s", pid) + messages := make(chan *RPC, p.peerOutboundQueueSize) + messages <- p.getHelloPacket() + p.peers[pid] = messages + go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages) + } + } +} + +// handleAddBitmask adds a tracker for a particular bitmask. +// Only called from processLoop. +func (p *PubSub) handleAddBitmask(req *addBitmaskReq) { + bitmask := req.bitmask + bitmaskID := bitmask.bitmask + + t, ok := p.myBitmasks[string(bitmaskID)] + if ok { + req.resp <- t + return + } + + p.myBitmasks[string(bitmaskID)] = bitmask + req.resp <- bitmask +} + +// handleRemoveBitmask removes Bitmask tracker from bookkeeping. +// Only called from processLoop. +func (p *PubSub) handleRemoveBitmask(req *rmBitmaskReq) { + bitmask := p.myBitmasks[string(req.bitmask.bitmask)] + + if bitmask == nil { + req.resp <- nil + return + } + + if len(bitmask.evtHandlers) == 0 && + len(p.mySubs[string(req.bitmask.bitmask)]) == 0 && + p.myRelays[string(req.bitmask.bitmask)] == 0 { + delete(p.myBitmasks, string(bitmask.bitmask)) + req.resp <- nil + return + } + + req.resp <- fmt.Errorf("cannot close bitmask: outstanding event handlers or subscriptions") +} + +// handleRemoveSubscription removes Subscription sub from bookeeping. +// If this was the last subscription and no more relays exist for a given bitmask, +// it will also announce that this node is not subscribing to this bitmask anymore. +// Only called from processLoop. 
+func (p *PubSub) handleRemoveSubscription(sub *Subscription) { + subs := p.mySubs[string(sub.bitmask)] + + if subs == nil { + return + } + + sub.err = ErrSubscriptionCancelled + sub.close() + delete(subs, sub) + + if len(subs) == 0 { + delete(p.mySubs, string(sub.bitmask)) + + // stop announcing only if there are no more subs and relays + if p.myRelays[string(sub.bitmask)] == 0 { + p.disc.StopAdvertise(sub.bitmask) + p.announce(sub.bitmask, false) + p.rt.Leave(sub.bitmask) + } + } +} + +// handleAddSubscription adds a Subscription for a particular bitmask. If it is +// the first subscription and no relays exist so far for the bitmask, it will +// announce that this node subscribes to the bitmask. +// Only called from processLoop. +func (p *PubSub) handleAddSubscription(req *addSubReq) { + sub := req.sub + subs := p.mySubs[string(sub.bitmask)] + + // announce we want this bitmask if neither subs nor relays exist so far + if len(subs) == 0 && p.myRelays[string(sub.bitmask)] == 0 { + p.disc.Advertise(sub.bitmask) + p.announce(sub.bitmask, true) + p.rt.Join(sub.bitmask) + } + + // make new if not there + if subs == nil { + p.mySubs[string(sub.bitmask)] = make(map[*Subscription]struct{}) + } + + sub.cancelCh = p.cancelCh + + p.mySubs[string(sub.bitmask)][sub] = struct{}{} + + req.resp <- sub +} + +// handleAddRelay adds a relay for a particular bitmask. If it is +// the first relay and no subscriptions exist so far for the bitmask , it will +// announce that this node relays for the bitmask. +// Only called from processLoop. +func (p *PubSub) handleAddRelay(req *addRelayReq) { + bitmask := req.bitmask + + p.myRelays[string(bitmask)]++ + + // announce we want this bitmask if neither relays nor subs exist so far + if p.myRelays[string(bitmask)] == 1 && len(p.mySubs[string(bitmask)]) == 0 { + p.disc.Advertise(bitmask) + p.announce(bitmask, true) + p.rt.Join(bitmask) + } + + // flag used to prevent calling cancel function multiple times + isCancelled := false + + relayCancelFunc := func() { + if isCancelled { + return + } + + select { + case p.rmRelay <- string(bitmask): + isCancelled = true + case <-p.ctx.Done(): + } + } + + req.resp <- relayCancelFunc +} + +// handleRemoveRelay removes one relay reference from bookkeeping. +// If this was the last relay reference and no more subscriptions exist +// for a given bitmask, it will also announce that this node is not relaying +// for this bitmask anymore. +// Only called from processLoop. +func (p *PubSub) handleRemoveRelay(bitmask []byte) { + if p.myRelays[string(bitmask)] == 0 { + return + } + + p.myRelays[string(bitmask)]-- + + if p.myRelays[string(bitmask)] == 0 { + delete(p.myRelays, string(bitmask)) + + // stop announcing only if there are no more relays and subs + if len(p.mySubs[string(bitmask)]) == 0 { + p.disc.StopAdvertise(bitmask) + p.announce(bitmask, false) + p.rt.Leave(bitmask) + } + } +} + +// announce announces whether or not this node is interested in a given bitmask +// Only called from processLoop. 
+func (p *PubSub) announce(bitmask []byte, sub bool) { + subopt := &pb.RPC_SubOpts{ + Bitmask: bitmask, + Subscribe: sub, + } + + out := rpcWithSubs(subopt) + for pid, peer := range p.peers { + select { + case peer <- out: + p.tracer.SendRPC(out, pid) + default: + log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) + p.tracer.DropRPC(out, pid) + go p.announceRetry(pid, bitmask, sub) + } + } +} + +func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) { + time.Sleep(time.Duration(1+rand.Intn(1000)) * time.Millisecond) + + retry := func() { + _, okSubs := p.mySubs[string(bitmask)] + _, okRelays := p.myRelays[string(bitmask)] + + ok := okSubs || okRelays + + if (ok && sub) || (!ok && !sub) { + p.doAnnounceRetry(pid, bitmask, sub) + } + } + + select { + case p.eval <- retry: + case <-p.ctx.Done(): + } +} + +func (p *PubSub) doAnnounceRetry(pid peer.ID, bitmask []byte, sub bool) { + peer, ok := p.peers[pid] + if !ok { + return + } + + subopt := &pb.RPC_SubOpts{ + Bitmask: bitmask, + Subscribe: sub, + } + + out := rpcWithSubs(subopt) + select { + case peer <- out: + p.tracer.SendRPC(out, pid) + default: + log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) + p.tracer.DropRPC(out, pid) + go p.announceRetry(pid, bitmask, sub) + } +} + +// notifySubs sends a given message to all corresponding subscribers. +// Only called from processLoop. +func (p *PubSub) notifySubs(msg *Message) { + bitmask := msg.GetBitmask() + subs := p.mySubs[string(bitmask)] + for f := range subs { + select { + case f.ch <- msg: + default: + p.tracer.UndeliverableMessage(msg) + log.Infof("Can't deliver message to subscription for bitmask %s; subscriber too slow", bitmask) + } + } +} + +// seenMessage returns whether we already saw this message before +func (p *PubSub) seenMessage(id string) bool { + return p.seenMessages.Has(id) +} + +// markSeen marks a message as seen such that seenMessage returns `true' for the given id +// returns true if the message was freshly marked +func (p *PubSub) markSeen(id string) bool { + return p.seenMessages.Add(id) +} + +// subscribedToMessage returns whether we are subscribed to one of the bitmasks +// of a given message +func (p *PubSub) subscribedToMsg(msg *pb.Message) bool { + if len(p.mySubs) == 0 { + return false + } + + bitmask := msg.GetBitmask() + _, ok := p.mySubs[string(bitmask)] + + return ok +} + +// canRelayMsg returns whether we are able to relay for one of the bitmasks +// of a given message +func (p *PubSub) canRelayMsg(msg *pb.Message) bool { + if len(p.myRelays) == 0 { + return false + } + + bitmask := msg.GetBitmask() + relays := p.myRelays[string(bitmask)] + + return relays > 0 +} + +func (p *PubSub) notifyLeave(bitmask []byte, pid peer.ID) { + if t, ok := p.myBitmasks[string(bitmask)]; ok { + t.sendNotification(PeerEvent{PeerLeave, pid}) + } +} + +func (p *PubSub) handleIncomingRPC(rpc *RPC) { + // pass the rpc through app specific validation (if any available). 
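+	// Processing order for an incoming RPC: app-specific inspection (when configured),
+	// tracing, subscription bookkeeping and filtering, then the router's AcceptFrom
+	// verdict decides whether payload messages reach the validation pipeline before
+	// control messages are handed to the router via HandleRPC.
+	// A hedged sketch of registering an inspector (h, rt and maxBatch are placeholders):
+	//
+	//	ps, _ := NewPubSub(ctx, h, rt, WithAppSpecificRpcInspector(
+	//		func(p peer.ID, r *RPC) error {
+	//			if len(r.GetPublish()) > maxBatch {
+	//				return fmt.Errorf("too many messages in one RPC")
+	//			}
+	//			return nil
+	//		}))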
+ if p.appSpecificRpcInspector != nil { + // check if the RPC is allowed by the external inspector + if err := p.appSpecificRpcInspector(rpc.from, rpc); err != nil { + log.Debugf("application-specific inspection failed, rejecting incoming rpc: %s", err) + return // reject the RPC + } + } + + p.tracer.RecvRPC(rpc) + + subs := rpc.GetSubscriptions() + if len(subs) != 0 && p.subFilter != nil { + var err error + subs, err = p.subFilter.FilterIncomingSubscriptions(rpc.from, subs) + if err != nil { + log.Debugf("subscription filter error: %s; ignoring RPC", err) + return + } + } + + for _, subopt := range subs { + t := subopt.GetBitmask() + + if subopt.GetSubscribe() { + tmap, ok := p.bitmasks[string(t)] + if !ok { + tmap = make(map[peer.ID]struct{}) + p.bitmasks[string(t)] = tmap + } + + if _, ok = tmap[rpc.from]; !ok { + tmap[rpc.from] = struct{}{} + if bitmask, ok := p.myBitmasks[string(t)]; ok { + peer := rpc.from + bitmask.sendNotification(PeerEvent{PeerJoin, peer}) + } + } + } else { + tmap, ok := p.bitmasks[string(t)] + if !ok { + continue + } + + if _, ok := tmap[rpc.from]; ok { + delete(tmap, rpc.from) + p.notifyLeave(t, rpc.from) + } + } + } + + // ask the router to vet the peer before commiting any processing resources + switch p.rt.AcceptFrom(rpc.from) { + case AcceptNone: + log.Debugf("received RPC from router graylisted peer %s; dropping RPC", rpc.from) + return + + case AcceptControl: + if len(rpc.GetPublish()) > 0 { + log.Debugf("peer %s was throttled by router; ignoring %d payload messages", rpc.from, len(rpc.GetPublish())) + } + p.tracer.ThrottlePeer(rpc.from) + + case AcceptAll: + for _, pmsg := range rpc.GetPublish() { + if !(p.subscribedToMsg(pmsg) || p.canRelayMsg(pmsg)) { + log.Debug("received message in bitmask we didn't subscribe to; ignoring message") + continue + } + + p.pushMsg(&Message{pmsg, "", rpc.from, nil, false}) + } + } + + p.rt.HandleRPC(rpc) +} + +// DefaultMsgIdFn returns a unique ID of the passed Message +func DefaultMsgIdFn(pmsg *pb.Message) string { + return string(pmsg.GetFrom()) + string(pmsg.GetSeqno()) +} + +// DefaultPeerFilter accepts all peers on all bitmasks +func DefaultPeerFilter(pid peer.ID, bitmask []byte) bool { + return true +} + +// pushMsg pushes a message performing validation as necessary +func (p *PubSub) pushMsg(msg *Message) { + src := msg.ReceivedFrom + // reject messages from blacklisted peers + if p.blacklist.Contains(src) { + log.Debugf("dropping message from blacklisted peer %s", src) + p.tracer.RejectMessage(msg, RejectBlacklstedPeer) + return + } + + // even if they are forwarded by good peers + if p.blacklist.Contains(msg.GetFrom()) { + log.Debugf("dropping message from blacklisted source %s", src) + p.tracer.RejectMessage(msg, RejectBlacklistedSource) + return + } + + err := p.checkSigningPolicy(msg) + if err != nil { + log.Debugf("dropping message from %s: %s", src, err) + return + } + + // reject messages claiming to be from ourselves but not locally published + self := p.host.ID() + if peer.ID(msg.GetFrom()) == self && src != self { + log.Debugf("dropping message claiming to be from self but forwarded from %s", src) + p.tracer.RejectMessage(msg, RejectSelfOrigin) + return + } + + // have we already seen and validated this message? 
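+	// The ID is computed by the (possibly bitmask-specific) msgIDGenerator; duplicates
+	// are reported to the tracer and dropped here, so signature verification and user
+	// validators run at most once per unique message.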
+ id := p.idGen.ID(msg) + if p.seenMessage(id) { + p.tracer.DuplicateMessage(msg) + return + } + + if !p.val.Push(src, msg) { + return + } + + if p.markSeen(id) { + p.publishMessage(msg) + } +} + +func (p *PubSub) checkSigningPolicy(msg *Message) error { + // reject unsigned messages when strict before we even process the id + if p.signPolicy.mustVerify() { + if p.signPolicy.mustSign() { + if msg.Signature == nil { + p.tracer.RejectMessage(msg, RejectMissingSignature) + return ValidationError{Reason: RejectMissingSignature} + } + // Actual signature verification happens in the validation pipeline, + // after checking if the message was already seen or not, + // to avoid unnecessary signature verification processing-cost. + } else { + if msg.Signature != nil { + p.tracer.RejectMessage(msg, RejectUnexpectedSignature) + return ValidationError{Reason: RejectUnexpectedSignature} + } + // If we are expecting signed messages, and not authoring messages, + // then do no accept seq numbers, from data, or key data. + // The default msgID function still relies on Seqno and From, + // but is not used if we are not authoring messages ourselves. + if p.signID == "" { + if msg.Seqno != nil || msg.From != nil || msg.Key != nil { + p.tracer.RejectMessage(msg, RejectUnexpectedAuthInfo) + return ValidationError{Reason: RejectUnexpectedAuthInfo} + } + } + } + } + + return nil +} + +func (p *PubSub) publishMessage(msg *Message) { + p.tracer.DeliverMessage(msg) + p.notifySubs(msg) + if !msg.Local { + p.rt.Publish(msg) + } +} + +type addBitmaskReq struct { + bitmask *Bitmask + resp chan *Bitmask +} + +type rmBitmaskReq struct { + bitmask *Bitmask + resp chan error +} + +type BitmaskOptions struct{} + +type BitmaskOpt func(t *Bitmask) error + +// WithBitmaskMessageIdFn sets custom MsgIdFunction for a Bitmask, enabling bitmasks to have own msg id generation rules. +func WithBitmaskMessageIdFn(msgId MsgIdFunction) BitmaskOpt { + return func(t *Bitmask) error { + t.p.idGen.Set(t.bitmask, msgId) + return nil + } +} + +// Join joins the bitmask and returns a Bitmask handle. Only one Bitmask handle should exist per bitmask, and Join will error if +// the Bitmask handle already exists. +func (p *PubSub) Join(bitmask []byte, opts ...BitmaskOpt) (*Bitmask, error) { + t, ok, err := p.tryJoin(bitmask, opts...) 
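+	// ok reports whether tryJoin created a new handle; an existing handle is treated as
+	// an error below, since only one Bitmask handle should exist per bitmask.
+	// Typical usage (sketch): bm, err := ps.Join(bitmask); sub, err := bm.Subscribe();
+	// err = bm.Publish(ctx, data), per the deprecation notes on PubSub.Subscribe/Publish.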
+ if err != nil { + return nil, err + } + + if !ok { + return nil, fmt.Errorf("bitmask already exists") + } + + return t, nil +} + +// tryJoin is an internal function that tries to join a bitmask +// Returns the bitmask if it can be created or found +// Returns true if the bitmask was newly created, false otherwise +// Can be removed once pubsub.Publish() and pubsub.Subscribe() are removed +func (p *PubSub) tryJoin(bitmask []byte, opts ...BitmaskOpt) (*Bitmask, bool, error) { + if p.subFilter != nil && !p.subFilter.CanSubscribe(bitmask) { + return nil, false, fmt.Errorf("bitmask is not allowed by the subscription filter") + } + + t := &Bitmask{ + p: p, + bitmask: bitmask, + evtHandlers: make(map[*BitmaskEventHandler]struct{}), + } + + for _, opt := range opts { + err := opt(t) + if err != nil { + return nil, false, err + } + } + + resp := make(chan *Bitmask, 1) + select { + case t.p.addBitmask <- &addBitmaskReq{ + bitmask: t, + resp: resp, + }: + case <-t.p.ctx.Done(): + return nil, false, t.p.ctx.Err() + } + returnedBitmask := <-resp + + if returnedBitmask != t { + return returnedBitmask, false, nil + } + + return t, true, nil +} + +type addSubReq struct { + sub *Subscription + resp chan *Subscription +} + +type SubOpt func(sub *Subscription) error + +// Subscribe returns a new Subscription for the given bitmask. +// Note that subscription is not an instantaneous operation. It may take some time +// before the subscription is processed by the pubsub main loop and propagated to our peers. +// +// Deprecated: use pubsub.Join() and bitmask.Subscribe() instead +func (p *PubSub) Subscribe(bitmask []byte, opts ...SubOpt) (*Subscription, error) { + // ignore whether the bitmask was newly created or not, since either way we have a valid bitmask to work with + bitmaskHandle, _, err := p.tryJoin(bitmask) + if err != nil { + return nil, err + } + + return bitmaskHandle.Subscribe(opts...) +} + +// WithBufferSize is a Subscribe option to customize the size of the subscribe output buffer. +// The default length is 32 but it can be configured to avoid dropping messages if the consumer is not reading fast +// enough. +func WithBufferSize(size int) SubOpt { + return func(sub *Subscription) error { + sub.ch = make(chan *Message, size) + return nil + } +} + +type bitmaskReq struct { + resp chan []string +} + +// GetBitmasks returns the bitmasks this node is subscribed to. +func (p *PubSub) GetBitmasks() []string { + out := make(chan []string, 1) + select { + case p.getBitmasks <- &bitmaskReq{resp: out}: + case <-p.ctx.Done(): + return nil + } + return <-out +} + +// Publish publishes data to the given bitmask. +// +// Deprecated: use pubsub.Join() and bitmask.Publish() instead +func (p *PubSub) Publish(bitmask []byte, data []byte, opts ...PubOpt) error { + // ignore whether the bitmask was newly created or not, since either way we have a valid bitmask to work with + t, _, err := p.tryJoin(bitmask) + if err != nil { + return err + } + + return t.Publish(context.TODO(), data, opts...) +} + +func (p *PubSub) nextSeqno() []byte { + seqno := make([]byte, 8) + counter := atomic.AddUint64(&p.counter, 1) + binary.BigEndian.PutUint64(seqno, counter) + return seqno +} + +type listPeerReq struct { + resp chan []peer.ID + bitmask []byte +} + +// ListPeers returns a list of peers we are connected to in the given bitmask. 
+func (p *PubSub) ListPeers(bitmask []byte) []peer.ID { + out := make(chan []peer.ID) + select { + case p.getPeers <- &listPeerReq{ + resp: out, + bitmask: bitmask, + }: + case <-p.ctx.Done(): + return nil + } + return <-out +} + +// BlacklistPeer blacklists a peer; all messages from this peer will be unconditionally dropped. +func (p *PubSub) BlacklistPeer(pid peer.ID) { + select { + case p.blacklistPeer <- pid: + case <-p.ctx.Done(): + } +} + +// RegisterBitmaskValidator registers a validator for bitmask. +// By default validators are asynchronous, which means they will run in a separate goroutine. +// The number of active goroutines is controlled by global and per bitmask validator +// throttles; if it exceeds the throttle threshold, messages will be dropped. +func (p *PubSub) RegisterBitmaskValidator(bitmask []byte, val interface{}, opts ...ValidatorOpt) error { + addVal := &addValReq{ + bitmask: bitmask, + validate: val, + resp: make(chan error, 1), + } + + for _, opt := range opts { + err := opt(addVal) + if err != nil { + return err + } + } + + select { + case p.addVal <- addVal: + case <-p.ctx.Done(): + return p.ctx.Err() + } + return <-addVal.resp +} + +// UnregisterBitmaskValidator removes a validator from a bitmask. +// Returns an error if there was no validator registered with the bitmask. +func (p *PubSub) UnregisterBitmaskValidator(bitmask []byte) error { + rmVal := &rmValReq{ + bitmask: bitmask, + resp: make(chan error, 1), + } + + select { + case p.rmVal <- rmVal: + case <-p.ctx.Done(): + return p.ctx.Err() + } + return <-rmVal.resp +} + +type RelayCancelFunc func() + +type addRelayReq struct { + bitmask []byte + resp chan RelayCancelFunc +} diff --git a/go-libp2p-blossomsub/pubsub_test.go b/go-libp2p-blossomsub/pubsub_test.go new file mode 100644 index 0000000..5741d38 --- /dev/null +++ b/go-libp2p-blossomsub/pubsub_test.go @@ -0,0 +1,49 @@ +package blossomsub + +import ( + "context" + "testing" + "time" +) + +// See https://source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/issues/426 +func TestPubSubRemovesBlacklistedPeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + hosts := getNetHosts(t, ctx, 2) + + bl := NewMapBlacklist() + + psubs0 := getPubsub(ctx, hosts[0]) + psubs1 := getPubsub(ctx, hosts[1], WithBlacklist(bl)) + connect(t, hosts[0], hosts[1]) + + // Bad peer is blacklisted after it has connected. + // Calling p.BlacklistPeer directly does the right thing but we should also clean + // up the peer if it has been added the the blacklist by another means. + bl.Add(hosts[0].ID()) + + _, err := psubs0.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + + sub1, err := psubs1.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 100) + + psubs0.Publish([]byte{0x7e, 0x57}, []byte("message")) + + wctx, cancel2 := context.WithTimeout(ctx, 1*time.Second) + defer cancel2() + + _, _ = sub1.Next(wctx) + + // Explicitly cancel context so PubSub cleans up peer channels. + // Issue 426 reports a panic due to a peer channel being closed twice. 
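+	// Cancelling the root context stops processLoop, whose deferred cleanup closes every
+	// remaining peer channel; if the blacklisted peer's channel had already been closed
+	// without being removed from p.peers, that cleanup is where the double close would panic.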
+ cancel() + time.Sleep(time.Millisecond * 100) +} diff --git a/go-libp2p-blossomsub/randomsub.go b/go-libp2p-blossomsub/randomsub.go new file mode 100644 index 0000000..8bde19d --- /dev/null +++ b/go-libp2p-blossomsub/randomsub.go @@ -0,0 +1,168 @@ +package blossomsub + +import ( + "context" + "math" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +const ( + RandomSubID = protocol.ID("/randomsub/1.0.0") +) + +var ( + RandomSubD = 6 +) + +// NewRandomSub returns a new PubSub object using RandomSubRouter as the router. +func NewRandomSub(ctx context.Context, h host.Host, size int, opts ...Option) (*PubSub, error) { + rt := &RandomSubRouter{ + size: size, + peers: make(map[peer.ID]protocol.ID), + } + return NewPubSub(ctx, h, rt, opts...) +} + +// RandomSubRouter is a router that implements a random propagation strategy. +// For each message, it selects the square root of the network size peers, with a min of RandomSubD, +// and forwards the message to them. +type RandomSubRouter struct { + p *PubSub + peers map[peer.ID]protocol.ID + size int + tracer *pubsubTracer +} + +func (rs *RandomSubRouter) Protocols() []protocol.ID { + return []protocol.ID{RandomSubID, FloodSubID} +} + +func (rs *RandomSubRouter) Attach(p *PubSub) { + rs.p = p + rs.tracer = p.tracer +} + +func (rs *RandomSubRouter) AddPeer(p peer.ID, proto protocol.ID) { + rs.tracer.AddPeer(p, proto) + rs.peers[p] = proto +} + +func (rs *RandomSubRouter) RemovePeer(p peer.ID) { + rs.tracer.RemovePeer(p) + delete(rs.peers, p) +} + +func (rs *RandomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool { + // check all peers in the bitmask + tmap, ok := rs.p.bitmasks[string(bitmask)] + if !ok { + return false + } + + fsPeers := 0 + rsPeers := 0 + + // count floodsub and randomsub peers + for p := range tmap { + switch rs.peers[p] { + case FloodSubID: + fsPeers++ + case RandomSubID: + rsPeers++ + } + } + + if suggested == 0 { + suggested = RandomSubD + } + + if fsPeers+rsPeers >= suggested { + return true + } + + if rsPeers >= RandomSubD { + return true + } + + return false +} + +func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { + return AcceptAll +} + +func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} + +func (rs *RandomSubRouter) Publish(msg *Message) { + from := msg.ReceivedFrom + + tosend := make(map[peer.ID]struct{}) + rspeers := make(map[peer.ID]struct{}) + src := peer.ID(msg.GetFrom()) + + bitmask := msg.GetBitmask() + tmap, ok := rs.p.bitmasks[string(bitmask)] + if !ok { + return + } + + for p := range tmap { + if p == from || p == src { + continue + } + + if rs.peers[p] == FloodSubID { + tosend[p] = struct{}{} + } else { + rspeers[p] = struct{}{} + } + } + + if len(rspeers) > RandomSubD { + target := RandomSubD + sqrt := int(math.Ceil(math.Sqrt(float64(rs.size)))) + if sqrt > target { + target = sqrt + } + if target > len(rspeers) { + target = len(rspeers) + } + xpeers := peerMapToList(rspeers) + shufflePeers(xpeers) + xpeers = xpeers[:target] + for _, p := range xpeers { + tosend[p] = struct{}{} + } + } else { + for p := range rspeers { + tosend[p] = struct{}{} + } + } + + out := rpcWithMessages(msg.Message) + for p := range tosend { + mch, ok := rs.p.peers[p] + if !ok { + continue + } + + select { + case mch <- out: + rs.tracer.SendRPC(out, p) + default: + log.Infof("dropping message to peer %s: queue full", p) + rs.tracer.DropRPC(out, p) + } + } +} + +func (rs *RandomSubRouter) Join(bitmask []byte) { + 
	rs.tracer.Join(bitmask)
+}
+
+func (rs *RandomSubRouter) Leave(bitmask []byte) {
+	rs.tracer.Leave(bitmask)
+}
diff --git a/go-libp2p-blossomsub/randomsub_test.go b/go-libp2p-blossomsub/randomsub_test.go
new file mode 100644
index 0000000..8e993b9
--- /dev/null
+++ b/go-libp2p-blossomsub/randomsub_test.go
@@ -0,0 +1,192 @@
+package blossomsub
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/host"
+)
+
+func getRandomsub(ctx context.Context, h host.Host, size int, opts ...Option) *PubSub {
+	ps, err := NewRandomSub(ctx, h, size, opts...)
+	if err != nil {
+		panic(err)
+	}
+	return ps
+}
+
+func getRandomsubs(ctx context.Context, hs []host.Host, size int, opts ...Option) []*PubSub {
+	var psubs []*PubSub
+	for _, h := range hs {
+		psubs = append(psubs, getRandomsub(ctx, h, size, opts...))
+	}
+	return psubs
+}
+
+func tryReceive(sub *Subscription) *Message {
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	m, err := sub.Next(ctx)
+	if err != nil {
+		return nil
+	} else {
+		return m
+	}
+}
+
+func TestRandomsubSmall(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	hosts := getNetHosts(t, ctx, 10)
+	psubs := getRandomsubs(ctx, hosts, 10)
+
+	connectAll(t, hosts)
+
+	var subs []*Subscription
+	for _, ps := range psubs {
+		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
+		if err != nil {
+			t.Fatal(err)
+		}
+		subs = append(subs, sub)
+	}
+
+	time.Sleep(time.Second)
+
+	count := 0
+	for i := 0; i < 10; i++ {
+		msg := []byte(fmt.Sprintf("message %d", i))
+		psubs[i].Publish([]byte{0x7e, 0x57}, msg)
+
+		for _, sub := range subs {
+			if tryReceive(sub) != nil {
+				count++
+			}
+		}
+	}
+
+	if count < 7*len(hosts) {
+		t.Fatalf("received too few messages; expected at least %d but got %d", 7*len(hosts), count)
+	}
+}
+
+func TestRandomsubBig(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	hosts := getNetHosts(t, ctx, 50)
+	psubs := getRandomsubs(ctx, hosts, 50)
+
+	connectSome(t, hosts, 12)
+
+	var subs []*Subscription
+	for _, ps := range psubs {
+		sub, err := ps.Subscribe([]byte{0x7e, 0x57})
+		if err != nil {
+			t.Fatal(err)
+		}
+		subs = append(subs, sub)
+	}
+
+	time.Sleep(time.Second)
+
+	count := 0
+	for i := 0; i < 10; i++ {
+		msg := []byte(fmt.Sprintf("message %d", i))
+		psubs[i].Publish([]byte{0x7e, 0x57}, msg)
+
+		for _, sub := range subs {
+			if tryReceive(sub) != nil {
+				count++
+			}
+		}
+	}
+
+	if count < 7*len(hosts) {
+		t.Fatalf("received too few messages; expected at least %d but got %d", 7*len(hosts), count)
+	}
+}
+
+func TestRandomsubMixed(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	hosts := getNetHosts(t, ctx, 40)
+	fsubs := getPubsubs(ctx, hosts[:10])
+	rsubs := getRandomsubs(ctx, hosts[10:], 30)
+	psubs := append(fsubs, rsubs...)
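+	// mixed network: routers from getPubsubs run alongside RandomSub routers sized for
+	// the full 40-host network; the assertion below only requires that most messages
+	// propagate across a partially connected mesh.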
+ + connectSome(t, hosts, 12) + + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + subs = append(subs, sub) + } + + time.Sleep(time.Second) + + count := 0 + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0x7e, 0x57}, msg) + + for _, sub := range subs { + if tryReceive(sub) != nil { + count++ + } + } + } + + if count < 7*len(hosts) { + t.Fatalf("received too few messages; expected at least %d but got %d", 9*len(hosts), count) + } +} + +func TestRandomsubEnoughPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 40) + fsubs := getPubsubs(ctx, hosts[:10]) + rsubs := getRandomsubs(ctx, hosts[10:], 30) + psubs := append(fsubs, rsubs...) + + connectSome(t, hosts, 12) + + for _, ps := range psubs { + _, err := ps.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + } + + time.Sleep(time.Second) + + res := make(chan bool, 1) + rsubs[0].eval <- func() { + rs := rsubs[0].rt.(*RandomSubRouter) + res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 0) + } + + enough := <-res + if !enough { + t.Fatal("expected enough peers") + } + + rsubs[0].eval <- func() { + rs := rsubs[0].rt.(*RandomSubRouter) + res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 100) + } + + enough = <-res + if !enough { + t.Fatal("expected enough peers") + } +} diff --git a/go-libp2p-blossomsub/score.go b/go-libp2p-blossomsub/score.go new file mode 100644 index 0000000..32a66d8 --- /dev/null +++ b/go-libp2p-blossomsub/score.go @@ -0,0 +1,1081 @@ +package blossomsub + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + manet "github.com/multiformats/go-multiaddr/net" +) + +type peerStats struct { + // true if the peer is currently connected + connected bool + + // expiration time of the score stats for disconnected peers + expire time.Time + + // per topc stats + bitmasks map[string]*bitmaskStats + + // IP tracking; store as string for easy processing + ips []string + + // IP whitelisting cache + ipWhitelist map[string]bool + + // behavioural pattern penalties (applied by the router) + behaviourPenalty float64 +} + +type bitmaskStats struct { + // true if the peer is in the mesh + inMesh bool + + // time when the peer was (last) GRAFTed; valid only when in mesh + graftTime time.Time + + // time in mesh (updated during refresh/decay to avoid calling gettimeofday on + // every score invocation) + meshTime time.Duration + + // first message deliveries + firstMessageDeliveries float64 + + // mesh message deliveries + meshMessageDeliveries float64 + + // true if the peer has been enough time in the mesh to activate mess message deliveries + meshMessageDeliveriesActive bool + + // sticky mesh rate failure penalty counter + meshFailurePenalty float64 + + // invalid message counter + invalidMessageDeliveries float64 +} + +type peerScore struct { + sync.Mutex + + // the score parameters + params *PeerScoreParams + + // per peer stats for score calculation + peerStats map[peer.ID]*peerStats + + // IP colocation tracking; maps IP => set of peers. 
+ peerIPs map[string]map[peer.ID]struct{} + + // message delivery tracking + deliveries *messageDeliveries + + idGen *msgIDGenerator + host host.Host + + // debugging inspection + inspect PeerScoreInspectFn + inspectEx ExtendedPeerScoreInspectFn + inspectPeriod time.Duration +} + +var _ RawTracer = (*peerScore)(nil) + +type messageDeliveries struct { + seenMsgTTL time.Duration + + records map[string]*deliveryRecord + + // queue for cleaning up old delivery records + head *deliveryEntry + tail *deliveryEntry +} + +type deliveryRecord struct { + status int + firstSeen time.Time + validated time.Time + peers map[peer.ID]struct{} +} + +type deliveryEntry struct { + id string + expire time.Time + next *deliveryEntry +} + +// delivery record status +const ( + deliveryUnknown = iota // we don't know (yet) if the message is valid + deliveryValid // we know the message is valid + deliveryInvalid // we know the message is invalid + deliveryIgnored // we were intructed by the validator to ignore the message + deliveryThrottled // we can't tell if it is valid because validation throttled +) + +type ( + PeerScoreInspectFn = func(map[peer.ID]float64) + ExtendedPeerScoreInspectFn = func(map[peer.ID]*PeerScoreSnapshot) +) + +type PeerScoreSnapshot struct { + Score float64 + Bitmasks map[string]*BitmaskScoreSnapshot + AppSpecificScore float64 + IPColocationFactor float64 + BehaviourPenalty float64 +} + +type BitmaskScoreSnapshot struct { + TimeInMesh time.Duration + FirstMessageDeliveries float64 + MeshMessageDeliveries float64 + InvalidMessageDeliveries float64 +} + +// WithPeerScoreInspect is a BlossomSub router option that enables peer score debugging. +// When this option is enabled, the supplied function will be invoked periodically to allow +// the application to inspect or dump the scores for connected peers. +// The supplied function can have one of two signatures: +// - PeerScoreInspectFn, which takes a map of peer IDs to score. +// - ExtendedPeerScoreInspectFn, which takes a map of peer IDs to +// PeerScoreSnapshots and allows inspection of individual score +// components for debugging peer scoring. +// +// This option must be passed _after_ the WithPeerScore option. +func WithPeerScoreInspect(inspect interface{}, period time.Duration) Option { + return func(ps *PubSub) error { + gs, ok := ps.rt.(*BlossomSubRouter) + if !ok { + return fmt.Errorf("pubsub router is not BlossomSub") + } + + if gs.score == nil { + return fmt.Errorf("peer scoring is not enabled") + } + + if gs.score.inspect != nil || gs.score.inspectEx != nil { + return fmt.Errorf("duplicate peer score inspector") + } + + switch i := inspect.(type) { + case PeerScoreInspectFn: + gs.score.inspect = i + case ExtendedPeerScoreInspectFn: + gs.score.inspectEx = i + default: + return fmt.Errorf("unknown peer score insector type: %v", inspect) + } + + gs.score.inspectPeriod = period + + return nil + } +} + +// implementation +func newPeerScore(params *PeerScoreParams) *peerScore { + seenMsgTTL := params.SeenMsgTTL + if seenMsgTTL == 0 { + seenMsgTTL = TimeCacheDuration + } + return &peerScore{ + params: params, + peerStats: make(map[peer.ID]*peerStats), + peerIPs: make(map[string]map[peer.ID]struct{}), + deliveries: &messageDeliveries{seenMsgTTL: seenMsgTTL, records: make(map[string]*deliveryRecord)}, + idGen: newMsgIdGenerator(), + } +} + +// SetBitmaskScoreParams sets new score parameters for a bitmask. 
+// If the bitmask previously had parameters and the parameters are lowering delivery caps, +// then the score counters are recapped appropriately. +// Note: assumes that the bitmask score parameters have already been validated +func (ps *peerScore) SetBitmaskScoreParams(bitmask []byte, p *BitmaskScoreParams) error { + ps.Lock() + defer ps.Unlock() + + old, exist := ps.params.Bitmasks[string(bitmask)] + ps.params.Bitmasks[string(bitmask)] = p + + if !exist { + return nil + } + + // check to see if the counter Caps are being lowered; if that's the case we need to recap them + recap := false + if p.FirstMessageDeliveriesCap < old.FirstMessageDeliveriesCap { + recap = true + } + if p.MeshMessageDeliveriesCap < old.MeshMessageDeliveriesCap { + recap = true + } + if !recap { + return nil + } + + // recap counters for bitmask + for _, pstats := range ps.peerStats { + tstats, ok := pstats.bitmasks[string(bitmask)] + if !ok { + continue + } + + if tstats.firstMessageDeliveries > p.FirstMessageDeliveriesCap { + tstats.firstMessageDeliveries = p.FirstMessageDeliveriesCap + } + + if tstats.meshMessageDeliveries > p.MeshMessageDeliveriesCap { + tstats.meshMessageDeliveries = p.MeshMessageDeliveriesCap + } + } + + return nil +} + +// router interface +func (ps *peerScore) Start(gs *BlossomSubRouter) { + if ps == nil { + return + } + + ps.idGen = gs.p.idGen + ps.host = gs.p.host + go ps.background(gs.p.ctx) +} + +func (ps *peerScore) Score(p peer.ID) float64 { + if ps == nil { + return 0 + } + + ps.Lock() + defer ps.Unlock() + + return ps.score(p) +} + +func (ps *peerScore) score(p peer.ID) float64 { + pstats, ok := ps.peerStats[p] + if !ok { + return 0 + } + + var score float64 + + // bitmask scores + for bitmask, tstats := range pstats.bitmasks { + // the bitmask parameters + bitmaskParams, ok := ps.params.Bitmasks[string(bitmask)] + if !ok { + // we are not scoring this bitmask + continue + } + + // the bitmask score + var bitmaskScore float64 + + // P1: time in Mesh + if tstats.inMesh { + p1 := float64(tstats.meshTime / bitmaskParams.TimeInMeshQuantum) + if p1 > bitmaskParams.TimeInMeshCap { + p1 = bitmaskParams.TimeInMeshCap + } + bitmaskScore += p1 * bitmaskParams.TimeInMeshWeight + } + + // P2: first message deliveries + p2 := tstats.firstMessageDeliveries + bitmaskScore += p2 * bitmaskParams.FirstMessageDeliveriesWeight + + // P3: mesh message deliveries + if tstats.meshMessageDeliveriesActive { + if tstats.meshMessageDeliveries < bitmaskParams.MeshMessageDeliveriesThreshold { + deficit := bitmaskParams.MeshMessageDeliveriesThreshold - tstats.meshMessageDeliveries + p3 := deficit * deficit + bitmaskScore += p3 * bitmaskParams.MeshMessageDeliveriesWeight + } + } + + // P3b: + // NOTE: the weight of P3b is negative (validated in BitmaskScoreParams.validate), so this detracts. + p3b := tstats.meshFailurePenalty + bitmaskScore += p3b * bitmaskParams.MeshFailurePenaltyWeight + + // P4: invalid messages + // NOTE: the weight of P4 is negative (validated in BitmaskScoreParams.validate), so this detracts. 
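+		// The counter is squared, so repeated invalid deliveries are penalized
+		// super-linearly rather than proportionally.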
+ p4 := (tstats.invalidMessageDeliveries * tstats.invalidMessageDeliveries) + bitmaskScore += p4 * bitmaskParams.InvalidMessageDeliveriesWeight + + // update score, mixing with bitmask weight + score += bitmaskScore * bitmaskParams.BitmaskWeight + } + + // apply the bitmask score cap, if any + if ps.params.BitmaskScoreCap > 0 && score > ps.params.BitmaskScoreCap { + score = ps.params.BitmaskScoreCap + } + + // P5: application-specific score + p5 := ps.params.AppSpecificScore(p) + score += p5 * ps.params.AppSpecificWeight + + // P6: IP collocation factor + p6 := ps.ipColocationFactor(p) + score += p6 * ps.params.IPColocationFactorWeight + + // P7: behavioural pattern penalty + if pstats.behaviourPenalty > ps.params.BehaviourPenaltyThreshold { + excess := pstats.behaviourPenalty - ps.params.BehaviourPenaltyThreshold + p7 := excess * excess + score += p7 * ps.params.BehaviourPenaltyWeight + } + + return score +} + +func (ps *peerScore) ipColocationFactor(p peer.ID) float64 { + pstats, ok := ps.peerStats[p] + if !ok { + return 0 + } + + var result float64 +loop: + for _, ip := range pstats.ips { + if len(ps.params.IPColocationFactorWhitelist) > 0 { + if pstats.ipWhitelist == nil { + pstats.ipWhitelist = make(map[string]bool) + } + + whitelisted, ok := pstats.ipWhitelist[ip] + if !ok { + ipObj := net.ParseIP(ip) + for _, ipNet := range ps.params.IPColocationFactorWhitelist { + if ipNet.Contains(ipObj) { + pstats.ipWhitelist[ip] = true + continue loop + } + } + + pstats.ipWhitelist[ip] = false + } + + if whitelisted { + continue loop + } + } + + // P6 has a cliff (IPColocationFactorThreshold); it's only applied iff + // at least that many peers are connected to us from that source IP + // addr. It is quadratic, and the weight is negative (validated by + // PeerScoreParams.validate). + peersInIP := len(ps.peerIPs[ip]) + if peersInIP > ps.params.IPColocationFactorThreshold { + surpluss := float64(peersInIP - ps.params.IPColocationFactorThreshold) + result += surpluss * surpluss + } + } + + return result +} + +// behavioural pattern penalties +func (ps *peerScore) AddPenalty(p peer.ID, count int) { + if ps == nil { + return + } + + ps.Lock() + defer ps.Unlock() + + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + pstats.behaviourPenalty += float64(count) +} + +// periodic maintenance +func (ps *peerScore) background(ctx context.Context) { + refreshScores := time.NewTicker(ps.params.DecayInterval) + defer refreshScores.Stop() + + refreshIPs := time.NewTicker(time.Minute) + defer refreshIPs.Stop() + + gcDeliveryRecords := time.NewTicker(time.Minute) + defer gcDeliveryRecords.Stop() + + var inspectScores <-chan time.Time + if ps.inspect != nil || ps.inspectEx != nil { + ticker := time.NewTicker(ps.inspectPeriod) + defer ticker.Stop() + // also dump at exit for one final sample + defer ps.inspectScores() + inspectScores = ticker.C + } + + for { + select { + case <-refreshScores.C: + ps.refreshScores() + + case <-refreshIPs.C: + ps.refreshIPs() + + case <-gcDeliveryRecords.C: + ps.gcDeliveryRecords() + + case <-inspectScores: + ps.inspectScores() + + case <-ctx.Done(): + return + } + } +} + +// inspectScores dumps all tracked scores into the inspect function. 
+func (ps *peerScore) inspectScores() { + if ps.inspect != nil { + ps.inspectScoresSimple() + } + if ps.inspectEx != nil { + ps.inspectScoresExtended() + } +} + +func (ps *peerScore) inspectScoresSimple() { + ps.Lock() + scores := make(map[peer.ID]float64, len(ps.peerStats)) + for p := range ps.peerStats { + scores[p] = ps.score(p) + } + ps.Unlock() + + // Since this is a user-injected function, it could be performing I/O, and + // we don't want to block the scorer's background loop. Therefore, we launch + // it in a separate goroutine. If the function needs to synchronise, it + // should do so locally. + go ps.inspect(scores) +} + +func (ps *peerScore) inspectScoresExtended() { + ps.Lock() + scores := make(map[peer.ID]*PeerScoreSnapshot, len(ps.peerStats)) + for p, pstats := range ps.peerStats { + pss := new(PeerScoreSnapshot) + pss.Score = ps.score(p) + if len(pstats.bitmasks) > 0 { + pss.Bitmasks = make(map[string]*BitmaskScoreSnapshot, len(pstats.bitmasks)) + for t, ts := range pstats.bitmasks { + tss := &BitmaskScoreSnapshot{ + FirstMessageDeliveries: ts.firstMessageDeliveries, + MeshMessageDeliveries: ts.meshMessageDeliveries, + InvalidMessageDeliveries: ts.invalidMessageDeliveries, + } + if ts.inMesh { + tss.TimeInMesh = ts.meshTime + } + pss.Bitmasks[t] = tss + } + } + pss.AppSpecificScore = ps.params.AppSpecificScore(p) + pss.IPColocationFactor = ps.ipColocationFactor(p) + pss.BehaviourPenalty = pstats.behaviourPenalty + scores[p] = pss + } + ps.Unlock() + + go ps.inspectEx(scores) +} + +// refreshScores decays scores, and purges score records for disconnected peers, +// once their expiry has elapsed. +func (ps *peerScore) refreshScores() { + ps.Lock() + defer ps.Unlock() + + now := time.Now() + for p, pstats := range ps.peerStats { + if !pstats.connected { + // has the retention period expired? + if now.After(pstats.expire) { + // yes, throw it away (but clean up the IP tracking first) + ps.removeIPs(p, pstats.ips) + delete(ps.peerStats, p) + } + + // we don't decay retained scores, as the peer is not active. + // this way the peer cannot reset a negative score by simply disconnecting and reconnecting, + // unless the retention period has ellapsed. + // similarly, a well behaved peer does not lose its score by getting disconnected. 
+ continue + } + + for bitmask, tstats := range pstats.bitmasks { + // the bitmask parameters + bitmaskParams, ok := ps.params.Bitmasks[string(bitmask)] + if !ok { + // we are not scoring this bitmask + continue + } + + // decay counters + tstats.firstMessageDeliveries *= bitmaskParams.FirstMessageDeliveriesDecay + if tstats.firstMessageDeliveries < ps.params.DecayToZero { + tstats.firstMessageDeliveries = 0 + } + tstats.meshMessageDeliveries *= bitmaskParams.MeshMessageDeliveriesDecay + if tstats.meshMessageDeliveries < ps.params.DecayToZero { + tstats.meshMessageDeliveries = 0 + } + tstats.meshFailurePenalty *= bitmaskParams.MeshFailurePenaltyDecay + if tstats.meshFailurePenalty < ps.params.DecayToZero { + tstats.meshFailurePenalty = 0 + } + tstats.invalidMessageDeliveries *= bitmaskParams.InvalidMessageDeliveriesDecay + if tstats.invalidMessageDeliveries < ps.params.DecayToZero { + tstats.invalidMessageDeliveries = 0 + } + // update mesh time and activate mesh message delivery parameter if need be + if tstats.inMesh { + tstats.meshTime = now.Sub(tstats.graftTime) + if tstats.meshTime > bitmaskParams.MeshMessageDeliveriesActivation { + tstats.meshMessageDeliveriesActive = true + } + } + } + + // decay P7 counter + pstats.behaviourPenalty *= ps.params.BehaviourPenaltyDecay + if pstats.behaviourPenalty < ps.params.DecayToZero { + pstats.behaviourPenalty = 0 + } + } +} + +// refreshIPs refreshes IPs we know of peers we're tracking. +func (ps *peerScore) refreshIPs() { + ps.Lock() + defer ps.Unlock() + + // peer IPs may change, so we periodically refresh them + // + // TODO: it could be more efficient to collect connections for all peers + // from the Network, populate a new map, and replace it in place. We are + // incurring in those allocs anyway, and maybe even in more, in the form of + // slices. + for p, pstats := range ps.peerStats { + if pstats.connected { + ips := ps.getIPs(p) + ps.setIPs(p, ips, pstats.ips) + pstats.ips = ips + } + } +} + +func (ps *peerScore) gcDeliveryRecords() { + ps.Lock() + defer ps.Unlock() + + ps.deliveries.gc() +} + +// tracer interface +func (ps *peerScore) AddPeer(p peer.ID, proto protocol.ID) { + ps.Lock() + defer ps.Unlock() + + pstats, ok := ps.peerStats[p] + if !ok { + pstats = &peerStats{bitmasks: make(map[string]*bitmaskStats)} + ps.peerStats[p] = pstats + } + + pstats.connected = true + ips := ps.getIPs(p) + ps.setIPs(p, ips, pstats.ips) + pstats.ips = ips +} + +func (ps *peerScore) RemovePeer(p peer.ID) { + ps.Lock() + defer ps.Unlock() + + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + // decide whether to retain the score; this currently only retains non-positive scores + // to dissuade attacks on the score function. + if ps.score(p) > 0 { + ps.removeIPs(p, pstats.ips) + delete(ps.peerStats, p) + return + } + + // furthermore, when we decide to retain the score, the firstMessageDelivery counters are + // reset to 0 and mesh delivery penalties applied. 
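+	// Illustrative numbers: if the peer is in the mesh with an active P3
+	// penalty, a threshold of 20 and a decayed delivery counter of 5, the loop
+	// below adds a sticky (20-5)^2 = 225 to meshFailurePenalty, which is later
+	// multiplied by the (negative) MeshFailurePenaltyWeight when scoring.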
+ for bitmask, tstats := range pstats.bitmasks { + tstats.firstMessageDeliveries = 0 + + threshold := ps.params.Bitmasks[string(bitmask)].MeshMessageDeliveriesThreshold + if tstats.inMesh && tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold { + deficit := threshold - tstats.meshMessageDeliveries + tstats.meshFailurePenalty += deficit * deficit + } + + tstats.inMesh = false + } + + pstats.connected = false + pstats.expire = time.Now().Add(ps.params.RetainScore) +} + +func (ps *peerScore) Join(bitmask []byte) {} +func (ps *peerScore) Leave(bitmask []byte) {} + +func (ps *peerScore) Graft(p peer.ID, bitmask []byte) { + ps.Lock() + defer ps.Unlock() + + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + tstats, ok := pstats.getBitmaskStats(bitmask, ps.params) + if !ok { + return + } + + tstats.inMesh = true + tstats.graftTime = time.Now() + tstats.meshTime = 0 + tstats.meshMessageDeliveriesActive = false +} + +func (ps *peerScore) Prune(p peer.ID, bitmask []byte) { + ps.Lock() + defer ps.Unlock() + + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + tstats, ok := pstats.getBitmaskStats(bitmask, ps.params) + if !ok { + return + } + + // sticky mesh delivery rate failure penalty + threshold := ps.params.Bitmasks[string(bitmask)].MeshMessageDeliveriesThreshold + if tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold { + deficit := threshold - tstats.meshMessageDeliveries + tstats.meshFailurePenalty += deficit * deficit + } + + tstats.inMesh = false +} + +func (ps *peerScore) ValidateMessage(msg *Message) { + ps.Lock() + defer ps.Unlock() + + // the pubsub subsystem is beginning validation; create a record to track time in + // the validation pipeline with an accurate firstSeen time. + _ = ps.deliveries.getRecord(ps.idGen.ID(msg)) +} + +func (ps *peerScore) DeliverMessage(msg *Message) { + ps.Lock() + defer ps.Unlock() + + ps.markFirstMessageDelivery(msg.ReceivedFrom, msg) + + drec := ps.deliveries.getRecord(ps.idGen.ID(msg)) + + // defensive check that this is the first delivery trace -- delivery status should be unknown + if drec.status != deliveryUnknown { + log.Debugf("unexpected delivery trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status) + return + } + + // mark the message as valid and reward mesh peers that have already forwarded it to us + drec.status = deliveryValid + drec.validated = time.Now() + for p := range drec.peers { + // this check is to make sure a peer can't send us a message twice and get a double count + // if it is a first delivery. + if p != msg.ReceivedFrom { + ps.markDuplicateMessageDelivery(p, msg, time.Time{}) + } + } +} + +func (ps *peerScore) RejectMessage(msg *Message, reason string) { + ps.Lock() + defer ps.Unlock() + + switch reason { + // we don't track those messages, but we penalize the peer as they are clearly invalid + case RejectMissingSignature: + fallthrough + case RejectInvalidSignature: + fallthrough + case RejectUnexpectedSignature: + fallthrough + case RejectUnexpectedAuthInfo: + fallthrough + case RejectSelfOrigin: + ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg) + return + + // we ignore those messages, so do nothing. 
+ case RejectBlacklstedPeer: + fallthrough + case RejectBlacklistedSource: + return + + case RejectValidationQueueFull: + // the message was rejected before it entered the validation pipeline; + // we don't know if this message has a valid signature, and thus we also don't know if + // it has a valid message ID; all we can do is ignore it. + return + } + + drec := ps.deliveries.getRecord(ps.idGen.ID(msg)) + + // defensive check that this is the first rejection trace -- delivery status should be unknown + if drec.status != deliveryUnknown { + log.Debugf("unexpected rejection trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status) + return + } + + switch reason { + case RejectValidationThrottled: + // if we reject with "validation throttled" we don't penalize the peer(s) that forward it + // because we don't know if it was valid. + drec.status = deliveryThrottled + // release the delivery time tracking map to free some memory early + drec.peers = nil + return + case RejectValidationIgnored: + // we were explicitly instructed by the validator to ignore the message but not penalize + // the peer + drec.status = deliveryIgnored + drec.peers = nil + return + } + + // mark the message as invalid and penalize peers that have already forwarded it. + drec.status = deliveryInvalid + + ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg) + for p := range drec.peers { + ps.markInvalidMessageDelivery(p, msg) + } + + // release the delivery time tracking map to free some memory early + drec.peers = nil +} + +func (ps *peerScore) DuplicateMessage(msg *Message) { + ps.Lock() + defer ps.Unlock() + + drec := ps.deliveries.getRecord(ps.idGen.ID(msg)) + + _, ok := drec.peers[msg.ReceivedFrom] + if ok { + // we have already seen this duplicate! + return + } + + switch drec.status { + case deliveryUnknown: + // the message is being validated; track the peer delivery and wait for + // the Deliver/Reject notification. + drec.peers[msg.ReceivedFrom] = struct{}{} + + case deliveryValid: + // mark the peer delivery time to only count a duplicate delivery once. 
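+		// Illustrative timing: if validation finished 5ms ago and the bitmask's
+		// MeshMessageDeliveriesWindow is 10ms, this duplicate still counts toward
+		// the sender's P3 counter; had it arrived 15ms after validation,
+		// markDuplicateMessageDelivery would ignore it.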
+ drec.peers[msg.ReceivedFrom] = struct{}{} + ps.markDuplicateMessageDelivery(msg.ReceivedFrom, msg, drec.validated) + + case deliveryInvalid: + // we no longer track delivery time + ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg) + + case deliveryThrottled: + // the message was throttled; do nothing (we don't know if it was valid) + case deliveryIgnored: + // the message was ignored; do nothing + } +} + +func (ps *peerScore) ThrottlePeer(p peer.ID) {} + +func (ps *peerScore) RecvRPC(rpc *RPC) {} + +func (ps *peerScore) SendRPC(rpc *RPC, p peer.ID) {} + +func (ps *peerScore) DropRPC(rpc *RPC, p peer.ID) {} + +func (ps *peerScore) UndeliverableMessage(msg *Message) {} + +// message delivery records +func (d *messageDeliveries) getRecord(id string) *deliveryRecord { + rec, ok := d.records[id] + if ok { + return rec + } + + now := time.Now() + + rec = &deliveryRecord{peers: make(map[peer.ID]struct{}), firstSeen: now} + d.records[id] = rec + + entry := &deliveryEntry{id: id, expire: now.Add(d.seenMsgTTL)} + if d.tail != nil { + d.tail.next = entry + d.tail = entry + } else { + d.head = entry + d.tail = entry + } + + return rec +} + +func (d *messageDeliveries) gc() { + if d.head == nil { + return + } + + now := time.Now() + for d.head != nil && now.After(d.head.expire) { + delete(d.records, d.head.id) + d.head = d.head.next + } + + if d.head == nil { + d.tail = nil + } +} + +// getBitmaskStats returns existing bitmask stats for a given a given (peer, bitmask) +// tuple, or initialises a new bitmaskStats object and inserts it in the +// peerStats, iff the bitmask is scored. +func (pstats *peerStats) getBitmaskStats(bitmask []byte, params *PeerScoreParams) (*bitmaskStats, bool) { + tstats, ok := pstats.bitmasks[string(bitmask)] + if ok { + return tstats, true + } + + _, scoredBitmask := params.Bitmasks[string(bitmask)] + if !scoredBitmask { + return nil, false + } + + tstats = &bitmaskStats{} + pstats.bitmasks[string(bitmask)] = tstats + + return tstats, true +} + +// markInvalidMessageDelivery increments the "invalid message deliveries" +// counter for all scored bitmasks the message is published in. +func (ps *peerScore) markInvalidMessageDelivery(p peer.ID, msg *Message) { + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + bitmask := msg.GetBitmask() + tstats, ok := pstats.getBitmaskStats(bitmask, ps.params) + if !ok { + return + } + + tstats.invalidMessageDeliveries += 1 +} + +// markFirstMessageDelivery increments the "first message deliveries" counter +// for all scored bitmasks the message is published in, as well as the "mesh +// message deliveries" counter, if the peer is in the mesh for the bitmask. +func (ps *peerScore) markFirstMessageDelivery(p peer.ID, msg *Message) { + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + bitmask := msg.GetBitmask() + tstats, ok := pstats.getBitmaskStats(bitmask, ps.params) + if !ok { + return + } + + cap := ps.params.Bitmasks[string(bitmask)].FirstMessageDeliveriesCap + tstats.firstMessageDeliveries += 1 + if tstats.firstMessageDeliveries > cap { + tstats.firstMessageDeliveries = cap + } + + if !tstats.inMesh { + return + } + + cap = ps.params.Bitmasks[string(bitmask)].MeshMessageDeliveriesCap + tstats.meshMessageDeliveries += 1 + if tstats.meshMessageDeliveries > cap { + tstats.meshMessageDeliveries = cap + } +} + +// markDuplicateMessageDelivery increments the "mesh message deliveries" counter +// for messages we've seen before, as long the message was received within the +// P3 window. 
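+// DeliverMessage passes a zero validated time for peers that forwarded the
+// message while it was still being validated; a zero time always counts as
+// being inside the window.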
+func (ps *peerScore) markDuplicateMessageDelivery(p peer.ID, msg *Message, validated time.Time) { + pstats, ok := ps.peerStats[p] + if !ok { + return + } + + bitmask := msg.GetBitmask() + tstats, ok := pstats.getBitmaskStats(bitmask, ps.params) + if !ok { + return + } + + if !tstats.inMesh { + return + } + + tparams := ps.params.Bitmasks[string(bitmask)] + + // check against the mesh delivery window -- if the validated time is passed as 0, then + // the message was received before we finished validation and thus falls within the mesh + // delivery window. + if !validated.IsZero() && time.Since(validated) > tparams.MeshMessageDeliveriesWindow { + return + } + + cap := tparams.MeshMessageDeliveriesCap + tstats.meshMessageDeliveries += 1 + if tstats.meshMessageDeliveries > cap { + tstats.meshMessageDeliveries = cap + } +} + +// getIPs gets the current IPs for a peer. +func (ps *peerScore) getIPs(p peer.ID) []string { + // in unit tests this can be nil + if ps.host == nil { + return nil + } + + conns := ps.host.Network().ConnsToPeer(p) + res := make([]string, 0, 1) + for _, c := range conns { + if c.Stat().Transient { + // ignore transient + continue + } + + remote := c.RemoteMultiaddr() + ip, err := manet.ToIP(remote) + if err != nil { + continue + } + + // ignore those; loopback is used for unit testing + if ip.IsLoopback() { + continue + } + + if len(ip.To4()) == 4 { + // IPv4 address + ip4 := ip.String() + res = append(res, ip4) + } else { + // IPv6 address -- we add both the actual address and the /64 subnet + ip6 := ip.String() + res = append(res, ip6) + + ip6mask := ip.Mask(net.CIDRMask(64, 128)).String() + res = append(res, ip6mask) + } + } + + return res +} + +// setIPs adds tracking for the new IPs in the list, and removes tracking from +// the obsolete IPs. +func (ps *peerScore) setIPs(p peer.ID, newips, oldips []string) { +addNewIPs: + // add the new IPs to the tracking + for _, ip := range newips { + // check if it is in the old ips list + for _, xip := range oldips { + if ip == xip { + continue addNewIPs + } + } + // no, it's a new one -- add it to the tracker + peers, ok := ps.peerIPs[ip] + if !ok { + peers = make(map[peer.ID]struct{}) + ps.peerIPs[ip] = peers + } + peers[p] = struct{}{} + } + +removeOldIPs: + // remove the obsolete old IPs from the tracking + for _, ip := range oldips { + // check if it is in the new ips list + for _, xip := range newips { + if ip == xip { + continue removeOldIPs + } + } + // no, it's obsolete -- remove it from the tracker + peers, ok := ps.peerIPs[ip] + if !ok { + continue + } + delete(peers, p) + if len(peers) == 0 { + delete(ps.peerIPs, ip) + } + } +} + +// removeIPs removes an IP list from the tracking list for a peer. +func (ps *peerScore) removeIPs(p peer.ID, ips []string) { + for _, ip := range ips { + peers, ok := ps.peerIPs[ip] + if !ok { + continue + } + + delete(peers, p) + if len(peers) == 0 { + delete(ps.peerIPs, ip) + } + } +} diff --git a/go-libp2p-blossomsub/score_params.go b/go-libp2p-blossomsub/score_params.go new file mode 100644 index 0000000..5e96e7e --- /dev/null +++ b/go-libp2p-blossomsub/score_params.go @@ -0,0 +1,423 @@ +package blossomsub + +import ( + "fmt" + "math" + "net" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +type PeerScoreThresholds struct { + // whether it is allowed to just set some params and not all of them. + SkipAtomicValidation bool + + // GossipThreshold is the score threshold below which gossip propagation is suppressed; + // should be negative. 
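+	// Illustrative values only (tune per deployment): for example -500 here,
+	// with PublishThreshold and GraylistThreshold progressively more negative
+	// (say -1000 and -2500); validate() enforces the ordering
+	// GraylistThreshold <= PublishThreshold <= GossipThreshold <= 0.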
+ GossipThreshold float64 + + // PublishThreshold is the score threshold below which we shouldn't publish when using flood + // publishing (also applies to fanout and floodsub peers); should be negative and <= GossipThreshold. + PublishThreshold float64 + + // GraylistThreshold is the score threshold below which message processing is suppressed altogether, + // implementing an effective gray list according to peer score; should be negative and <= PublishThreshold. + GraylistThreshold float64 + + // AcceptPXThreshold is the score threshold below which PX will be ignored; this should be positive + // and limited to scores attainable by bootstrappers and other trusted nodes. + AcceptPXThreshold float64 + + // OpportunisticGraftThreshold is the median mesh score threshold before triggering opportunistic + // grafting; this should have a small positive value. + OpportunisticGraftThreshold float64 +} + +func (p *PeerScoreThresholds) validate() error { + + if !p.SkipAtomicValidation || p.PublishThreshold != 0 || p.GossipThreshold != 0 || p.GraylistThreshold != 0 { + if p.GossipThreshold > 0 || isInvalidNumber(p.GossipThreshold) { + return fmt.Errorf("invalid gossip threshold; it must be <= 0 and a valid number") + } + if p.PublishThreshold > 0 || p.PublishThreshold > p.GossipThreshold || isInvalidNumber(p.PublishThreshold) { + return fmt.Errorf("invalid publish threshold; it must be <= 0 and <= gossip threshold and a valid number") + } + if p.GraylistThreshold > 0 || p.GraylistThreshold > p.PublishThreshold || isInvalidNumber(p.GraylistThreshold) { + return fmt.Errorf("invalid graylist threshold; it must be <= 0 and <= publish threshold and a valid number") + } + } + + if !p.SkipAtomicValidation || p.AcceptPXThreshold != 0 { + if p.AcceptPXThreshold < 0 || isInvalidNumber(p.AcceptPXThreshold) { + return fmt.Errorf("invalid accept PX threshold; it must be >= 0 and a valid number") + } + } + + if !p.SkipAtomicValidation || p.OpportunisticGraftThreshold != 0 { + if p.OpportunisticGraftThreshold < 0 || isInvalidNumber(p.OpportunisticGraftThreshold) { + return fmt.Errorf("invalid opportunistic grafting threshold; it must be >= 0 and a valid number") + } + } + + return nil +} + +type PeerScoreParams struct { + // whether it is allowed to just set some params and not all of them. + SkipAtomicValidation bool + + // Score parameters per bitmask. + Bitmasks map[string]*BitmaskScoreParams + + // Aggregate bitmask score cap; this limits the total contribution of bitmasks towards a positive + // score. It must be positive (or 0 for no cap). + BitmaskScoreCap float64 + + // P5: Application-specific peer scoring + AppSpecificScore func(p peer.ID) float64 + AppSpecificWeight float64 + + // P6: IP-colocation factor. + // The parameter has an associated counter which counts the number of peers with the same IP. + // If the number of peers in the same IP exceeds IPColocationFactorThreshold, then the value + // is the square of the difference, ie (PeersInSameIP - IPColocationThreshold)^2. + // If the number of peers in the same IP is less than the threshold, then the value is 0. + // The weight of the parameter MUST be negative, unless you want to disable for testing. + // Note: In order to simulate many IPs in a managable manner when testing, you can set the weight to 0 + // thus disabling the IP colocation penalty. + IPColocationFactorWeight float64 + IPColocationFactorThreshold int + IPColocationFactorWhitelist []*net.IPNet + + // P7: behavioural pattern penalties. 
+ // This parameter has an associated counter which tracks misbehaviour as detected by the + // router. The router currently applies penalties for the following behaviors: + // - attempting to re-graft before the prune backoff time has elapsed. + // - not following up in IWANT requests for messages advertised with IHAVE. + // + // The value of the parameter is the square of the counter over the threshold, which decays with + // BehaviourPenaltyDecay. + // The weight of the parameter MUST be negative (or zero to disable). + BehaviourPenaltyWeight, BehaviourPenaltyThreshold, BehaviourPenaltyDecay float64 + + // the decay interval for parameter counters. + DecayInterval time.Duration + + // counter value below which it is considered 0. + DecayToZero float64 + + // time to remember counters for a disconnected peer. + RetainScore time.Duration + + // time to remember a message delivery for. Default to global TimeCacheDuration if 0. + SeenMsgTTL time.Duration +} + +type BitmaskScoreParams struct { + // whether it is allowed to just set some params and not all of them. + SkipAtomicValidation bool + + // The weight of the bitmask. + BitmaskWeight float64 + + // P1: time in the mesh + // This is the time the peer has been grafted in the mesh. + // The value of the parameter is the time/TimeInMeshQuantum, capped by TimeInMeshCap. + // The weight of the parameter MUST be positive (or zero to disable). + TimeInMeshWeight float64 + TimeInMeshQuantum time.Duration + TimeInMeshCap float64 + + // P2: first message deliveries + // This is the number of message deliveries in the bitmask. + // The value of the parameter is a counter, decaying with FirstMessageDeliveriesDecay, and capped + // by FirstMessageDeliveriesCap. + // The weight of the parameter MUST be positive (or zero to disable). + FirstMessageDeliveriesWeight, FirstMessageDeliveriesDecay float64 + FirstMessageDeliveriesCap float64 + + // P3: mesh message deliveries + // This is the number of message deliveries in the mesh, within the MeshMessageDeliveriesWindow of + // message validation; deliveries during validation also count and are retroactively applied + // when validation succeeds. + // This window accounts for the minimum time before a hostile mesh peer trying to game the score + // could replay back a valid message we just sent them. + // It effectively tracks first and near-first deliveries, i.e., a message seen from a mesh peer + // before we have forwarded it to them. + // The parameter has an associated counter, decaying with MeshMessageDeliveriesDecay. + // If the counter exceeds the threshold, its value is 0. + // If the counter is below the MeshMessageDeliveriesThreshold, the value is the square of + // the deficit, ie (MessageDeliveriesThreshold - counter)^2 + // The penalty is only activated after MeshMessageDeliveriesActivation time in the mesh. + // The weight of the parameter MUST be negative (or zero to disable). + MeshMessageDeliveriesWeight, MeshMessageDeliveriesDecay float64 + MeshMessageDeliveriesCap, MeshMessageDeliveriesThreshold float64 + MeshMessageDeliveriesWindow, MeshMessageDeliveriesActivation time.Duration + + // P3b: sticky mesh propagation failures + // This is a sticky penalty that applies when a peer gets pruned from the mesh with an active + // mesh message delivery penalty. + // The weight of the parameter MUST be negative (or zero to disable) + MeshFailurePenaltyWeight, MeshFailurePenaltyDecay float64 + + // P4: invalid messages + // This is the number of invalid messages in the bitmask. 
+ // The value of the parameter is the square of the counter, decaying with + // InvalidMessageDeliveriesDecay. + // The weight of the parameter MUST be negative (or zero to disable). + InvalidMessageDeliveriesWeight, InvalidMessageDeliveriesDecay float64 +} + +// peer score parameter validation +func (p *PeerScoreParams) validate() error { + for bitmask, params := range p.Bitmasks { + err := params.validate() + if err != nil { + return fmt.Errorf("invalid score parameters for bitmask %s: %w", bitmask, err) + } + } + + if !p.SkipAtomicValidation || p.BitmaskScoreCap != 0 { + // check that the bitmask score is 0 or something positive + if p.BitmaskScoreCap < 0 || isInvalidNumber(p.BitmaskScoreCap) { + return fmt.Errorf("invalid bitmask score cap; must be positive (or 0 for no cap) and a valid number") + } + } + + // check that we have an app specific score; the weight can be anything (but expected positive) + if p.AppSpecificScore == nil { + if p.SkipAtomicValidation { + p.AppSpecificScore = func(p peer.ID) float64 { + return 0 + } + } else { + return fmt.Errorf("missing application specific score function") + } + } + + if !p.SkipAtomicValidation || p.IPColocationFactorWeight != 0 { + // check the IP collocation factor + if p.IPColocationFactorWeight > 0 || isInvalidNumber(p.IPColocationFactorWeight) { + return fmt.Errorf("invalid IPColocationFactorWeight; must be negative (or 0 to disable) and a valid number") + } + if p.IPColocationFactorWeight != 0 && p.IPColocationFactorThreshold < 1 { + return fmt.Errorf("invalid IPColocationFactorThreshold; must be at least 1") + } + } + + // check the behaviour penalty + if !p.SkipAtomicValidation || p.BehaviourPenaltyWeight != 0 || p.BehaviourPenaltyThreshold != 0 { + if p.BehaviourPenaltyWeight > 0 || isInvalidNumber(p.BehaviourPenaltyWeight) { + return fmt.Errorf("invalid BehaviourPenaltyWeight; must be negative (or 0 to disable) and a valid number") + } + if p.BehaviourPenaltyWeight != 0 && (p.BehaviourPenaltyDecay <= 0 || p.BehaviourPenaltyDecay >= 1 || isInvalidNumber(p.BehaviourPenaltyDecay)) { + return fmt.Errorf("invalid BehaviourPenaltyDecay; must be between 0 and 1") + } + if p.BehaviourPenaltyThreshold < 0 || isInvalidNumber(p.BehaviourPenaltyThreshold) { + return fmt.Errorf("invalid BehaviourPenaltyThreshold; must be >= 0 and a valid number") + } + } + + // check the decay parameters + if !p.SkipAtomicValidation || p.DecayInterval != 0 || p.DecayToZero != 0 { + if p.DecayInterval < time.Second { + return fmt.Errorf("invalid DecayInterval; must be at least 1s") + } + if p.DecayToZero <= 0 || p.DecayToZero >= 1 || isInvalidNumber(p.DecayToZero) { + return fmt.Errorf("invalid DecayToZero; must be between 0 and 1") + } + } + + // no need to check the score retention; a value of 0 means that we don't retain scores + return nil +} + +func (p *BitmaskScoreParams) validate() error { + // make sure we have a sane bitmask weight + if p.BitmaskWeight < 0 || isInvalidNumber(p.BitmaskWeight) { + return fmt.Errorf("invalid bitmask weight; must be >= 0 and a valid number") + } + + // check P1 + if err := p.validateTimeInMeshParams(); err != nil { + return err + } + + // check P2 + if err := p.validateMessageDeliveryParams(); err != nil { + return err + } + // check P3 + if err := p.validateMeshMessageDeliveryParams(); err != nil { + return err + } + + // check P3b + if err := p.validateMessageFailurePenaltyParams(); err != nil { + return err + } + + // check P4 + if err := p.validateInvalidMessageDeliveryParams(); err != nil { + return err + } + + 
return nil +} + +func (p *BitmaskScoreParams) validateTimeInMeshParams() error { + if p.SkipAtomicValidation { + // in non-atomic mode, parameters at their zero values are dismissed from validation. + if p.TimeInMeshWeight == 0 && p.TimeInMeshQuantum == 0 && p.TimeInMeshCap == 0 { + return nil + } + } + + // either atomic validation mode, or some parameters have been set a value, + // hence, proceed with normal validation of all related parameters in this context. + + if p.TimeInMeshQuantum == 0 { + return fmt.Errorf("invalid TimeInMeshQuantum; must be non zero") + } + if p.TimeInMeshWeight < 0 || isInvalidNumber(p.TimeInMeshWeight) { + return fmt.Errorf("invalid TimeInMeshWeight; must be positive (or 0 to disable) and a valid number") + } + if p.TimeInMeshWeight != 0 && p.TimeInMeshQuantum <= 0 { + return fmt.Errorf("invalid TimeInMeshQuantum; must be positive") + } + if p.TimeInMeshWeight != 0 && (p.TimeInMeshCap <= 0 || isInvalidNumber(p.TimeInMeshCap)) { + return fmt.Errorf("invalid TimeInMeshCap; must be positive and a valid number") + } + + return nil +} + +func (p *BitmaskScoreParams) validateMessageDeliveryParams() error { + if p.SkipAtomicValidation { + // in non-atomic mode, parameters at their zero values are dismissed from validation. + if p.FirstMessageDeliveriesWeight == 0 && p.FirstMessageDeliveriesCap == 0 && p.FirstMessageDeliveriesDecay == 0 { + return nil + } + } + + // either atomic validation mode, or some parameters have been set a value, + // hence, proceed with normal validation of all related parameters in this context. + + if p.FirstMessageDeliveriesWeight < 0 || isInvalidNumber(p.FirstMessageDeliveriesWeight) { + return fmt.Errorf("invallid FirstMessageDeliveriesWeight; must be positive (or 0 to disable) and a valid number") + } + if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesDecay <= 0 || p.FirstMessageDeliveriesDecay >= 1 || isInvalidNumber(p.FirstMessageDeliveriesDecay)) { + return fmt.Errorf("invalid FirstMessageDeliveriesDecay; must be between 0 and 1") + } + if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesCap <= 0 || isInvalidNumber(p.FirstMessageDeliveriesCap)) { + return fmt.Errorf("invalid FirstMessageDeliveriesCap; must be positive and a valid number") + } + + return nil +} + +func (p *BitmaskScoreParams) validateMeshMessageDeliveryParams() error { + if p.SkipAtomicValidation { + // in non-atomic mode, parameters at their zero values are dismissed from validation. + if p.MeshMessageDeliveriesWeight == 0 && + p.MeshMessageDeliveriesCap == 0 && + p.MeshMessageDeliveriesDecay == 0 && + p.MeshMessageDeliveriesThreshold == 0 && + p.MeshMessageDeliveriesWindow == 0 && + p.MeshMessageDeliveriesActivation == 0 { + return nil + } + } + + // either atomic validation mode, or some parameters have been set a value, + // hence, proceed with normal validation of all related parameters in this context. 
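+	// For reference, the P3 value these parameters feed: once the activation
+	// time has elapsed, a decayed counter of, say, 12 against a threshold of 20
+	// contributes a (20-12)^2 = 64 penalty, multiplied by the (negative)
+	// MeshMessageDeliveriesWeight.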
+ + if p.MeshMessageDeliveriesWeight > 0 || isInvalidNumber(p.MeshMessageDeliveriesWeight) { + return fmt.Errorf("invalid MeshMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number") + } + if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesDecay <= 0 || p.MeshMessageDeliveriesDecay >= 1 || isInvalidNumber(p.MeshMessageDeliveriesDecay)) { + return fmt.Errorf("invalid MeshMessageDeliveriesDecay; must be between 0 and 1") + } + if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesCap <= 0 || isInvalidNumber(p.MeshMessageDeliveriesCap)) { + return fmt.Errorf("invalid MeshMessageDeliveriesCap; must be positive and a valid number") + } + if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesThreshold <= 0 || isInvalidNumber(p.MeshMessageDeliveriesThreshold)) { + return fmt.Errorf("invalid MeshMessageDeliveriesThreshold; must be positive and a valid number") + } + if p.MeshMessageDeliveriesWindow < 0 { + return fmt.Errorf("invalid MeshMessageDeliveriesWindow; must be non-negative") + } + if p.MeshMessageDeliveriesWeight != 0 && p.MeshMessageDeliveriesActivation < time.Second { + return fmt.Errorf("invalid MeshMessageDeliveriesActivation; must be at least 1s") + } + + return nil +} + +func (p *BitmaskScoreParams) validateMessageFailurePenaltyParams() error { + if p.SkipAtomicValidation { + // in selective mode, parameters at their zero values are dismissed from validation. + if p.MeshFailurePenaltyDecay == 0 && p.MeshFailurePenaltyWeight == 0 { + return nil + } + } + + // either atomic validation mode, or some parameters have been set a value, + // hence, proceed with normal validation of all related parameters in this context. + + if p.MeshFailurePenaltyWeight > 0 || isInvalidNumber(p.MeshFailurePenaltyWeight) { + return fmt.Errorf("invalid MeshFailurePenaltyWeight; must be negative (or 0 to disable) and a valid number") + } + if p.MeshFailurePenaltyWeight != 0 && (isInvalidNumber(p.MeshFailurePenaltyDecay) || p.MeshFailurePenaltyDecay <= 0 || p.MeshFailurePenaltyDecay >= 1) { + return fmt.Errorf("invalid MeshFailurePenaltyDecay; must be between 0 and 1") + } + + return nil +} + +func (p *BitmaskScoreParams) validateInvalidMessageDeliveryParams() error { + if p.SkipAtomicValidation { + // in selective mode, parameters at their zero values are dismissed from validation. + if p.InvalidMessageDeliveriesDecay == 0 && p.InvalidMessageDeliveriesWeight == 0 { + return nil + } + } + + // either atomic validation mode, or some parameters have been set a value, + // hence, proceed with normal validation of all related parameters in this context. 
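+	// For reference, the P4 value these parameters feed: a decayed counter of
+	// 3 invalid deliveries contributes 3^2 = 9, multiplied by the (negative)
+	// InvalidMessageDeliveriesWeight.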
+ + if p.InvalidMessageDeliveriesWeight > 0 || isInvalidNumber(p.InvalidMessageDeliveriesWeight) { + return fmt.Errorf("invalid InvalidMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number") + } + if p.InvalidMessageDeliveriesDecay <= 0 || p.InvalidMessageDeliveriesDecay >= 1 || isInvalidNumber(p.InvalidMessageDeliveriesDecay) { + return fmt.Errorf("invalid InvalidMessageDeliveriesDecay; must be between 0 and 1") + } + + return nil +} + +const ( + DefaultDecayInterval = time.Second + DefaultDecayToZero = 0.01 +) + +// ScoreParameterDecay computes the decay factor for a parameter, assuming the DecayInterval is 1s +// and that the value decays to zero if it drops below 0.01 +func ScoreParameterDecay(decay time.Duration) float64 { + return ScoreParameterDecayWithBase(decay, DefaultDecayInterval, DefaultDecayToZero) +} + +// ScoreParameterDecayWithBase computes the decay factor for a parameter using base as the DecayInterval +func ScoreParameterDecayWithBase(decay time.Duration, base time.Duration, decayToZero float64) float64 { + // the decay is linear, so after n ticks the value is factor^n + // so factor^n = decayToZero => factor = decayToZero^(1/n) + ticks := float64(decay / base) + return math.Pow(decayToZero, 1/ticks) +} + +// checks whether the provided floating-point number is `Not a Number` +// or an infinite number. +func isInvalidNumber(num float64) bool { + return math.IsNaN(num) || math.IsInf(num, 0) +} diff --git a/go-libp2p-blossomsub/score_params_test.go b/go-libp2p-blossomsub/score_params_test.go new file mode 100644 index 0000000..58e7df6 --- /dev/null +++ b/go-libp2p-blossomsub/score_params_test.go @@ -0,0 +1,739 @@ +package blossomsub + +import ( + "math" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestPeerScoreThreshold_AtomicValidation(t *testing.T) { + testPeerScoreThresholdsValidation(t, false) +} + +func TestPeerScoreThreshold_SkipAtomicValidation(t *testing.T) { + testPeerScoreThresholdsValidation(t, true) +} + +func testPeerScoreThresholdsValidation(t *testing.T, skipAtomicValidation bool) { + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + PublishThreshold: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: 0, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: 0, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + AcceptPXThreshold: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + OpportunisticGraftThreshold: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: -3, + AcceptPXThreshold: 1, + OpportunisticGraftThreshold: 2}).validate() != nil { + t.Fatal("expected validation success") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: 
math.Inf(-1), + PublishThreshold: -2, + GraylistThreshold: -3, + AcceptPXThreshold: 1, + OpportunisticGraftThreshold: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: math.Inf(-1), + GraylistThreshold: -3, + AcceptPXThreshold: 1, + OpportunisticGraftThreshold: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: math.Inf(-1), + AcceptPXThreshold: 1, + OpportunisticGraftThreshold: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: -3, + AcceptPXThreshold: math.NaN(), + OpportunisticGraftThreshold: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreThresholds{ + SkipAtomicValidation: skipAtomicValidation, + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: -3, + AcceptPXThreshold: 1, + OpportunisticGraftThreshold: math.Inf(0), + }).validate() == nil { + t.Fatal("expected validation error") + } +} + +func TestBitmaskScoreParamsValidation_InvalidParams_AtomicValidation(t *testing.T) { + testBitmaskScoreParamsValidationWithInvalidParameters(t, false) +} + +func TestBitmaskScoreParamsValidation_InvalidParams_SkipAtomicValidation(t *testing.T) { + testBitmaskScoreParamsValidationWithInvalidParameters(t, true) +} + +func testBitmaskScoreParamsValidationWithInvalidParameters(t *testing.T, skipAtomicValidation bool) { + + if skipAtomicValidation { + if (&BitmaskScoreParams{ + SkipAtomicValidation: true}).validate() != nil { + t.Fatal("expected validation success") + } + } else { + if (&BitmaskScoreParams{}).validate() == nil { + t.Fatal("expected validation failure") + } + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskWeight: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshWeight: -1, + TimeInMeshQuantum: time.Second, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshWeight: 1, + TimeInMeshQuantum: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshWeight: 1, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + 
TimeInMeshQuantum: time.Second, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: .5, + FirstMessageDeliveriesCap: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: 2}).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: .5, + MeshMessageDeliveriesCap: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: .5, + MeshMessageDeliveriesCap: 5, + MeshMessageDeliveriesThreshold: -3, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: .5, + MeshMessageDeliveriesCap: 5, + MeshMessageDeliveriesThreshold: 3, + MeshMessageDeliveriesWindow: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: .5, + MeshMessageDeliveriesCap: 5, + MeshMessageDeliveriesThreshold: 3, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Millisecond}).validate() == nil { + t.Fatal("expected validation error") + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshFailurePenaltyWeight: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&BitmaskScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + 
InvalidMessageDeliveriesDecay: 2, + }).validate() == nil { + t.Fatal("expected validation error") + } +} + +func TestBitmaskScoreParamsValidation_ValidParams_AtomicValidation(t *testing.T) { + // Don't use these params in production! + if (&BitmaskScoreParams{ + SkipAtomicValidation: false, + BitmaskWeight: 1, + TimeInMeshWeight: 0.01, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 10, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.5, + FirstMessageDeliveriesCap: 10, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: 0.5, + MeshMessageDeliveriesCap: 10, + MeshMessageDeliveriesThreshold: 5, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 0.5, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.5, + }).validate() != nil { + t.Fatal("expected validation success") + } +} + +func TestBitmaskScoreParamsValidation_NonAtomicValidation(t *testing.T) { + // Don't use these params in production! + // In non-atomic (selective) validation mode, the subset of parameters passes + // validation if the individual parameters values pass validation. + p := &BitmaskScoreParams{} + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.SkipAtomicValidation = true + }) + // including bitmask weight. + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.BitmaskWeight = 1 + }) + // including time in mesh parameters. + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.TimeInMeshWeight = 0.01 + params.TimeInMeshQuantum = time.Second + params.TimeInMeshCap = 10 + }) + // including first message delivery parameters. + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.FirstMessageDeliveriesWeight = 1 + params.FirstMessageDeliveriesDecay = 0.5 + params.FirstMessageDeliveriesCap = 10 + }) + // including mesh message delivery parameters. + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.MeshMessageDeliveriesWeight = -1 + params.MeshMessageDeliveriesDecay = 0.5 + params.MeshMessageDeliveriesCap = 10 + params.MeshMessageDeliveriesThreshold = 5 + params.MeshMessageDeliveriesWindow = time.Millisecond + params.MeshMessageDeliveriesActivation = time.Second + }) + // including mesh failure penalty parameters. + setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.MeshFailurePenaltyWeight = -1 + params.MeshFailurePenaltyDecay = 0.5 + }) + // including invalid message delivery parameters. 
+ setBitmaskParamAndValidate(t, p, func(params *BitmaskScoreParams) { + params.InvalidMessageDeliveriesWeight = -1 + params.InvalidMessageDeliveriesDecay = 0.5 + }) +} + +func TestPeerScoreParamsValidation_InvalidParams_AtomicValidation(t *testing.T) { + testPeerScoreParamsValidationWithInvalidParams(t, false) +} + +func TestPeerScoreParamsValidation_InvalidParams_SkipAtomicValidation(t *testing.T) { + testPeerScoreParamsValidationWithInvalidParams(t, true) +} + +func testPeerScoreParamsValidationWithInvalidParams(t *testing.T, skipAtomicValidation bool) { + appScore := func(peer.ID) float64 { return 0 } + + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: -1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + }).validate() == nil { + t.Fatal("expected validation error") + } + + if skipAtomicValidation { + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + DecayInterval: time.Second, + DecayToZero: 0.01, + }).validate() != nil { + t.Fatal("expected validation success") + } + } else { + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + DecayInterval: time.Second, + DecayToZero: 0.01, + }).validate() == nil { + t.Fatal("expected validation error") + } + } + + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: -1}).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Millisecond, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: -1, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 2, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + BehaviourPenaltyWeight: 1}).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + BehaviourPenaltyWeight: -1, + }).validate() == nil { + t.Fatal("expected validation error") + } + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + BehaviourPenaltyWeight: -1, + BehaviourPenaltyDecay: 2, + }).validate() == nil { + 
t.Fatal("expected validation error") + } + + // Checks the bitmask parameters for invalid values such as infinite and + // NaN numbers. + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + Bitmasks: map[string]*BitmaskScoreParams{ + "test": { + BitmaskWeight: math.Inf(0), + TimeInMeshWeight: math.NaN(), + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 10, + FirstMessageDeliveriesWeight: math.Inf(1), + FirstMessageDeliveriesDecay: 0.5, + FirstMessageDeliveriesCap: 10, + MeshMessageDeliveriesWeight: math.Inf(-1), + MeshMessageDeliveriesDecay: math.NaN(), + MeshMessageDeliveriesCap: math.Inf(0), + MeshMessageDeliveriesThreshold: 5, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: math.NaN(), + InvalidMessageDeliveriesWeight: math.Inf(0), + InvalidMessageDeliveriesDecay: math.NaN(), + }, + }, + }).validate() == nil { + t.Fatal("expected validation failure") + } + + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: math.Inf(0), + IPColocationFactorWeight: math.Inf(-1), + IPColocationFactorThreshold: 1, + BehaviourPenaltyWeight: math.Inf(0), + BehaviourPenaltyDecay: math.NaN(), + }).validate() == nil { + t.Fatal("expected validation failure") + } + + if (&PeerScoreParams{ + SkipAtomicValidation: skipAtomicValidation, + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + Bitmasks: map[string]*BitmaskScoreParams{ + "test": { + BitmaskWeight: -1, + TimeInMeshWeight: 0.01, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 10, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.5, + FirstMessageDeliveriesCap: 10, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: 0.5, + MeshMessageDeliveriesCap: 10, + MeshMessageDeliveriesThreshold: 5, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 0.5, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.5, + }, + }, + }).validate() == nil { + t.Fatal("expected validation failure") + } +} + +func TestPeerScoreParamsValidation_ValidParams_AtomicValidation(t *testing.T) { + appScore := func(peer.ID) float64 { return 0 } + + // don't use these params in production! 
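+	// (The second-scale decay interval and millisecond-scale delivery windows
+	// below keep the test fast; they are not realistic network settings.)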
+ if (&PeerScoreParams{ + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + BehaviourPenaltyWeight: -1, + BehaviourPenaltyDecay: 0.999, + }).validate() != nil { + t.Fatal("expected validation success") + } + + if (&PeerScoreParams{ + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + BehaviourPenaltyWeight: -1, + BehaviourPenaltyDecay: 0.999, + }).validate() != nil { + t.Fatal("expected validation success") + } + + if (&PeerScoreParams{ + BitmaskScoreCap: 1, + AppSpecificScore: appScore, + DecayInterval: time.Second, + DecayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + Bitmasks: map[string]*BitmaskScoreParams{ + "test": { + BitmaskWeight: 1, + TimeInMeshWeight: 0.01, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 10, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.5, + FirstMessageDeliveriesCap: 10, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: 0.5, + MeshMessageDeliveriesCap: 10, + MeshMessageDeliveriesThreshold: 5, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Second, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 0.5, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.5, + }, + }, + }).validate() != nil { + t.Fatal("expected validation success") + } +} + +func TestPeerScoreParamsValidation_ValidParams_SkipAtomicValidation(t *testing.T) { + appScore := func(peer.ID) float64 { return 0 } + + // don't use these params in production! + p := &PeerScoreParams{} + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.SkipAtomicValidation = true + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.AppSpecificScore = appScore + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.DecayInterval = time.Second + params.DecayToZero = 0.01 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.IPColocationFactorWeight = -1 + params.IPColocationFactorThreshold = 1 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.BehaviourPenaltyWeight = -1 + params.BehaviourPenaltyDecay = 0.999 + }) + + p = &PeerScoreParams{SkipAtomicValidation: true, AppSpecificScore: appScore} + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.BitmaskScoreCap = 1 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.DecayInterval = time.Second + params.DecayToZero = 0.01 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.IPColocationFactorWeight = -1 + params.IPColocationFactorThreshold = 1 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.BehaviourPenaltyWeight = -1 + params.BehaviourPenaltyDecay = 0.999 + }) + setParamAndValidate(t, p, func(params *PeerScoreParams) { + params.Bitmasks = map[string]*BitmaskScoreParams{ + "test": { + BitmaskWeight: 1, + TimeInMeshWeight: 0.01, + TimeInMeshQuantum: time.Second, + TimeInMeshCap: 10, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.5, + FirstMessageDeliveriesCap: 10, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesDecay: 0.5, + MeshMessageDeliveriesCap: 10, + MeshMessageDeliveriesThreshold: 5, + MeshMessageDeliveriesWindow: time.Millisecond, + MeshMessageDeliveriesActivation: time.Second, + 
MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 0.5, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.5, + }, + } + }) +} + +func TestScoreParameterDecay(t *testing.T) { + decay1hr := ScoreParameterDecay(time.Hour) + if decay1hr != .9987216039048303 { + t.Fatalf("expected .9987216039048303, got %f", decay1hr) + } +} + +func setParamAndValidate(t *testing.T, params *PeerScoreParams, set func(*PeerScoreParams)) { + set(params) + if err := params.validate(); err != nil { + t.Fatalf("expected validation success, got: %s", err) + } +} + +func setBitmaskParamAndValidate(t *testing.T, params *BitmaskScoreParams, set func(bitmask *BitmaskScoreParams)) { + set(params) + if err := params.validate(); err != nil { + t.Fatalf("expected validation success, got: %s", err) + } +} diff --git a/go-libp2p-blossomsub/score_test.go b/go-libp2p-blossomsub/score_test.go new file mode 100644 index 0000000..cb6e58a --- /dev/null +++ b/go-libp2p-blossomsub/score_test.go @@ -0,0 +1,1080 @@ +package blossomsub + +import ( + "math" + "net" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestScoreTimeInMesh(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 0.5, + TimeInMeshWeight: 1, + TimeInMeshQuantum: time.Millisecond, + TimeInMeshCap: 3600, + } + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + + // Peer score should start at 0 + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + + aScore := ps.Score(peerA) + if aScore != 0 { + t.Fatal("expected score to start at zero") + } + + // The time in mesh depends on how long the peer has been grafted + ps.Graft(peerA, mybitmask) + elapsed := bitmaskScoreParams.TimeInMeshQuantum * 200 + time.Sleep(elapsed) + + ps.refreshScores() + aScore = ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.TimeInMeshWeight * float64(elapsed/bitmaskScoreParams.TimeInMeshQuantum) + if aScore < expected { + t.Fatalf("Score: %f. Expected >= %f", aScore, expected) + } +} + +func TestScoreTimeInMeshCap(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 0.5, + TimeInMeshWeight: 1, + TimeInMeshQuantum: time.Millisecond, + TimeInMeshCap: 10, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + elapsed := bitmaskScoreParams.TimeInMeshQuantum * 40 + time.Sleep(elapsed) + + // The time in mesh score has a cap + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.TimeInMeshWeight * bitmaskScoreParams.TimeInMeshCap + variance := 0.5 + if !withinVariance(aScore, expected, variance) { + t.Fatalf("Score: %f. 
Expected %f ± %f", aScore, expected, variance*expected) + } +} + +func TestScoreFirstMessageDeliveries(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 1.0, // test without decay for now + FirstMessageDeliveriesCap: 2000, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + // deliver a bunch of messages from peer A + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + } + + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.FirstMessageDeliveriesWeight * float64(nMessages) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } +} + +func TestScoreFirstMessageDeliveriesCap(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 1.0, // test without decay for now + FirstMessageDeliveriesCap: 50, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + // deliver a bunch of messages from peer A + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + } + + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.FirstMessageDeliveriesWeight * bitmaskScoreParams.FirstMessageDeliveriesCap + if aScore != expected { + t.Fatalf("Score: %f. 
Expected %f", aScore, expected) + } +} + +func TestScoreFirstMessageDeliveriesDecay(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + FirstMessageDeliveriesWeight: 1, + FirstMessageDeliveriesDecay: 0.9, // decay 10% per decay interval + FirstMessageDeliveriesCap: 2000, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + // deliver a bunch of messages from peer A + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + } + + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.FirstMessageDeliveriesWeight * bitmaskScoreParams.FirstMessageDeliveriesDecay * float64(nMessages) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // refreshing the scores applies the decay param + decayIntervals := 10 + for i := 0; i < decayIntervals; i++ { + ps.refreshScores() + expected *= bitmaskScoreParams.FirstMessageDeliveriesDecay + } + aScore = ps.Score(peerA) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } +} + +func TestScoreMeshMessageDeliveries(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesActivation: time.Second, + MeshMessageDeliveriesWindow: 10 * time.Millisecond, + MeshMessageDeliveriesThreshold: 20, + MeshMessageDeliveriesCap: 100, + MeshMessageDeliveriesDecay: 1.0, // no decay for this test + + FirstMessageDeliveriesWeight: 0, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + // peer A always delivers the message first. + // peer B delivers next (within the delivery window). + // peer C delivers outside the delivery window. + // we expect peers A and B to have a score of zero, since all other parameter weights are zero. + // Peer C should have a negative score. + peerA := peer.ID("A") + peerB := peer.ID("B") + peerC := peer.ID("C") + peers := []peer.ID{peerA, peerB, peerC} + + ps := newPeerScore(params) + for _, p := range peers { + ps.AddPeer(p, "myproto") + ps.Graft(p, mybitmask) + } + + // assert that nobody has been penalized yet for not delivering messages before activation time + ps.refreshScores() + for _, p := range peers { + score := ps.Score(p) + if score < 0 { + t.Fatalf("expected no mesh delivery penalty before activation time, got score %f", score) + } + } + // wait for the activation time to kick in + time.Sleep(bitmaskScoreParams.MeshMessageDeliveriesActivation) + + // deliver a bunch of messages from peer A, with duplicates within the window from peer B, + // and duplicates outside the window from peer C. 
+ nMessages := 100 + wg := sync.WaitGroup{} + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + + msg.ReceivedFrom = peerB + ps.DuplicateMessage(&msg) + + // deliver duplicate from peerC after the window + wg.Add(1) + time.AfterFunc(bitmaskScoreParams.MeshMessageDeliveriesWindow+(20*time.Millisecond), func() { + msg.ReceivedFrom = peerC + ps.DuplicateMessage(&msg) + wg.Done() + }) + } + wg.Wait() + + ps.refreshScores() + aScore := ps.Score(peerA) + bScore := ps.Score(peerB) + cScore := ps.Score(peerC) + if aScore < 0 { + t.Fatalf("Expected non-negative score for peer A, got %f", aScore) + } + if bScore < 0 { + t.Fatalf("Expected non-negative score for peer B, got %f", aScore) + } + + // the penalty is the difference between the threshold and the actual mesh deliveries, squared. + // since we didn't deliver anything, this is just the value of the threshold + penalty := bitmaskScoreParams.MeshMessageDeliveriesThreshold * bitmaskScoreParams.MeshMessageDeliveriesThreshold + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.MeshMessageDeliveriesWeight * penalty + if cScore != expected { + t.Fatalf("Score: %f. Expected %f", cScore, expected) + } +} + +func TestScoreMeshMessageDeliveriesDecay(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesActivation: 0, + MeshMessageDeliveriesWindow: 10 * time.Millisecond, + MeshMessageDeliveriesThreshold: 20, + MeshMessageDeliveriesCap: 100, + MeshMessageDeliveriesDecay: 0.9, + + FirstMessageDeliveriesWeight: 0, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + // deliver messages from peer A + nMessages := 40 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + } + + // we should have a positive score, since we delivered more messages than the threshold + ps.refreshScores() + aScore := ps.Score(peerA) + if aScore < 0 { + t.Fatalf("Expected non-negative score for peer A, got %f", aScore) + } + + // we need to refresh enough times for the decay to bring us below the threshold + decayedDeliveryCount := float64(nMessages) * bitmaskScoreParams.MeshMessageDeliveriesDecay + for i := 0; i < 20; i++ { + ps.refreshScores() + decayedDeliveryCount *= bitmaskScoreParams.MeshMessageDeliveriesDecay + } + aScore = ps.Score(peerA) + // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + deficit := bitmaskScoreParams.MeshMessageDeliveriesThreshold - decayedDeliveryCount + penalty := deficit * deficit + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.MeshMessageDeliveriesWeight * penalty + if aScore != expected { + t.Fatalf("Score: %f. 
Expected %f", aScore, expected) + } +} + +func TestScoreMeshFailurePenalty(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. + // for this test, we set the mesh delivery threshold, but set + // MeshMessageDeliveriesWeight to zero, so the only affect on the score + // is from the mesh failure penalty + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + MeshFailurePenaltyWeight: -1, + MeshFailurePenaltyDecay: 1.0, + + MeshMessageDeliveriesActivation: 0, + MeshMessageDeliveriesWindow: 10 * time.Millisecond, + MeshMessageDeliveriesThreshold: 20, + MeshMessageDeliveriesCap: 100, + MeshMessageDeliveriesDecay: 1.0, + + MeshMessageDeliveriesWeight: 0, + FirstMessageDeliveriesWeight: 0, + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + peerB := peer.ID("B") + peers := []peer.ID{peerA, peerB} + + ps := newPeerScore(params) + for _, p := range peers { + ps.AddPeer(p, "myproto") + ps.Graft(p, mybitmask) + } + + // deliver messages from peer A. peer B does nothing + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + } + + // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet + ps.refreshScores() + aScore := ps.Score(peerA) + bScore := ps.Score(peerB) + if aScore != 0 { + t.Errorf("expected peer A to have score 0.0, got %f", aScore) + } + if bScore != 0 { + t.Errorf("expected peer B to have score 0.0, got %f", bScore) + } + + // prune peer B to apply the penalty + ps.Prune(peerB, mybitmask) + ps.refreshScores() + aScore = ps.Score(peerA) + bScore = ps.Score(peerB) + + if aScore != 0 { + t.Errorf("expected peer A to have score 0.0, got %f", aScore) + } + + // penalty calculation is the same as for MeshMessageDeliveries, but multiplied by MeshFailurePenaltyWeight + // instead of MeshMessageDeliveriesWeight + penalty := bitmaskScoreParams.MeshMessageDeliveriesThreshold * bitmaskScoreParams.MeshMessageDeliveriesThreshold + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.MeshFailurePenaltyWeight * penalty + if bScore != expected { + t.Fatalf("Score: %f. 
Expected %f", bScore, expected) + } +} + +func TestScoreInvalidMessageDeliveries(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 1.0, + } + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.RejectMessage(&msg, RejectInvalidSignature) + } + + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.InvalidMessageDeliveriesWeight * float64(nMessages*nMessages) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } +} + +func TestScoreInvalidMessageDeliveriesDecay(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 0.9, + } + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.RejectMessage(&msg, RejectInvalidSignature) + } + + ps.refreshScores() + aScore := ps.Score(peerA) + expected := bitmaskScoreParams.BitmaskWeight * bitmaskScoreParams.InvalidMessageDeliveriesWeight * math.Pow(bitmaskScoreParams.InvalidMessageDeliveriesDecay*float64(nMessages), 2) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // refresh scores a few times to apply decay + for i := 0; i < 10; i++ { + ps.refreshScores() + expected *= math.Pow(bitmaskScoreParams.InvalidMessageDeliveriesDecay, 2) + } + aScore = ps.Score(peerA) + if aScore != expected { + t.Fatalf("Score: %f. 
Expected %f", aScore, expected) + } +} + +func TestScoreRejectMessageDeliveries(t *testing.T) { + // this tests adds coverage for the dark corners of rejection tracing + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 1.0, + } + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + peerA := peer.ID("A") + peerB := peer.ID("B") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.AddPeer(peerB, "myproto") + + pbMsg := makeTestMessage(0) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + msg2 := Message{ReceivedFrom: peerB, Message: pbMsg} + + // these should have no effect in the score + ps.RejectMessage(&msg, RejectBlacklstedPeer) + ps.RejectMessage(&msg, RejectBlacklistedSource) + ps.RejectMessage(&msg, RejectValidationQueueFull) + + aScore := ps.Score(peerA) + expected := 0.0 + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // insert a record in the message deliveries + ps.ValidateMessage(&msg) + + // this should have no effect in the score, and subsequent duplicate messages should have no + // effect either + ps.RejectMessage(&msg, RejectValidationThrottled) + ps.DuplicateMessage(&msg2) + + aScore = ps.Score(peerA) + expected = 0.0 + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + bScore := ps.Score(peerB) + expected = 0.0 + if bScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // now clear the delivery record + ps.deliveries.head.expire = time.Now() + time.Sleep(1 * time.Millisecond) + ps.deliveries.gc() + + // insert a record in the message deliveries + ps.ValidateMessage(&msg) + + // this should have no effect in the score, and subsequent duplicate messages should have no + // effect either + ps.RejectMessage(&msg, RejectValidationIgnored) + ps.DuplicateMessage(&msg2) + + aScore = ps.Score(peerA) + expected = 0.0 + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + bScore = ps.Score(peerB) + expected = 0.0 + if bScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // now clear the delivery record + ps.deliveries.head.expire = time.Now() + time.Sleep(1 * time.Millisecond) + ps.deliveries.gc() + + // insert a new record in the message deliveries + ps.ValidateMessage(&msg) + + // and reject the message to make sure duplicates are also penalized + ps.RejectMessage(&msg, RejectValidationFailed) + ps.DuplicateMessage(&msg2) + + aScore = ps.Score(peerA) + expected = -1.0 + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + bScore = ps.Score(peerB) + expected = -1.0 + if bScore != expected { + t.Fatalf("Score: %f. Expected %f", bScore, expected) + } + + // now clear the delivery record again + ps.deliveries.head.expire = time.Now() + time.Sleep(1 * time.Millisecond) + ps.deliveries.gc() + + // insert a new record in the message deliveries + ps.ValidateMessage(&msg) + + // and reject the message after a duplciate has arrived + ps.DuplicateMessage(&msg2) + ps.RejectMessage(&msg, RejectValidationFailed) + + aScore = ps.Score(peerA) + expected = -4.0 + if aScore != expected { + t.Fatalf("Score: %f. 
Expected %f", aScore, expected) + } + + bScore = ps.Score(peerB) + expected = -4.0 + if bScore != expected { + t.Fatalf("Score: %f. Expected %f", bScore, expected) + } +} + +func TestScoreApplicationScore(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + var appScoreValue float64 + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return appScoreValue }, + AppSpecificWeight: 0.5, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + for i := -100; i < 100; i++ { + appScoreValue = float64(i) + ps.refreshScores() + aScore := ps.Score(peerA) + expected := float64(i) * params.AppSpecificWeight + if aScore != expected { + t.Errorf("expected peer score to equal app-specific score %f, got %f", expected, aScore) + } + } +} + +func TestScoreIPColocation(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + IPColocationFactorThreshold: 1, + IPColocationFactorWeight: -1, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + + peerA := peer.ID("A") + peerB := peer.ID("B") + peerC := peer.ID("C") + peerD := peer.ID("D") + peers := []peer.ID{peerA, peerB, peerC, peerD} + + ps := newPeerScore(params) + for _, p := range peers { + ps.AddPeer(p, "myproto") + ps.Graft(p, mybitmask) + } + + // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP + setIPsForPeer(t, ps, peerA, "1.2.3.4") + setIPsForPeer(t, ps, peerB, "2.3.4.5") + setIPsForPeer(t, ps, peerC, "2.3.4.5", "3.4.5.6") + setIPsForPeer(t, ps, peerD, "2.3.4.5") + + ps.refreshScores() + aScore := ps.Score(peerA) + bScore := ps.Score(peerB) + cScore := ps.Score(peerC) + dScore := ps.Score(peerD) + + if aScore != 0 { + t.Errorf("expected peer A to have score 0.0, got %f", aScore) + } + + nShared := 3 + ipSurplus := nShared - params.IPColocationFactorThreshold + penalty := ipSurplus * ipSurplus + expected := params.IPColocationFactorWeight * float64(penalty) + for _, score := range []float64{bScore, cScore, dScore} { + if score != expected { + t.Fatalf("Score: %f. 
Expected %f", score, expected) + } + } +} + +func TestScoreIPColocationWhitelist(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + _, ipNet, err := net.ParseCIDR("2.3.0.0/16") + if err != nil { + t.Fatal(err) + } + + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + IPColocationFactorThreshold: 1, + IPColocationFactorWeight: -1, + IPColocationFactorWhitelist: []*net.IPNet{ipNet}, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + + peerA := peer.ID("A") + peerB := peer.ID("B") + peerC := peer.ID("C") + peerD := peer.ID("D") + peers := []peer.ID{peerA, peerB, peerC, peerD} + + ps := newPeerScore(params) + for _, p := range peers { + ps.AddPeer(p, "myproto") + ps.Graft(p, mybitmask) + } + + // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP + setIPsForPeer(t, ps, peerA, "1.2.3.4") + setIPsForPeer(t, ps, peerB, "2.3.4.5") + setIPsForPeer(t, ps, peerC, "2.3.4.5", "3.4.5.6") + setIPsForPeer(t, ps, peerD, "2.3.4.5") + + ps.refreshScores() + aScore := ps.Score(peerA) + bScore := ps.Score(peerB) + cScore := ps.Score(peerC) + dScore := ps.Score(peerD) + + if aScore != 0 { + t.Errorf("expected peer A to have score 0.0, got %f", aScore) + } + + if bScore != 0 { + t.Errorf("expected peer B to have score 0.0, got %f", aScore) + } + + if cScore != 0 { + t.Errorf("expected peer C to have score 0.0, got %f", aScore) + } + + if dScore != 0 { + t.Errorf("expected peer D to have score 0.0, got %f", aScore) + } + +} + +func TestScoreBehaviourPenalty(t *testing.T) { + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + BehaviourPenaltyWeight: -1, + BehaviourPenaltyDecay: 0.99, + } + + peerA := peer.ID("A") + + var ps *peerScore + + // first check AddPenalty on a nil peerScore + ps.AddPenalty(peerA, 1) + aScore := ps.Score(peerA) + if aScore != 0 { + t.Errorf("expected peer score to be 0, got %f", aScore) + } + + // instantiate the peerScore + ps = newPeerScore(params) + + // next AddPenalty on a non-existent peer + ps.AddPenalty(peerA, 1) + aScore = ps.Score(peerA) + if aScore != 0 { + t.Errorf("expected peer score to be 0, got %f", aScore) + } + + // add the peer and test penalties + ps.AddPeer(peerA, "myproto") + + aScore = ps.Score(peerA) + if aScore != 0 { + t.Errorf("expected peer score to be 0, got %f", aScore) + } + + ps.AddPenalty(peerA, 1) + aScore = ps.Score(peerA) + if aScore != -1 { + t.Errorf("expected peer score to be -1, got %f", aScore) + } + + ps.AddPenalty(peerA, 1) + aScore = ps.Score(peerA) + if aScore != -4 { + t.Errorf("expected peer score to be -4, got %f", aScore) + } + + ps.refreshScores() + + aScore = ps.Score(peerA) + if aScore != -3.9204 { + t.Errorf("expected peer score to be -3.9204, got %f", aScore) + } +} + +func TestScoreRetention(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return -1000 }, + AppSpecificWeight: 1.0, + Bitmasks: make(map[string]*BitmaskScoreParams), + RetainScore: time.Second, + } + + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + ps.Graft(peerA, mybitmask) + + // score should equal -1000 (app specific score) + expected := float64(-1000) + ps.refreshScores() + aScore := ps.Score(peerA) + if aScore != expected { + t.Fatalf("Score: %f. 
Expected %f", aScore, expected) + } + + // disconnect & wait half of RetainScore time. should still have negative score + ps.RemovePeer(peerA) + delay := params.RetainScore / time.Duration(2) + time.Sleep(delay) + ps.refreshScores() + aScore = ps.Score(peerA) + if aScore != expected { + t.Fatalf("Score: %f. Expected %f", aScore, expected) + } + + // wait remaining time (plus a little slop) and the score should reset to zero + time.Sleep(delay + (50 * time.Millisecond)) + ps.refreshScores() + aScore = ps.Score(peerA) + if aScore != 0 { + t.Fatalf("Score: %f. Expected 0.0", aScore) + } +} + +func TestScoreRecapBitmaskParams(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesActivation: time.Second, + MeshMessageDeliveriesWindow: 10 * time.Millisecond, + MeshMessageDeliveriesThreshold: 20, + MeshMessageDeliveriesCap: 100, + MeshMessageDeliveriesDecay: 1.0, // no decay for this test + + FirstMessageDeliveriesWeight: 10, + FirstMessageDeliveriesDecay: 1.0, // no decay for this test + FirstMessageDeliveriesCap: 100, + + TimeInMeshQuantum: time.Second, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + // peer A always delivers the message first. + // peer B delivers next (within the delivery window). + // peer C delivers outside the delivery window. + // we expect peers A and B to have a score of zero, since all other parameter weights are zero. + // Peer C should have a negative score. + peerA := peer.ID("A") + peerB := peer.ID("B") + peers := []peer.ID{peerA, peerB} + + ps := newPeerScore(params) + for _, p := range peers { + ps.AddPeer(p, "myproto") + ps.Graft(p, mybitmask) + } + + // deliver a bunch of messages from peer A, with duplicates within the window from peer B, + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.DeliverMessage(&msg) + + msg.ReceivedFrom = peerB + ps.DuplicateMessage(&msg) + } + + // check that the FirstMessageDeliveries for peerA and MeshMessageDeliveries for PeerB is + // at 100 + if ps.peerStats[peerA].bitmasks[string(mybitmask)].firstMessageDeliveries != 100 { + t.Fatalf("expected 100 FirstMessageDeliveries for peerA, but got %f", ps.peerStats[peerA].bitmasks[string(mybitmask)].firstMessageDeliveries) + } + // check that the MeshMessageDeliveries for peerB and MeshMessageDeliveries for PeerB is + // at 100 + if ps.peerStats[peerB].bitmasks[string(mybitmask)].meshMessageDeliveries != 100 { + t.Fatalf("expected 100 MeshMessageDeliveries for peerB, but got %f", ps.peerStats[peerB].bitmasks[string(mybitmask)].meshMessageDeliveries) + } + + // reset the bitmask paramaters recapping the deliveries counters + newBitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + + MeshMessageDeliveriesWeight: -1, + MeshMessageDeliveriesActivation: time.Second, + MeshMessageDeliveriesWindow: 10 * time.Millisecond, + MeshMessageDeliveriesThreshold: 20, + MeshMessageDeliveriesCap: 50, + MeshMessageDeliveriesDecay: 1.0, // no decay for this test + + FirstMessageDeliveriesWeight: 10, + FirstMessageDeliveriesDecay: 1.0, // no decay for this test + FirstMessageDeliveriesCap: 50, + + 
TimeInMeshQuantum: time.Second, + } + + err := ps.SetBitmaskScoreParams(mybitmask, newBitmaskScoreParams) + if err != nil { + t.Fatal(err) + } + + // verify that the counters got recapped + if ps.peerStats[peerA].bitmasks[string(mybitmask)].firstMessageDeliveries != 50 { + t.Fatalf("expected 50 FirstMessageDeliveries for peerA, but got %f", ps.peerStats[peerA].bitmasks[string(mybitmask)].firstMessageDeliveries) + } + if ps.peerStats[peerB].bitmasks[string(mybitmask)].meshMessageDeliveries != 50 { + t.Fatalf("expected 50 MeshMessageDeliveries for peerB, but got %f", ps.peerStats[peerB].bitmasks[string(mybitmask)].meshMessageDeliveries) + } +} + +func TestScoreResetBitmaskParams(t *testing.T) { + // Create parameters with reasonable default values + mybitmask := []byte{0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + params := &PeerScoreParams{ + AppSpecificScore: func(peer.ID) float64 { return 0 }, + Bitmasks: make(map[string]*BitmaskScoreParams), + } + bitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -1, + InvalidMessageDeliveriesDecay: 1.0, + } + + params.Bitmasks[string(mybitmask)] = bitmaskScoreParams + + // peer A always delivers the message first. + // peer B delivers next (within the delivery window). + // peer C delivers outside the delivery window. + // we expect peers A and B to have a score of zero, since all other parameter weights are zero. + // Peer C should have a negative score. + peerA := peer.ID("A") + + ps := newPeerScore(params) + ps.AddPeer(peerA, "myproto") + + // reject a bunch of messages + nMessages := 100 + for i := 0; i < nMessages; i++ { + pbMsg := makeTestMessage(i) + pbMsg.Bitmask = mybitmask + msg := Message{ReceivedFrom: peerA, Message: pbMsg} + ps.ValidateMessage(&msg) + ps.RejectMessage(&msg, RejectValidationFailed) + } + + // check the bitmask score + aScore := ps.Score(peerA) + if aScore != -10000 { + t.Fatalf("expected a -10000 score, but got %f instead", aScore) + } + + // reset the bitmask paramaters recapping the deliveries counters + newBitmaskScoreParams := &BitmaskScoreParams{ + BitmaskWeight: 1, + TimeInMeshQuantum: time.Second, + InvalidMessageDeliveriesWeight: -10, + InvalidMessageDeliveriesDecay: 1.0, + } + + err := ps.SetBitmaskScoreParams(mybitmask, newBitmaskScoreParams) + if err != nil { + t.Fatal(err) + } + + // verify the bitmask score was adjusted + aScore = ps.Score(peerA) + if aScore != -100000 { + t.Fatalf("expected a -1000000 score, but got %f instead", aScore) + } +} + +func withinVariance(score float64, expected float64, variance float64) bool { + if expected >= 0 { + return score > expected*(1-variance) && score < expected*(1+variance) + } + return score > expected*(1+variance) && score < expected*(1-variance) +} + +// hack to set IPs for a peer without having to spin up real hosts with shared IPs +func setIPsForPeer(t *testing.T, ps *peerScore, p peer.ID, ips ...string) { + t.Helper() + ps.setIPs(p, ips, []string{}) + pstats, ok := ps.peerStats[p] + if !ok { + t.Fatal("unable to get peerStats") + } + pstats.ips = ips +} diff --git a/go-libp2p-blossomsub/sign.go b/go-libp2p-blossomsub/sign.go new file mode 100644 index 0000000..ee887bf --- /dev/null +++ b/go-libp2p-blossomsub/sign.go @@ -0,0 +1,138 @@ +package blossomsub + +import ( + "fmt" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" +) + +// MessageSignaturePolicy describes if signatures are 
produced, expected, and/or verified. +type MessageSignaturePolicy uint8 + +// LaxSign and LaxNoSign are deprecated. In the future msgSigning and msgVerification can be unified. +const ( + // msgSigning is set when the locally produced messages must be signed + msgSigning MessageSignaturePolicy = 1 << iota + // msgVerification is set when external messages must be verfied + msgVerification +) + +const ( + // StrictSign produces signatures and expects and verifies incoming signatures + StrictSign = msgSigning | msgVerification + // StrictNoSign does not produce signatures and drops and penalises incoming messages that carry one + StrictNoSign = msgVerification + // LaxSign produces signatures and validates incoming signatures iff one is present + // Deprecated: it is recommend to either strictly enable, or strictly disable, signatures. + LaxSign = msgSigning + // LaxNoSign does not produce signatures and validates incoming signatures iff one is present + // Deprecated: it is recommend to either strictly enable, or strictly disable, signatures. + LaxNoSign = 0 +) + +// mustVerify is true when a message signature must be verified. +// If signatures are not expected, verification checks if the signature is absent. +func (policy MessageSignaturePolicy) mustVerify() bool { + return policy&msgVerification != 0 +} + +// mustSign is true when messages should be signed, and incoming messages are expected to have a signature. +func (policy MessageSignaturePolicy) mustSign() bool { + return policy&msgSigning != 0 +} + +const SignPrefix = "libp2p-pubsub:" + +func verifyMessageSignature(m *pb.Message) error { + pubk, err := messagePubKey(m) + if err != nil { + return err + } + + xm := *m + xm.Signature = nil + xm.Key = nil + bytes, err := xm.Marshal() + if err != nil { + return err + } + + bytes = withSignPrefix(bytes) + + valid, err := pubk.Verify(bytes, m.Signature) + if err != nil { + return err + } + + if !valid { + return fmt.Errorf("invalid signature") + } + + return nil +} + +func messagePubKey(m *pb.Message) (crypto.PubKey, error) { + var pubk crypto.PubKey + + pid, err := peer.IDFromBytes(m.From) + if err != nil { + return nil, err + } + + if m.Key == nil { + // no attached key, it must be extractable from the source ID + pubk, err = pid.ExtractPublicKey() + if err != nil { + return nil, fmt.Errorf("cannot extract signing key: %s", err.Error()) + } + if pubk == nil { + return nil, fmt.Errorf("cannot extract signing key") + } + } else { + pubk, err = crypto.UnmarshalPublicKey(m.Key) + if err != nil { + return nil, fmt.Errorf("cannot unmarshal signing key: %s", err.Error()) + } + + // verify that the source ID matches the attached key + if !pid.MatchesPublicKey(pubk) { + return nil, fmt.Errorf("bad signing key; source ID %s doesn't match key", pid) + } + } + + return pubk, nil +} + +func signMessage(pid peer.ID, key crypto.PrivKey, m *pb.Message) error { + bytes, err := m.Marshal() + if err != nil { + return err + } + + bytes = withSignPrefix(bytes) + + sig, err := key.Sign(bytes) + if err != nil { + return err + } + + m.Signature = sig + + pk, _ := pid.ExtractPublicKey() + if pk == nil { + pubk, err := crypto.MarshalPublicKey(key.GetPublic()) + if err != nil { + return err + } + m.Key = pubk + } + + return nil +} + +func withSignPrefix(bytes []byte) []byte { + return append([]byte(SignPrefix), bytes...) 
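
For context on the signing helpers above: the signature never covers the raw wire bytes directly. Both signMessage and verifyMessageSignature marshal the message with its Signature and Key fields cleared and prepend SignPrefix before signing or verifying. A minimal stand-alone sketch of that domain-separation step using the libp2p crypto API; the byte slice is a stand-in for the marshalled protobuf, not the real encoding.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	const signPrefix = "libp2p-pubsub:" // SignPrefix above

	priv, pub, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
	if err != nil {
		panic(err)
	}

	// Stand-in for the message marshalled with Signature and Key cleared.
	payload := []byte("marshalled-message-without-signature-or-key")

	sig, err := priv.Sign(append([]byte(signPrefix), payload...))
	if err != nil {
		panic(err)
	}

	ok, err := pub.Verify(append([]byte(signPrefix), payload...), sig)
	fmt.Println(ok, err) // true <nil>
}
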
+} diff --git a/go-libp2p-blossomsub/sign_test.go b/go-libp2p-blossomsub/sign_test.go new file mode 100644 index 0000000..2df9d74 --- /dev/null +++ b/go-libp2p-blossomsub/sign_test.go @@ -0,0 +1,43 @@ +package blossomsub + +import ( + "testing" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestSigning(t *testing.T) { + privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048) + if err != nil { + t.Fatal(err) + } + testSignVerify(t, privk) + + privk, _, err = crypto.GenerateKeyPair(crypto.Ed25519, 0) + if err != nil { + t.Fatal(err) + } + testSignVerify(t, privk) +} + +func testSignVerify(t *testing.T, privk crypto.PrivKey) { + id, err := peer.IDFromPublicKey(privk.GetPublic()) + if err != nil { + t.Fatal(err) + } + bitmask := []byte{0xf0, 0x00} + m := pb.Message{ + Data: []byte("abc"), + Bitmask: bitmask, + From: []byte(id), + Seqno: []byte("123"), + } + signMessage(id, privk, &m) + err = verifyMessageSignature(&m) + if err != nil { + t.Fatal(err) + } +} diff --git a/go-libp2p-blossomsub/subscription.go b/go-libp2p-blossomsub/subscription.go new file mode 100644 index 0000000..28ba12c --- /dev/null +++ b/go-libp2p-blossomsub/subscription.go @@ -0,0 +1,51 @@ +package blossomsub + +import ( + "context" + "sync" +) + +// Subscription handles the details of a particular Bitmask subscription. +// There may be many subscriptions for a given Bitmask. +type Subscription struct { + bitmask []byte + ch chan *Message + cancelCh chan<- *Subscription + ctx context.Context + err error + once sync.Once +} + +// Bitmask returns the bitmask []byte associated with the Subscription +func (sub *Subscription) Bitmask() []byte { + return sub.bitmask +} + +// Next returns the next message in our subscription +func (sub *Subscription) Next(ctx context.Context) (*Message, error) { + select { + case msg, ok := <-sub.ch: + if !ok { + return msg, sub.err + } + + return msg, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// Cancel closes the subscription. If this is the last active subscription then pubsub will send an unsubscribe +// announcement to the network. +func (sub *Subscription) Cancel() { + select { + case sub.cancelCh <- sub: + case <-sub.ctx.Done(): + } +} + +func (sub *Subscription) close() { + sub.once.Do(func() { + close(sub.ch) + }) +} diff --git a/go-libp2p-blossomsub/subscription_filter.go b/go-libp2p-blossomsub/subscription_filter.go new file mode 100644 index 0000000..e1c5a54 --- /dev/null +++ b/go-libp2p-blossomsub/subscription_filter.go @@ -0,0 +1,125 @@ +package blossomsub + +import ( + "errors" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// ErrTooManySubscriptions may be returned by a SubscriptionFilter to signal that there are too many +// subscriptions to process. +var ErrTooManySubscriptions = errors.New("too many subscriptions") + +// SubscriptionFilter is a function that tells us whether we are interested in allowing and tracking +// subscriptions for a given bitmask. +// +// The filter is consulted whenever a subscription notification is received by another peer; if the +// filter returns false, then the notification is ignored. +// +// The filter is also consulted when joining bitmasks; if the filter returns false, then the Join +// operation will result in an error. 
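
The Subscription type above is the read side handed out by the pubsub layer. A hedged consumption sketch follows; how the *Subscription is obtained (the join/subscribe path) sits outside this hunk, and the import path is assumed to be the one this patch's pb package implies.

package consumerexample

import (
	"context"
	"fmt"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// consume drains a subscription until the context is cancelled or the
// subscription is closed, then cancels it so that, if it was the last active
// subscription, an unsubscribe announcement goes out (per Cancel's doc above).
func consume(ctx context.Context, sub *blossomsub.Subscription) {
	defer sub.Cancel()
	for {
		msg, err := sub.Next(ctx)
		if err != nil {
			// ctx.Err() on cancellation, or the error recorded when the
			// subscription channel was closed.
			fmt.Println("subscription closed:", err)
			return
		}
		fmt.Printf("bitmask %x: %d bytes from %s\n", sub.Bitmask(), len(msg.Data), msg.ReceivedFrom)
	}
}
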
+type SubscriptionFilter interface { + // CanSubscribe returns true if the bitmask is of interest and we can subscribe to it + CanSubscribe(bitmask []byte) bool + + // FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications. + // It should filter only the subscriptions of interest and my return an error if (for instance) + // there are too many subscriptions. + FilterIncomingSubscriptions(peer.ID, []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) +} + +// WithSubscriptionFilter is a pubsub option that specifies a filter for subscriptions +// in bitmasks of interest. +func WithSubscriptionFilter(subFilter SubscriptionFilter) Option { + return func(ps *PubSub) error { + ps.subFilter = subFilter + return nil + } +} + +// NewAllowlistSubscriptionFilter creates a subscription filter that only allows explicitly +// specified bitmasks for local subscriptions and incoming peer subscriptions. +func NewAllowlistSubscriptionFilter(bitmasks ...[]byte) SubscriptionFilter { + allow := make(map[string]struct{}) + for _, bitmask := range bitmasks { + allow[string(bitmask)] = struct{}{} + } + + return &allowlistSubscriptionFilter{allow: allow} +} + +type allowlistSubscriptionFilter struct { + allow map[string]struct{} +} + +var _ SubscriptionFilter = (*allowlistSubscriptionFilter)(nil) + +func (f *allowlistSubscriptionFilter) CanSubscribe(bitmask []byte) bool { + _, ok := f.allow[string(bitmask)] + return ok +} + +func (f *allowlistSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) { + return FilterSubscriptions(subs, f.CanSubscribe), nil +} + +// FilterSubscriptions filters (and deduplicates) a list of subscriptions. +// filter should return true if a bitmask is of interest. +func FilterSubscriptions(subs []*pb.RPC_SubOpts, filter func([]byte) bool) []*pb.RPC_SubOpts { + accept := make(map[string]*pb.RPC_SubOpts) + + for _, sub := range subs { + bitmask := sub.GetBitmask() + + if !filter(bitmask) { + continue + } + + otherSub, ok := accept[string(bitmask)] + if ok { + if sub.GetSubscribe() != otherSub.GetSubscribe() { + delete(accept, string(bitmask)) + } + } else { + accept[string(bitmask)] = sub + } + } + + if len(accept) == 0 { + return nil + } + + result := make([]*pb.RPC_SubOpts, 0, len(accept)) + for _, sub := range accept { + result = append(result, sub) + } + + return result +} + +// WrapLimitSubscriptionFilter wraps a subscription filter with a hard limit in the number of +// subscriptions allowed in an RPC message. 
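
Putting the pieces above together: a hedged sketch composing the allowlist filter with the RPC-level limit just described, and running an incoming subscription list through it directly. In a live node the composed filter would instead be installed with WithSubscriptionFilter.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

func main() {
	bitmask1 := []byte{0xff, 0x00, 0x00, 0x00}
	bitmask2 := []byte{0x00, 0xff, 0x00, 0x00}

	// Only bitmask1 and bitmask2 are of interest, and any single RPC carrying
	// more than 32 subscription options is rejected outright.
	filter := blossomsub.WrapLimitSubscriptionFilter(
		blossomsub.NewAllowlistSubscriptionFilter(bitmask1, bitmask2), 32)

	subs := []*pb.RPC_SubOpts{
		{Bitmask: bitmask1, Subscribe: true},
		{Bitmask: []byte{0x00, 0x00, 0xff, 0x00}, Subscribe: true}, // not allowlisted, dropped
	}

	accepted, err := filter.FilterIncomingSubscriptions(peer.ID("remote-peer"), subs)
	fmt.Println(len(accepted), err) // 1 <nil>
}
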
+func WrapLimitSubscriptionFilter(filter SubscriptionFilter, limit int) SubscriptionFilter { + return &limitSubscriptionFilter{filter: filter, limit: limit} +} + +type limitSubscriptionFilter struct { + filter SubscriptionFilter + limit int +} + +var _ SubscriptionFilter = (*limitSubscriptionFilter)(nil) + +func (f *limitSubscriptionFilter) CanSubscribe(bitmask []byte) bool { + return f.filter.CanSubscribe(bitmask) +} + +func (f *limitSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) { + if len(subs) > f.limit { + return nil, ErrTooManySubscriptions + } + + return f.filter.FilterIncomingSubscriptions(from, subs) +} diff --git a/go-libp2p-blossomsub/subscription_filter_test.go b/go-libp2p-blossomsub/subscription_filter_test.go new file mode 100644 index 0000000..1252a1e --- /dev/null +++ b/go-libp2p-blossomsub/subscription_filter_test.go @@ -0,0 +1,177 @@ +package blossomsub + +import ( + "bytes" + "context" + "testing" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestBasicSubscriptionFilter(t *testing.T) { + peerA := peer.ID("A") + + bitmask1 := []byte{0xff, 0x00, 0x00, 0x00} + bitmask2 := []byte{0x00, 0xff, 0x00, 0x00} + bitmask3 := []byte{0x00, 0x00, 0xff, 0x00} + yes := true + subs := []*pb.RPC_SubOpts{ + &pb.RPC_SubOpts{ + Bitmask: bitmask1, + Subscribe: yes, + }, + &pb.RPC_SubOpts{ + Bitmask: bitmask2, + Subscribe: yes, + }, + &pb.RPC_SubOpts{ + Bitmask: bitmask3, + Subscribe: yes, + }, + } + + filter := NewAllowlistSubscriptionFilter(bitmask1, bitmask2) + canSubscribe := filter.CanSubscribe(bitmask1) + if !canSubscribe { + t.Fatal("expected allowed subscription") + } + canSubscribe = filter.CanSubscribe(bitmask2) + if !canSubscribe { + t.Fatal("expected allowed subscription") + } + canSubscribe = filter.CanSubscribe(bitmask3) + if canSubscribe { + t.Fatal("expected disallowed subscription") + } + allowedSubs, err := filter.FilterIncomingSubscriptions(peerA, subs) + if err != nil { + t.Fatal(err) + } + if len(allowedSubs) != 2 { + t.Fatalf("expected 2 allowed subscriptions but got %d", len(allowedSubs)) + } + for _, sub := range allowedSubs { + if bytes.Equal(sub.GetBitmask(), bitmask3) { + t.Fatal("unpexted subscription to test3") + } + } + + limitFilter := WrapLimitSubscriptionFilter(filter, 2) + _, err = limitFilter.FilterIncomingSubscriptions(peerA, subs) + if err != ErrTooManySubscriptions { + t.Fatal("expected rejection because of too many subscriptions") + } +} + +func TestSubscriptionFilterDeduplication(t *testing.T) { + peerA := peer.ID("A") + + bitmask1 := []byte{0xff, 0x00, 0x00, 0x00} + bitmask2 := []byte{0x00, 0xff, 0x00, 0x00} + bitmask3 := []byte{0x00, 0x00, 0xff, 0x00} + yes := true + no := false + subs := []*pb.RPC_SubOpts{ + &pb.RPC_SubOpts{ + Bitmask: bitmask1, + Subscribe: yes, + }, + &pb.RPC_SubOpts{ + Bitmask: bitmask1, + Subscribe: yes, + }, + + &pb.RPC_SubOpts{ + Bitmask: bitmask2, + Subscribe: yes, + }, + &pb.RPC_SubOpts{ + Bitmask: bitmask2, + Subscribe: no, + }, + &pb.RPC_SubOpts{ + Bitmask: bitmask3, + Subscribe: yes, + }, + } + + filter := NewAllowlistSubscriptionFilter(bitmask1, bitmask2) + allowedSubs, err := filter.FilterIncomingSubscriptions(peerA, subs) + if err != nil { + t.Fatal(err) + } + if len(allowedSubs) != 1 { + t.Fatalf("expected 2 allowed subscriptions but got %d", len(allowedSubs)) + } + for _, sub := range allowedSubs { + if bytes.Equal(sub.GetBitmask(), bitmask3) || 
bytes.Equal(sub.GetBitmask(), bitmask2) { + t.Fatal("unexpected subscription") + } + } +} + +func TestSubscriptionFilterRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + ps1 := getPubsub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0xff, 0x00, 0x00, 0x00}, []byte{0x00, 0xff, 0x00, 0x00}))) + ps2 := getPubsub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0x00, 0xff, 0x00, 0x00}, []byte{0x00, 0x00, 0xff, 0x00}))) + + _ = mustSubscribe(t, ps1, []byte{0xff, 0x00, 0x00, 0x00}) + _ = mustSubscribe(t, ps1, []byte{0x00, 0xff, 0x00, 0x00}) + _ = mustSubscribe(t, ps2, []byte{0x00, 0xff, 0x00, 0x00}) + _ = mustSubscribe(t, ps2, []byte{0x00, 0x00, 0xff, 0x00}) + + // check the rejection as well + _, err := ps1.Join([]byte{0x00, 0x00, 0xff, 0x00}) + if err == nil { + t.Fatal("expected subscription error") + } + + connect(t, hosts[0], hosts[1]) + + time.Sleep(time.Second) + + var sub1, sub2, sub3 bool + ready := make(chan struct{}) + + ps1.eval <- func() { + _, sub1 = ps1.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[1].ID()] + _, sub2 = ps1.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[1].ID()] + _, sub3 = ps1.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[1].ID()] + ready <- struct{}{} + } + <-ready + + if sub1 { + t.Fatal("expected no subscription for test1") + } + if !sub2 { + t.Fatal("expected subscription for test2") + } + if sub3 { + t.Fatal("expected no subscription for test1") + } + + ps2.eval <- func() { + _, sub1 = ps2.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[0].ID()] + _, sub2 = ps2.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[0].ID()] + _, sub3 = ps2.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[0].ID()] + ready <- struct{}{} + } + <-ready + + if sub1 { + t.Fatal("expected no subscription for test1") + } + if !sub2 { + t.Fatal("expected subscription for test1") + } + if sub3 { + t.Fatal("expected no subscription for test1") + } +} diff --git a/go-libp2p-blossomsub/tag_tracer.go b/go-libp2p-blossomsub/tag_tracer.go new file mode 100644 index 0000000..0028b04 --- /dev/null +++ b/go-libp2p-blossomsub/tag_tracer.go @@ -0,0 +1,259 @@ +package blossomsub + +import ( + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +var ( + // BlossomSubConnTagBumpMessageDelivery is the amount to add to the connection manager + // tag that tracks message deliveries. Each time a peer is the first to deliver a + // message within a bitmask, we "bump" a tag by this amount, up to a maximum + // of BlossomSubConnTagMessageDeliveryCap. + // Note that the delivery tags decay over time, decreasing by BlossomSubConnTagDecayAmount + // at every BlossomSubConnTagDecayInterval. + BlossomSubConnTagBumpMessageDelivery = 1 + + // BlossomSubConnTagDecayInterval is the decay interval for decaying connection manager tags. + BlossomSubConnTagDecayInterval = 10 * time.Minute + + // BlossomSubConnTagDecayAmount is subtracted from decaying tag values at each decay interval. + BlossomSubConnTagDecayAmount = 1 + + // BlossomSubConnTagMessageDeliveryCap is the maximum value for the connection manager tags that + // track message deliveries. + BlossomSubConnTagMessageDeliveryCap = 15 +) + +// tagTracer is an internal tracer that applies connection manager tags to peer +// connections based on their behavior. 
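
Assuming the package-level defaults declared above are left untouched, the delivery tag a peer can accumulate is bounded and short-lived; a quick back-of-the-envelope check of how long the connection-manager preference survives once a peer stops delivering first copies:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		deliveryCap   = 15               // BlossomSubConnTagMessageDeliveryCap
		decayAmount   = 1                // BlossomSubConnTagDecayAmount
		decayInterval = 10 * time.Minute // BlossomSubConnTagDecayInterval
	)

	// A peer sitting at the cap loses decayAmount every decayInterval, so its
	// delivery tag fades completely after:
	fmt.Println(time.Duration(deliveryCap/decayAmount) * decayInterval) // 2h30m0s
}
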
+// +// We tag a peer's connections for the following reasons: +// - Directly connected peers are tagged with BlossomSubConnTagValueDirectPeer (default 1000). +// - Mesh peers are tagged with a value of BlossomSubConnTagValueMeshPeer (default 20). +// If a peer is in multiple bitmask meshes, they'll be tagged for each. +// - For each message that we receive, we bump a delivery tag for peer that delivered the message +// first. +// The delivery tags have a maximum value, BlossomSubConnTagMessageDeliveryCap, and they decay at +// a rate of BlossomSubConnTagDecayAmount / BlossomSubConnTagDecayInterval. +type tagTracer struct { + sync.RWMutex + + cmgr connmgr.ConnManager + idGen *msgIDGenerator + decayer connmgr.Decayer + decaying map[string]connmgr.DecayingTag + direct map[peer.ID]struct{} + + // a map of message ids to the set of peers who delivered the message after the first delivery, + // but before the message was finished validating + nearFirst map[string]map[peer.ID]struct{} +} + +func newTagTracer(cmgr connmgr.ConnManager) *tagTracer { + decayer, ok := connmgr.SupportsDecay(cmgr) + if !ok { + log.Debugf("connection manager does not support decaying tags, delivery tags will not be applied") + } + return &tagTracer{ + cmgr: cmgr, + idGen: newMsgIdGenerator(), + decayer: decayer, + decaying: make(map[string]connmgr.DecayingTag), + nearFirst: make(map[string]map[peer.ID]struct{}), + } +} + +func (t *tagTracer) Start(gs *BlossomSubRouter) { + if t == nil { + return + } + + t.idGen = gs.p.idGen + t.direct = gs.direct +} + +func (t *tagTracer) tagPeerIfDirect(p peer.ID) { + if t.direct == nil { + return + } + + // tag peer if it is a direct peer + _, direct := t.direct[p] + if direct { + t.cmgr.Protect(p, "pubsub:") + } +} + +func (t *tagTracer) tagMeshPeer(p peer.ID, bitmask []byte) { + tag := bitmaskTag(bitmask) + t.cmgr.Protect(p, tag) +} + +func (t *tagTracer) untagMeshPeer(p peer.ID, bitmask []byte) { + tag := bitmaskTag(bitmask) + t.cmgr.Unprotect(p, tag) +} + +func bitmaskTag(bitmask []byte) string { + return fmt.Sprintf("pubsub:%s", bitmask) +} + +func (t *tagTracer) addDeliveryTag(bitmask []byte) { + if t.decayer == nil { + return + } + + name := fmt.Sprintf("pubsub-deliveries:%s", bitmask) + t.Lock() + defer t.Unlock() + tag, err := t.decayer.RegisterDecayingTag( + name, + BlossomSubConnTagDecayInterval, + connmgr.DecayFixed(BlossomSubConnTagDecayAmount), + connmgr.BumpSumBounded(0, BlossomSubConnTagMessageDeliveryCap)) + + if err != nil { + log.Warnf("unable to create decaying delivery tag: %s", err) + return + } + t.decaying[string(bitmask)] = tag +} + +func (t *tagTracer) removeDeliveryTag(bitmask []byte) { + t.Lock() + defer t.Unlock() + tag, ok := t.decaying[string(bitmask)] + if !ok { + return + } + err := tag.Close() + if err != nil { + log.Warnf("error closing decaying connmgr tag: %s", err) + } + delete(t.decaying, string(bitmask)) +} + +func (t *tagTracer) bumpDeliveryTag(p peer.ID, bitmask []byte) error { + t.RLock() + defer t.RUnlock() + + tag, ok := t.decaying[string(bitmask)] + if !ok { + return fmt.Errorf("no decaying tag registered for bitmask %s", bitmask) + } + return tag.Bump(p, BlossomSubConnTagBumpMessageDelivery) +} + +func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) { + bitmask := msg.GetBitmask() + err := t.bumpDeliveryTag(p, bitmask) + if err != nil { + log.Warnf("error bumping delivery tag: %s", err) + } +} + +// nearFirstPeers returns the peers who delivered the message while it was still validating +func (t *tagTracer) nearFirstPeers(msg 
*Message) []peer.ID { + t.Lock() + defer t.Unlock() + peersMap, ok := t.nearFirst[t.idGen.ID(msg)] + if !ok { + return nil + } + peers := make([]peer.ID, 0, len(peersMap)) + for p := range peersMap { + peers = append(peers, p) + } + return peers +} + +// -- RawTracer interface methods +var _ RawTracer = (*tagTracer)(nil) + +func (t *tagTracer) AddPeer(p peer.ID, proto protocol.ID) { + t.tagPeerIfDirect(p) +} + +func (t *tagTracer) Join(bitmask []byte) { + t.addDeliveryTag(bitmask) +} + +func (t *tagTracer) DeliverMessage(msg *Message) { + nearFirst := t.nearFirstPeers(msg) + + t.bumpTagsForMessage(msg.ReceivedFrom, msg) + for _, p := range nearFirst { + t.bumpTagsForMessage(p, msg) + } + + // delete the delivery state for this message + t.Lock() + delete(t.nearFirst, t.idGen.ID(msg)) + t.Unlock() +} + +func (t *tagTracer) Leave(bitmask []byte) { + t.removeDeliveryTag(bitmask) +} + +func (t *tagTracer) Graft(p peer.ID, bitmask []byte) { + t.tagMeshPeer(p, bitmask) +} + +func (t *tagTracer) Prune(p peer.ID, bitmask []byte) { + t.untagMeshPeer(p, bitmask) +} + +func (t *tagTracer) ValidateMessage(msg *Message) { + t.Lock() + defer t.Unlock() + + // create map to start tracking the peers who deliver while we're validating + id := t.idGen.ID(msg) + if _, exists := t.nearFirst[id]; exists { + return + } + t.nearFirst[id] = make(map[peer.ID]struct{}) +} + +func (t *tagTracer) DuplicateMessage(msg *Message) { + t.Lock() + defer t.Unlock() + + id := t.idGen.ID(msg) + peers, ok := t.nearFirst[id] + if !ok { + return + } + peers[msg.ReceivedFrom] = struct{}{} +} + +func (t *tagTracer) RejectMessage(msg *Message, reason string) { + t.Lock() + defer t.Unlock() + + // We want to delete the near-first delivery tracking for messages that have passed through + // the validation pipeline. Other rejection reasons (missing signature, etc) skip the validation + // queue, so we don't want to remove the state in case the message is still validating. 
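
tagTracer is one consumer of the RawTracer hook set it satisfies above (var _ RawTracer = (*tagTracer)(nil)); the same method set can back much simpler instrumentation. A hedged sketch of a tracer that only counts first deliveries per bitmask; the method signatures are copied from the tagTracer methods in this hunk, and how a tracer gets registered with the router is outside this hunk, so only the implementation is shown.

package tracerexample

import (
	"sync"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// deliveryCounter counts first deliveries per bitmask; every other event is a no-op.
type deliveryCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

var _ blossomsub.RawTracer = (*deliveryCounter)(nil)

func newDeliveryCounter() *deliveryCounter {
	return &deliveryCounter{counts: make(map[string]int)}
}

func (c *deliveryCounter) DeliverMessage(msg *blossomsub.Message) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[string(msg.GetBitmask())]++
}

func (c *deliveryCounter) AddPeer(peer.ID, protocol.ID)              {}
func (c *deliveryCounter) RemovePeer(peer.ID)                        {}
func (c *deliveryCounter) Join([]byte)                               {}
func (c *deliveryCounter) Leave([]byte)                              {}
func (c *deliveryCounter) Graft(peer.ID, []byte)                     {}
func (c *deliveryCounter) Prune(peer.ID, []byte)                     {}
func (c *deliveryCounter) ValidateMessage(*blossomsub.Message)       {}
func (c *deliveryCounter) DuplicateMessage(*blossomsub.Message)      {}
func (c *deliveryCounter) RejectMessage(*blossomsub.Message, string) {}
func (c *deliveryCounter) ThrottlePeer(peer.ID)                      {}
func (c *deliveryCounter) RecvRPC(*blossomsub.RPC)                   {}
func (c *deliveryCounter) SendRPC(*blossomsub.RPC, peer.ID)          {}
func (c *deliveryCounter) DropRPC(*blossomsub.RPC, peer.ID)          {}
func (c *deliveryCounter) UndeliverableMessage(*blossomsub.Message)  {}
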
+ switch reason { + case RejectValidationThrottled: + fallthrough + case RejectValidationIgnored: + fallthrough + case RejectValidationFailed: + delete(t.nearFirst, t.idGen.ID(msg)) + } +} + +func (t *tagTracer) RemovePeer(peer.ID) {} +func (t *tagTracer) ThrottlePeer(p peer.ID) {} +func (t *tagTracer) RecvRPC(rpc *RPC) {} +func (t *tagTracer) SendRPC(rpc *RPC, p peer.ID) {} +func (t *tagTracer) DropRPC(rpc *RPC, p peer.ID) {} +func (t *tagTracer) UndeliverableMessage(msg *Message) {} diff --git a/go-libp2p-blossomsub/tag_tracer_test.go b/go-libp2p-blossomsub/tag_tracer_test.go new file mode 100644 index 0000000..8c3561d --- /dev/null +++ b/go-libp2p-blossomsub/tag_tracer_test.go @@ -0,0 +1,260 @@ +package blossomsub + +import ( + "fmt" + "testing" + "time" + + "github.com/benbjohnson/clock" + connmgri "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +func TestTagTracerMeshTags(t *testing.T) { + // test that tags are applied when the tagTracer sees graft and prune events + + cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute)) + if err != nil { + t.Fatal(err) + } + tt := newTagTracer(cmgr) + + p := peer.ID("a-peer") + bitmask := []byte{0xff, 0x00, 0xff, 0x00} + + tt.Join(bitmask) + tt.Graft(p, bitmask) + + tag := "pubsub:" + string(bitmask) + if !cmgr.IsProtected(p, tag) { + t.Fatal("expected the mesh peer to be protected") + } + + tt.Prune(p, bitmask) + if cmgr.IsProtected(p, tag) { + t.Fatal("expected the former mesh peer to be unprotected") + } +} + +func TestTagTracerDirectPeerTags(t *testing.T) { + // test that we add a tag to direct peers + cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute)) + if err != nil { + t.Fatal(err) + } + tt := newTagTracer(cmgr) + + p1 := peer.ID("1") + p2 := peer.ID("2") + p3 := peer.ID("3") + + // in the real world, tagTracer.direct is set in the WithDirectPeers option function + tt.direct = make(map[peer.ID]struct{}) + tt.direct[p1] = struct{}{} + + tt.AddPeer(p1, BlossomSubID_v11) + tt.AddPeer(p2, BlossomSubID_v11) + tt.AddPeer(p3, BlossomSubID_v11) + + tag := "pubsub:" + if !cmgr.IsProtected(p1, tag) { + t.Fatal("expected direct peer to be protected") + } + + for _, p := range []peer.ID{p2, p3} { + if cmgr.IsProtected(p, tag) { + t.Fatal("expected non-direct peer to be unprotected") + } + } +} + +func TestTagTracerDeliveryTags(t *testing.T) { + t.Skip("flaky test temporarily disabled; TODO: fixme") + // test decaying delivery tags + + // use fake time to test the tag decay + clk := clock.NewMock() + decayCfg := &connmgr.DecayerCfg{ + Clock: clk, + Resolution: time.Minute, + } + cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute), connmgr.DecayerConfig(decayCfg)) + if err != nil { + t.Fatal(err) + } + + tt := newTagTracer(cmgr) + + bitmask1 := []byte{0xff, 0x00, 0xff, 0x00} + bitmask2 := []byte{0x00, 0xff, 0x00, 0xff} + + p := peer.ID("a-peer") + + tt.Join(bitmask1) + tt.Join(bitmask2) + + for i := 0; i < 20; i++ { + // deliver only 5 messages to bitmask 2 (less than the cap) + bitmask := bitmask1 + if i < 5 { + bitmask = bitmask2 + } + msg := &Message{ + ReceivedFrom: p, + Message: &pb.Message{ + From: []byte(p), + Data: []byte("hello"), + Bitmask: bitmask, + }, + } + tt.DeliverMessage(msg) + } + + // we have to tick the fake clock once to apply the bump + clk.Add(time.Minute) + + tag1 := "pubsub-deliveries:" 
+ string(bitmask1) + tag2 := "pubsub-deliveries:" + string(bitmask2) + + // the tag value for bitmask-1 should be capped at BlossomSubConnTagMessageDeliveryCap (default 15) + val := getTagValue(cmgr, p, tag1) + expected := BlossomSubConnTagMessageDeliveryCap + if val != expected { + t.Errorf("expected delivery tag to be capped at %d, was %d", expected, val) + } + + // the value for bitmask-2 should equal the number of messages delivered (5), since it was less than the cap + val = getTagValue(cmgr, p, tag2) + expected = 5 + if val != expected { + t.Errorf("expected delivery tag value = %d, got %d", expected, val) + } + + // if we jump forward a few minutes, we should see the tags decrease by 1 / 10 minutes + clk.Add(50 * time.Minute) + time.Sleep(2 * time.Second) + + val = getTagValue(cmgr, p, tag1) + expected = BlossomSubConnTagMessageDeliveryCap - 5 + // the actual expected value should be BlossomSubConnTagMessageDeliveryCap - 5, + // however due to timing issues on Travis, we consistently get BlossomSubConnTagMessageDeliveryCap - 4 + // there instead. So our assertion checks for the expected value +/- 1 + if val > expected+1 || val < expected-1 { + t.Errorf("expected delivery tag value = %d ± 1, got %d", expected, val) + } + + // the tag for bitmask-2 should have reset to zero by now, but again we add one for Travis since it's slow... + val = getTagValue(cmgr, p, tag2) + expected = 0 + if val > expected+1 || val < expected-1 { + t.Errorf("expected delivery tag value = %d ± 1, got %d", expected, val) + } + + // leaving the bitmask should remove the tag + if !tagExists(cmgr, p, tag1) { + t.Errorf("expected delivery tag %s to be applied to peer %s", tag1, p) + } + tt.Leave(bitmask1) + // advance the real clock a bit to allow the connmgr to remove the tag async + time.Sleep(time.Second) + if tagExists(cmgr, p, tag1) { + t.Errorf("expected delivery tag %s to be removed after leaving the bitmask", tag1) + } +} + +func TestTagTracerDeliveryTagsNearFirst(t *testing.T) { + // use fake time to test the tag decay + clk := clock.NewMock() + decayCfg := &connmgr.DecayerCfg{ + Clock: clk, + Resolution: time.Minute, + } + cmgr, err := connmgr.NewConnManager(5, 10, connmgr.WithGracePeriod(time.Minute), connmgr.DecayerConfig(decayCfg)) + if err != nil { + t.Fatal(err) + } + + tt := newTagTracer(cmgr) + + bitmask := []byte{0x7e, 0x57} + + p := peer.ID("a-peer") + p2 := peer.ID("another-peer") + p3 := peer.ID("slow-peer") + + tt.Join(bitmask) + + for i := 0; i < BlossomSubConnTagMessageDeliveryCap+5; i++ { + msg := &Message{ + ReceivedFrom: p, + Message: &pb.Message{ + From: []byte(p), + Data: []byte(fmt.Sprintf("msg-%d", i)), + Bitmask: bitmask, + Seqno: []byte(fmt.Sprintf("%d", i)), + }, + } + + // a duplicate of the message, received from p2 + dup := &Message{ + ReceivedFrom: p2, + Message: msg.Message, + } + + // the message starts validating as soon as we receive it from p + tt.ValidateMessage(msg) + // p2 should get near-first credit for the duplicate message that arrives before + // validation is complete + tt.DuplicateMessage(dup) + // DeliverMessage gets called when validation is complete + tt.DeliverMessage(msg) + + // p3 delivers a duplicate after validation completes & gets no credit + dup.ReceivedFrom = p3 + tt.DuplicateMessage(dup) + } + + clk.Add(time.Minute) + + // both p and p2 should get delivery tags equal to the cap + tag := "pubsub-deliveries:" + string(bitmask) + val := getTagValue(cmgr, p, tag) + if val != BlossomSubConnTagMessageDeliveryCap { + t.Errorf("expected tag %s to have val 
%d, was %d", tag, BlossomSubConnTagMessageDeliveryCap, val) + } + val = getTagValue(cmgr, p2, tag) + if val != BlossomSubConnTagMessageDeliveryCap { + t.Errorf("expected tag %s for near-first peer to have val %d, was %d", tag, BlossomSubConnTagMessageDeliveryCap, val) + } + + // p3 should have no delivery tag credit + val = getTagValue(cmgr, p3, tag) + if val != 0 { + t.Errorf("expected tag %s for slow peer to have val %d, was %d", tag, 0, val) + } +} + +func getTagValue(mgr connmgri.ConnManager, p peer.ID, tag string) int { + info := mgr.GetTagInfo(p) + if info == nil { + return 0 + } + val, ok := info.Tags[tag] + if !ok { + return 0 + } + return val +} + +//lint:ignore U1000 used only by skipped tests at present +func tagExists(mgr connmgri.ConnManager, p peer.ID, tag string) bool { + info := mgr.GetTagInfo(p) + if info == nil { + return false + } + _, exists := info.Tags[tag] + return exists +} diff --git a/go-libp2p-blossomsub/timecache/first_seen_cache.go b/go-libp2p-blossomsub/timecache/first_seen_cache.go new file mode 100644 index 0000000..457391c --- /dev/null +++ b/go-libp2p-blossomsub/timecache/first_seen_cache.go @@ -0,0 +1,56 @@ +package timecache + +import ( + "context" + "sync" + "time" +) + +// FirstSeenCache is a time cache that only marks the expiry of a message when first added. +type FirstSeenCache struct { + lk sync.RWMutex + m map[string]time.Time + ttl time.Duration + + done func() +} + +var _ TimeCache = (*FirstSeenCache)(nil) + +func newFirstSeenCache(ttl time.Duration) *FirstSeenCache { + tc := &FirstSeenCache{ + m: make(map[string]time.Time), + ttl: ttl, + } + + ctx, done := context.WithCancel(context.Background()) + tc.done = done + go background(ctx, &tc.lk, tc.m) + + return tc +} + +func (tc *FirstSeenCache) Done() { + tc.done() +} + +func (tc *FirstSeenCache) Has(s string) bool { + tc.lk.RLock() + defer tc.lk.RUnlock() + + _, ok := tc.m[s] + return ok +} + +func (tc *FirstSeenCache) Add(s string) bool { + tc.lk.Lock() + defer tc.lk.Unlock() + + _, ok := tc.m[s] + if ok { + return false + } + + tc.m[s] = time.Now().Add(tc.ttl) + return true +} diff --git a/go-libp2p-blossomsub/timecache/first_seen_cache_test.go b/go-libp2p-blossomsub/timecache/first_seen_cache_test.go new file mode 100644 index 0000000..59d2a59 --- /dev/null +++ b/go-libp2p-blossomsub/timecache/first_seen_cache_test.go @@ -0,0 +1,46 @@ +package timecache + +import ( + "fmt" + "testing" + "time" +) + +func TestFirstSeenCacheFound(t *testing.T) { + tc := newFirstSeenCache(time.Minute) + + tc.Add("test") + + if !tc.Has("test") { + t.Fatal("should have this key") + } +} + +func TestFirstSeenCacheExpire(t *testing.T) { + backgroundSweepInterval = time.Second + + tc := newFirstSeenCache(time.Second) + for i := 0; i < 10; i++ { + tc.Add(fmt.Sprint(i)) + time.Sleep(time.Millisecond * 100) + } + + time.Sleep(2 * time.Second) + for i := 0; i < 10; i++ { + if tc.Has(fmt.Sprint(i)) { + t.Fatalf("should have dropped this key: %s from the cache already", fmt.Sprint(i)) + } + } +} + +func TestFirstSeenCacheNotFoundAfterExpire(t *testing.T) { + backgroundSweepInterval = time.Second + + tc := newFirstSeenCache(time.Second) + tc.Add(fmt.Sprint(0)) + + time.Sleep(2 * time.Second) + if tc.Has(fmt.Sprint(0)) { + t.Fatal("should have dropped this from the cache already") + } +} diff --git a/go-libp2p-blossomsub/timecache/last_seen_cache.go b/go-libp2p-blossomsub/timecache/last_seen_cache.go new file mode 100644 index 0000000..128c299 --- /dev/null +++ b/go-libp2p-blossomsub/timecache/last_seen_cache.go @@ -0,0 
+1,58 @@ +package timecache + +import ( + "context" + "sync" + "time" +) + +// LastSeenCache is a time cache that extends the expiry of a seen message when added +// or checked for presence with Has.. +type LastSeenCache struct { + lk sync.Mutex + m map[string]time.Time + ttl time.Duration + + done func() +} + +var _ TimeCache = (*LastSeenCache)(nil) + +func newLastSeenCache(ttl time.Duration) *LastSeenCache { + tc := &LastSeenCache{ + m: make(map[string]time.Time), + ttl: ttl, + } + + ctx, done := context.WithCancel(context.Background()) + tc.done = done + go background(ctx, &tc.lk, tc.m) + + return tc +} + +func (tc *LastSeenCache) Done() { + tc.done() +} + +func (tc *LastSeenCache) Add(s string) bool { + tc.lk.Lock() + defer tc.lk.Unlock() + + _, ok := tc.m[s] + tc.m[s] = time.Now().Add(tc.ttl) + + return !ok +} + +func (tc *LastSeenCache) Has(s string) bool { + tc.lk.Lock() + defer tc.lk.Unlock() + + _, ok := tc.m[s] + if ok { + tc.m[s] = time.Now().Add(tc.ttl) + } + + return ok +} diff --git a/go-libp2p-blossomsub/timecache/last_seen_cache_test.go b/go-libp2p-blossomsub/timecache/last_seen_cache_test.go new file mode 100644 index 0000000..4522026 --- /dev/null +++ b/go-libp2p-blossomsub/timecache/last_seen_cache_test.go @@ -0,0 +1,92 @@ +package timecache + +import ( + "fmt" + "testing" + "time" +) + +func TestLastSeenCacheFound(t *testing.T) { + tc := newLastSeenCache(time.Minute) + + tc.Add("test") + + if !tc.Has("test") { + t.Fatal("should have this key") + } +} + +func TestLastSeenCacheExpire(t *testing.T) { + backgroundSweepInterval = time.Second + tc := newLastSeenCache(time.Second) + for i := 0; i < 11; i++ { + tc.Add(fmt.Sprint(i)) + time.Sleep(time.Millisecond * 100) + } + + time.Sleep(2 * time.Second) + for i := 0; i < 11; i++ { + if tc.Has(fmt.Sprint(i)) { + t.Fatalf("should have dropped this key: %s from the cache already", fmt.Sprint(i)) + } + } +} + +func TestLastSeenCacheSlideForward(t *testing.T) { + t.Skip("timing is too fine grained to run in CI") + + tc := newLastSeenCache(time.Second) + i := 0 + + // T0ms: Add 8 entries with a 100ms sleep after each + for i < 8 { + tc.Add(fmt.Sprint(i)) + time.Sleep(time.Millisecond * 100) + i++ + } + + // T800ms: Lookup the first entry - this should slide the entry forward so that its expiration is a full second + // later. + if !tc.Has(fmt.Sprint(0)) { + t.Fatal("should have this key") + } + + // T800ms: Wait till after the first and second entries would have normally expired (had we not looked the first + // entry up). + time.Sleep(time.Millisecond * 400) + + // T1200ms: The first entry should still be present in the cache - this will also slide the entry forward. 
+ if !tc.Has(fmt.Sprint(0)) { + t.Fatal("should still have this key") + } + + // T1200ms: The second entry should have expired + if tc.Has(fmt.Sprint(1)) { + t.Fatal("should have dropped this from the cache already") + } + + // T1200ms: Sleep till the first entry actually expires + time.Sleep(time.Millisecond * 1100) + + // T2300ms: Now the first entry should have expired + if tc.Has(fmt.Sprint(0)) { + t.Fatal("should have dropped this from the cache already") + } + + // And it should not have been added back + if tc.Has(fmt.Sprint(0)) { + t.Fatal("should have dropped this from the cache already") + } +} + +func TestLastSeenCacheNotFoundAfterExpire(t *testing.T) { + backgroundSweepInterval = time.Second + + tc := newLastSeenCache(time.Second) + tc.Add(fmt.Sprint(0)) + + time.Sleep(2 * time.Second) + if tc.Has(fmt.Sprint(0)) { + t.Fatal("should have dropped this from the cache already") + } +} diff --git a/go-libp2p-blossomsub/timecache/time_cache.go b/go-libp2p-blossomsub/timecache/time_cache.go new file mode 100644 index 0000000..a018e15 --- /dev/null +++ b/go-libp2p-blossomsub/timecache/time_cache.go @@ -0,0 +1,52 @@ +package timecache + +import ( + "time" + + logger "github.com/ipfs/go-log/v2" +) + +var log = logger.Logger("blossomsub/timecache") + +// Stategy is the TimeCache expiration strategy to use. +type Strategy uint8 + +const ( + // Strategy_FirstSeen expires an entry from the time it was added. + Strategy_FirstSeen Strategy = iota + // Stategy_LastSeen expires an entry from the last time it was touched by an Add or Has. + Strategy_LastSeen +) + +// TimeCache is a cahe of recently seen messages (by id). +type TimeCache interface { + // Add adds an id into the cache, if it is not already there. + // Returns true if the id was newly added to the cache. + // Depending on the implementation strategy, it may or may not update the expiry of + // an existing entry. + Add(string) bool + // Has checks the cache for the presence of an id. + // Depending on the implementation strategy, it may or may not update the expiry of + // an existing entry. + Has(string) bool + // Done signals that the user is done with this cache, which it may stop background threads + // and relinquish resources. 
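Editor's note: a quick usage sketch of the TimeCache interface and its strategy constructors (NewTimeCacheWithStrategy and the Strategy constants are defined in time_cache.go here); the import path matches the module used elsewhere in this patch, while the package clause, function name, and message ids are purely illustrative:

package timecacheexample

// Sketch only: exercises the TimeCache API from timecache/time_cache.go.
import (
	"time"

	"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/timecache"
)

func useSeenCache() {
	// LastSeen strategy: Add and Has both slide the expiry forward by the TTL.
	tc := timecache.NewTimeCacheWithStrategy(timecache.Strategy_LastSeen, 2*time.Minute)
	defer tc.Done() // stops the background sweep goroutine

	fresh := tc.Add("msg-id-1") // true: id not seen before
	_ = tc.Add("msg-id-1")      // false: already present; expiry extended
	seen := tc.Has("msg-id-1")  // true; under LastSeen this also extends the expiry
	_, _ = fresh, seen
}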
+ Done() +} + +// NewTimeCache defaults to the original ("first seen") cache implementation +func NewTimeCache(ttl time.Duration) TimeCache { + return NewTimeCacheWithStrategy(Strategy_FirstSeen, ttl) +} + +func NewTimeCacheWithStrategy(strategy Strategy, ttl time.Duration) TimeCache { + switch strategy { + case Strategy_FirstSeen: + return newFirstSeenCache(ttl) + case Strategy_LastSeen: + return newLastSeenCache(ttl) + default: + // Default to the original time cache implementation + return newFirstSeenCache(ttl) + } +} diff --git a/go-libp2p-blossomsub/timecache/util.go b/go-libp2p-blossomsub/timecache/util.go new file mode 100644 index 0000000..eaf92b3 --- /dev/null +++ b/go-libp2p-blossomsub/timecache/util.go @@ -0,0 +1,35 @@ +package timecache + +import ( + "context" + "sync" + "time" +) + +var backgroundSweepInterval = time.Minute + +func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) { + ticker := time.NewTicker(backgroundSweepInterval) + defer ticker.Stop() + + for { + select { + case now := <-ticker.C: + sweep(lk, m, now) + + case <-ctx.Done(): + return + } + } +} + +func sweep(lk sync.Locker, m map[string]time.Time, now time.Time) { + lk.Lock() + defer lk.Unlock() + + for k, expiry := range m { + if expiry.Before(now) { + delete(m, k) + } + } +} diff --git a/go-libp2p-blossomsub/trace.go b/go-libp2p-blossomsub/trace.go new file mode 100644 index 0000000..b6004d4 --- /dev/null +++ b/go-libp2p-blossomsub/trace.go @@ -0,0 +1,530 @@ +package blossomsub + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +// EventTracer is a generic event tracer interface. +// This is a high level tracing interface which delivers tracing events, as defined by the protobuf +// schema in pb/trace.proto. +type EventTracer interface { + Trace(evt *pb.TraceEvent) +} + +// RawTracer is a low level tracing interface that allows an application to trace the internal +// operation of the pubsub subsystem. +// +// Note that the tracers are invoked synchronously, which means that application tracers must +// take care to not block or modify arguments. +// +// Warning: this interface is not fixed, we may be adding new methods as necessitated by the system +// in the future. +type RawTracer interface { + // AddPeer is invoked when a new peer is added. + AddPeer(p peer.ID, proto protocol.ID) + // RemovePeer is invoked when a peer is removed. + RemovePeer(p peer.ID) + // Join is invoked when a new bitmask is joined + Join(bitmask []byte) + // Leave is invoked when a bitmask is abandoned + Leave(bitmask []byte) + // Graft is invoked when a new peer is grafted on the mesh (BlossomSub) + Graft(p peer.ID, bitmask []byte) + // Prune is invoked when a peer is pruned from the message (BlossomSub) + Prune(p peer.ID, bitmask []byte) + // ValidateMessage is invoked when a message first enters the validation pipeline. + ValidateMessage(msg *Message) + // DeliverMessage is invoked when a message is delivered + DeliverMessage(msg *Message) + // RejectMessage is invoked when a message is Rejected or Ignored. + // The reason argument can be one of the named strings Reject*. + RejectMessage(msg *Message, reason string) + // DuplicateMessage is invoked when a duplicate message is dropped. + DuplicateMessage(msg *Message) + // ThrottlePeer is invoked when a peer is throttled by the peer gater. 
+ ThrottlePeer(p peer.ID) + // RecvRPC is invoked when an incoming RPC is received. + RecvRPC(rpc *RPC) + // SendRPC is invoked when a RPC is sent. + SendRPC(rpc *RPC, p peer.ID) + // DropRPC is invoked when an outbound RPC is dropped, typically because of a queue full. + DropRPC(rpc *RPC, p peer.ID) + // UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and + // the pressure release mechanism trigger, dropping messages. + UndeliverableMessage(msg *Message) +} + +// pubsub tracer details +type pubsubTracer struct { + tracer EventTracer + raw []RawTracer + pid peer.ID + idGen *msgIDGenerator +} + +func (t *pubsubTracer) PublishMessage(msg *Message) { + if t == nil { + return + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_PUBLISH_MESSAGE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + PublishMessage: &pb.TraceEvent_PublishMessage{ + MessageID: []byte(t.idGen.ID(msg)), + Bitmask: msg.Message.Bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) ValidateMessage(msg *Message) { + if t == nil { + return + } + + if msg.ReceivedFrom != t.pid { + for _, tr := range t.raw { + tr.ValidateMessage(msg) + } + } +} + +func (t *pubsubTracer) RejectMessage(msg *Message, reason string) { + if t == nil { + return + } + + if msg.ReceivedFrom != t.pid { + for _, tr := range t.raw { + tr.RejectMessage(msg, reason) + } + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_REJECT_MESSAGE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + RejectMessage: &pb.TraceEvent_RejectMessage{ + MessageID: []byte(t.idGen.ID(msg)), + ReceivedFrom: []byte(msg.ReceivedFrom), + Reason: &reason, + Bitmask: msg.Bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) DuplicateMessage(msg *Message) { + if t == nil { + return + } + + if msg.ReceivedFrom != t.pid { + for _, tr := range t.raw { + tr.DuplicateMessage(msg) + } + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_DUPLICATE_MESSAGE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + DuplicateMessage: &pb.TraceEvent_DuplicateMessage{ + MessageID: []byte(t.idGen.ID(msg)), + ReceivedFrom: []byte(msg.ReceivedFrom), + Bitmask: msg.Bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) DeliverMessage(msg *Message) { + if t == nil { + return + } + + if msg.ReceivedFrom != t.pid { + for _, tr := range t.raw { + tr.DeliverMessage(msg) + } + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_DELIVER_MESSAGE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + DeliverMessage: &pb.TraceEvent_DeliverMessage{ + MessageID: []byte(t.idGen.ID(msg)), + Bitmask: msg.Bitmask, + ReceivedFrom: []byte(msg.ReceivedFrom), + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.AddPeer(p, proto) + } + + if t.tracer == nil { + return + } + + protoStr := string(proto) + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_ADD_PEER.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + AddPeer: &pb.TraceEvent_AddPeer{ + PeerID: []byte(p), + Proto: &protoStr, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) RemovePeer(p peer.ID) { + if t == nil { + return + } + + for _, tr := 
range t.raw { + tr.RemovePeer(p) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_REMOVE_PEER.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + RemovePeer: &pb.TraceEvent_RemovePeer{ + PeerID: []byte(p), + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) RecvRPC(rpc *RPC) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.RecvRPC(rpc) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_RECV_RPC.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + RecvRPC: &pb.TraceEvent_RecvRPC{ + ReceivedFrom: []byte(rpc.from), + Meta: t.traceRPCMeta(rpc), + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) SendRPC(rpc *RPC, p peer.ID) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.SendRPC(rpc, p) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_SEND_RPC.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + SendRPC: &pb.TraceEvent_SendRPC{ + SendTo: []byte(p), + Meta: t.traceRPCMeta(rpc), + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) DropRPC(rpc *RPC, p peer.ID) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.DropRPC(rpc, p) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_DROP_RPC.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + DropRPC: &pb.TraceEvent_DropRPC{ + SendTo: []byte(p), + Meta: t.traceRPCMeta(rpc), + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) UndeliverableMessage(msg *Message) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.UndeliverableMessage(msg) + } +} + +func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta { + rpcMeta := new(pb.TraceEvent_RPCMeta) + + var msgs []*pb.TraceEvent_MessageMeta + for _, m := range rpc.Publish { + msgs = append(msgs, &pb.TraceEvent_MessageMeta{ + MessageID: []byte(t.idGen.RawID(m)), + Bitmask: m.Bitmask, + }) + } + rpcMeta.Messages = msgs + + var subs []*pb.TraceEvent_SubMeta + for _, sub := range rpc.Subscriptions { + subs = append(subs, &pb.TraceEvent_SubMeta{ + Subscribe: &sub.Subscribe, + Bitmask: sub.Bitmask, + }) + } + rpcMeta.Subscription = subs + + if rpc.Control != nil { + var ihave []*pb.TraceEvent_ControlIHaveMeta + for _, ctl := range rpc.Control.Ihave { + var mids [][]byte + for _, mid := range ctl.MessageIDs { + mids = append(mids, []byte(mid)) + } + ihave = append(ihave, &pb.TraceEvent_ControlIHaveMeta{ + Bitmask: ctl.Bitmask, + MessageIDs: mids, + }) + } + + var iwant []*pb.TraceEvent_ControlIWantMeta + for _, ctl := range rpc.Control.Iwant { + var mids [][]byte + for _, mid := range ctl.MessageIDs { + mids = append(mids, []byte(mid)) + } + iwant = append(iwant, &pb.TraceEvent_ControlIWantMeta{ + MessageIDs: mids, + }) + } + + var graft []*pb.TraceEvent_ControlGraftMeta + for _, ctl := range rpc.Control.Graft { + graft = append(graft, &pb.TraceEvent_ControlGraftMeta{ + Bitmask: ctl.Bitmask, + }) + } + + var prune []*pb.TraceEvent_ControlPruneMeta + for _, ctl := range rpc.Control.Prune { + peers := make([][]byte, 0, len(ctl.Peers)) + for _, pi := range ctl.Peers { + peers = append(peers, pi.PeerID) + } + prune = append(prune, &pb.TraceEvent_ControlPruneMeta{ + Bitmask: ctl.Bitmask, + Peers: peers, + }) + } + + rpcMeta.Control = &pb.TraceEvent_ControlMeta{ + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: 
prune, + } + } + + return rpcMeta +} + +func (t *pubsubTracer) Join(bitmask []byte) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.Join(bitmask) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_JOIN.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + Join: &pb.TraceEvent_Join{ + Bitmask: bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) Leave(bitmask []byte) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.Leave(bitmask) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_LEAVE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + Leave: &pb.TraceEvent_Leave{ + Bitmask: bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) Graft(p peer.ID, bitmask []byte) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.Graft(p, bitmask) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_GRAFT.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + Graft: &pb.TraceEvent_Graft{ + PeerID: []byte(p), + Bitmask: bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) Prune(p peer.ID, bitmask []byte) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.Prune(p, bitmask) + } + + if t.tracer == nil { + return + } + + now := time.Now().UnixNano() + evt := &pb.TraceEvent{ + Type: pb.TraceEvent_PRUNE.Enum(), + PeerID: []byte(t.pid), + Timestamp: &now, + Prune: &pb.TraceEvent_Prune{ + PeerID: []byte(p), + Bitmask: bitmask, + }, + } + + t.tracer.Trace(evt) +} + +func (t *pubsubTracer) ThrottlePeer(p peer.ID) { + if t == nil { + return + } + + for _, tr := range t.raw { + tr.ThrottlePeer(p) + } +} diff --git a/go-libp2p-blossomsub/trace_test.go b/go-libp2p-blossomsub/trace_test.go new file mode 100644 index 0000000..e12aa6d --- /dev/null +++ b/go-libp2p-blossomsub/trace_test.go @@ -0,0 +1,323 @@ +package blossomsub + +import ( + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "sync" + "testing" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + + bhost "github.com/libp2p/go-libp2p/p2p/host/blank" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + + "github.com/libp2p/go-msgio/protoio" +) + +func testWithTracer(t *testing.T, tracer EventTracer) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getBlossomSubs(ctx, hosts, + WithEventTracer(tracer), + // to bootstrap from star topology + WithPeerExchange(true), + // to exercise the score paths in the tracer + WithPeerScore( + &PeerScoreParams{ + BitmaskScoreCap: 100, + AppSpecificScore: func(peer.ID) float64 { return 0 }, + DecayInterval: time.Second, + DecayToZero: 0.01, + }, + &PeerScoreThresholds{ + GossipThreshold: -1, + PublishThreshold: -2, + GraylistThreshold: -3, + OpportunisticGraftThreshold: 1, + })) + + // add a validator that rejects some messages to exercise those code paths in the tracer + for _, ps := range psubs { + ps.RegisterBitmaskValidator([]byte{0x7e, 57}, func(ctx context.Context, p peer.ID, msg *Message) bool { + if string(msg.Data) == "invalid!" 
{ + return false + } else { + return true + } + }) + } + + // this is the star topology test so that we make sure we get some PRUNEs and cover that code path + + // add all peer addresses to the peerstores + // this is necessary because we can't have signed address records witout identify + // pushing them + for i := range hosts { + for j := range hosts { + if i == j { + continue + } + hosts[i].Peerstore().AddAddrs(hosts[j].ID(), hosts[j].Addrs(), peerstore.PermanentAddrTTL) + } + } + + // build the star + for i := 1; i < 20; i++ { + connect(t, hosts[0], hosts[i]) + } + + // build the mesh + var subs []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0x7e, 0x57}) + if err != nil { + t.Fatal(err) + } + go func(sub *Subscription) { + for { + _, err := sub.Next(ctx) + if err != nil { + return + } + } + }(sub) + subs = append(subs, sub) + } + + // wait for the mesh to build + time.Sleep(5 * time.Second) + + // publish some messages + for i := 0; i < 20; i++ { + if i%7 == 0 { + psubs[i].Publish([]byte{0x7e, 0x57}, []byte("invalid!")) + } else { + msg := []byte(fmt.Sprintf("message %d", i)) + psubs[i].Publish([]byte{0x7e, 0x57}, msg) + } + } + + // wait a bit for propagation and call it day + time.Sleep(time.Second) + + // close all subscriptions to get some leave events + for _, sub := range subs { + sub.Cancel() + } + + // wait for the leave to take effect + time.Sleep(time.Second) +} + +type traceStats struct { + publish, reject, duplicate, deliver, add, remove, recv, send, drop, join, leave, graft, prune int +} + +func (t *traceStats) process(evt *pb.TraceEvent) { + // fmt.Printf("process event %s\n", evt.GetType()) + switch evt.GetType() { + case pb.TraceEvent_PUBLISH_MESSAGE: + t.publish++ + case pb.TraceEvent_REJECT_MESSAGE: + t.reject++ + case pb.TraceEvent_DUPLICATE_MESSAGE: + t.duplicate++ + case pb.TraceEvent_DELIVER_MESSAGE: + t.deliver++ + case pb.TraceEvent_ADD_PEER: + t.add++ + case pb.TraceEvent_REMOVE_PEER: + t.remove++ + case pb.TraceEvent_RECV_RPC: + t.recv++ + case pb.TraceEvent_SEND_RPC: + t.send++ + case pb.TraceEvent_DROP_RPC: + t.drop++ + case pb.TraceEvent_JOIN: + t.join++ + case pb.TraceEvent_LEAVE: + t.leave++ + case pb.TraceEvent_GRAFT: + t.graft++ + case pb.TraceEvent_PRUNE: + t.prune++ + } +} + +func (ts *traceStats) check(t *testing.T) { + if ts.publish == 0 { + t.Fatal("expected non-zero count") + } + if ts.duplicate == 0 { + t.Fatal("expected non-zero count") + } + if ts.deliver == 0 { + t.Fatal("expected non-zero count") + } + if ts.reject == 0 { + t.Fatal("expected non-zero count") + } + if ts.add == 0 { + t.Fatal("expected non-zero count") + } + if ts.recv == 0 { + t.Fatal("expected non-zero count") + } + if ts.send == 0 { + t.Fatal("expected non-zero count") + } + if ts.join == 0 { + t.Fatal("expected non-zero count") + } + if ts.leave == 0 { + t.Fatal("expected non-zero count") + } + if ts.graft == 0 { + t.Fatal("expected non-zero count") + } + if ts.prune == 0 { + t.Fatal("expected non-zero count") + } +} + +func TestJSONTracer(t *testing.T) { + tracer, err := NewJSONTracer("/tmp/trace.out.json") + if err != nil { + t.Fatal(err) + } + + testWithTracer(t, tracer) + time.Sleep(time.Second) + tracer.Close() + + var stats traceStats + var evt pb.TraceEvent + + f, err := os.Open("/tmp/trace.out.json") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + dec := json.NewDecoder(f) + for { + evt.Reset() + err := dec.Decode(&evt) + if err != nil { + break + } + + stats.process(&evt) + } + + stats.check(t) +} + +func TestPBTracer(t 
*testing.T) { + tracer, err := NewPBTracer("/tmp/trace.out.pb") + if err != nil { + t.Fatal(err) + } + + testWithTracer(t, tracer) + time.Sleep(time.Second) + tracer.Close() + + var stats traceStats + var evt pb.TraceEvent + + f, err := os.Open("/tmp/trace.out.pb") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + r := protoio.NewDelimitedReader(f, 1<<20) + for { + evt.Reset() + err := r.ReadMsg(&evt) + if err != nil { + break + } + + stats.process(&evt) + } + + stats.check(t) +} + +type mockRemoteTracer struct { + mx sync.Mutex + ts traceStats +} + +func (mrt *mockRemoteTracer) handleStream(s network.Stream) { + defer s.Close() + + gzr, err := gzip.NewReader(s) + if err != nil { + panic(err) + } + + r := protoio.NewDelimitedReader(gzr, 1<<24) + + var batch pb.TraceEventBatch + for { + batch.Reset() + err := r.ReadMsg(&batch) + if err != nil { + if err != io.EOF { + s.Reset() + } + return + } + + mrt.mx.Lock() + for _, evt := range batch.GetBatch() { + mrt.ts.process(evt) + } + mrt.mx.Unlock() + } +} + +func (mrt *mockRemoteTracer) check(t *testing.T) { + mrt.mx.Lock() + defer mrt.mx.Unlock() + mrt.ts.check(t) +} + +func TestRemoteTracer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) + defer h1.Close() + defer h2.Close() + + mrt := &mockRemoteTracer{} + h1.SetStreamHandler(RemoteTracerProtoID, mrt.handleStream) + + tracer, err := NewRemoteTracer(ctx, h2, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()}) + if err != nil { + t.Fatal(err) + } + + testWithTracer(t, tracer) + time.Sleep(time.Second) + tracer.Close() + + mrt.check(t) +} diff --git a/go-libp2p-blossomsub/tracer.go b/go-libp2p-blossomsub/tracer.go new file mode 100644 index 0000000..ae03532 --- /dev/null +++ b/go-libp2p-blossomsub/tracer.go @@ -0,0 +1,310 @@ +package blossomsub + +import ( + "compress/gzip" + "context" + "encoding/json" + "io" + "os" + "sync" + "time" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/libp2p/go-msgio/protoio" +) + +var TraceBufferSize = 1 << 16 // 64K ought to be enough for everyone; famous last words. 
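Editor's note: trace_test.go above attaches an EventTracer at construction time. A hedged wiring sketch follows; NewJSONTracer and WithEventTracer appear in this patch, while the NewBlossomSub constructor name and its return type are assumptions for illustration (the caller is expected to Close() the tracer on shutdown):

// Sketch: construct a router that persists every trace event as ndjson.
// Assumption: NewBlossomSub is the router constructor from blossomsub.go.
func newTracedRouter(ctx context.Context, h host.Host) (*PubSub, error) {
	tracer, err := NewJSONTracer("/tmp/blossomsub-trace.json")
	if err != nil {
		return nil, err
	}
	// Note: tracer.Close() should be called when the router shuts down.
	return NewBlossomSub(ctx, h, WithEventTracer(tracer))
}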
+var MinTraceBatchSize = 16 + +// rejection reasons +const ( + RejectBlacklstedPeer = "blacklisted peer" + RejectBlacklistedSource = "blacklisted source" + RejectMissingSignature = "missing signature" + RejectUnexpectedSignature = "unexpected signature" + RejectUnexpectedAuthInfo = "unexpected auth info" + RejectInvalidSignature = "invalid signature" + RejectValidationQueueFull = "validation queue full" + RejectValidationThrottled = "validation throttled" + RejectValidationFailed = "validation failed" + RejectValidationIgnored = "validation ignored" + RejectSelfOrigin = "self originated message" +) + +type basicTracer struct { + ch chan struct{} + mx sync.Mutex + buf []*pb.TraceEvent + lossy bool + closed bool +} + +func (t *basicTracer) Trace(evt *pb.TraceEvent) { + t.mx.Lock() + defer t.mx.Unlock() + + if t.closed { + return + } + + if t.lossy && len(t.buf) > TraceBufferSize { + log.Debug("trace buffer overflow; dropping trace event") + } else { + t.buf = append(t.buf, evt) + } + + select { + case t.ch <- struct{}{}: + default: + } +} + +func (t *basicTracer) Close() { + t.mx.Lock() + defer t.mx.Unlock() + if !t.closed { + t.closed = true + close(t.ch) + } +} + +// JSONTracer is a tracer that writes events to a file, encoded in ndjson. +type JSONTracer struct { + basicTracer + w io.WriteCloser +} + +func NewStdoutJSONTracer() (*JSONTracer, error) { + tr := &JSONTracer{w: os.Stdout, basicTracer: basicTracer{ch: make(chan struct{}, 1)}} + go tr.doWrite() + + return tr, nil +} + +// NewJsonTracer creates a new JSONTracer writing traces to file. +func NewJSONTracer(file string) (*JSONTracer, error) { + return OpenJSONTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) +} + +// OpenJSONTracer creates a new JSONTracer, with explicit control of OpenFile flags and permissions. +func OpenJSONTracer(file string, flags int, perm os.FileMode) (*JSONTracer, error) { + f, err := os.OpenFile(file, flags, perm) + if err != nil { + return nil, err + } + + tr := &JSONTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}} + go tr.doWrite() + + return tr, nil +} + +func (t *JSONTracer) doWrite() { + var buf []*pb.TraceEvent + enc := json.NewEncoder(t.w) + for { + _, ok := <-t.ch + + t.mx.Lock() + tmp := t.buf + t.buf = buf[:0] + buf = tmp + t.mx.Unlock() + + for i, evt := range buf { + err := enc.Encode(evt) + if err != nil { + log.Warnf("error writing event trace: %s", err.Error()) + } + buf[i] = nil + } + + if !ok { + t.w.Close() + return + } + } +} + +var _ EventTracer = (*JSONTracer)(nil) + +// PBTracer is a tracer that writes events to a file, as delimited protobufs. +type PBTracer struct { + basicTracer + w io.WriteCloser +} + +func NewPBTracer(file string) (*PBTracer, error) { + return OpenPBTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) +} + +// OpenPBTracer creates a new PBTracer, with explicit control of OpenFile flags and permissions. 
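Editor's note: JSONTracer and PBTracer are the file-backed EventTracer implementations; an application can also supply its own. A minimal in-memory sketch (the mutex is a defensive assumption about concurrent callers; counter keys use the generated enum's String() form):

// Sketch: an EventTracer that counts trace events by type.
type countingTracer struct {
	mx     sync.Mutex
	counts map[string]int
}

func (t *countingTracer) Trace(evt *pb.TraceEvent) {
	t.mx.Lock()
	defer t.mx.Unlock()
	if t.counts == nil {
		t.counts = make(map[string]int)
	}
	t.counts[evt.GetType().String()]++
}

var _ EventTracer = (*countingTracer)(nil)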
+func OpenPBTracer(file string, flags int, perm os.FileMode) (*PBTracer, error) { + f, err := os.OpenFile(file, flags, perm) + if err != nil { + return nil, err + } + + tr := &PBTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}} + go tr.doWrite() + + return tr, nil +} + +func (t *PBTracer) doWrite() { + var buf []*pb.TraceEvent + w := protoio.NewDelimitedWriter(t.w) + for { + _, ok := <-t.ch + + t.mx.Lock() + tmp := t.buf + t.buf = buf[:0] + buf = tmp + t.mx.Unlock() + + for i, evt := range buf { + err := w.WriteMsg(evt) + if err != nil { + log.Warnf("error writing event trace: %s", err.Error()) + } + buf[i] = nil + } + + if !ok { + t.w.Close() + return + } + } +} + +var _ EventTracer = (*PBTracer)(nil) + +const RemoteTracerProtoID = protocol.ID("/libp2p/pubsub/tracer/1.0.0") + +// RemoteTracer is a tracer that sends trace events to a remote peer +type RemoteTracer struct { + basicTracer + ctx context.Context + host host.Host + peer peer.ID +} + +// NewRemoteTracer constructs a RemoteTracer, tracing to the peer identified by pi +func NewRemoteTracer(ctx context.Context, host host.Host, pi peer.AddrInfo) (*RemoteTracer, error) { + tr := &RemoteTracer{ctx: ctx, host: host, peer: pi.ID, basicTracer: basicTracer{ch: make(chan struct{}, 1), lossy: true}} + host.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.PermanentAddrTTL) + go tr.doWrite() + return tr, nil +} + +func (t *RemoteTracer) doWrite() { + var buf []*pb.TraceEvent + + s, err := t.openStream() + if err != nil { + log.Debugf("error opening remote tracer stream: %s", err.Error()) + return + } + + var batch pb.TraceEventBatch + + gzipW := gzip.NewWriter(s) + w := protoio.NewDelimitedWriter(gzipW) + + for { + _, ok := <-t.ch + + // deadline for batch accumulation + deadline := time.Now().Add(time.Second) + + t.mx.Lock() + for len(t.buf) < MinTraceBatchSize && time.Now().Before(deadline) { + t.mx.Unlock() + time.Sleep(100 * time.Millisecond) + t.mx.Lock() + } + + tmp := t.buf + t.buf = buf[:0] + buf = tmp + t.mx.Unlock() + + if len(buf) == 0 { + goto end + } + + batch.Batch = buf + + err = w.WriteMsg(&batch) + if err != nil { + log.Debugf("error writing trace event batch: %s", err) + goto end + } + + err = gzipW.Flush() + if err != nil { + log.Debugf("error flushin gzip stream: %s", err) + goto end + } + + end: + // nil out the buffer to gc consumed events + for i := range buf { + buf[i] = nil + } + + if !ok { + if err != nil { + s.Reset() + } else { + gzipW.Close() + s.Close() + } + return + } + + if err != nil { + s.Reset() + s, err = t.openStream() + if err != nil { + log.Debugf("error opening remote tracer stream: %s", err.Error()) + return + } + + gzipW.Reset(s) + } + } +} + +func (t *RemoteTracer) openStream() (network.Stream, error) { + for { + ctx, cancel := context.WithTimeout(t.ctx, time.Minute) + s, err := t.host.NewStream(ctx, t.peer, RemoteTracerProtoID) + cancel() + if err != nil { + if t.ctx.Err() != nil { + return nil, err + } + + // wait a minute and try again, to account for transient server downtime + select { + case <-time.After(time.Minute): + continue + case <-t.ctx.Done(): + return nil, t.ctx.Err() + } + } + + return s, nil + } +} + +var _ EventTracer = (*RemoteTracer)(nil) diff --git a/go-libp2p-blossomsub/validation.go b/go-libp2p-blossomsub/validation.go new file mode 100644 index 0000000..f8f3519 --- /dev/null +++ b/go-libp2p-blossomsub/validation.go @@ -0,0 +1,590 @@ +package blossomsub + +import ( + "context" + "fmt" + "runtime" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) 
+ +const ( + defaultValidateQueueSize = 32 + defaultValidateConcurrency = 1024 + defaultValidateThrottle = 8192 +) + +// ValidationError is an error that may be signalled from message publication when the message +// fails validation +type ValidationError struct { + Reason string +} + +func (e ValidationError) Error() string { + return e.Reason +} + +// Validator is a function that validates a message with a binary decision: accept or reject. +type Validator func(context.Context, peer.ID, *Message) bool + +// ValidatorEx is an extended validation function that validates a message with an enumerated decision +type ValidatorEx func(context.Context, peer.ID, *Message) ValidationResult + +// ValidationResult represents the decision of an extended validator +type ValidationResult int + +const ( + // ValidationAccept is a validation decision that indicates a valid message that should be accepted and + // delivered to the application and forwarded to the network. + ValidationAccept = ValidationResult(0) + // ValidationReject is a validation decision that indicates an invalid message that should not be + // delivered to the application or forwarded to the application. Furthermore the peer that forwarded + // the message should be penalized by peer scoring routers. + ValidationReject = ValidationResult(1) + // ValidationIgnore is a validation decision that indicates a message that should be ignored: it will + // be neither delivered to the application nor forwarded to the network. However, in contrast to + // ValidationReject, the peer that forwarded the message must not be penalized by peer scoring routers. + ValidationIgnore = ValidationResult(2) + // internal + validationThrottled = ValidationResult(-1) +) + +// ValidatorOpt is an option for RegisterBitmaskValidator. +type ValidatorOpt func(addVal *addValReq) error + +// validation represents the validator pipeline. +// The validator pipeline performs signature validation and runs a +// sequence of user-configured validators per-bitmask. It is possible to +// adjust various concurrency parameters, such as the number of +// workers and the max number of simultaneous validations. The user +// can also attach inline validators that will be executed +// synchronously; this may be useful to prevent superfluous +// context-switching for lightweight tasks. 
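Editor's note: for orientation, this is how the pipeline is fed from the application side. A hedged sketch of registering an extended per-bitmask validator; RegisterBitmaskValidator lives on PubSub (pubsub.go), the ValidatorOpts are defined at the bottom of this file, and the bitmask value, size limits, and wrapper function are illustrative:

// Sketch: an async ValidatorEx with a deadline and a per-bitmask throttle.
func registerSizeValidator(ps *PubSub) error {
	bitmask := []byte{0x7e, 0x57} // illustrative bitmask
	return ps.RegisterBitmaskValidator(bitmask,
		func(ctx context.Context, from peer.ID, msg *Message) ValidationResult {
			if len(msg.Data) == 0 {
				return ValidationReject // penalizes the forwarding peer via peer scoring
			}
			if len(msg.Data) > 1<<20 {
				return ValidationIgnore // dropped without penalizing the forwarder
			}
			return ValidationAccept
		},
		WithValidatorTimeout(100*time.Millisecond), // deadline for the async validator
		WithValidatorConcurrency(64),               // per-bitmask validation throttle
	)
}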
+type validation struct { + p *PubSub + + tracer *pubsubTracer + + // mx protects the validator map + mx sync.Mutex + // bitmaskVals tracks per bitmask validators + bitmaskVals map[string]*validatorImpl + + // defaultVals tracks default validators applicable to all bitmasks + defaultVals []*validatorImpl + + // validateQ is the front-end to the validation pipeline + validateQ chan *validateReq + + // validateThrottle limits the number of active validation goroutines + validateThrottle chan struct{} + + // this is the number of synchronous validation workers + validateWorkers int +} + +// validation requests +type validateReq struct { + vals []*validatorImpl + src peer.ID + msg *Message +} + +// representation of bitmask validators +type validatorImpl struct { + bitmask []byte + validate ValidatorEx + validateTimeout time.Duration + validateThrottle chan struct{} + validateInline bool +} + +// async request to add a bitmask validators +type addValReq struct { + bitmask []byte + validate interface{} + timeout time.Duration + throttle int + inline bool + resp chan error +} + +// async request to remove a bitmask validator +type rmValReq struct { + bitmask []byte + resp chan error +} + +// newValidation creates a new validation pipeline +func newValidation() *validation { + return &validation{ + bitmaskVals: make(map[string]*validatorImpl), + validateQ: make(chan *validateReq, defaultValidateQueueSize), + validateThrottle: make(chan struct{}, defaultValidateThrottle), + validateWorkers: runtime.NumCPU(), + } +} + +// Start attaches the validation pipeline to a pubsub instance and starts background +// workers +func (v *validation) Start(p *PubSub) { + v.p = p + v.tracer = p.tracer + for i := 0; i < v.validateWorkers; i++ { + go v.validateWorker() + } +} + +// AddValidator adds a new validator +func (v *validation) AddValidator(req *addValReq) { + val, err := v.makeValidator(req) + if err != nil { + req.resp <- err + return + } + + v.mx.Lock() + defer v.mx.Unlock() + + bitmask := val.bitmask + + _, ok := v.bitmaskVals[string(bitmask)] + if ok { + req.resp <- fmt.Errorf("duplicate validator for bitmask %s", bitmask) + return + } + + v.bitmaskVals[string(bitmask)] = val + req.resp <- nil +} + +func (v *validation) makeValidator(req *addValReq) (*validatorImpl, error) { + makeValidatorEx := func(v Validator) ValidatorEx { + return func(ctx context.Context, p peer.ID, msg *Message) ValidationResult { + if v(ctx, p, msg) { + return ValidationAccept + } else { + return ValidationReject + } + } + } + + var validator ValidatorEx + switch v := req.validate.(type) { + case func(ctx context.Context, p peer.ID, msg *Message) bool: + validator = makeValidatorEx(Validator(v)) + case Validator: + validator = makeValidatorEx(v) + + case func(ctx context.Context, p peer.ID, msg *Message) ValidationResult: + validator = ValidatorEx(v) + case ValidatorEx: + validator = v + + default: + bitmask := req.bitmask + if req.bitmask == nil { + bitmask = []byte{0xff, 0xff} + } + return nil, fmt.Errorf("unknown validator type for bitmask %s; must be an instance of Validator or ValidatorEx", bitmask) + } + + val := &validatorImpl{ + bitmask: req.bitmask, + validate: validator, + validateTimeout: 0, + validateThrottle: make(chan struct{}, defaultValidateConcurrency), + validateInline: req.inline, + } + + if req.timeout > 0 { + val.validateTimeout = req.timeout + } + + if req.throttle > 0 { + val.validateThrottle = make(chan struct{}, req.throttle) + } + + return val, nil +} + +// RemoveValidator removes an existing validator 
+func (v *validation) RemoveValidator(req *rmValReq) { + v.mx.Lock() + defer v.mx.Unlock() + + bitmask := req.bitmask + + _, ok := v.bitmaskVals[string(bitmask)] + if ok { + delete(v.bitmaskVals, string(bitmask)) + req.resp <- nil + } else { + req.resp <- fmt.Errorf("no validator for bitmask %s", bitmask) + } +} + +// PushLocal synchronously pushes a locally published message and performs applicable +// validations. +// Returns an error if validation fails +func (v *validation) PushLocal(msg *Message) error { + v.p.tracer.PublishMessage(msg) + + err := v.p.checkSigningPolicy(msg) + if err != nil { + return err + } + + vals := v.getValidators(msg) + return v.validate(vals, msg.ReceivedFrom, msg, true) +} + +// Push pushes a message into the validation pipeline. +// It returns true if the message can be forwarded immediately without validation. +func (v *validation) Push(src peer.ID, msg *Message) bool { + vals := v.getValidators(msg) + + if len(vals) > 0 || msg.Signature != nil { + select { + case v.validateQ <- &validateReq{vals, src, msg}: + default: + log.Debugf("message validation throttled: queue full; dropping message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationQueueFull) + } + return false + } + + return true +} + +// getValidators returns all validators that apply to a given message +func (v *validation) getValidators(msg *Message) []*validatorImpl { + v.mx.Lock() + defer v.mx.Unlock() + + var vals []*validatorImpl + vals = append(vals, v.defaultVals...) + + bitmask := msg.GetBitmask() + + val, ok := v.bitmaskVals[string(bitmask)] + if !ok { + return vals + } + + return append(vals, val) +} + +// validateWorker is an active goroutine performing inline validation +func (v *validation) validateWorker() { + for { + select { + case req := <-v.validateQ: + v.validate(req.vals, req.src, req.msg, false) + case <-v.p.ctx.Done(): + return + } + } +} + +// validate performs validation and only sends the message if all validators succeed +func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool) error { + // If signature verification is enabled, but signing is disabled, + // the Signature is required to be nil upon receiving the message in PubSub.pushMsg. 
+ if msg.Signature != nil { + if !v.validateSignature(msg) { + log.Debugf("message signature validation failed; dropping message from %s", src) + v.tracer.RejectMessage(msg, RejectInvalidSignature) + return ValidationError{Reason: RejectInvalidSignature} + } + } + + // we can mark the message as seen now that we have verified the signature + // and avoid invoking user validators more than once + id := v.p.idGen.ID(msg) + if !v.p.markSeen(id) { + v.tracer.DuplicateMessage(msg) + return nil + } else { + v.tracer.ValidateMessage(msg) + } + + var inline, async []*validatorImpl + for _, val := range vals { + if val.validateInline || synchronous { + inline = append(inline, val) + } else { + async = append(async, val) + } + } + + // apply inline (synchronous) validators + result := ValidationAccept +loop: + for _, val := range inline { + switch val.validateMsg(v.p.ctx, src, msg) { + case ValidationAccept: + case ValidationReject: + result = ValidationReject + break loop + case ValidationIgnore: + result = ValidationIgnore + } + } + + if result == ValidationReject { + log.Debugf("message validation failed; dropping message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationFailed) + return ValidationError{Reason: RejectValidationFailed} + } + + // apply async validators + if len(async) > 0 { + select { + case v.validateThrottle <- struct{}{}: + go func() { + v.doValidateBitmask(async, src, msg, result) + <-v.validateThrottle + }() + default: + log.Debugf("message validation throttled; dropping message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationThrottled) + } + return nil + } + + if result == ValidationIgnore { + v.tracer.RejectMessage(msg, RejectValidationIgnored) + return ValidationError{Reason: RejectValidationIgnored} + } + + // no async validators, accepted message, send it! + select { + case v.p.sendMsg <- msg: + return nil + case <-v.p.ctx.Done(): + return v.p.ctx.Err() + } +} + +func (v *validation) validateSignature(msg *Message) bool { + err := verifyMessageSignature(msg.Message) + if err != nil { + log.Debugf("signature verification error: %s", err.Error()) + return false + } + + return true +} + +func (v *validation) doValidateBitmask(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult) { + result := v.validateBitmask(vals, src, msg) + + if result == ValidationAccept && r != ValidationAccept { + result = r + } + + switch result { + case ValidationAccept: + v.p.sendMsg <- msg + case ValidationReject: + log.Debugf("message validation failed; dropping message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationFailed) + return + case ValidationIgnore: + log.Debugf("message validation punted; ignoring message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationIgnored) + return + case validationThrottled: + log.Debugf("message validation throttled; ignoring message from %s", src) + v.tracer.RejectMessage(msg, RejectValidationThrottled) + + default: + // BUG: this would be an internal programming error, so a panic seems appropiate. 
+ panic(fmt.Errorf("unexpected validation result: %d", result)) + } +} + +func (v *validation) validateBitmask(vals []*validatorImpl, src peer.ID, msg *Message) ValidationResult { + if len(vals) == 1 { + return v.validateSingleBitmask(vals[0], src, msg) + } + + ctx, cancel := context.WithCancel(v.p.ctx) + defer cancel() + + rch := make(chan ValidationResult, len(vals)) + rcount := 0 + + for _, val := range vals { + rcount++ + + select { + case val.validateThrottle <- struct{}{}: + go func(val *validatorImpl) { + rch <- val.validateMsg(ctx, src, msg) + <-val.validateThrottle + }(val) + + default: + log.Debugf("validation throttled for bitmask %s", val.bitmask) + rch <- validationThrottled + } + } + + result := ValidationAccept +loop: + for i := 0; i < rcount; i++ { + switch <-rch { + case ValidationAccept: + case ValidationReject: + result = ValidationReject + break loop + case ValidationIgnore: + // throttled validation has the same effect, but takes precedence over Ignore as it is not + // known whether the throttled validator would have signaled rejection. + if result != validationThrottled { + result = ValidationIgnore + } + case validationThrottled: + result = validationThrottled + } + } + + return result +} + +// fast path for single bitmask validation that avoids the extra goroutine +func (v *validation) validateSingleBitmask(val *validatorImpl, src peer.ID, msg *Message) ValidationResult { + select { + case val.validateThrottle <- struct{}{}: + res := val.validateMsg(v.p.ctx, src, msg) + <-val.validateThrottle + return res + + default: + log.Debugf("validation throttled for bitmask %s", val.bitmask) + return validationThrottled + } +} + +func (val *validatorImpl) validateMsg(ctx context.Context, src peer.ID, msg *Message) ValidationResult { + start := time.Now() + defer func() { + log.Debugf("validation done; took %s", time.Since(start)) + }() + + if val.validateTimeout > 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, val.validateTimeout) + defer cancel() + } + + r := val.validate(ctx, src, msg) + switch r { + case ValidationAccept: + fallthrough + case ValidationReject: + fallthrough + case ValidationIgnore: + return r + + default: + log.Warnf("Unexpected result from validator: %d; ignoring message", r) + return ValidationIgnore + } +} + +// / Options +// WithDefaultValidator adds a validator that applies to all bitmasks by default; it can be used +// more than once and add multiple validators. Having a defult validator does not inhibit registering +// a per bitmask validator. +func WithDefaultValidator(val interface{}, opts ...ValidatorOpt) Option { + return func(ps *PubSub) error { + addVal := &addValReq{ + validate: val, + } + + for _, opt := range opts { + err := opt(addVal) + if err != nil { + return err + } + } + + val, err := ps.val.makeValidator(addVal) + if err != nil { + return err + } + + ps.val.defaultVals = append(ps.val.defaultVals, val) + return nil + } +} + +// WithValidateQueueSize sets the buffer of validate queue. Defaults to 32. +// When queue is full, validation is throttled and new messages are dropped. +func WithValidateQueueSize(n int) Option { + return func(ps *PubSub) error { + if n > 0 { + ps.val.validateQ = make(chan *validateReq, n) + return nil + } + return fmt.Errorf("validate queue size must be > 0") + } +} + +// WithValidateThrottle sets the upper bound on the number of active validation +// goroutines across all bitmasks. The default is 8192. 
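Editor's note: WithValidateQueueSize, WithValidateWorkers, and WithValidateThrottle tune the shared validation front-end rather than a single bitmask. A hedged sketch of bundling them (the sizes are arbitrary examples); the resulting options would be passed to the router constructor alongside any others:

// Sketch: size the validation front-end at construction time.
func validationOptions() []Option {
	return []Option{
		WithValidateQueueSize(256), // front-end queue ahead of the workers (default 32)
		WithValidateWorkers(4),     // synchronous validation workers (default NumCPU)
		WithValidateThrottle(4096), // cap on concurrent async validations (default 8192)
	}
}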
+func WithValidateThrottle(n int) Option { + return func(ps *PubSub) error { + ps.val.validateThrottle = make(chan struct{}, n) + return nil + } +} + +// WithValidateWorkers sets the number of synchronous validation worker goroutines. +// Defaults to NumCPU. +// +// The synchronous validation workers perform signature validation, apply inline +// user validators, and schedule asynchronous user validators. +// You can adjust this parameter to devote less cpu time to synchronous validation. +func WithValidateWorkers(n int) Option { + return func(ps *PubSub) error { + if n > 0 { + ps.val.validateWorkers = n + return nil + } + return fmt.Errorf("number of validation workers must be > 0") + } +} + +// WithValidatorTimeout is an option that sets a timeout for an (asynchronous) bitmask validator. +// By default there is no timeout in asynchronous validators. +func WithValidatorTimeout(timeout time.Duration) ValidatorOpt { + return func(addVal *addValReq) error { + addVal.timeout = timeout + return nil + } +} + +// WithValidatorConcurrency is an option that sets the bitmask validator throttle. +// This controls the number of active validation goroutines for the bitmask; the default is 1024. +func WithValidatorConcurrency(n int) ValidatorOpt { + return func(addVal *addValReq) error { + addVal.throttle = n + return nil + } +} + +// WithValidatorInline is an option that sets the validation disposition to synchronous: +// it will be executed inline in validation front-end, without spawning a new goroutine. +// This is suitable for simple or cpu-bound validators that do not block. +func WithValidatorInline(inline bool) ValidatorOpt { + return func(addVal *addValReq) error { + addVal.inline = inline + return nil + } +} diff --git a/go-libp2p-blossomsub/validation_builtin.go b/go-libp2p-blossomsub/validation_builtin.go new file mode 100644 index 0000000..6660621 --- /dev/null +++ b/go-libp2p-blossomsub/validation_builtin.go @@ -0,0 +1,101 @@ +package blossomsub + +import ( + "context" + "encoding/binary" + "sync" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// PeerMetadataStore is an interface for storing and retrieving per peer metadata +type PeerMetadataStore interface { + // Get retrieves the metadata associated with a peer; + // It should return nil if there is no metadata associated with the peer and not an error. + Get(context.Context, peer.ID) ([]byte, error) + // Put sets the metadata associated with a peer. + Put(context.Context, peer.ID, []byte) error +} + +// BasicSeqnoValidator is a basic validator, usable as a default validator, that ignores replayed +// messages outside the seen cache window. The validator uses the message seqno as a peer-specific +// nonce to decide whether the message should be propagated, comparing to the maximal nonce store +// in the peer metadata store. This is useful to ensure that there can be no infinitely propagating +// messages in the network regardless of the seen cache span and network diameter. +// It requires that pubsub is instantiated with a strict message signing policy and that seqnos +// are not disabled, ie it doesn't support anonymous mode. +// +// Warning: See https://github.com/libp2p/rust-libp2p/issues/3453 +// TL;DR: rust is currently violating the spec by issuing a random seqno, which creates an +// interoperability hazard. We expect this issue to be addressed in the not so distant future, +// but keep this in mind if you are in a mixed environment with (older) rust nodes. 
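Editor's note: the tests below wire this validator up with a mock store. A hedged sketch of a minimal in-memory PeerMetadataStore and the option that installs the validator for all bitmasks; the type and constructor names are illustrative, and a production deployment would persist the nonces instead:

// Sketch: an in-memory PeerMetadataStore for BasicSeqnoValidator.
type memMetaStore struct {
	mx sync.Mutex
	m  map[peer.ID][]byte
}

func newMemMetaStore() *memMetaStore {
	return &memMetaStore{m: make(map[peer.ID][]byte)}
}

func (s *memMetaStore) Get(_ context.Context, p peer.ID) ([]byte, error) {
	s.mx.Lock()
	defer s.mx.Unlock()
	return s.m[p], nil // nil, nil when absent, as the interface requires
}

func (s *memMetaStore) Put(_ context.Context, p peer.ID, v []byte) error {
	s.mx.Lock()
	defer s.mx.Unlock()
	s.m[p] = v
	return nil
}

// Installed as a default validator, e.g.:
//   WithDefaultValidator(NewBasicSeqnoValidator(newMemMetaStore()))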
+type BasicSeqnoValidator struct { + mx sync.RWMutex + meta PeerMetadataStore +} + +// NewBasicSeqnoValidator constructs a BasicSeqnoValidator using the givven PeerMetadataStore. +func NewBasicSeqnoValidator(meta PeerMetadataStore) ValidatorEx { + val := &BasicSeqnoValidator{ + meta: meta, + } + return val.validate +} + +func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Message) ValidationResult { + p := m.GetFrom() + + v.mx.RLock() + nonceBytes, err := v.meta.Get(ctx, p) + v.mx.RUnlock() + + if err != nil { + log.Warn("error retrieving peer nonce: %s", err) + return ValidationIgnore + } + + var nonce uint64 + if len(nonceBytes) > 0 { + nonce = binary.BigEndian.Uint64(nonceBytes) + } + + var seqno uint64 + seqnoBytes := m.GetSeqno() + if len(seqnoBytes) > 0 { + seqno = binary.BigEndian.Uint64(seqnoBytes) + } + + // compare against the largest seen nonce + if seqno <= nonce { + return ValidationIgnore + } + + // get the nonce and compare again with an exclusive lock before commiting (cf concurrent validation) + v.mx.Lock() + defer v.mx.Unlock() + + nonceBytes, err = v.meta.Get(ctx, p) + if err != nil { + log.Warn("error retrieving peer nonce: %s", err) + return ValidationIgnore + } + + if len(nonceBytes) > 0 { + nonce = binary.BigEndian.Uint64(nonceBytes) + } + + if seqno <= nonce { + return ValidationIgnore + } + + // update the nonce + nonceBytes = make([]byte, 8) + binary.BigEndian.PutUint64(nonceBytes, seqno) + + err = v.meta.Put(ctx, p, nonceBytes) + if err != nil { + log.Warn("error storing peer nonce: %s", err) + } + + return ValidationAccept +} diff --git a/go-libp2p-blossomsub/validation_builtin_test.go b/go-libp2p-blossomsub/validation_builtin_test.go new file mode 100644 index 0000000..4b626fa --- /dev/null +++ b/go-libp2p-blossomsub/validation_builtin_test.go @@ -0,0 +1,278 @@ +package blossomsub + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "testing" + "time" + + pool "github.com/libp2p/go-buffer-pool" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-msgio" + "github.com/multiformats/go-varint" + + pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" +) + +var rng *rand.Rand + +func init() { + rng = rand.New(rand.NewSource(314159)) +} + +func TestBasicSeqnoValidator1(t *testing.T) { + testBasicSeqnoValidator(t, time.Minute) +} + +func TestBasicSeqnoValidator2(t *testing.T) { + testBasicSeqnoValidator(t, time.Nanosecond) +} + +func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getPubsubsWithOptionC(ctx, hosts, + func(i int) Option { + return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) + }, + func(i int) Option { + return WithSeenMessagesTTL(ttl) + }, + ) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + // connectAll(t, hosts) + sparseConnect(t, hosts) + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 100; i++ { + msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) + + owner := rng.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if 
!bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestBasicSeqnoValidatorReplay(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 20) + psubs := getPubsubsWithOptionC(ctx, hosts[:19], + func(i int) Option { + return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) + }, + func(i int) Option { + return WithSeenMessagesTTL(time.Nanosecond) + }, + ) + _ = newReplayActor(t, ctx, hosts[19]) + + var msgs []*Subscription + for _, ps := range psubs { + subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20}) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + sparseConnect(t, hosts) + + time.Sleep(time.Millisecond * 100) + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) + + owner := rng.Intn(len(psubs)) + + psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg) + + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + if !bytes.Equal(msg, got.Data) { + t.Fatal("got wrong message!") + } + } + } + + for _, sub := range msgs { + assertNeverReceives(t, sub, time.Second) + } +} + +type mockPeerMetadataStore struct { + meta map[peer.ID][]byte +} + +func newMockPeerMetadataStore() *mockPeerMetadataStore { + return &mockPeerMetadataStore{ + meta: make(map[peer.ID][]byte), + } +} + +func (m *mockPeerMetadataStore) Get(ctx context.Context, p peer.ID) ([]byte, error) { + v, ok := m.meta[p] + if !ok { + return nil, nil + } + return v, nil +} + +func (m *mockPeerMetadataStore) Put(ctx context.Context, p peer.ID, v []byte) error { + m.meta[p] = v + return nil +} + +type replayActor struct { + t *testing.T + + ctx context.Context + h host.Host + + mx sync.Mutex + out map[peer.ID]network.Stream +} + +func newReplayActor(t *testing.T, ctx context.Context, h host.Host) *replayActor { + replay := &replayActor{t: t, ctx: ctx, h: h, out: make(map[peer.ID]network.Stream)} + h.SetStreamHandler(FloodSubID, replay.handleStream) + h.Network().Notify(&network.NotifyBundle{ConnectedF: replay.connected}) + return replay +} + +func (r *replayActor) handleStream(s network.Stream) { + defer s.Close() + + p := s.Conn().RemotePeer() + + rd := msgio.NewVarintReaderSize(s, 65536) + for { + msgbytes, err := rd.ReadMsg() + if err != nil { + s.Reset() + rd.ReleaseMsg(msgbytes) + return + } + + rpc := new(pb.RPC) + err = rpc.Unmarshal(msgbytes) + rd.ReleaseMsg(msgbytes) + if err != nil { + s.Reset() + return + } + + // subscribe to the same bitmasks as our peer + subs := rpc.GetSubscriptions() + if len(subs) != 0 { + go r.send(p, &pb.RPC{Subscriptions: subs}) + } + + // replay all received messages + for _, pmsg := range rpc.GetPublish() { + go r.replay(pmsg) + } + } +} + +func (r *replayActor) send(p peer.ID, rpc *pb.RPC) { + r.mx.Lock() + defer r.mx.Unlock() + + s, ok := r.out[p] + if !ok { + r.t.Logf("cannot send message to %s: no stream", p) + return + } + + size := uint64(rpc.Size()) + + buf := pool.Get(varint.UvarintSize(size) + int(size)) + defer pool.Put(buf) + + n := binary.PutUvarint(buf, size) + + _, err := rpc.MarshalTo(buf[n:]) + if err != nil { + r.t.Logf("replay: error marshalling message: %s", err) + return + } + + _, err = s.Write(buf) + if err != nil { + r.t.Logf("replay: error sending message: %s", err) + } +} + +func (r *replayActor) replay(msg *pb.Message) { + // replay the message 10 times to a random subset of peers + for i := 0; i < 10; i++ { + delay := 
time.Duration(1+rng.Intn(20)) * time.Millisecond + time.Sleep(delay) + + var peers []peer.ID + r.mx.Lock() + for p, _ := range r.out { + if rng.Intn(2) > 0 { + peers = append(peers, p) + } + } + r.mx.Unlock() + + rpc := &pb.RPC{Publish: []*pb.Message{msg}} + r.t.Logf("replaying msg to %d peers", len(peers)) + for _, p := range peers { + r.send(p, rpc) + } + } +} + +func (r *replayActor) handleConnected(p peer.ID) { + s, err := r.h.NewStream(r.ctx, p, FloodSubID) + if err != nil { + r.t.Logf("replay: error opening stream: %s", err) + return + } + + r.mx.Lock() + defer r.mx.Unlock() + r.out[p] = s +} + +func (r *replayActor) connected(_ network.Network, conn network.Conn) { + go r.handleConnected(conn.RemotePeer()) +} diff --git a/go-libp2p-blossomsub/validation_test.go b/go-libp2p-blossomsub/validation_test.go new file mode 100644 index 0000000..361fa97 --- /dev/null +++ b/go-libp2p-blossomsub/validation_test.go @@ -0,0 +1,334 @@ +package blossomsub + +import ( + "bytes" + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestRegisterUnregisterValidator(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 1) + psubs := getPubsubs(ctx, hosts) + + err := psubs[0].RegisterBitmaskValidator([]byte{0xf0, 0x00}, func(context.Context, peer.ID, *Message) bool { + return true + }) + if err != nil { + t.Fatal(err) + } + + err = psubs[0].UnregisterBitmaskValidator([]byte{0xf0, 0x00}) + if err != nil { + t.Fatal(err) + } + + err = psubs[0].UnregisterBitmaskValidator([]byte{0xf0, 0x00}) + if err == nil { + t.Fatal("Unregistered bogus bitmask validator") + } +} + +func TestRegisterValidatorEx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 3) + psubs := getPubsubs(ctx, hosts) + + err := psubs[0].RegisterBitmaskValidator([]byte{0x7e, 0x57}, + Validator(func(context.Context, peer.ID, *Message) bool { + return true + })) + if err != nil { + t.Fatal(err) + } + + err = psubs[1].RegisterBitmaskValidator([]byte{0x7e, 0x57}, + ValidatorEx(func(context.Context, peer.ID, *Message) ValidationResult { + return ValidationAccept + })) + if err != nil { + t.Fatal(err) + } + + err = psubs[2].RegisterBitmaskValidator([]byte{0x7e, 0x57}, "bogus") + if err == nil { + t.Fatal("expected error") + } +} + +func TestValidate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + err := psubs[1].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool { + return !bytes.Contains(msg.Data, []byte("illegal")) + }) + if err != nil { + t.Fatal(err) + } + + sub, err := psubs[1].Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 50) + + msgs := []struct { + msg []byte + validates bool + }{ + {msg: []byte("this is a legal message"), validates: true}, + {msg: []byte("there also is nothing controversial about this message"), validates: true}, + {msg: []byte("openly illegal content will be censored"), validates: false}, + {msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true}, + } + + for _, tc := range msgs { + err := psubs[0].Publish(bitmask, tc.msg) + if err != nil { + t.Fatal(err) + } + + select { + case msg := <-sub.ch: + if 
!tc.validates { + t.Log(msg) + t.Error("expected message validation to filter out the message") + } + case <-time.After(333 * time.Millisecond): + if tc.validates { + t.Error("expected message validation to accept the message") + } + } + } +} + +func TestValidate2(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 1) + psubs := getPubsubs(ctx, hosts) + + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + err := psubs[0].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool { + return !bytes.Contains(msg.Data, []byte("illegal")) + }) + if err != nil { + t.Fatal(err) + } + + msgs := []struct { + msg []byte + validates bool + }{ + {msg: []byte("this is a legal message"), validates: true}, + {msg: []byte("there also is nothing controversial about this message"), validates: true}, + {msg: []byte("openly illegal content will be censored"), validates: false}, + {msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true}, + } + + for _, tc := range msgs { + err := psubs[0].Publish(bitmask, tc.msg) + if tc.validates { + if err != nil { + t.Fatal(err) + } + } else { + if err == nil { + t.Fatal("expected validation to fail for this message") + } + } + } +} + +func TestValidateOverload(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + type msg struct { + msg []byte + validates bool + } + + tcs := []struct { + msgs []msg + + maxConcurrency int + }{ + { + maxConcurrency: 10, + msgs: []msg{ + {msg: []byte("this is a legal message"), validates: true}, + {msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true}, + {msg: []byte("there also is nothing controversial about this message"), validates: true}, + {msg: []byte("also fine"), validates: true}, + {msg: []byte("still, all good"), validates: true}, + {msg: []byte("this is getting boring"), validates: true}, + {msg: []byte([]byte{0xf0, 0x00}), validates: true}, + {msg: []byte([]byte{0xf0, 0x0b, 0xa1, 0x20}), validates: true}, + {msg: []byte("foofoo"), validates: true}, + {msg: []byte("barfoo"), validates: true}, + {msg: []byte("oh no!"), validates: false}, + }, + }, + { + maxConcurrency: 2, + msgs: []msg{ + {msg: []byte("this is a legal message"), validates: true}, + {msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true}, + {msg: []byte("oh no!"), validates: false}, + }, + }, + } + + for tci, tc := range tcs { + t.Run(fmt.Sprintf("%d", tci), func(t *testing.T) { + hosts := getNetHosts(t, ctx, 2) + psubs := getPubsubs(ctx, hosts) + + connect(t, hosts[0], hosts[1]) + bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20} + + block := make(chan struct{}) + + err := psubs[1].RegisterBitmaskValidator(bitmask, + func(ctx context.Context, from peer.ID, msg *Message) bool { + <-block + return true + }, + WithValidatorConcurrency(tc.maxConcurrency)) + + if err != nil { + t.Fatal(err) + } + + sub, err := psubs[1].Subscribe(bitmask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 50) + + if len(tc.msgs) != tc.maxConcurrency+1 { + t.Fatalf("expected number of messages sent to be maxConcurrency+1. 
Got %d, expected %d", len(tc.msgs), tc.maxConcurrency+1) + } + + p := psubs[0] + + var wg sync.WaitGroup + wg.Add(1) + go func() { + for _, tmsg := range tc.msgs { + select { + case msg := <-sub.ch: + if !tmsg.validates { + t.Log(msg) + t.Error("expected message validation to drop the message because all validator goroutines are taken") + } + case <-time.After(time.Second): + if tmsg.validates { + t.Error("expected message validation to accept the message") + } + } + } + wg.Done() + }() + + for _, tmsg := range tc.msgs { + err := p.Publish(bitmask, tmsg.msg) + if err != nil { + t.Fatal(err) + } + } + + // wait a bit before unblocking the validator goroutines + time.Sleep(500 * time.Millisecond) + close(block) + + wg.Wait() + }) + } +} + +func TestValidateAssortedOptions(t *testing.T) { + // this test adds coverage for various options that are not covered in other tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getNetHosts(t, ctx, 10) + psubs := getPubsubs(ctx, hosts, + WithValidateQueueSize(10), + WithValidateThrottle(10), + WithValidateWorkers(10)) + + sparseConnect(t, hosts) + + for _, psub := range psubs { + err := psub.RegisterBitmaskValidator([]byte{0xff, 0x00, 0x00, 0x00}, + func(context.Context, peer.ID, *Message) bool { + return true + }, + WithValidatorTimeout(100*time.Millisecond)) + if err != nil { + t.Fatal(err) + } + + err = psub.RegisterBitmaskValidator([]byte{0x00, 0xff, 0x00, 0x00}, + func(context.Context, peer.ID, *Message) bool { + return true + }, + WithValidatorInline(true)) + if err != nil { + t.Fatal(err) + } + } + + var subs1, subs2 []*Subscription + for _, ps := range psubs { + sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + subs1 = append(subs1, sub) + + sub, err = ps.Subscribe([]byte{0x00, 0xff, 0x00, 0x00}) + if err != nil { + t.Fatal(err) + } + subs2 = append(subs2, sub) + } + + time.Sleep(time.Second) + + for i := 0; i < 10; i++ { + msg := []byte(fmt.Sprintf("message %d", i)) + + psubs[i].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg) + for _, sub := range subs1 { + assertReceive(t, sub, msg) + } + + psubs[i].Publish([]byte{0x00, 0xff, 0x00, 0x00}, msg) + for _, sub := range subs2 { + assertReceive(t, sub, msg) + } + } +}