tighten up the defers on mutexes, it's hitting hard locks

Cassandra Heart 2024-08-11 22:56:30 -05:00
parent 9d0cf0bc68
commit 1e60549f89
No known key found for this signature in database
GPG Key ID: 6352152859385958
17 changed files with 252 additions and 147 deletions
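
A minimal sketch of the pattern this commit applies throughout (illustrative types and names, not code from this repository): a deferred Unlock holds the mutex until the function returns, while the rewritten form releases it explicitly on every return path, so locks are held for the shortest possible span. The tradeoff is that the explicit form is not panic-safe, and a newly added return path can silently leak the lock.

package sketch

import (
	"errors"
	"sync"
)

type table struct {
	mu sync.Mutex
	m  map[string]int
}

// Deferred style (before): simple and panic-safe, but the lock is held
// for the entire remainder of the call.
func (t *table) getDeferred(k string) (int, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	v, ok := t.m[k]
	if !ok {
		return 0, errors.New("missing")
	}
	return v, nil
}

// Explicit style (after, as in this commit): every return path unlocks
// itself, so the mutex is released at the earliest possible point.
func (t *table) getExplicit(k string) (int, error) {
	t.mu.Lock()
	v, ok := t.m[k]
	if !ok {
		t.mu.Unlock()
		return 0, errors.New("missing")
	}
	t.mu.Unlock()
	return v, nil
}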

View File

@@ -51,7 +51,6 @@ func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Dur
 func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) {
 	b.mu.Lock()
-	defer b.mu.Unlock()
 	h, ok := b.info[id]
 	switch {
@@ -62,6 +61,7 @@ func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) {
 			attempts: 0,
 		}
 	case h.attempts >= b.maxAttempts:
+		b.mu.Unlock()
 		return 0, fmt.Errorf("peer %s has reached its maximum backoff attempts", id)
 	case h.duration < MinBackoffDelay:
@@ -78,27 +78,29 @@ func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) {
 	h.attempts += 1
 	h.lastTried = time.Now()
 	b.info[id] = h
+	b.mu.Unlock()
 	return h.duration, nil
 }
 func (b *backoff) cleanup() {
 	b.mu.Lock()
-	defer b.mu.Unlock()
 	for id, h := range b.info {
 		if time.Since(h.lastTried) > TimeToLive {
 			delete(b.info, id)
 		}
 	}
+	b.mu.Unlock()
 }
 func (b *backoff) cleanupLoop(ctx context.Context) {
 	ticker := time.NewTicker(b.ci)
-	defer ticker.Stop()
 	for {
 		select {
 		case <-ctx.Done():
+			ticker.Stop()
 			return // pubsub shutting down
 		case <-ticker.C:
 			b.cleanup()
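
The commit applies the same treatment to timers: the deferred ticker.Stop() in cleanupLoop above becomes an explicit Stop() on the one path that leaves the loop. A hedged sketch of that shape (illustrative signature, not code from this repository):

package sketch

import (
	"context"
	"time"
)

func cleanupLoop(ctx context.Context, interval time.Duration, cleanup func()) {
	ticker := time.NewTicker(interval)
	for {
		select {
		case <-ctx.Done():
			ticker.Stop() // explicit, replacing `defer ticker.Stop()`
			return
		case <-ticker.C:
			cleanup()
		}
	}
}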

View File

@@ -48,9 +48,9 @@ func (t *Bitmask) SetScoreParams(p *BitmaskScoreParams) error {
 	}
 	t.mux.Lock()
-	defer t.mux.Unlock()
 	if t.closed {
+		t.mux.Unlock()
 		return ErrBitmaskClosed
 	}
@@ -74,9 +74,11 @@ func (t *Bitmask) SetScoreParams(p *BitmaskScoreParams) error {
 	select {
 	case t.p.eval <- update:
 		err = <-result
+		t.mux.Unlock()
 		return err
 	case <-t.p.ctx.Done():
+		t.mux.Unlock()
 		return t.p.ctx.Err()
 	}
 }
@@ -85,8 +87,8 @@ func (t *Bitmask) SetScoreParams(p *BitmaskScoreParams) error {
 // Multiple event handlers may be created and will operate independently of each other
 func (t *Bitmask) EventHandler(opts ...BitmaskEventHandlerOpt) (*BitmaskEventHandler, error) {
 	t.mux.RLock()
-	defer t.mux.RUnlock()
 	if t.closed {
+		t.mux.RUnlock()
 		return nil, ErrBitmaskClosed
 	}
@@ -101,6 +103,7 @@ func (t *Bitmask) EventHandler(opts ...BitmaskEventHandlerOpt) (*BitmaskEventHan
 	for _, opt := range opts {
 		err := opt(h)
 		if err != nil {
+			t.mux.RUnlock()
 			return nil, err
 		}
 	}
@@ -120,21 +123,23 @@ func (t *Bitmask) EventHandler(opts ...BitmaskEventHandlerOpt) (*BitmaskEventHan
 		done <- struct{}{}
 	}:
 	case <-t.p.ctx.Done():
+		t.mux.RUnlock()
 		return nil, t.p.ctx.Err()
 	}
 	<-done
+	t.mux.RUnlock()
 	return h, nil
 }
 func (t *Bitmask) sendNotification(evt PeerEvent) {
 	t.evtHandlerMux.RLock()
-	defer t.evtHandlerMux.RUnlock()
 	for h := range t.evtHandlers {
 		h.sendNotification(evt)
 	}
+	t.evtHandlerMux.RUnlock()
 }
 // Subscribe returns a new Subscription for the bitmask.
@@ -142,8 +147,9 @@ func (t *Bitmask) sendNotification(evt PeerEvent) {
 // before the subscription is processed by the pubsub main loop and propagated to our peers.
 func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
 	t.mux.RLock()
-	defer t.mux.RUnlock()
 	if t.closed {
+		t.mux.RUnlock()
 		return nil, ErrBitmaskClosed
 	}
@@ -155,6 +161,7 @@ func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
 	for _, opt := range opts {
 		err := opt(sub)
 		if err != nil {
+			t.mux.RUnlock()
 			return nil, err
 		}
 	}
@@ -173,10 +180,13 @@ func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
 		resp: out,
 	}:
 	case <-t.p.ctx.Done():
+		t.mux.RUnlock()
 		return nil, t.p.ctx.Err()
 	}
-	return <-out, nil
+	subOut := <-out
+	t.mux.RUnlock()
+	return subOut, nil
 }
 // Relay enables message relaying for the bitmask and returns a reference
@@ -184,8 +194,9 @@ func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
 // To completely disable the relay, all references must be cancelled.
 func (t *Bitmask) Relay() (RelayCancelFunc, error) {
 	t.mux.RLock()
-	defer t.mux.RUnlock()
 	if t.closed {
+		t.mux.RUnlock()
 		return nil, ErrBitmaskClosed
 	}
@@ -199,10 +210,13 @@ func (t *Bitmask) Relay() (RelayCancelFunc, error) {
 		resp: out,
 	}:
 	case <-t.p.ctx.Done():
+		t.mux.RUnlock()
 		return nil, t.p.ctx.Err()
 	}
-	return <-out, nil
+	cancelFunc := <-out
+	t.mux.RUnlock()
+	return cancelFunc, nil
 }
 // RouterReady is a function that decides if a router is ready to publish
@@ -222,8 +236,9 @@ type PubOpt func(pub *PublishOptions) error
 // Publish publishes data to bitmask.
 func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts ...PubOpt) error {
 	t.mux.RLock()
-	defer t.mux.RUnlock()
 	if t.closed {
+		t.mux.RUnlock()
 		return ErrBitmaskClosed
 	}
@@ -234,6 +249,7 @@ func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts
 	for _, opt := range opts {
 		err := opt(pub)
 		if err != nil {
+			t.mux.RUnlock()
 			return err
 		}
 	}
@@ -241,9 +257,11 @@ func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts
 	if pub.customKey != nil && !pub.local {
 		key, pid = pub.customKey()
 		if key == nil {
+			t.mux.RUnlock()
 			return ErrNilSignKey
 		}
 		if len(pid) == 0 {
+			t.mux.RUnlock()
 			return ErrEmptyPeerID
 		}
 	}
@@ -262,6 +280,7 @@ func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts
 		m.From = []byte(pid)
 		err := signMessage(pid, key, m)
 		if err != nil {
+			t.mux.RUnlock()
 			return err
 		}
 	}
@@ -286,28 +305,43 @@ func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts
 					res <- done
 				}:
 					if <-res {
+						if ticker != nil {
+							ticker.Stop()
+						}
 						break readyLoop
 					}
 				case <-t.p.ctx.Done():
+					if ticker != nil {
+						ticker.Stop()
+					}
+					t.mux.RUnlock()
 					return t.p.ctx.Err()
 				case <-ctx.Done():
+					if ticker != nil {
+						ticker.Stop()
+					}
+					t.mux.RUnlock()
 					return ctx.Err()
 				}
 				if ticker == nil {
 					ticker = time.NewTicker(200 * time.Millisecond)
-					defer ticker.Stop()
 				}
 				select {
 				case <-ticker.C:
 				case <-ctx.Done():
+					ticker.Stop()
+					t.mux.RUnlock()
 					return fmt.Errorf("router is not ready: %w", ctx.Err())
 				}
 			}
 		}
 	}
-	return t.p.val.PushLocal(&Message{m, nil, t.p.host.ID(), nil, pub.local})
+	err := t.p.val.PushLocal(&Message{m, nil, t.p.host.ID(), nil, pub.local})
+	t.mux.RUnlock()
+	return err
 }
 // WithReadiness returns a publishing option for only publishing when the router is ready.
@@ -347,8 +381,9 @@ func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt {
 // Does not error if the bitmask is already closed.
 func (t *Bitmask) Close() error {
 	t.mux.Lock()
-	defer t.mux.Unlock()
 	if t.closed {
+		t.mux.Unlock()
 		return nil
 	}
@@ -357,6 +392,7 @@ func (t *Bitmask) Close() error {
 	select {
 	case t.p.rmBitmask <- req:
 	case <-t.p.ctx.Done():
+		t.mux.Unlock()
 		return t.p.ctx.Err()
 	}
@@ -366,18 +402,22 @@ func (t *Bitmask) Close() error {
 		t.closed = true
 	}
+	t.mux.Unlock()
 	return err
 }
 // ListPeers returns a list of peers we are connected to in the given bitmask.
 func (t *Bitmask) ListPeers() []peer.ID {
 	t.mux.RLock()
-	defer t.mux.RUnlock()
 	if t.closed {
+		t.mux.RUnlock()
 		return []peer.ID{}
 	}
-	return t.p.ListPeers(t.bitmask)
+	l := t.p.ListPeers(t.bitmask)
+	t.mux.RUnlock()
+	return l
 }
 type EventType int
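
A recurring shape in Subscribe, Relay, and Publish above: when a function returns from inside a select while holding a read lock, every arm needs its own RUnlock, and a channel result is captured into a local (cf. subOut and cancelFunc) so the unlock can precede the return. A hedged sketch with illustrative channel plumbing, not code from this repository:

package main

import (
	"context"
	"fmt"
	"sync"
)

type service struct {
	mu  sync.RWMutex
	req chan chan int
}

func (s *service) query(ctx context.Context) (int, error) {
	s.mu.RLock()
	out := make(chan int, 1)
	select {
	case s.req <- out:
	case <-ctx.Done():
		s.mu.RUnlock() // each returning select arm unlocks itself
		return 0, ctx.Err()
	}
	v := <-out     // capture the result while still holding the lock
	s.mu.RUnlock() // then release before returning
	return v, nil
}

func main() {
	s := &service{req: make(chan chan int, 1)}
	go func() { out := <-s.req; out <- 42 }()
	fmt.Println(s.query(context.Background()))
}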

View File

@@ -554,7 +554,6 @@ func (bs *BlossomSubRouter) manageAddrBook() {
 		log.Errorf("failed to subscribe to peer identification events: %v", err)
 		return
 	}
-	defer sub.Close()
 	for {
 		select {
@@ -566,6 +565,7 @@ func (bs *BlossomSubRouter) manageAddrBook() {
 					log.Warnf("failed to close addr book: %v", errClose)
 				}
 			}
+			sub.Close()
 			return
 		case ev := <-sub.Out():
 			switch ev := ev.(type) {
@@ -1435,7 +1435,6 @@ func (bs *BlossomSubRouter) heartbeatTimer() {
 	}
 	ticker := time.NewTicker(bs.params.HeartbeatInterval)
-	defer ticker.Stop()
 	for {
 		select {
@@ -1443,9 +1442,11 @@ func (bs *BlossomSubRouter) heartbeatTimer() {
 			select {
 			case bs.p.eval <- bs.heartbeat:
 			case <-bs.p.ctx.Done():
+				ticker.Stop()
 				return
 			}
 		case <-bs.p.ctx.Done():
+			ticker.Stop()
 			return
 		}
 	}
@@ -1453,14 +1454,6 @@ func (bs *BlossomSubRouter) heartbeatTimer() {
 func (bs *BlossomSubRouter) heartbeat() {
 	start := time.Now()
-	defer func() {
-		if bs.params.SlowHeartbeatWarning > 0 {
-			slowWarning := time.Duration(bs.params.SlowHeartbeatWarning * float64(bs.params.HeartbeatInterval))
-			if dt := time.Since(start); dt > slowWarning {
-				log.Warnw("slow heartbeat", "took", dt)
-			}
-		}
-	}()
 	bs.heartbeatTicks++
@@ -1666,6 +1659,13 @@ func (bs *BlossomSubRouter) heartbeat() {
 	// 2nd arg are mesh peers excluded from gossip. We already push
 	// messages to them, so its redundant to gossip IHAVEs.
 	bs.emitGossip(bitmask, peers)
+	if bs.params.SlowHeartbeatWarning > 0 {
+		slowWarning := time.Duration(bs.params.SlowHeartbeatWarning * float64(bs.params.HeartbeatInterval))
+		if dt := time.Since(start); dt > slowWarning {
+			log.Warnw("slow heartbeat", "took", dt)
+		}
+	}
 }
 // expire fanout for bitmasks we haven't published to in a while
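
Simplified shape of the heartbeat change above (illustrative function, not code from this repository): the slow-heartbeat warning moves from a deferred closure into an inline check at the end of the function. It now runs only on the path that reaches the end, not on early returns or panics, and no closure is allocated per tick.

package sketch

import (
	"log"
	"time"
)

func heartbeat(work func(), interval time.Duration, slowFraction float64) {
	start := time.Now()
	work()
	// inline check, replacing the deferred closure
	if slowFraction > 0 {
		slowWarning := time.Duration(slowFraction * float64(interval))
		if dt := time.Since(start); dt > slowWarning {
			log.Printf("slow heartbeat: took %s", dt)
		}
	}
}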

View File

@@ -55,14 +55,6 @@ func (p *PubSub) handleNewStream(s network.Stream) {
 	p.inboundStreams[peer] = s
 	p.inboundStreamsMx.Unlock()
-	defer func() {
-		p.inboundStreamsMx.Lock()
-		if p.inboundStreams[peer] == s {
-			delete(p.inboundStreams, peer)
-		}
-		p.inboundStreamsMx.Unlock()
-	}()
 	r := msgio.NewVarintReaderSize(s, p.maxMessageSize)
 	for {
 		msgbytes, err := r.ReadMsg()
@@ -77,6 +69,11 @@ func (p *PubSub) handleNewStream(s network.Stream) {
 				s.Close()
 			}
+			p.inboundStreamsMx.Lock()
+			if p.inboundStreams[peer] == s {
+				delete(p.inboundStreams, peer)
+			}
+			p.inboundStreamsMx.Unlock()
 			return
 		}
 		if len(msgbytes) == 0 {
@@ -91,6 +88,11 @@ func (p *PubSub) handleNewStream(s network.Stream) {
 		if err != nil {
 			s.Reset()
 			log.Warnf("bogus rpc from %s: %s", s.Conn().RemotePeer(), err)
+			p.inboundStreamsMx.Lock()
+			if p.inboundStreams[peer] == s {
+				delete(p.inboundStreams, peer)
+			}
+			p.inboundStreamsMx.Unlock()
 			return
 		}
@@ -100,6 +102,11 @@ func (p *PubSub) handleNewStream(s network.Stream) {
 		case <-p.ctx.Done():
 			// Close is useless because the other side isn't reading.
 			s.Reset()
+			p.inboundStreamsMx.Lock()
+			if p.inboundStreams[peer] == s {
+				delete(p.inboundStreams, peer)
+			}
+			p.inboundStreamsMx.Unlock()
 			return
 		}
 	}
@@ -161,37 +168,38 @@ func (p *PubSub) handlePeerDead(s network.Stream) {
 }
 func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) {
-	writeRpc := func(rpc *RPC) error {
-		size := uint64(rpc.Size())
-		buf := pool.Get(varint.UvarintSize(size) + int(size))
-		defer pool.Put(buf)
-		n := binary.PutUvarint(buf, size)
-		_, err := rpc.MarshalTo(buf[n:])
-		if err != nil {
-			return err
-		}
-		_, err = s.Write(buf)
-		return err
-	}
-	defer s.Close()
 	for {
 		select {
 		case rpc, ok := <-outgoing:
 			if !ok {
+				s.Close()
 				return
 			}
-			err := writeRpc(rpc)
+			size := uint64(rpc.Size())
+			buf := pool.Get(varint.UvarintSize(size) + int(size))
+			n := binary.PutUvarint(buf, size)
+			_, err := rpc.MarshalTo(buf[n:])
 			if err != nil {
 				s.Reset()
 				log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err)
+				s.Close()
 				return
 			}
+			_, err = s.Write(buf)
+			if err != nil {
+				s.Reset()
+				log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err)
+				s.Close()
+				return
+			}
+			pool.Put(buf)
 		case <-ctx.Done():
+			s.Close()
 			return
 		}
 	}
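
In the send loop above, the writeRpc closure with its deferred pool.Put is inlined, and the buffer now returns to the pool only on the successful write path; on error paths it is simply dropped for the garbage collector. A hedged sketch of that shape, assuming the same go-buffer-pool package this file already imports as pool (simplified framing, not code from this repository):

package sketch

import (
	"encoding/binary"
	"io"

	pool "github.com/libp2p/go-buffer-pool"
)

// writeFrame writes a uvarint-length-prefixed payload using a pooled
// buffer. The buffer is returned to the pool only after a successful
// write; on the error path it is left to the GC, which is safe but
// forgoes reuse.
func writeFrame(w io.Writer, payload []byte) error {
	size := uint64(len(payload))
	buf := pool.Get(binary.MaxVarintLen64 + len(payload))
	n := binary.PutUvarint(buf, size)
	n += copy(buf[n:], payload)
	if _, err := w.Write(buf[:n]); err != nil {
		return err // buf intentionally not returned to the pool here
	}
	pool.Put(buf)
	return nil
}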

View File

@@ -120,7 +120,6 @@ func (d *discover) pollTimer() {
 	}
 	ticker := time.NewTicker(DiscoveryPollInterval)
-	defer ticker.Stop()
 	for {
 		select {
@@ -128,9 +127,11 @@ func (d *discover) pollTimer() {
 			select {
 			case d.p.eval <- d.requestDiscovery:
 			case <-d.p.ctx.Done():
+				ticker.Stop()
 				return
 			}
 		case <-d.p.ctx.Done():
+			ticker.Stop()
 			return
 		}
 	}
@@ -197,7 +198,6 @@ func (d *discover) Advertise(bitmask []byte) {
 		}
 		t := time.NewTimer(next)
-		defer t.Stop()
 		for advertisingCtx.Err() == nil {
 			select {
@@ -211,9 +211,11 @@ func (d *discover) Advertise(bitmask []byte) {
 				}
 				t.Reset(next)
 			case <-advertisingCtx.Done():
+				t.Stop()
 				return
 			}
 		}
+		t.Stop()
 	}()
 }
@@ -248,7 +250,6 @@ func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterRe
 	if !t.Stop() {
 		<-t.C
 	}
-	defer t.Stop()
 	for {
 		// Check if ready for publishing
@@ -259,11 +260,14 @@ func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterRe
 			bootstrapped <- done
 		}:
 			if <-bootstrapped {
+				t.Stop()
 				return true
 			}
 		case <-d.p.ctx.Done():
+			t.Stop()
 			return false
 		case <-ctx.Done():
+			t.Stop()
 			return false
 		}
@@ -272,16 +276,20 @@ func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterRe
 		select {
 		case d.discoverQ <- disc:
 		case <-d.p.ctx.Done():
+			t.Stop()
 			return false
 		case <-ctx.Done():
+			t.Stop()
 			return false
 		}
 		select {
 		case <-disc.done:
 		case <-d.p.ctx.Done():
+			t.Stop()
 			return false
 		case <-ctx.Done():
+			t.Stop()
 			return false
 		}
@@ -289,8 +297,10 @@ func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterRe
 		select {
 		case <-t.C:
 		case <-d.p.ctx.Done():
+			t.Stop()
 			return false
 		case <-ctx.Done():
+			t.Stop()
 			return false
 		}
 	}
@@ -298,15 +308,16 @@ func (d *discover) Bootstrap(ctx context.Context, bitmask []byte, ready RouterRe
 func (d *discover) handleDiscovery(ctx context.Context, bitmask []byte, opts []discovery.Option) {
 	discoverCtx, cancel := context.WithTimeout(ctx, time.Second*10)
-	defer cancel()
 	peerCh, err := d.discovery.FindPeers(discoverCtx, string(bitmask), opts...)
 	if err != nil {
 		log.Debugf("error finding peers for bitmask %s: %v", bitmask, err)
+		cancel()
 		return
 	}
 	d.connector.Connect(ctx, peerCh)
+	cancel()
 }
 type discoverReq struct {
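
The handleDiscovery change above is the context variant of the pattern: with `defer cancel()` removed, every path out of the function must call cancel explicitly, or the timeout context and its underlying timer live on until the deadline fires. A hedged sketch with hypothetical helper signatures, not code from this repository:

package sketch

import (
	"context"
	"time"
)

func discoverOnce(ctx context.Context, find func(context.Context) error, connect func()) {
	dctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	if err := find(dctx); err != nil {
		cancel() // explicit on the error path
		return
	}
	connect()
	cancel() // and again on the success path
}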

View File

@@ -54,7 +54,6 @@ func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs [][]byte) {
 	mid := msgIDs[idx]
 	gt.Lock()
-	defer gt.Unlock()
 	promises, ok := gt.promises[string(mid)]
 	if !ok {
@@ -72,6 +71,8 @@ func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs [][]byte) {
 		}
 		peerPromises[string(mid)] = struct{}{}
 	}
+	gt.Unlock()
 }
 // returns the number of broken promises for each peer who didn't follow up
@@ -82,7 +83,6 @@ func (gt *gossipTracer) GetBrokenPromises() map[peer.ID]int {
 	}
 	gt.Lock()
-	defer gt.Unlock()
 	var res map[peer.ID]int
 	now := time.Now()
@@ -111,6 +111,7 @@ func (gt *gossipTracer) GetBrokenPromises() map[peer.ID]int {
 		}
 	}
+	gt.Unlock()
 	return res
 }
@@ -120,10 +121,10 @@ func (gt *gossipTracer) fulfillPromise(msg *Message) {
 	mid := gt.idGen.ID(msg)
 	gt.Lock()
-	defer gt.Unlock()
 	promises, ok := gt.promises[string(mid)]
 	if !ok {
+		gt.Unlock()
 		return
 	}
 	delete(gt.promises, string(mid))
@@ -138,6 +139,8 @@ func (gt *gossipTracer) fulfillPromise(msg *Message) {
 			}
 		}
 	}
+	gt.Unlock()
 }
 func (gt *gossipTracer) DeliverMessage(msg *Message) {
@@ -181,10 +184,10 @@ func (gt *gossipTracer) UndeliverableMessage(msg *Message) {}
 func (gt *gossipTracer) ThrottlePeer(p peer.ID) {
 	gt.Lock()
-	defer gt.Unlock()
 	peerPromises, ok := gt.peerPromises[p]
 	if !ok {
+		gt.Unlock()
 		return
 	}
@@ -197,4 +200,5 @@ func (gt *gossipTracer) ThrottlePeer(p peer.ID) {
 	}
 	delete(gt.peerPromises, p)
+	gt.Unlock()
 }

View File

@@ -204,13 +204,12 @@ func newPeerGater(ctx context.Context, host host.Host, params *PeerGaterParams)
 func (pg *peerGater) background(ctx context.Context) {
 	tick := time.NewTicker(pg.params.DecayInterval)
-	defer tick.Stop()
 	for {
 		select {
 		case <-tick.C:
 			pg.decayStats()
 		case <-ctx.Done():
+			tick.Stop()
 			return
 		}
 	}
@@ -218,7 +217,6 @@ func (pg *peerGater) background(ctx context.Context) {
 func (pg *peerGater) decayStats() {
 	pg.Lock()
-	defer pg.Unlock()
 	pg.validate *= pg.params.GlobalDecay
 	if pg.validate < pg.params.DecayToZero {
@@ -256,6 +254,8 @@ func (pg *peerGater) decayStats() {
 			delete(pg.ipStats, ip)
 		}
 	}
+	pg.Unlock()
 }
 func (pg *peerGater) getPeerStats(p peer.ID) *peerGaterStats {
@@ -323,21 +323,23 @@ func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus {
 	}
 	pg.Lock()
-	defer pg.Unlock()
 	// check the quiet period; if the validation queue has not throttled for more than the Quiet
 	// interval, we turn off the circuit breaker and accept.
 	if time.Since(pg.lastThrottle) > pg.params.Quiet {
+		pg.Unlock()
 		return AcceptAll
 	}
 	// no throttle events -- or they have decayed; accept.
 	if pg.throttle == 0 {
+		pg.Unlock()
 		return AcceptAll
 	}
 	// check the throttle/validate ration; if it is below threshold we accept.
 	if pg.validate != 0 && pg.throttle/pg.validate < pg.params.Threshold {
+		pg.Unlock()
 		return AcceptAll
 	}
@@ -346,6 +348,7 @@ func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus {
 	// compute the goodput of the peer; the denominator is the weighted mix of message counters
 	total := st.deliver + pg.params.DuplicateWeight*st.duplicate + pg.params.IgnoreWeight*st.ignore + pg.params.RejectWeight*st.reject
 	if total == 0 {
+		pg.Unlock()
 		return AcceptAll
 	}
@@ -355,10 +358,12 @@ func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus {
 	// accepted; this is not a sinkhole/blacklist.
 	threshold := (1 + st.deliver) / (1 + total)
 	if rand.Float64() < threshold {
+		pg.Unlock()
 		return AcceptAll
 	}
 	log.Debugf("throttling peer %s with threshold %f", p, threshold)
+	pg.Unlock()
 	return AcceptControl
 }
@@ -368,21 +373,21 @@ var _ RawTracer = (*peerGater)(nil)
 // tracer interface
 func (pg *peerGater) AddPeer(p peer.ID, proto protocol.ID) {
 	pg.Lock()
-	defer pg.Unlock()
 	st := pg.getPeerStats(p)
 	st.connected++
+	pg.Unlock()
 }
 func (pg *peerGater) RemovePeer(p peer.ID) {
 	pg.Lock()
-	defer pg.Unlock()
 	st := pg.getPeerStats(p)
 	st.connected--
 	st.expire = time.Now().Add(pg.params.RetainStats)
 	delete(pg.peerStats, p)
+	pg.Unlock()
 }
 func (pg *peerGater) Join(bitmask []byte) {}
@@ -392,14 +397,13 @@ func (pg *peerGater) Prune(p peer.ID, bitmask []byte) {}
 func (pg *peerGater) ValidateMessage(msg *Message) {
 	pg.Lock()
-	defer pg.Unlock()
 	pg.validate++
+	pg.Unlock()
 }
 func (pg *peerGater) DeliverMessage(msg *Message) {
 	pg.Lock()
-	defer pg.Unlock()
 	st := pg.getPeerStats(msg.ReceivedFrom)
@@ -411,11 +415,11 @@ func (pg *peerGater) DeliverMessage(msg *Message) {
 	}
 	st.deliver += weight
+	pg.Unlock()
 }
 func (pg *peerGater) RejectMessage(msg *Message, reason string) {
 	pg.Lock()
-	defer pg.Unlock()
 	switch reason {
 	case RejectValidationQueueFull:
@@ -432,14 +436,15 @@ func (pg *peerGater) RejectMessage(msg *Message, reason string) {
 		st := pg.getPeerStats(msg.ReceivedFrom)
 		st.reject++
 	}
+	pg.Unlock()
 }
 func (pg *peerGater) DuplicateMessage(msg *Message) {
 	pg.Lock()
-	defer pg.Unlock()
 	st := pg.getPeerStats(msg.ReceivedFrom)
 	st.duplicate++
+	pg.Unlock()
 }
 func (pg *peerGater) ThrottlePeer(p peer.ID) {}

View File

@@ -20,7 +20,6 @@ func (ps *PubSub) watchForNewPeers(ctx context.Context) {
 		log.Errorf("failed to subscribe to peer identification events: %v", err)
 		return
 	}
-	defer sub.Close()
 	ps.newPeersPrioLk.RLock()
 	ps.newPeersMx.Lock()
@@ -68,6 +67,7 @@ func (ps *PubSub) watchForNewPeers(ctx context.Context) {
 		var ev any
 		select {
 		case <-ctx.Done():
+			sub.Close()
 			return
 		case ev = <-sub.Out():
 		}
@@ -95,7 +95,7 @@ func (ps *PubSub) watchForNewPeers(ctx context.Context) {
 			}
 		}
 	}
+	sub.Close()
 }
 func (ps *PubSub) notifyNewPeer(peer peer.ID) {
func (ps *PubSub) notifyNewPeer(peer peer.ID) { func (ps *PubSub) notifyNewPeer(peer peer.ID) {

View File

@@ -200,12 +200,12 @@ func newPeerScore(params *PeerScoreParams) *peerScore {
 // Note: assumes that the bitmask score parameters have already been validated
 func (ps *peerScore) SetBitmaskScoreParams(bitmask []byte, p *BitmaskScoreParams) error {
 	ps.Lock()
-	defer ps.Unlock()
 	old, exist := ps.params.Bitmasks[string(bitmask)]
 	ps.params.Bitmasks[string(bitmask)] = p
 	if !exist {
+		ps.Unlock()
 		return nil
 	}
@@ -218,6 +218,7 @@ func (ps *peerScore) SetBitmaskScoreParams(bitmask []byte, p *BitmaskScoreParams
 		recap = true
 	}
 	if !recap {
+		ps.Unlock()
 		return nil
 	}
@@ -236,7 +237,7 @@ func (ps *peerScore) SetBitmaskScoreParams(bitmask []byte, p *BitmaskScoreParams
 			tstats.meshMessageDeliveries = p.MeshMessageDeliveriesCap
 		}
 	}
+	ps.Unlock()
 	return nil
 }
@@ -257,9 +258,10 @@ func (ps *peerScore) Score(p peer.ID) float64 {
 	}
 	ps.Lock()
-	defer ps.Unlock()
-	return ps.score(p)
+	score := ps.score(p)
+	ps.Unlock()
+	return score
 }
 func (ps *peerScore) score(p peer.ID) float64 {
@@ -394,14 +396,15 @@ func (ps *peerScore) AddPenalty(p peer.ID, count int) {
 	}
 	ps.Lock()
-	defer ps.Unlock()
 	pstats, ok := ps.peerStats[p]
 	if !ok {
+		ps.Unlock()
 		return
 	}
 	pstats.behaviourPenalty += float64(count)
+	ps.Unlock()
 }
 // periodic maintenance
@@ -503,7 +506,6 @@ func (ps *peerScore) inspectScoresExtended() {
 // once their expiry has elapsed.
 func (ps *peerScore) refreshScores() {
 	ps.Lock()
-	defer ps.Unlock()
 	now := time.Now()
 	for p, pstats := range ps.peerStats {
@@ -562,12 +564,13 @@ func (ps *peerScore) refreshScores() {
 			pstats.behaviourPenalty = 0
 		}
 	}
+	ps.Unlock()
 }
 // refreshIPs refreshes IPs we know of peers we're tracking.
 func (ps *peerScore) refreshIPs() {
 	ps.Lock()
-	defer ps.Unlock()
 	// peer IPs may change, so we periodically refresh them
 	//
@@ -582,19 +585,20 @@ func (ps *peerScore) refreshIPs() {
 			pstats.ips = ips
 		}
 	}
+	ps.Unlock()
 }
 func (ps *peerScore) gcDeliveryRecords() {
 	ps.Lock()
-	defer ps.Unlock()
 	ps.deliveries.gc()
+	ps.Unlock()
 }
 // tracer interface
 func (ps *peerScore) AddPeer(p peer.ID, proto protocol.ID) {
 	ps.Lock()
-	defer ps.Unlock()
 	pstats, ok := ps.peerStats[p]
 	if !ok {
@@ -606,14 +610,15 @@ func (ps *peerScore) AddPeer(p peer.ID, proto protocol.ID) {
 	ips := ps.getIPs(p)
 	ps.setIPs(p, ips, pstats.ips)
 	pstats.ips = ips
+	ps.Unlock()
 }
 func (ps *peerScore) RemovePeer(p peer.ID) {
 	ps.Lock()
-	defer ps.Unlock()
 	pstats, ok := ps.peerStats[p]
 	if !ok {
+		ps.Unlock()
 		return
 	}
@@ -622,6 +627,7 @@ func (ps *peerScore) RemovePeer(p peer.ID) {
 	if ps.score(p) > 0 {
 		ps.removeIPs(p, pstats.ips)
 		delete(ps.peerStats, p)
+		ps.Unlock()
 		return
 	}
@@ -641,6 +647,7 @@ func (ps *peerScore) RemovePeer(p peer.ID) {
 	pstats.connected = false
 	pstats.expire = time.Now().Add(ps.params.RetainScore)
+	ps.Unlock()
 }
 func (ps *peerScore) Join(bitmask []byte) {}
@@ -648,15 +655,16 @@ func (ps *peerScore) Leave(bitmask []byte) {}
 func (ps *peerScore) Graft(p peer.ID, bitmask []byte) {
 	ps.Lock()
-	defer ps.Unlock()
 	pstats, ok := ps.peerStats[p]
 	if !ok {
+		ps.Unlock()
 		return
 	}
 	tstats, ok := pstats.getBitmaskStats(bitmask, ps.params)
 	if !ok {
+		ps.Unlock()
 		return
 	}
@@ -664,19 +672,21 @@ func (ps *peerScore) Graft(p peer.ID, bitmask []byte) {
 	tstats.graftTime = time.Now()
 	tstats.meshTime = 0
 	tstats.meshMessageDeliveriesActive = false
+	ps.Unlock()
 }
 func (ps *peerScore) Prune(p peer.ID, bitmask []byte) {
 	ps.Lock()
-	defer ps.Unlock()
 	pstats, ok := ps.peerStats[p]
 	if !ok {
+		ps.Unlock()
 		return
 	}
 	tstats, ok := pstats.getBitmaskStats(bitmask, ps.params)
 	if !ok {
+		ps.Unlock()
 		return
 	}
@@ -688,20 +698,20 @@ func (ps *peerScore) Prune(p peer.ID, bitmask []byte) {
 	}
 	tstats.inMesh = false
+	ps.Unlock()
 }
 func (ps *peerScore) ValidateMessage(msg *Message) {
 	ps.Lock()
-	defer ps.Unlock()
 	// the pubsub subsystem is beginning validation; create a record to track time in
 	// the validation pipeline with an accurate firstSeen time.
 	_ = ps.deliveries.getRecord(ps.idGen.ID(msg))
+	ps.Unlock()
 }
 func (ps *peerScore) DeliverMessage(msg *Message) {
 	ps.Lock()
-	defer ps.Unlock()
 	ps.markFirstMessageDelivery(msg.ReceivedFrom, msg)
@@ -710,6 +720,7 @@ func (ps *peerScore) DeliverMessage(msg *Message) {
 	// defensive check that this is the first delivery trace -- delivery status should be unknown
 	if drec.status != deliveryUnknown {
 		log.Debugf("unexpected delivery trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status)
+		ps.Unlock()
 		return
 	}
@@ -723,11 +734,11 @@ func (ps *peerScore) DeliverMessage(msg *Message) {
 			ps.markDuplicateMessageDelivery(p, msg, time.Time{})
 		}
 	}
+	ps.Unlock()
 }
 func (ps *peerScore) RejectMessage(msg *Message, reason string) {
 	ps.Lock()
-	defer ps.Unlock()
 	switch reason {
 	// we don't track those messages, but we penalize the peer as they are clearly invalid
@@ -741,18 +752,21 @@ func (ps *peerScore) RejectMessage(msg *Message, reason string) {
 		fallthrough
 	case RejectSelfOrigin:
 		ps.markInvalidMessageDelivery(msg.ReceivedFrom, msg)
+		ps.Unlock()
 		return
 	// we ignore those messages, so do nothing.
 	case RejectBlacklstedPeer:
 		fallthrough
 	case RejectBlacklistedSource:
+		ps.Unlock()
 		return
 	case RejectValidationQueueFull:
 		// the message was rejected before it entered the validation pipeline;
 		// we don't know if this message has a valid signature, and thus we also don't know if
 		// it has a valid message ID; all we can do is ignore it.
+		ps.Unlock()
 		return
 	}
@@ -761,6 +775,7 @@ func (ps *peerScore) RejectMessage(msg *Message, reason string) {
 	// defensive check that this is the first rejection trace -- delivery status should be unknown
 	if drec.status != deliveryUnknown {
 		log.Debugf("unexpected rejection trace: message from %s was first seen %s ago and has delivery status %d", msg.ReceivedFrom, time.Since(drec.firstSeen), drec.status)
+		ps.Unlock()
 		return
 	}
@@ -771,12 +786,14 @@ func (ps *peerScore) RejectMessage(msg *Message, reason string) {
 		drec.status = deliveryThrottled
 		// release the delivery time tracking map to free some memory early
 		drec.peers = nil
+		ps.Unlock()
 		return
 	case RejectValidationIgnored:
 		// we were explicitly instructed by the validator to ignore the message but not penalize
 		// the peer
		drec.status = deliveryIgnored
 		drec.peers = nil
+		ps.Unlock()
 		return
 	}
@@ -790,17 +807,18 @@ func (ps *peerScore) RejectMessage(msg *Message, reason string) {
 	// release the delivery time tracking map to free some memory early
 	drec.peers = nil
+	ps.Unlock()
 }
 func (ps *peerScore) DuplicateMessage(msg *Message) {
 	ps.Lock()
-	defer ps.Unlock()
 	drec := ps.deliveries.getRecord(ps.idGen.ID(msg))
 	_, ok := drec.peers[msg.ReceivedFrom]
 	if ok {
 		// we have already seen this duplicate!
+		ps.Unlock()
 		return
 	}
@@ -824,6 +842,7 @@ func (ps *peerScore) DuplicateMessage(msg *Message) {
 	case deliveryIgnored:
 		// the message was ignored; do nothing
 	}
+	ps.Unlock()
 }
 func (ps *peerScore) ThrottlePeer(p peer.ID) {}

View File

@@ -111,7 +111,7 @@ func (t *tagTracer) addDeliveryTag(bitmask []byte) {
 	name := "pubsub-deliveries:" + string(bitmask)
 	t.Lock()
-	defer t.Unlock()
 	tag, err := t.decayer.RegisterDecayingTag(
 		name,
 		BlossomSubConnTagDecayInterval,
@@ -120,16 +120,19 @@ func (t *tagTracer) addDeliveryTag(bitmask []byte) {
 	if err != nil {
 		log.Warnf("unable to create decaying delivery tag: %s", err)
+		t.Unlock()
 		return
 	}
 	t.decaying[string(bitmask)] = tag
+	t.Unlock()
 }
 func (t *tagTracer) removeDeliveryTag(bitmask []byte) {
 	t.Lock()
-	defer t.Unlock()
 	tag, ok := t.decaying[string(bitmask)]
 	if !ok {
+		t.Unlock()
 		return
 	}
 	err := tag.Close()
@@ -137,17 +140,20 @@ func (t *tagTracer) removeDeliveryTag(bitmask []byte) {
 		log.Warnf("error closing decaying connmgr tag: %s", err)
 	}
 	delete(t.decaying, string(bitmask))
+	t.Unlock()
 }
 func (t *tagTracer) bumpDeliveryTag(p peer.ID, bitmask []byte) error {
 	t.RLock()
-	defer t.RUnlock()
 	tag, ok := t.decaying[string(bitmask)]
 	if !ok {
+		t.RUnlock()
 		return fmt.Errorf("no decaying tag registered for bitmask %s", bitmask)
 	}
-	return tag.Bump(p, BlossomSubConnTagBumpMessageDelivery)
+	err := tag.Bump(p, BlossomSubConnTagBumpMessageDelivery)
+	t.RUnlock()
+	return err
 }
 func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) {
@@ -161,15 +167,17 @@ func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) {
 // nearFirstPeers returns the peers who delivered the message while it was still validating
 func (t *tagTracer) nearFirstPeers(msg *Message) []peer.ID {
 	t.Lock()
-	defer t.Unlock()
 	peersMap, ok := t.nearFirst[string(t.idGen.ID(msg))]
 	if !ok {
+		t.Unlock()
 		return nil
 	}
 	peers := make([]peer.ID, 0, len(peersMap))
 	for p := range peersMap {
 		peers = append(peers, p)
 	}
+	t.Unlock()
 	return peers
 }
@@ -212,31 +220,32 @@ func (t *tagTracer) Prune(p peer.ID, bitmask []byte) {
 func (t *tagTracer) ValidateMessage(msg *Message) {
 	t.Lock()
-	defer t.Unlock()
 	// create map to start tracking the peers who deliver while we're validating
 	id := t.idGen.ID(msg)
 	if _, exists := t.nearFirst[string(id)]; exists {
+		t.Unlock()
 		return
 	}
 	t.nearFirst[string(id)] = make(map[peer.ID]struct{})
+	t.Unlock()
 }
 func (t *tagTracer) DuplicateMessage(msg *Message) {
 	t.Lock()
-	defer t.Unlock()
 	id := t.idGen.ID(msg)
 	peers, ok := t.nearFirst[string(id)]
 	if !ok {
+		t.Unlock()
 		return
 	}
 	peers[msg.ReceivedFrom] = struct{}{}
+	t.Unlock()
 }
 func (t *tagTracer) RejectMessage(msg *Message, reason string) {
 	t.Lock()
-	defer t.Unlock()
 	// We want to delete the near-first delivery tracking for messages that have passed through
 	// the validation pipeline. Other rejection reasons (missing signature, etc) skip the validation
@@ -249,6 +258,7 @@ func (t *tagTracer) RejectMessage(msg *Message, reason string) {
 	case RejectValidationFailed:
 		delete(t.nearFirst, string(t.idGen.ID(msg)))
 	}
+	t.Unlock()
 }
 func (t *tagTracer) RemovePeer(peer.ID) {}

View File

@@ -36,21 +36,22 @@ func (tc *FirstSeenCache) Done() {
 func (tc *FirstSeenCache) Has(s string) bool {
 	tc.lk.RLock()
-	defer tc.lk.RUnlock()
 	_, ok := tc.m[s]
+	tc.lk.RUnlock()
 	return ok
 }
 func (tc *FirstSeenCache) Add(s string) bool {
 	tc.lk.Lock()
-	defer tc.lk.Unlock()
 	_, ok := tc.m[s]
 	if ok {
+		tc.lk.Unlock()
 		return false
 	}
 	tc.m[s] = time.Now().Add(tc.ttl)
+	tc.lk.Unlock()
 	return true
 }

View File

@@ -37,22 +37,20 @@ func (tc *LastSeenCache) Done() {
 func (tc *LastSeenCache) Add(s string) bool {
 	tc.lk.Lock()
-	defer tc.lk.Unlock()
 	_, ok := tc.m[s]
 	tc.m[s] = time.Now().Add(tc.ttl)
+	tc.lk.Unlock()
 	return !ok
 }
 func (tc *LastSeenCache) Has(s string) bool {
 	tc.lk.Lock()
-	defer tc.lk.Unlock()
 	_, ok := tc.m[s]
 	if ok {
 		tc.m[s] = time.Now().Add(tc.ttl)
 	}
+	tc.lk.Unlock()
 	return ok
 }

View File

@@ -10,7 +10,6 @@ var backgroundSweepInterval = time.Minute
 func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
 	ticker := time.NewTicker(backgroundSweepInterval)
-	defer ticker.Stop()
 	for {
 		select {
@@ -18,6 +17,7 @@ func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
 			sweep(lk, m, now)
 		case <-ctx.Done():
+			ticker.Stop()
 			return
 		}
 	}
@@ -25,11 +25,12 @@ func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
 func sweep(lk sync.Locker, m map[string]time.Time, now time.Time) {
 	lk.Lock()
-	defer lk.Unlock()
 	for k, expiry := range m {
 		if expiry.Before(now) {
 			delete(m, k)
 		}
 	}
+	lk.Unlock()
 }

View File

@@ -148,7 +148,6 @@ func (t *pubsubTracer) DuplicateMessage(msg *Message) {
 		return
 	}
-	// disable for now
 	// now := time.Now().UnixNano()
 	// evt := &pb.TraceEvent{
 	// 	Type: pb.TraceEvent_DUPLICATE_MESSAGE.Enum(),
@@ -179,19 +178,19 @@ func (t *pubsubTracer) DeliverMessage(msg *Message) {
 		return
 	}
-	now := time.Now().UnixNano()
-	evt := &pb.TraceEvent{
-		Type: pb.TraceEvent_DELIVER_MESSAGE.Enum(),
-		PeerID: []byte(t.pid),
-		Timestamp: &now,
-		DeliverMessage: &pb.TraceEvent_DeliverMessage{
-			MessageID: []byte(t.idGen.ID(msg)),
-			Bitmask: msg.Bitmask,
-			ReceivedFrom: []byte(msg.ReceivedFrom),
-		},
-	}
-	t.tracer.Trace(evt)
+	// now := time.Now().UnixNano()
+	// evt := &pb.TraceEvent{
+	// 	Type: pb.TraceEvent_DELIVER_MESSAGE.Enum(),
+	// 	PeerID: []byte(t.pid),
+	// 	Timestamp: &now,
+	// 	DeliverMessage: &pb.TraceEvent_DeliverMessage{
+	// 		MessageID: []byte(t.idGen.ID(msg)),
+	// 		Bitmask: msg.Bitmask,
+	// 		ReceivedFrom: []byte(msg.ReceivedFrom),
+	// 	},
+	// }
+	// t.tracer.Trace(evt)
 }
 func (t *pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) {
@@ -261,7 +260,6 @@ func (t *pubsubTracer) RecvRPC(rpc *RPC) {
 		return
 	}
-	// disable for now
 	// now := time.Now().UnixNano()
 	// evt := &pb.TraceEvent{
 	// 	Type: pb.TraceEvent_RECV_RPC.Enum(),
@@ -289,7 +287,6 @@ func (t *pubsubTracer) SendRPC(rpc *RPC, p peer.ID) {
 		return
 	}
-	// disable for now
 	// now := time.Now().UnixNano()
 	// evt := &pb.TraceEvent{
 	// 	Type: pb.TraceEvent_SEND_RPC.Enum(),
@@ -317,7 +314,6 @@ func (t *pubsubTracer) DropRPC(rpc *RPC, p peer.ID) {
 		return
 	}
-	// disable for now
 	// now := time.Now().UnixNano()
 	// evt := &pb.TraceEvent{
 	// 	Type: pb.TraceEvent_DROP_RPC.Enum(),
@@ -345,19 +341,19 @@ func (t *pubsubTracer) UndeliverableMessage(msg *Message) {
 		return
 	}
-	now := time.Now().UnixNano()
-	evt := &pb.TraceEvent{
-		Type: pb.TraceEvent_UNDELIVERABLE_MESSAGE.Enum(),
-		PeerID: []byte(t.pid),
-		Timestamp: &now,
-		UndeliverableMessage: &pb.TraceEvent_UndeliverableMessage{
-			MessageID: []byte(t.idGen.ID(msg)),
-			Bitmask: msg.Bitmask,
-			ReceivedFrom: []byte(msg.ReceivedFrom),
-		},
-	}
-	t.tracer.Trace(evt)
+	// now := time.Now().UnixNano()
+	// evt := &pb.TraceEvent{
+	// 	Type: pb.TraceEvent_UNDELIVERABLE_MESSAGE.Enum(),
+	// 	PeerID: []byte(t.pid),
+	// 	Timestamp: &now,
+	// 	UndeliverableMessage: &pb.TraceEvent_UndeliverableMessage{
+	// 		MessageID: []byte(t.idGen.ID(msg)),
+	// 		Bitmask: msg.Bitmask,
+	// 		ReceivedFrom: []byte(msg.ReceivedFrom),
+	// 	},
+	// }
+	// t.tracer.Trace(evt)
 }
 func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta {

View File

@@ -48,9 +48,9 @@ type basicTracer struct {
 func (t *basicTracer) Trace(evt *pb.TraceEvent) {
 	t.mx.Lock()
-	defer t.mx.Unlock()
 	if t.closed {
+		t.mx.Unlock()
 		return
 	}
@@ -59,6 +59,7 @@ func (t *basicTracer) Trace(evt *pb.TraceEvent) {
 	} else {
 		t.buf = append(t.buf, evt)
 	}
+	t.mx.Unlock()
 	select {
 	case t.ch <- struct{}{}:
@@ -68,11 +69,11 @@ func (t *basicTracer) Trace(evt *pb.TraceEvent) {
 func (t *basicTracer) Close() {
 	t.mx.Lock()
-	defer t.mx.Unlock()
 	if !t.closed {
 		t.closed = true
 		close(t.ch)
 	}
+	t.mx.Unlock()
 }
 // JSONTracer is a tracer that writes events to a file, encoded in ndjson.
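
The Trace change above does more than avoid the defer: it moves the unlock ahead of the non-blocking notify, so the channel send no longer happens inside the critical section. A simplified sketch of that shape (illustrative types, not code from this repository):

package sketch

import "sync"

type tracer struct {
	mx     sync.Mutex
	buf    []int
	ch     chan struct{}
	closed bool
}

func (t *tracer) trace(evt int) {
	t.mx.Lock()
	if t.closed {
		t.mx.Unlock()
		return
	}
	t.buf = append(t.buf, evt)
	t.mx.Unlock() // release before signaling; the send needs no lock

	select {
	case t.ch <- struct{}{}:
	default:
	}
}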

View File

@@ -146,17 +146,18 @@ func (v *validation) AddValidator(req *addValReq) {
 	}
 	v.mx.Lock()
-	defer v.mx.Unlock()
 	bitmask := val.bitmask
 	_, ok := v.bitmaskVals[string(bitmask)]
 	if ok {
+		v.mx.Unlock()
 		req.resp <- fmt.Errorf("duplicate validator for bitmask %s", bitmask)
 		return
 	}
 	v.bitmaskVals[string(bitmask)] = val
+	v.mx.Unlock()
 	req.resp <- nil
 }
@@ -213,15 +214,16 @@ func (v *validation) makeValidator(req *addValReq) (*validatorImpl, error) {
 // RemoveValidator removes an existing validator
 func (v *validation) RemoveValidator(req *rmValReq) {
 	v.mx.Lock()
-	defer v.mx.Unlock()
 	bitmask := req.bitmask
 	_, ok := v.bitmaskVals[string(bitmask)]
 	if ok {
 		delete(v.bitmaskVals, string(bitmask))
+		v.mx.Unlock()
 		req.resp <- nil
 	} else {
+		v.mx.Unlock()
 		req.resp <- fmt.Errorf("no validator for bitmask %s", bitmask)
 	}
 }
@@ -262,7 +264,6 @@ func (v *validation) Push(src peer.ID, msg *Message) bool {
 // getValidators returns all validators that apply to a given message
 func (v *validation) getValidators(msg *Message) []*validatorImpl {
 	v.mx.Lock()
-	defer v.mx.Unlock()
 	var vals []*validatorImpl
 	vals = append(vals, v.defaultVals...)
@@ -271,10 +272,13 @@ func (v *validation) getValidators(msg *Message) []*validatorImpl {
 	val, ok := v.bitmaskVals[string(bitmask)]
 	if !ok {
+		v.mx.Unlock()
 		return vals
 	}
-	return append(vals, val)
+	impls := append(vals, val)
+	v.mx.Unlock()
+	return impls
 }
 // validateWorker is an active goroutine performing inline validation
@@ -413,7 +417,6 @@ func (v *validation) validateBitmask(vals []*validatorImpl, src peer.ID, msg *Me
 	}
 	ctx, cancel := context.WithCancel(v.p.ctx)
-	defer cancel()
 	rch := make(chan ValidationResult, len(vals))
 	rcount := 0
@@ -433,6 +436,7 @@ func (v *validation) validateBitmask(vals []*validatorImpl, src peer.ID, msg *Me
 			rch <- validationThrottled
 		}
 	}
+	cancel()
 	result := ValidationAccept
 loop:
@@ -472,14 +476,10 @@ func (v *validation) validateSingleBitmask(val *validatorImpl, src peer.ID, msg
 func (val *validatorImpl) validateMsg(ctx context.Context, src peer.ID, msg *Message) ValidationResult {
 	start := time.Now()
-	defer func() {
-		log.Debugf("validation done; took %s", time.Since(start))
-	}()
+	var cancel func() = nil
 	if val.validateTimeout > 0 {
-		var cancel func()
 		ctx, cancel = context.WithTimeout(ctx, val.validateTimeout)
-		defer cancel()
 	}
 	r := val.validate(ctx, src, msg)
@@ -489,10 +489,18 @@ func (val *validatorImpl) validateMsg(ctx context.Context, src peer.ID, msg *Mes
 	case ValidationReject:
 		fallthrough
 	case ValidationIgnore:
+		log.Debugf("validation done; took %s", time.Since(start))
+		if cancel != nil {
+			cancel()
+		}
 		return r
 	default:
 		log.Warnf("Unexpected result from validator: %d; ignoring message", r)
+		log.Debugf("validation done; took %s", time.Since(start))
+		if cancel != nil {
+			cancel()
+		}
 		return ValidationIgnore
 	}
 }
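
Sketch of validateMsg's new shape above (hypothetical callback signature, not code from this repository): cancel starts as a nil function, is assigned only when a timeout is attached, and every return path invokes it through a nil guard instead of `defer cancel()`.

package sketch

import (
	"context"
	"time"
)

func runWithOptionalTimeout(ctx context.Context, timeout time.Duration, run func(context.Context) bool) bool {
	var cancel context.CancelFunc // nil unless a timeout is attached
	if timeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, timeout)
	}
	ok := run(ctx)
	if cancel != nil {
		cancel() // guarded explicit cancel, replacing `defer cancel()`
	}
	return ok
}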

View File

@@ -72,11 +72,11 @@ func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Messag
 	// get the nonce and compare again with an exclusive lock before commiting (cf concurrent validation)
 	v.mx.Lock()
-	defer v.mx.Unlock()
 	nonceBytes, err = v.meta.Get(ctx, p)
 	if err != nil {
 		log.Warn("error retrieving peer nonce: %s", err)
+		v.mx.Unlock()
 		return ValidationIgnore
 	}
@@ -85,6 +85,7 @@ func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Messag
 	}
 	if seqno <= nonce {
+		v.mx.Unlock()
 		return ValidationIgnore
 	}
@@ -96,6 +97,6 @@ func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Messag
 	if err != nil {
 		log.Warn("error storing peer nonce: %s", err)
 	}
+	v.mx.Unlock()
 	return ValidationAccept
 }