perf grinding at the extremes

- removed defer/recover/panic where warranted
- found and eliminated race conditions hit under extremely high connection pressure
- resolved a race condition in blossomsub around mesh maintenance (see the locking sketch below)
- incorporated resource manager scaling to work off low/high watermark values (see the hypothetical sketch below)
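The blossomsub fix adds a meshMx sync.RWMutex to BlossomSubRouter and wraps accesses to the mesh map, as the hunks below show. A minimal sketch of that pattern, with simplified types (peerID standing in for peer.ID, and the rest of the router's state omitted):

```go
// Minimal sketch of the locking pattern this commit introduces around the
// BlossomSub mesh map (meshMx in BlossomSubRouter). Types are simplified;
// the real router keys the mesh by bitmask string and stores peer.IDs.
package blossomsketch

import "sync"

type peerID string

type router struct {
	meshMx sync.RWMutex
	mesh   map[string]map[peerID]struct{} // bitmask -> mesh peers
}

// Read path: look up the per-bitmask peer set under the read lock and
// unlock explicitly instead of via defer, keeping the hot path cheap.
func (r *router) meshPeers(bitmask string) (map[peerID]struct{}, bool) {
	r.meshMx.RLock()
	peers, ok := r.mesh[bitmask]
	r.meshMx.RUnlock()
	return peers, ok
}

// Write path: mutations such as peer removal or the heartbeat's mesh
// maintenance hold the write lock for the whole sweep, so readers never
// observe a partially updated map.
func (r *router) removePeer(p peerID) {
	r.meshMx.Lock()
	for _, peers := range r.mesh {
		delete(peers, p)
	}
	r.meshMx.Unlock()
}
```

The same hunks also trade `defer mu.Unlock()` for explicit unlocks on every return path in hot accessors (connection gater intercepts, connection manager tag bookkeeping, the rcmgr trace writer); the per-call cost of defer is small, but it adds up at the call rates those paths see, and the trade-off is that each early return must now release the lock manually.

The resource-manager change is not visible in the hunks on this page, so the following is only a hypothetical illustration of deriving limits from the connection manager's low/high watermarks; the function name, ratios, and constants are invented for the sketch:

```go
// Hypothetical sketch only: the resource-manager hunks are not shown above,
// so these names and ratios illustrate "scale limits off the conn manager's
// low/high watermarks" rather than the actual configuration in this commit.
package blossomsketch

// scaledLimits derives connection and stream budgets from the connection
// manager's watermarks, leaving headroom above the high watermark so the
// resource manager does not reject connections before the conn manager trims.
func scaledLimits(lowWater, highWater int) (maxConns, maxStreams int) {
	maxConns = highWater + highWater/4 // 25% headroom above the trim point
	const streamsPerConn = 16          // assumed per-connection stream budget
	maxStreams = maxConns * streamsPerConn
	if minStreams := lowWater * streamsPerConn; maxStreams < minStreams {
		maxStreams = minStreams
	}
	return maxConns, maxStreams
}
```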
Cassandra Heart 2024-08-21 14:45:25 -05:00
parent da8fcccf0d
commit 6a7cbab864
GPG Key ID: 6352152859385958
57 changed files with 751 additions and 302 deletions

View File

@ -23,9 +23,11 @@ func TestBackoff_Update(t *testing.T) {
b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts) b := newBackoff(ctx, size, cleanupInterval, maxBackoffAttempts)
b.mu.Lock()
if len(b.info) > 0 { if len(b.info) > 0 {
t.Fatal("non-empty info map for backoff") t.Fatal("non-empty info map for backoff")
} }
b.mu.Unlock()
if d, err := b.updateAndGet(id1); d != time.Duration(0) || err != nil { if d, err := b.updateAndGet(id1); d != time.Duration(0) || err != nil {
t.Fatalf("invalid initialization: %v, \t, %s", d, err) t.Fatalf("invalid initialization: %v, \t, %s", d, err)
@ -64,9 +66,11 @@ func TestBackoff_Update(t *testing.T) {
t.Fatalf("invalid backoff result, expected: %v, got: %v", MinBackoffDelay, got) t.Fatalf("invalid backoff result, expected: %v, got: %v", MinBackoffDelay, got)
} }
b.mu.Lock()
// sets last tried of id2 to long ago that it resets back upon next try. // sets last tried of id2 to long ago that it resets back upon next try.
// update attempts on id2 are below threshold, hence peer should never go beyond backoff attempt threshold. // update attempts on id2 are below threshold, hence peer should never go beyond backoff attempt threshold.
b.info[id2].lastTried = time.Now().Add(-TimeToLive) b.info[id2].lastTried = time.Now().Add(-TimeToLive)
b.mu.Unlock()
got, err = b.updateAndGet(id2) got, err = b.updateAndGet(id2)
if err != nil { if err != nil {
t.Fatalf("unexpected error post update: %s", err) t.Fatalf("unexpected error post update: %s", err)
@ -75,10 +79,11 @@ func TestBackoff_Update(t *testing.T) {
t.Fatalf("invalid ttl expiration, expected: %v, got: %v", time.Duration(0), got) t.Fatalf("invalid ttl expiration, expected: %v, got: %v", time.Duration(0), got)
} }
b.mu.Lock()
if len(b.info) != 2 { if len(b.info) != 2 {
t.Fatalf("pre-invalidation attempt, info map size mismatch, expected: %d, got: %d", 2, len(b.info)) t.Fatalf("pre-invalidation attempt, info map size mismatch, expected: %d, got: %d", 2, len(b.info))
} }
b.mu.Unlock()
} }
func TestBackoff_Clean(t *testing.T) { func TestBackoff_Clean(t *testing.T) {
@ -96,12 +101,16 @@ func TestBackoff_Clean(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unexpected error post update: %s", err) t.Fatalf("unexpected error post update: %s", err)
} }
b.mu.Lock()
b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry
b.mu.Unlock()
} }
b.mu.Lock()
if len(b.info) != size { if len(b.info) != size {
t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info)) t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info))
} }
b.mu.Unlock()
// waits for a cleanup loop to kick-in // waits for a cleanup loop to kick-in
time.Sleep(2 * cleanupInterval) time.Sleep(2 * cleanupInterval)
@ -115,8 +124,10 @@ func TestBackoff_Clean(t *testing.T) {
t.Fatalf("invalid backoff result, expected: %v, got: %v", time.Duration(0), got) t.Fatalf("invalid backoff result, expected: %v, got: %v", time.Duration(0), got)
} }
b.mu.Lock()
// except "some-new-peer" every other records must be cleaned up // except "some-new-peer" every other records must be cleaned up
if len(b.info) != 1 { if len(b.info) != 1 {
t.Fatalf("info map size mismatch, expected: %d, got: %d", 1, len(b.info)) t.Fatalf("info map size mismatch, expected: %d, got: %d", 1, len(b.info))
} }
b.mu.Unlock()
} }

View File

@ -8,6 +8,7 @@ import (
"math/rand" "math/rand"
"slices" "slices"
"sort" "sort"
"sync"
"time" "time"
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -451,6 +452,7 @@ type BlossomSubRouter struct {
backoff map[string]map[peer.ID]time.Time // prune backoff backoff map[string]map[peer.ID]time.Time // prune backoff
connect chan connectInfo // px connection requests connect chan connectInfo // px connection requests
cab peerstore.AddrBook cab peerstore.AddrBook
meshMx sync.RWMutex
protos []protocol.ID protos []protocol.ID
feature BlossomSubFeatureTest feature BlossomSubFeatureTest
@ -625,9 +627,11 @@ func (bs *BlossomSubRouter) RemovePeer(p peer.ID) {
log.Debugf("PEERDOWN: Remove disconnected peer %s", p) log.Debugf("PEERDOWN: Remove disconnected peer %s", p)
bs.tracer.RemovePeer(p) bs.tracer.RemovePeer(p)
delete(bs.peers, p) delete(bs.peers, p)
bs.meshMx.Lock()
for _, peers := range bs.mesh { for _, peers := range bs.mesh {
delete(peers, p) delete(peers, p)
} }
bs.meshMx.Unlock()
for _, peers := range bs.fanout { for _, peers := range bs.fanout {
delete(peers, p) delete(peers, p)
} }
@ -651,8 +655,10 @@ func (bs *BlossomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
} }
} }
bs.meshMx.RLock()
// BlossomSub peers // BlossomSub peers
bsPeers = len(bs.mesh[string(bitmask)]) bsPeers = len(bs.mesh[string(bitmask)])
bs.meshMx.RUnlock()
if suggested == 0 { if suggested == 0 {
suggested = bs.params.Dlo suggested = bs.params.Dlo
@ -724,7 +730,9 @@ func (bs *BlossomSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb
iwant := make(map[string]struct{}) iwant := make(map[string]struct{})
for _, ihave := range ctl.GetIhave() { for _, ihave := range ctl.GetIhave() {
bitmask := ihave.GetBitmask() bitmask := ihave.GetBitmask()
bs.meshMx.RLock()
_, ok := bs.mesh[string(bitmask)] _, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
continue continue
} }
@ -830,7 +838,9 @@ func (bs *BlossomSubRouter) handleGraft(p peer.ID, ctl *pb.ControlMessage) []*pb
continue continue
} }
bs.meshMx.RLock()
peers, ok := bs.mesh[string(bitmask)] peers, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
// don't do PX when there is an unknown bitmask to avoid leaking our peers // don't do PX when there is an unknown bitmask to avoid leaking our peers
doPX = false doPX = false
@ -919,7 +929,9 @@ func (bs *BlossomSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) {
for _, prune := range ctl.GetPrune() { for _, prune := range ctl.GetPrune() {
bitmask := prune.GetBitmask() bitmask := prune.GetBitmask()
bs.meshMx.RLock()
peers, ok := bs.mesh[string(bitmask)] peers, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
continue continue
} }
@ -1101,7 +1113,9 @@ func (bs *BlossomSubRouter) Publish(msg *Message) {
} }
// BlossomSub peers // BlossomSub peers
bs.meshMx.RLock()
gmap, ok := bs.mesh[string(bitmask)] gmap, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
// we are not in the mesh for bitmask, use fanout peers // we are not in the mesh for bitmask, use fanout peers
gmap, ok = bs.fanout[string(bitmask)] gmap, ok = bs.fanout[string(bitmask)]
@ -1137,7 +1151,9 @@ func (bs *BlossomSubRouter) Publish(msg *Message) {
} }
func (bs *BlossomSubRouter) Join(bitmask []byte) { func (bs *BlossomSubRouter) Join(bitmask []byte) {
bs.meshMx.RLock()
gmap, ok := bs.mesh[string(bitmask)] gmap, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if ok { if ok {
return return
} }
@ -1172,7 +1188,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
} }
} }
bs.meshMx.Lock()
bs.mesh[string(bitmask)] = gmap bs.mesh[string(bitmask)] = gmap
bs.meshMx.Unlock()
delete(bs.fanout, string(bitmask)) delete(bs.fanout, string(bitmask))
delete(bs.lastpub, string(bitmask)) delete(bs.lastpub, string(bitmask))
} else { } else {
@ -1184,7 +1202,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
return !direct && !doBackOff && bs.score.Score(p) >= 0 return !direct && !doBackOff && bs.score.Score(p) >= 0
}) })
gmap = peerListToMap(peers) gmap = peerListToMap(peers)
bs.meshMx.Lock()
bs.mesh[string(bitmask)] = gmap bs.mesh[string(bitmask)] = gmap
bs.meshMx.Unlock()
} }
for p := range gmap { for p := range gmap {
@ -1195,7 +1215,9 @@ func (bs *BlossomSubRouter) Join(bitmask []byte) {
} }
func (bs *BlossomSubRouter) Leave(bitmask []byte) { func (bs *BlossomSubRouter) Leave(bitmask []byte) {
bs.meshMx.RLock()
gmap, ok := bs.mesh[string(bitmask)] gmap, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
return return
} }
@ -1203,7 +1225,9 @@ func (bs *BlossomSubRouter) Leave(bitmask []byte) {
log.Debugf("LEAVE %s", bitmask) log.Debugf("LEAVE %s", bitmask)
bs.tracer.Leave(bitmask) bs.tracer.Leave(bitmask)
bs.meshMx.Lock()
delete(bs.mesh, string(bitmask)) delete(bs.mesh, string(bitmask))
bs.meshMx.Unlock()
for p := range gmap { for p := range gmap {
log.Debugf("LEAVE: Remove mesh link to %s in %s", p, bitmask) log.Debugf("LEAVE: Remove mesh link to %s in %s", p, bitmask)
@ -1252,7 +1276,9 @@ func (bs *BlossomSubRouter) sendRPC(p peer.ID, out *RPC) {
delete(bs.gossip, p) delete(bs.gossip, p)
} }
bs.p.peersMx.RLock()
mch, ok := bs.p.peers[p] mch, ok := bs.p.peers[p]
bs.p.peersMx.RUnlock()
if !ok { if !ok {
return return
} }
@ -1485,6 +1511,7 @@ func (bs *BlossomSubRouter) heartbeat() {
} }
// maintain the mesh for bitmasks we have joined // maintain the mesh for bitmasks we have joined
bs.meshMx.Lock()
for bitmask, peers := range bs.mesh { for bitmask, peers := range bs.mesh {
bitmask := []byte(bitmask) bitmask := []byte(bitmask)
prunePeer := func(p peer.ID) { prunePeer := func(p peer.ID) {
@ -1667,6 +1694,7 @@ func (bs *BlossomSubRouter) heartbeat() {
} }
} }
} }
bs.meshMx.Unlock()
// expire fanout for bitmasks we haven't published to in a while // expire fanout for bitmasks we haven't published to in a while
now := time.Now().UnixNano() now := time.Now().UnixNano()
@ -1921,7 +1949,9 @@ func (bs *BlossomSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.Contro
for _, graft := range ctl.GetGraft() { for _, graft := range ctl.GetGraft() {
bitmask := graft.GetBitmask() bitmask := graft.GetBitmask()
bs.meshMx.RLock()
peers, ok := bs.mesh[string(bitmask)] peers, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
continue continue
} }
@ -1933,7 +1963,9 @@ func (bs *BlossomSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.Contro
for _, prune := range ctl.GetPrune() { for _, prune := range ctl.GetPrune() {
bitmask := prune.GetBitmask() bitmask := prune.GetBitmask()
bs.meshMx.RLock()
peers, ok := bs.mesh[string(bitmask)] peers, ok := bs.mesh[string(bitmask)]
bs.meshMx.RUnlock()
if !ok { if !ok {
toprune = append(toprune, prune) toprune = append(toprune, prune)
continue continue

View File

@ -65,7 +65,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
data := make([]byte, 16) data := make([]byte, 16)
rand.Read(data) rand.Read(data)
if err = bitmasks[0].Publish(ctx, bitmasks[0].bitmask, data); err != nil { if err := bitmasks[0].Publish(ctx, bitmasks[0].bitmask, data); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -676,6 +676,7 @@ func TestBlossomSubAttackInvalidMessageSpam(t *testing.T) {
ps, err := NewBlossomSub(ctx, legit, ps, err := NewBlossomSub(ctx, legit,
WithEventTracer(tracer), WithEventTracer(tracer),
WithPeerScore(params, thresholds), WithPeerScore(params, thresholds),
WithMessageSignaturePolicy(StrictSign),
) )
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -2257,7 +2257,9 @@ func TestBlossomSubJoinBitmask(t *testing.T) {
time.Sleep(time.Second) time.Sleep(time.Second)
router0.meshMx.RLock()
meshMap := router0.mesh[string([]byte{0x00, 0x00, 0x80, 0x00})] meshMap := router0.mesh[string([]byte{0x00, 0x00, 0x80, 0x00})]
router0.meshMx.RUnlock()
if len(meshMap) != 1 { if len(meshMap) != 1 {
t.Fatalf("Unexpect peer included in the mesh") t.Fatalf("Unexpect peer included in the mesh")
} }
@ -2820,14 +2822,14 @@ func TestBloomRouting(t *testing.T) {
// Normally the expectation is that any subscription will do when using a bloom bitmask // Normally the expectation is that any subscription will do when using a bloom bitmask
// But we need to verify one gets it. // But we need to verify one gets it.
g := sync.WaitGroup{} g := sync.WaitGroup{}
g.Add(len(sub)) g.Add(len(sub) + 1)
errch := make(chan error) errch := make(chan error)
var errs []error var errs []error
for _, s := range sub { for _, s := range sub {
s := s s := s
go func() { go func() {
defer g.Done() defer g.Done()
nctx, _ := context.WithDeadline(ctx, time.Now().Add(10*time.Millisecond)) nctx, _ := context.WithDeadline(ctx, time.Now().Add(100*time.Millisecond))
got, err := s.Next(nctx) got, err := s.Next(nctx)
if err != nil { if err != nil {
errch <- err errch <- err
@ -2842,7 +2844,7 @@ func TestBloomRouting(t *testing.T) {
} }
go func() { go func() {
for i := 0; i < len(sub); i++ { for _ = range sub {
select { select {
case err := <-errch: case err := <-errch:
if err != nil { if err != nil {
@ -2850,6 +2852,7 @@ func TestBloomRouting(t *testing.T) {
} }
} }
} }
g.Done()
}() }()
g.Wait() g.Wait()
if len(errs) == len(sub) { if len(errs) == len(sub) {
@ -2919,6 +2922,7 @@ func TestBloomPropagationOverSubTreeTopology(t *testing.T) {
} }
for _, subs := range chs { for _, subs := range chs {
subs := subs
g := sync.WaitGroup{} g := sync.WaitGroup{}
g.Add(len(subs)) g.Add(len(subs))
nctx, cancel := context.WithCancel(ctx) nctx, cancel := context.WithCancel(ctx)
@ -3141,6 +3145,7 @@ func containsBitmask(bitmask []byte, slice []byte) bool {
func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]*Subscription, msg []byte) { func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]*Subscription, msg []byte) {
for i, subs := range subs { for i, subs := range subs {
subs := subs
g := sync.WaitGroup{} g := sync.WaitGroup{}
g.Add(len(subs)) g.Add(len(subs))
nctx, cancel := context.WithCancel(ctx) nctx, cancel := context.WithCancel(ctx)
@ -3148,7 +3153,7 @@ func assertReceivedBitmaskSubgroup(t *testing.T, ctx context.Context, subs [][]*
for _, s := range subs { for _, s := range subs {
s := s s := s
go func() { go func() {
nctx, _ := context.WithDeadline(nctx, time.Now().Add(10*time.Millisecond)) nctx, _ := context.WithDeadline(nctx, time.Now().Add(100*time.Millisecond))
got, err := s.Next(nctx) got, err := s.Next(nctx)
if err != nil { if err != nil {
g.Done() g.Done()

View File

@ -151,6 +151,7 @@ type PubSub struct {
blacklistPeer chan peer.ID blacklistPeer chan peer.ID
peers map[peer.ID]chan *RPC peers map[peer.ID]chan *RPC
peersMx sync.RWMutex
inboundStreamsMx sync.Mutex inboundStreamsMx sync.Mutex
inboundStreams map[peer.ID]network.Stream inboundStreams map[peer.ID]network.Stream
@ -565,11 +566,13 @@ func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option {
// processLoop handles all inputs arriving on the channels // processLoop handles all inputs arriving on the channels
func (p *PubSub) processLoop(ctx context.Context) { func (p *PubSub) processLoop(ctx context.Context) {
defer func() { defer func() {
p.peersMx.Lock()
// Clean up go routines. // Clean up go routines.
for _, ch := range p.peers { for _, ch := range p.peers {
close(ch) close(ch)
} }
p.peers = nil p.peers = nil
p.peersMx.Unlock()
p.bitmasks = nil p.bitmasks = nil
p.seenMessages.Done() p.seenMessages.Done()
}() }()
@ -582,7 +585,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
case s := <-p.newPeerStream: case s := <-p.newPeerStream:
pid := s.Conn().RemotePeer() pid := s.Conn().RemotePeer()
p.peersMx.RLock()
ch, ok := p.peers[pid] ch, ok := p.peers[pid]
p.peersMx.RUnlock()
if !ok { if !ok {
log.Warn("new stream for unknown peer: ", pid) log.Warn("new stream for unknown peer: ", pid)
s.Reset() s.Reset()
@ -592,7 +597,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
if p.blacklist.Contains(pid) { if p.blacklist.Contains(pid) {
log.Warn("closing stream for blacklisted peer: ", pid) log.Warn("closing stream for blacklisted peer: ", pid)
close(ch) close(ch)
p.peersMx.Lock()
delete(p.peers, pid) delete(p.peers, pid)
p.peersMx.Unlock()
s.Reset() s.Reset()
continue continue
} }
@ -600,7 +607,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
p.rt.AddPeer(pid, s.Protocol()) p.rt.AddPeer(pid, s.Protocol())
case pid := <-p.newPeerError: case pid := <-p.newPeerError:
p.peersMx.Lock()
delete(p.peers, pid) delete(p.peers, pid)
p.peersMx.Unlock()
case <-p.peerDead: case <-p.peerDead:
p.handleDeadPeers() p.handleDeadPeers()
@ -650,10 +659,14 @@ func (p *PubSub) processLoop(ctx context.Context) {
log.Infof("Blacklisting peer %s", pid) log.Infof("Blacklisting peer %s", pid)
p.blacklist.Add(pid) p.blacklist.Add(pid)
p.peersMx.RLock()
ch, ok := p.peers[pid] ch, ok := p.peers[pid]
p.peersMx.RUnlock()
if ok { if ok {
close(ch) close(ch)
p.peersMx.Lock()
delete(p.peers, pid) delete(p.peers, pid)
p.peersMx.Unlock()
for t, tmap := range p.bitmasks { for t, tmap := range p.bitmasks {
if _, ok := tmap[pid]; ok { if _, ok := tmap[pid]; ok {
delete(tmap, pid) delete(tmap, pid)
@ -682,6 +695,7 @@ peerloop:
} }
var peerset []peer.ID var peerset []peer.ID
p.peersMx.RLock()
for p := range p.peers { for p := range p.peers {
_, ok := tmap[p] _, ok := tmap[p]
if !ok { if !ok {
@ -689,6 +703,7 @@ peerloop:
} }
peerset = append(peerset, p) peerset = append(peerset, p)
} }
p.peersMx.RUnlock()
if len(peers) == 0 { if len(peers) == 0 {
peers = peerset peers = peerset
@ -728,10 +743,13 @@ func (p *PubSub) handlePendingPeers() {
continue continue
} }
p.peersMx.RLock()
if _, ok := p.peers[pid]; ok { if _, ok := p.peers[pid]; ok {
p.peersMx.RUnlock()
log.Debug("already have connection to peer: ", pid) log.Debug("already have connection to peer: ", pid)
continue continue
} }
p.peersMx.RUnlock()
if p.blacklist.Contains(pid) { if p.blacklist.Contains(pid) {
log.Warn("ignoring connection from blacklisted peer: ", pid) log.Warn("ignoring connection from blacklisted peer: ", pid)
@ -741,7 +759,9 @@ func (p *PubSub) handlePendingPeers() {
messages := make(chan *RPC, p.peerOutboundQueueSize) messages := make(chan *RPC, p.peerOutboundQueueSize)
messages <- p.getHelloPacket() messages <- p.getHelloPacket()
go p.handleNewPeer(p.ctx, pid, messages) go p.handleNewPeer(p.ctx, pid, messages)
p.peersMx.Lock()
p.peers[pid] = messages p.peers[pid] = messages
p.peersMx.Unlock()
} }
} }
@ -758,13 +778,17 @@ func (p *PubSub) handleDeadPeers() {
p.peerDeadPrioLk.Unlock() p.peerDeadPrioLk.Unlock()
for pid := range deadPeers { for pid := range deadPeers {
p.peersMx.RLock()
ch, ok := p.peers[pid] ch, ok := p.peers[pid]
p.peersMx.RUnlock()
if !ok { if !ok {
continue continue
} }
close(ch) close(ch)
p.peersMx.Lock()
delete(p.peers, pid) delete(p.peers, pid)
p.peersMx.Unlock()
for t, tmap := range p.bitmasks { for t, tmap := range p.bitmasks {
if _, ok := tmap[pid]; ok { if _, ok := tmap[pid]; ok {
@ -787,7 +811,9 @@ func (p *PubSub) handleDeadPeers() {
log.Debugf("peer declared dead but still connected; respawning writer: %s", pid) log.Debugf("peer declared dead but still connected; respawning writer: %s", pid)
messages := make(chan *RPC, p.peerOutboundQueueSize) messages := make(chan *RPC, p.peerOutboundQueueSize)
messages <- p.getHelloPacket() messages <- p.getHelloPacket()
p.peersMx.Lock()
p.peers[pid] = messages p.peers[pid] = messages
p.peersMx.Unlock()
go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages) go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages)
} }
} }
@ -951,6 +977,7 @@ func (p *PubSub) announce(bitmask []byte, sub bool) {
} }
out := rpcWithSubs(subopt) out := rpcWithSubs(subopt)
p.peersMx.RLock()
for pid, peer := range p.peers { for pid, peer := range p.peers {
select { select {
case peer <- out: case peer <- out:
@ -961,6 +988,7 @@ func (p *PubSub) announce(bitmask []byte, sub bool) {
go p.announceRetry(pid, bitmask, sub) go p.announceRetry(pid, bitmask, sub)
} }
} }
p.peersMx.RUnlock()
} }
func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) { func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) {
@ -984,7 +1012,9 @@ func (p *PubSub) announceRetry(pid peer.ID, bitmask []byte, sub bool) {
} }
func (p *PubSub) doAnnounceRetry(pid peer.ID, bitmask []byte, sub bool) { func (p *PubSub) doAnnounceRetry(pid peer.ID, bitmask []byte, sub bool) {
p.peersMx.RLock()
peer, ok := p.peers[pid] peer, ok := p.peers[pid]
p.peersMx.RUnlock()
if !ok { if !ok {
return return
} }

View File

@ -198,10 +198,10 @@ func (as *AmbientAutoNAT) background() {
// probe finished. // probe finished.
case err, ok := <-as.dialResponses: case err, ok := <-as.dialResponses:
if !ok { if !ok {
close(as.backgroundRunning)
as.subscriber.Close()
as.emitReachabilityChanged.Close()
timer.Stop() timer.Stop()
as.emitReachabilityChanged.Close()
as.subscriber.Close()
close(as.backgroundRunning)
return return
} }
if IsDialRefused(err) { if IsDialRefused(err) {
@ -215,10 +215,10 @@ func (as *AmbientAutoNAT) background() {
timerRunning = false timerRunning = false
retryProbe = false retryProbe = false
case <-as.ctx.Done(): case <-as.ctx.Done():
close(as.backgroundRunning)
as.subscriber.Close()
as.emitReachabilityChanged.Close()
timer.Stop() timer.Stop()
as.emitReachabilityChanged.Close()
as.subscriber.Close()
close(as.backgroundRunning)
return return
} }

View File

@ -563,8 +563,8 @@ func (h *BasicHost) background() {
case <-ticker.C: case <-ticker.C:
case <-h.addrChangeChan: case <-h.addrChangeChan:
case <-h.ctx.Done(): case <-h.ctx.Done():
h.refCount.Done()
ticker.Stop() ticker.Stop()
h.refCount.Done()
return return
} }
} }

View File

@ -177,9 +177,9 @@ func NewStatsTraceReporter() (StatsTraceReporter, error) {
func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) { func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
r.consumeEventWithLabelSlice(evt, tags) r.consumeEventWithLabelSlice(evt, tags)
metricshelper.PutStringSlice(tags)
} }
// Separate func so that we can test that this function does not allocate. The syncPool may allocate. // Separate func so that we can test that this function does not allocate. The syncPool may allocate.

View File

@ -212,9 +212,9 @@ type TraceEvt struct {
func (t *trace) push(evt TraceEvt) { func (t *trace) push(evt TraceEvt) {
t.mx.Lock() t.mx.Lock()
defer t.mx.Unlock()
if t.done { if t.done {
t.mx.Unlock()
return return
} }
evt.Time = time.Now().Format(time.RFC3339Nano) evt.Time = time.Now().Format(time.RFC3339Nano)
@ -229,19 +229,13 @@ func (t *trace) push(evt TraceEvt) {
if t.path != "" { if t.path != "" {
t.pendingWrites = append(t.pendingWrites, evt) t.pendingWrites = append(t.pendingWrites, evt)
} }
t.mx.Unlock()
} }
func (t *trace) backgroundWriter(out io.WriteCloser) { func (t *trace) backgroundWriter(out io.WriteCloser) {
defer t.wg.Done()
defer out.Close()
gzOut := gzip.NewWriter(out) gzOut := gzip.NewWriter(out)
defer gzOut.Close()
jsonOut := json.NewEncoder(gzOut) jsonOut := json.NewEncoder(gzOut)
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(time.Second)
defer ticker.Stop()
var pend []interface{} var pend []interface{}
@ -267,6 +261,11 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
t.mx.Lock() t.mx.Lock()
t.done = true t.done = true
t.mx.Unlock() t.mx.Unlock()
ticker.Stop()
gzOut.Close()
out.Close()
t.wg.Done()
return return
} }
@ -275,6 +274,11 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
t.mx.Lock() t.mx.Lock()
t.done = true t.done = true
t.mx.Unlock() t.mx.Unlock()
ticker.Stop()
gzOut.Close()
out.Close()
t.wg.Done()
return return
} }
@ -282,11 +286,19 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
getEvents() getEvents()
if len(pend) == 0 { if len(pend) == 0 {
ticker.Stop()
gzOut.Close()
out.Close()
t.wg.Done()
return return
} }
if err := t.writeEvents(pend, jsonOut); err != nil { if err := t.writeEvents(pend, jsonOut); err != nil {
log.Warnf("error writing rcmgr trace: %s", err) log.Warnf("error writing rcmgr trace: %s", err)
ticker.Stop()
gzOut.Close()
out.Close()
t.wg.Done()
return return
} }
@ -294,6 +306,10 @@ func (t *trace) backgroundWriter(out io.WriteCloser) {
log.Warnf("error flushing rcmgr trace: %s", err) log.Warnf("error flushing rcmgr trace: %s", err)
} }
ticker.Stop()
gzOut.Close()
out.Close()
t.wg.Done()
return return
} }
} }

View File

@ -306,13 +306,11 @@ func (h *Host) Serve() error {
h.httpTransportInit() h.httpTransportInit()
closedWaitingForListeners := false closedWaitingForListeners := false
defer func() {
if len(h.ListenAddrs) == 0 && h.StreamHost == nil {
if !closedWaitingForListeners { if !closedWaitingForListeners {
close(h.httpTransport.waitingForListeners) close(h.httpTransport.waitingForListeners)
} }
}()
if len(h.ListenAddrs) == 0 && h.StreamHost == nil {
return ErrNoListeners return ErrNoListeners
} }
@ -329,6 +327,9 @@ func (h *Host) Serve() error {
if h.StreamHost != nil { if h.StreamHost != nil {
listener, err := streamHostListen(h.StreamHost) listener, err := streamHostListen(h.StreamHost)
if err != nil { if err != nil {
if !closedWaitingForListeners {
close(h.httpTransport.waitingForListeners)
}
return err return err
} }
h.httpTransport.listeners = append(h.httpTransport.listeners, listener) h.httpTransport.listeners = append(h.httpTransport.listeners, listener)
@ -348,6 +349,9 @@ func (h *Host) Serve() error {
err := h.setupListeners(errCh) err := h.setupListeners(errCh)
if err != nil { if err != nil {
closeAllListeners() closeAllListeners()
if !closedWaitingForListeners {
close(h.httpTransport.waitingForListeners)
}
return err return err
} }
@ -356,6 +360,9 @@ func (h *Host) Serve() error {
if len(h.httpTransport.listeners) == 0 || len(h.httpTransport.listenAddrs) == 0 { if len(h.httpTransport.listeners) == 0 || len(h.httpTransport.listenAddrs) == 0 {
closeAllListeners() closeAllListeners()
if !closedWaitingForListeners {
close(h.httpTransport.waitingForListeners)
}
return ErrNoListeners return ErrNoListeners
} }
@ -372,7 +379,9 @@ func (h *Host) Serve() error {
<-errCh <-errCh
} }
close(errCh) close(errCh)
if !closedWaitingForListeners {
close(h.httpTransport.waitingForListeners)
}
return err return err
} }
@ -437,8 +446,9 @@ func (s *streamReadCloser) Close() error {
func (rt *streamRoundTripper) GetPeerMetadata() (PeerMeta, error) { func (rt *streamRoundTripper) GetPeerMetadata() (PeerMeta, error) {
ctx := context.Background() ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
defer cancel() peerMeta, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
return rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server) cancel()
return peerMeta, err
} }
// RoundTrip implements http.RoundTripper. // RoundTrip implements http.RoundTripper.
@ -460,11 +470,11 @@ func (rt *streamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error)
r.Header.Add("connection", "close") r.Header.Add("connection", "close")
go func() { go func() {
defer s.CloseWrite()
r.Write(s) r.Write(s)
if r.Body != nil { if r.Body != nil {
r.Body.Close() r.Body.Close()
} }
s.CloseWrite()
}() }()
if deadline, ok := r.Context().Deadline(); ok { if deadline, ok := r.Context().Deadline(); ok {
@ -511,12 +521,13 @@ func (rt *roundTripperForSpecificServer) GetPeerMetadata() (PeerMeta, error) {
ctx := context.Background() ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
defer cancel()
wk, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server) wk, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
if err == nil { if err == nil {
rt.cachedProtos = wk rt.cachedProtos = wk
cancel()
return wk, nil return wk, nil
} }
cancel()
return wk, err return wk, err
} }
@ -579,14 +590,15 @@ func (rt *namespacedRoundTripper) RoundTrip(r *http.Request) (*http.Response, er
func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.ID, server peer.ID) (*namespacedRoundTripper, error) { func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.ID, server peer.ID) (*namespacedRoundTripper, error) {
ctx := context.Background() ctx := context.Background()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
defer cancel()
protos, err := h.getAndStorePeerMetadata(ctx, roundtripper, server) protos, err := h.getAndStorePeerMetadata(ctx, roundtripper, server)
if err != nil { if err != nil {
cancel()
return &namespacedRoundTripper{}, err return &namespacedRoundTripper{}, err
} }
v, ok := protos[p] v, ok := protos[p]
if !ok { if !ok {
cancel()
return &namespacedRoundTripper{}, fmt.Errorf("no protocol %s for server %s", p, server) return &namespacedRoundTripper{}, fmt.Errorf("no protocol %s for server %s", p, server)
} }
@ -598,9 +610,11 @@ func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.
u, err := url.Parse(path) u, err := url.Parse(path)
if err != nil { if err != nil {
cancel()
return &namespacedRoundTripper{}, fmt.Errorf("invalid path %s for protocol %s for server %s", v.Path, p, server) return &namespacedRoundTripper{}, fmt.Errorf("invalid path %s for protocol %s for server %s", v.Path, p, server)
} }
cancel()
return &namespacedRoundTripper{ return &namespacedRoundTripper{
RoundTripper: roundtripper, RoundTripper: roundtripper,
protocolPrefix: u.Path, protocolPrefix: u.Path,
@ -866,9 +880,9 @@ func requestPeerMeta(ctx context.Context, roundtripper http.RoundTripper, wellKn
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
} }
@ -878,9 +892,11 @@ func requestPeerMeta(ctx context.Context, roundtripper http.RoundTripper, wellKn
N: peerMetadataLimit, N: peerMetadataLimit,
}).Decode(&meta) }).Decode(&meta)
if err != nil { if err != nil {
resp.Body.Close()
return nil, err return nil, err
} }
resp.Body.Close()
return meta, nil return meta, nil
} }

View File

@ -48,20 +48,23 @@ func SendPing(client http.Client) error {
if err != nil { if err != nil {
return err return err
} }
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return fmt.Errorf("unexpected status code: %d", resp.StatusCode) return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
} }
rBody := [pingSize]byte{} rBody := [pingSize]byte{}
_, err = io.ReadFull(resp.Body, rBody[:]) _, err = io.ReadFull(resp.Body, rBody[:])
if err != nil { if err != nil {
resp.Body.Close()
return err return err
} }
if !bytes.Equal(body[:], rBody[:]) { if !bytes.Equal(body[:], rBody[:]) {
resp.Body.Close()
return errors.New("ping body mismatch") return errors.New("ping body mismatch")
} }
resp.Body.Close()
return nil return nil
} }

View File

@ -133,9 +133,8 @@ func (cg *BasicConnectionGater) BlockPeer(p peer.ID) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
cg.blockedPeers[p] = struct{}{} cg.blockedPeers[p] = struct{}{}
cg.Unlock()
return nil return nil
} }
@ -150,23 +149,21 @@ func (cg *BasicConnectionGater) UnblockPeer(p peer.ID) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
delete(cg.blockedPeers, p) delete(cg.blockedPeers, p)
cg.Unlock()
return nil return nil
} }
// ListBlockedPeers return a list of blocked peers // ListBlockedPeers return a list of blocked peers
func (cg *BasicConnectionGater) ListBlockedPeers() []peer.ID { func (cg *BasicConnectionGater) ListBlockedPeers() []peer.ID {
cg.RLock() cg.RLock()
defer cg.RUnlock()
result := make([]peer.ID, 0, len(cg.blockedPeers)) result := make([]peer.ID, 0, len(cg.blockedPeers))
for p := range cg.blockedPeers { for p := range cg.blockedPeers {
result = append(result, p) result = append(result, p)
} }
cg.RUnlock()
return result return result
} }
@ -182,10 +179,9 @@ func (cg *BasicConnectionGater) BlockAddr(ip net.IP) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
cg.blockedAddrs[ip.String()] = struct{}{} cg.blockedAddrs[ip.String()] = struct{}{}
cg.Unlock()
return nil return nil
} }
@ -200,24 +196,22 @@ func (cg *BasicConnectionGater) UnblockAddr(ip net.IP) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
delete(cg.blockedAddrs, ip.String()) delete(cg.blockedAddrs, ip.String())
cg.Unlock()
return nil return nil
} }
// ListBlockedAddrs return a list of blocked IP addresses // ListBlockedAddrs return a list of blocked IP addresses
func (cg *BasicConnectionGater) ListBlockedAddrs() []net.IP { func (cg *BasicConnectionGater) ListBlockedAddrs() []net.IP {
cg.RLock() cg.RLock()
defer cg.RUnlock()
result := make([]net.IP, 0, len(cg.blockedAddrs)) result := make([]net.IP, 0, len(cg.blockedAddrs))
for ipStr := range cg.blockedAddrs { for ipStr := range cg.blockedAddrs {
ip := net.ParseIP(ipStr) ip := net.ParseIP(ipStr)
result = append(result, ip) result = append(result, ip)
} }
cg.RUnlock()
return result return result
} }
@ -233,10 +227,9 @@ func (cg *BasicConnectionGater) BlockSubnet(ipnet *net.IPNet) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
cg.blockedSubnets[ipnet.String()] = ipnet cg.blockedSubnets[ipnet.String()] = ipnet
cg.Unlock()
return nil return nil
} }
@ -251,23 +244,21 @@ func (cg *BasicConnectionGater) UnblockSubnet(ipnet *net.IPNet) error {
} }
cg.Lock() cg.Lock()
defer cg.Unlock()
delete(cg.blockedSubnets, ipnet.String()) delete(cg.blockedSubnets, ipnet.String())
cg.Unlock()
return nil return nil
} }
// ListBlockedSubnets return a list of blocked IP subnets // ListBlockedSubnets return a list of blocked IP subnets
func (cg *BasicConnectionGater) ListBlockedSubnets() []*net.IPNet { func (cg *BasicConnectionGater) ListBlockedSubnets() []*net.IPNet {
cg.RLock() cg.RLock()
defer cg.RUnlock()
result := make([]*net.IPNet, 0, len(cg.blockedSubnets)) result := make([]*net.IPNet, 0, len(cg.blockedSubnets))
for _, ipnet := range cg.blockedSubnets { for _, ipnet := range cg.blockedSubnets {
result = append(result, ipnet) result = append(result, ipnet)
} }
cg.RUnlock()
return result return result
} }
@ -276,60 +267,66 @@ var _ connmgr.ConnectionGater = (*BasicConnectionGater)(nil)
func (cg *BasicConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) { func (cg *BasicConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
cg.RLock() cg.RLock()
defer cg.RUnlock()
_, block := cg.blockedPeers[p] _, block := cg.blockedPeers[p]
cg.RUnlock()
return !block return !block
} }
func (cg *BasicConnectionGater) InterceptAddrDial(p peer.ID, a ma.Multiaddr) (allow bool) { func (cg *BasicConnectionGater) InterceptAddrDial(p peer.ID, a ma.Multiaddr) (allow bool) {
// we have already filtered blocked peers in InterceptPeerDial, so we just check the IP // we have already filtered blocked peers in InterceptPeerDial, so we just check the IP
cg.RLock() cg.RLock()
defer cg.RUnlock()
ip, err := manet.ToIP(a) ip, err := manet.ToIP(a)
if err != nil { if err != nil {
cg.RUnlock()
log.Warnf("error converting multiaddr to IP addr: %s", err) log.Warnf("error converting multiaddr to IP addr: %s", err)
return true return true
} }
_, block := cg.blockedAddrs[ip.String()] _, block := cg.blockedAddrs[ip.String()]
if block { if block {
cg.RUnlock()
return false return false
} }
for _, ipnet := range cg.blockedSubnets { for _, ipnet := range cg.blockedSubnets {
if ipnet.Contains(ip) { if ipnet.Contains(ip) {
cg.RUnlock()
return false return false
} }
} }
cg.RUnlock()
return true return true
} }
func (cg *BasicConnectionGater) InterceptAccept(cma network.ConnMultiaddrs) (allow bool) { func (cg *BasicConnectionGater) InterceptAccept(cma network.ConnMultiaddrs) (allow bool) {
cg.RLock() cg.RLock()
defer cg.RUnlock()
a := cma.RemoteMultiaddr() a := cma.RemoteMultiaddr()
ip, err := manet.ToIP(a) ip, err := manet.ToIP(a)
if err != nil { if err != nil {
cg.RUnlock()
log.Warnf("error converting multiaddr to IP addr: %s", err) log.Warnf("error converting multiaddr to IP addr: %s", err)
return true return true
} }
_, block := cg.blockedAddrs[ip.String()] _, block := cg.blockedAddrs[ip.String()]
if block { if block {
cg.RUnlock()
return false return false
} }
for _, ipnet := range cg.blockedSubnets { for _, ipnet := range cg.blockedSubnets {
if ipnet.Contains(ip) { if ipnet.Contains(ip) {
cg.RUnlock()
return false return false
} }
} }
cg.RUnlock()
return true return true
} }
@ -341,9 +338,9 @@ func (cg *BasicConnectionGater) InterceptSecured(dir network.Direction, p peer.I
// we have already filtered addrs in InterceptAccept, so we just check the peer ID // we have already filtered addrs in InterceptAccept, so we just check the peer ID
cg.RLock() cg.RLock()
defer cg.RUnlock()
_, block := cg.blockedPeers[p] _, block := cg.blockedPeers[p]
cg.RUnlock()
return !block return !block
} }

View File

@ -169,8 +169,6 @@ func (cm *BasicConnMgr) memoryEmergency() {
} }
cm.trimMutex.Lock() cm.trimMutex.Lock()
defer atomic.AddUint64(&cm.trimCount, 1)
defer cm.trimMutex.Unlock()
// Trim connections without paying attention to the silence period. // Trim connections without paying attention to the silence period.
for _, c := range cm.getConnsToCloseEmergency(target) { for _, c := range cm.getConnsToCloseEmergency(target) {
@ -182,6 +180,8 @@ func (cm *BasicConnMgr) memoryEmergency() {
cm.lastTrimMu.Lock() cm.lastTrimMu.Lock()
cm.lastTrim = cm.clock.Now() cm.lastTrim = cm.clock.Now()
cm.lastTrimMu.Unlock() cm.lastTrimMu.Unlock()
atomic.AddUint64(&cm.trimCount, 1)
cm.trimMutex.Unlock()
} }
func (cm *BasicConnMgr) Close() error { func (cm *BasicConnMgr) Close() error {
@ -198,7 +198,6 @@ func (cm *BasicConnMgr) Close() error {
func (cm *BasicConnMgr) Protect(id peer.ID, tag string) { func (cm *BasicConnMgr) Protect(id peer.ID, tag string) {
cm.plk.Lock() cm.plk.Lock()
defer cm.plk.Unlock()
tags, ok := cm.protected[id] tags, ok := cm.protected[id]
if !ok { if !ok {
@ -206,37 +205,42 @@ func (cm *BasicConnMgr) Protect(id peer.ID, tag string) {
cm.protected[id] = tags cm.protected[id] = tags
} }
tags[tag] = struct{}{} tags[tag] = struct{}{}
cm.plk.Unlock()
} }
func (cm *BasicConnMgr) Unprotect(id peer.ID, tag string) (protected bool) { func (cm *BasicConnMgr) Unprotect(id peer.ID, tag string) (protected bool) {
cm.plk.Lock() cm.plk.Lock()
defer cm.plk.Unlock()
tags, ok := cm.protected[id] tags, ok := cm.protected[id]
if !ok { if !ok {
cm.plk.Unlock()
return false return false
} }
if delete(tags, tag); len(tags) == 0 { if delete(tags, tag); len(tags) == 0 {
delete(cm.protected, id) delete(cm.protected, id)
cm.plk.Unlock()
return false return false
} }
cm.plk.Unlock()
return true return true
} }
func (cm *BasicConnMgr) IsProtected(id peer.ID, tag string) (protected bool) { func (cm *BasicConnMgr) IsProtected(id peer.ID, tag string) (protected bool) {
cm.plk.Lock() cm.plk.Lock()
defer cm.plk.Unlock()
tags, ok := cm.protected[id] tags, ok := cm.protected[id]
if !ok { if !ok {
cm.plk.Unlock()
return false return false
} }
if tag == "" { if tag == "" {
cm.plk.Unlock()
return true return true
} }
_, protected = tags[tag] _, protected = tags[tag]
cm.plk.Unlock()
return protected return protected
} }
@ -281,22 +285,30 @@ func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams b
// lock this to protect from concurrent modifications from connect/disconnect events // lock this to protect from concurrent modifications from connect/disconnect events
leftSegment := segments.get(left.id) leftSegment := segments.get(left.id)
leftSegment.Lock() leftSegment.Lock()
defer leftSegment.Unlock()
rightSegment := segments.get(right.id) rightSegment := segments.get(right.id)
rsLocked := false
if leftSegment != rightSegment { if leftSegment != rightSegment {
// These two peers are not in the same segment, lets get the lock // These two peers are not in the same segment, lets get the lock
rightSegment.Lock() rightSegment.Lock()
defer rightSegment.Unlock() rsLocked = true
} }
segments.bucketsMu.Unlock() segments.bucketsMu.Unlock()
// temporary peers are preferred for pruning. // temporary peers are preferred for pruning.
if left.temp != right.temp { if left.temp != right.temp {
leftSegment.Unlock()
if rsLocked {
rightSegment.Unlock()
}
return left.temp return left.temp
} }
// otherwise, compare by value. // otherwise, compare by value.
if left.value != right.value { if left.value != right.value {
leftSegment.Unlock()
if rsLocked {
rightSegment.Unlock()
}
return left.value < right.value return left.value < right.value
} }
incomingAndStreams := func(m map[network.Conn]time.Time) (incoming bool, numStreams int) { incomingAndStreams := func(m map[network.Conn]time.Time) (incoming bool, numStreams int) {
@ -313,13 +325,25 @@ func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams b
rightIncoming, rightStreams := incomingAndStreams(right.conns) rightIncoming, rightStreams := incomingAndStreams(right.conns)
// prefer closing inactive connections (no streams open) // prefer closing inactive connections (no streams open)
if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) { if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) {
leftSegment.Unlock()
if rsLocked {
rightSegment.Unlock()
}
return leftStreams < rightStreams return leftStreams < rightStreams
} }
// incoming connections are preferred for pruning // incoming connections are preferred for pruning
if leftIncoming != rightIncoming { if leftIncoming != rightIncoming {
leftSegment.Unlock()
if rsLocked {
rightSegment.Unlock()
}
return leftIncoming return leftIncoming
} }
leftSegment.Unlock()
if rsLocked {
rightSegment.Unlock()
}
if sortByMoreStreams { if sortByMoreStreams {
// prune connections with a higher number of streams first // prune connections with a higher number of streams first
return rightStreams < leftStreams return rightStreams < leftStreams
@ -345,15 +369,12 @@ func (cm *BasicConnMgr) TrimOpenConns(_ context.Context) {
} }
func (cm *BasicConnMgr) background() { func (cm *BasicConnMgr) background() {
defer cm.refCount.Done()
interval := cm.cfg.gracePeriod / 2 interval := cm.cfg.gracePeriod / 2
if cm.cfg.silencePeriod != 0 { if cm.cfg.silencePeriod != 0 {
interval = cm.cfg.silencePeriod interval = cm.cfg.silencePeriod
} }
ticker := cm.clock.Ticker(interval) ticker := cm.clock.Ticker(interval)
defer ticker.Stop()
for { for {
select { select {
@ -363,6 +384,8 @@ func (cm *BasicConnMgr) background() {
continue continue
} }
case <-cm.ctx.Done(): case <-cm.ctx.Done():
cm.refCount.Done()
ticker.Stop()
return return
} }
cm.trim() cm.trim()
@ -373,7 +396,6 @@ func (cm *BasicConnMgr) doTrim() {
// This logic is mimicking the implementation of sync.Once in the standard library. // This logic is mimicking the implementation of sync.Once in the standard library.
count := atomic.LoadUint64(&cm.trimCount) count := atomic.LoadUint64(&cm.trimCount)
cm.trimMutex.Lock() cm.trimMutex.Lock()
defer cm.trimMutex.Unlock()
if count == atomic.LoadUint64(&cm.trimCount) { if count == atomic.LoadUint64(&cm.trimCount) {
cm.trim() cm.trim()
cm.lastTrimMu.Lock() cm.lastTrimMu.Lock()
@ -381,6 +403,7 @@ func (cm *BasicConnMgr) doTrim() {
cm.lastTrimMu.Unlock() cm.lastTrimMu.Unlock()
atomic.AddUint64(&cm.trimCount, 1) atomic.AddUint64(&cm.trimCount, 1)
} }
cm.trimMutex.Unlock()
} }
// trim starts the trim, if the last trim happened before the configured silence period. // trim starts the trim, if the last trim happened before the configured silence period.
@ -544,10 +567,10 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo { func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
pi, ok := s.peers[p] pi, ok := s.peers[p]
if !ok { if !ok {
s.Unlock()
return nil return nil
} }
@ -567,7 +590,7 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
for c, t := range pi.conns { for c, t := range pi.conns {
out.Conns[c.RemoteMultiaddr().String()] = t out.Conns[c.RemoteMultiaddr().String()] = t
} }
s.Unlock()
return out return out
} }
@ -575,37 +598,37 @@ func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
func (cm *BasicConnMgr) TagPeer(p peer.ID, tag string, val int) { func (cm *BasicConnMgr) TagPeer(p peer.ID, tag string, val int) {
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
pi := s.tagInfoFor(p, cm.clock.Now()) pi := s.tagInfoFor(p, cm.clock.Now())
// Update the total value of the peer. // Update the total value of the peer.
pi.value += val - pi.tags[tag] pi.value += val - pi.tags[tag]
pi.tags[tag] = val pi.tags[tag] = val
s.Unlock()
} }
// UntagPeer is called to disassociate a string and integer from a given peer. // UntagPeer is called to disassociate a string and integer from a given peer.
func (cm *BasicConnMgr) UntagPeer(p peer.ID, tag string) { func (cm *BasicConnMgr) UntagPeer(p peer.ID, tag string) {
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
pi, ok := s.peers[p] pi, ok := s.peers[p]
if !ok { if !ok {
log.Info("tried to remove tag from untracked peer: ", p) log.Info("tried to remove tag from untracked peer: ", p)
s.Unlock()
return return
} }
// Update the total value of the peer. // Update the total value of the peer.
pi.value -= pi.tags[tag] pi.value -= pi.tags[tag]
delete(pi.tags, tag) delete(pi.tags, tag)
s.Unlock()
} }
// UpsertTag is called to insert/update a peer tag // UpsertTag is called to insert/update a peer tag
func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) { func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
pi := s.tagInfoFor(p, cm.clock.Now()) pi := s.tagInfoFor(p, cm.clock.Now())
@ -613,6 +636,7 @@ func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
newval := upsert(oldval) newval := upsert(oldval)
pi.value += newval - oldval pi.value += newval - oldval
pi.tags[tag] = newval pi.tags[tag] = newval
s.Unlock()
} }
// CMInfo holds the configuration for BasicConnMgr, as well as status data. // CMInfo holds the configuration for BasicConnMgr, as well as status data.
@ -670,7 +694,6 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
p := c.RemotePeer() p := c.RemotePeer()
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
id := c.RemotePeer() id := c.RemotePeer()
pinfo, ok := s.peers[id] pinfo, ok := s.peers[id]
@ -694,11 +717,13 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
_, ok = pinfo.conns[c] _, ok = pinfo.conns[c]
if ok { if ok {
log.Error("received connected notification for conn we are already tracking: ", p) log.Error("received connected notification for conn we are already tracking: ", p)
s.Unlock()
return return
} }
pinfo.conns[c] = cm.clock.Now() pinfo.conns[c] = cm.clock.Now()
cm.connCount.Add(1) cm.connCount.Add(1)
s.Unlock()
} }
// Disconnected is called by notifiers to inform that an existing connection has been closed or terminated. // Disconnected is called by notifiers to inform that an existing connection has been closed or terminated.
@ -709,17 +734,18 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
p := c.RemotePeer() p := c.RemotePeer()
s := cm.segments.get(p) s := cm.segments.get(p)
s.Lock() s.Lock()
defer s.Unlock()
cinf, ok := s.peers[p] cinf, ok := s.peers[p]
if !ok { if !ok {
log.Error("received disconnected notification for peer we are not tracking: ", p) log.Error("received disconnected notification for peer we are not tracking: ", p)
s.Unlock()
return return
} }
_, ok = cinf.conns[c] _, ok = cinf.conns[c]
if !ok { if !ok {
log.Error("received disconnected notification for conn we are not tracking: ", p) log.Error("received disconnected notification for conn we are not tracking: ", p)
s.Unlock()
return return
} }
@ -728,6 +754,7 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
delete(s.peers, p) delete(s.peers, p)
} }
cm.connCount.Add(-1) cm.connCount.Add(-1)
s.Unlock()
} }
// Listen is no-op in this implementation. // Listen is no-op in this implementation.

View File

@ -100,9 +100,9 @@ func NewDecayer(cfg *DecayerCfg, mgr *BasicConnMgr) (*decayer, error) {
func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decayFn connmgr.DecayFn, bumpFn connmgr.BumpFn) (connmgr.DecayingTag, error) { func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decayFn connmgr.DecayFn, bumpFn connmgr.BumpFn) (connmgr.DecayingTag, error) {
d.tagsMu.Lock() d.tagsMu.Lock()
defer d.tagsMu.Unlock()
if _, ok := d.knownTags[name]; ok { if _, ok := d.knownTags[name]; ok {
d.tagsMu.Unlock()
return nil, fmt.Errorf("decaying tag with name %s already exists", name) return nil, fmt.Errorf("decaying tag with name %s already exists", name)
} }
@ -128,6 +128,7 @@ func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decay
} }
d.knownTags[name] = tag d.knownTags[name] = tag
d.tagsMu.Unlock()
return tag, nil return tag, nil
} }
@ -150,10 +151,7 @@ func (d *decayer) Close() error {
// 2. Applies score bumps. // 2. Applies score bumps.
// 3. Yields when closed. // 3. Yields when closed.
func (d *decayer) process() { func (d *decayer) process() {
defer close(d.doneCh)
ticker := d.clock.Ticker(d.cfg.Resolution) ticker := d.clock.Ticker(d.cfg.Resolution)
defer ticker.Stop()
var ( var (
bmp bumpCmd bmp bumpCmd
@ -276,6 +274,8 @@ func (d *decayer) process() {
} }
case <-d.closeCh: case <-d.closeCh:
ticker.Stop()
close(d.doneCh)
return return
} }
} }

View File

@ -63,8 +63,8 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
} }
nat.refCount.Add(1) nat.refCount.Add(1)
go func() { go func() {
defer nat.refCount.Done()
nat.background() nat.background()
nat.refCount.Done()
}() }()
return nat, nil return nat, nil
} }
@ -101,15 +101,17 @@ func (nat *NAT) Close() error {
func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) { func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) {
nat.mappingmu.Lock() nat.mappingmu.Lock()
defer nat.mappingmu.Unlock()
if !nat.extAddr.IsValid() { if !nat.extAddr.IsValid() {
nat.mappingmu.Unlock()
return netip.AddrPort{}, false return netip.AddrPort{}, false
} }
extPort, found := nat.mappings[entry{protocol: protocol, port: port}] extPort, found := nat.mappings[entry{protocol: protocol, port: port}]
if !found { if !found {
nat.mappingmu.Unlock()
return netip.AddrPort{}, false return netip.AddrPort{}, false
} }
nat.mappingmu.Unlock()
return netip.AddrPortFrom(nat.extAddr, uint16(extPort)), true return netip.AddrPortFrom(nat.extAddr, uint16(extPort)), true
} }
@ -126,9 +128,9 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
} }
nat.mappingmu.Lock() nat.mappingmu.Lock()
defer nat.mappingmu.Unlock()
if nat.closed { if nat.closed {
nat.mappingmu.Unlock()
return errors.New("closed") return errors.New("closed")
} }
@ -136,6 +138,7 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
// allowing users -- in the optimistic case -- to use results right after. // allowing users -- in the optimistic case -- to use results right after.
extPort := nat.establishMapping(ctx, protocol, port) extPort := nat.establishMapping(ctx, protocol, port)
nat.mappings[entry{protocol: protocol, port: port}] = extPort nat.mappings[entry{protocol: protocol, port: port}] = extPort
nat.mappingmu.Unlock()
return nil return nil
} }
@ -143,17 +146,19 @@ func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error
// It blocks until the NAT has removed the mapping. // It blocks until the NAT has removed the mapping.
func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error { func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
nat.mappingmu.Lock() nat.mappingmu.Lock()
defer nat.mappingmu.Unlock()
switch protocol { switch protocol {
case "tcp", "udp": case "tcp", "udp":
e := entry{protocol: protocol, port: port} e := entry{protocol: protocol, port: port}
if _, ok := nat.mappings[e]; ok { if _, ok := nat.mappings[e]; ok {
delete(nat.mappings, e) delete(nat.mappings, e)
nat.mappingmu.Unlock()
return nat.nat.DeletePortMapping(ctx, protocol, port) return nat.nat.DeletePortMapping(ctx, protocol, port)
} }
nat.mappingmu.Unlock()
return errors.New("unknown mapping") return errors.New("unknown mapping")
default: default:
nat.mappingmu.Unlock()
return fmt.Errorf("invalid protocol: %s", protocol) return fmt.Errorf("invalid protocol: %s", protocol)
} }
} }
@ -166,7 +171,6 @@ func (nat *NAT) background() {
nextAddrUpdate := now.Add(CacheTime) nextAddrUpdate := now.Add(CacheTime)
t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes. t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes.
defer t.Stop()
var in []entry var in []entry
var out []int // port numbers var out []int // port numbers
@ -209,12 +213,13 @@ func (nat *NAT) background() {
case <-nat.ctx.Done(): case <-nat.ctx.Done():
nat.mappingmu.Lock() nat.mappingmu.Lock()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
for e := range nat.mappings { for e := range nat.mappings {
delete(nat.mappings, e) delete(nat.mappings, e)
nat.nat.DeletePortMapping(ctx, e.protocol, e.port) nat.nat.DeletePortMapping(ctx, e.protocol, e.port)
} }
nat.mappingmu.Unlock() nat.mappingmu.Unlock()
t.Stop()
cancel()
return return
} }
} }

View File

@ -60,11 +60,12 @@ func (c *pskConn) Write(in []byte) (int, error) {
c.writeS20 = salsa20.New(c.psk, nonce) c.writeS20 = salsa20.New(c.psk, nonce)
} }
out := pool.Get(len(in)) out := pool.Get(len(in))
defer pool.Put(out)
c.writeS20.XORKeyStream(out, in) // encrypt c.writeS20.XORKeyStream(out, in) // encrypt
return c.Conn.Write(out) // send n, err := c.Conn.Write(out)
pool.Put(out)
return n, err // send
} }
var _ net.Conn = (*pskConn)(nil) var _ net.Conn = (*pskConn)(nil)

View File

@ -25,7 +25,6 @@ func setupPSKConns(ctx context.Context, t *testing.T) (net.Conn, net.Conn) {
func TestPSKSimpelMessges(t *testing.T) { func TestPSKSimpelMessges(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO()) ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
psk1, psk2 := setupPSKConns(ctx, t) psk1, psk2 := setupPSKConns(ctx, t)
msg1 := []byte("hello world") msg1 := []byte("hello world")
@ -53,11 +52,11 @@ func TestPSKSimpelMessges(t *testing.T) {
if !bytes.Equal(msg1, out1) { if !bytes.Equal(msg1, out1) {
t.Fatalf("input and output are not the same") t.Fatalf("input and output are not the same")
} }
cancel()
} }
func TestPSKFragmentation(t *testing.T) { func TestPSKFragmentation(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO()) ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
psk1, psk2 := setupPSKConns(ctx, t) psk1, psk2 := setupPSKConns(ctx, t)
@ -87,4 +86,5 @@ func TestPSKFragmentation(t *testing.T) {
if err := <-wch; err != nil { if err := <-wch; err != nil {
t.Fatal(err) t.Fatal(err)
} }
cancel()
} }

View File

@ -51,12 +51,12 @@ func (n *network) getDialer(network string) *dialer {
n.mu.RUnlock() n.mu.RUnlock()
if d == nil { if d == nil {
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock()
if n.dialer == nil { if n.dialer == nil {
n.dialer = newDialer(n.listeners) n.dialer = newDialer(n.listeners)
} }
d = n.dialer d = n.dialer
n.mu.Unlock()
} }
return d return d
} }

View File

@ -68,7 +68,6 @@ func (t *Transport) Listen(laddr ma.Multiaddr) (manet.Listener, error) {
} }
n.mu.Lock() n.mu.Lock()
defer n.mu.Unlock()
if n.listeners == nil { if n.listeners == nil {
n.listeners = make(map[*listener]struct{}) n.listeners = make(map[*listener]struct{})
@ -76,5 +75,6 @@ func (t *Transport) Listen(laddr ma.Multiaddr) (manet.Listener, error) {
n.listeners[list] = struct{}{} n.listeners[list] = struct{}{}
n.dialer = nil n.dialer = nil
n.mu.Unlock()
return list, nil return list, nil
} }

View File

@ -77,13 +77,13 @@ type blackHoleFilter struct {
// fraction over the last n outcomes is less than the minSuccessFraction of the filter. // fraction over the last n outcomes is less than the minSuccessFraction of the filter.
func (b *blackHoleFilter) RecordResult(success bool) { func (b *blackHoleFilter) RecordResult(success bool) {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock()
if b.state == blackHoleStateBlocked && success { if b.state == blackHoleStateBlocked && success {
// If the call succeeds in a blocked state we reset to allowed. // If the call succeeds in a blocked state we reset to allowed.
// This is better than slowly accumulating values till we cross the minSuccessFraction // This is better than slowly accumulating values till we cross the minSuccessFraction
// threshold since a blackhole is a binary property. // threshold since a blackhole is a binary property.
b.reset() b.reset()
b.mu.Unlock()
return return
} }
@ -101,22 +101,25 @@ func (b *blackHoleFilter) RecordResult(success bool) {
b.updateState() b.updateState()
b.trackMetrics() b.trackMetrics()
b.mu.Unlock()
} }
// HandleRequest returns the result of applying the black hole filter for the request. // HandleRequest returns the result of applying the black hole filter for the request.
func (b *blackHoleFilter) HandleRequest() blackHoleResult { func (b *blackHoleFilter) HandleRequest() blackHoleResult {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock()
b.requests++ b.requests++
b.trackMetrics() b.trackMetrics()
if b.state == blackHoleStateAllowed { if b.state == blackHoleStateAllowed {
b.mu.Unlock()
return blackHoleResultAllowed return blackHoleResultAllowed
} else if b.state == blackHoleStateProbing || b.requests%b.n == 0 { } else if b.state == blackHoleStateProbing || b.requests%b.n == 0 {
b.mu.Unlock()
return blackHoleResultProbing return blackHoleResultProbing
} else { } else {
b.mu.Unlock()
return blackHoleResultBlocked return blackHoleResultBlocked
} }
} }
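
The comment above notes that a single success in the blocked state resets the filter, because a black hole is a binary property. A hypothetical sketch of the same guard-and-unlock-per-return shape (guardedFlag is an invented type, not the project's blackHoleFilter; only sync is assumed):

import "sync"

type guardedFlag struct {
	mu      sync.Mutex
	blocked bool
	fails   int
}

func (g *guardedFlag) Record(success bool) {
	g.mu.Lock()
	if g.blocked && success {
		// Binary property: one success flips the state back immediately
		// instead of waiting for a success fraction to accumulate.
		g.blocked = false
		g.fails = 0
		g.mu.Unlock()
		return
	}
	if !success {
		g.fails++
	}
	g.mu.Unlock()
}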

View File

@ -50,18 +50,19 @@ func newConnectednessEventEmitter(connectedness func(peer.ID) network.Connectedn
func (c *connectednessEventEmitter) AddConn(p peer.ID) { func (c *connectednessEventEmitter) AddConn(p peer.ID) {
c.mx.RLock() c.mx.RLock()
defer c.mx.RUnlock()
if c.ctx.Err() != nil { if c.ctx.Err() != nil {
c.mx.RUnlock()
return return
} }
c.newConns <- p c.newConns <- p
c.mx.RUnlock()
} }
func (c *connectednessEventEmitter) RemoveConn(p peer.ID) { func (c *connectednessEventEmitter) RemoveConn(p peer.ID) {
c.mx.RLock() c.mx.RLock()
defer c.mx.RUnlock()
if c.ctx.Err() != nil { if c.ctx.Err() != nil {
c.mx.RUnlock()
return return
} }
@ -80,6 +81,7 @@ func (c *connectednessEventEmitter) RemoveConn(p peer.ID) {
case c.removeConnNotif <- struct{}{}: case c.removeConnNotif <- struct{}{}:
default: default:
} }
c.mx.RUnlock()
} }
func (c *connectednessEventEmitter) Close() { func (c *connectednessEventEmitter) Close() {
@ -88,7 +90,6 @@ func (c *connectednessEventEmitter) Close() {
} }
func (c *connectednessEventEmitter) runEmitter() { func (c *connectednessEventEmitter) runEmitter() {
defer c.wg.Done()
for { for {
select { select {
case p := <-c.newConns: case p := <-c.newConns:
@ -97,7 +98,6 @@ func (c *connectednessEventEmitter) runEmitter() {
c.sendConnRemovedNotifications() c.sendConnRemovedNotifications()
case <-c.ctx.Done(): case <-c.ctx.Done():
c.mx.Lock() // Wait for all pending AddConn & RemoveConn operations to complete c.mx.Lock() // Wait for all pending AddConn & RemoveConn operations to complete
defer c.mx.Unlock()
for { for {
select { select {
case p := <-c.newConns: case p := <-c.newConns:
@ -105,6 +105,8 @@ func (c *connectednessEventEmitter) runEmitter() {
case <-c.removeConnNotif: case <-c.removeConnNotif:
c.sendConnRemovedNotifications() c.sendConnRemovedNotifications()
default: default:
c.mx.Unlock()
c.wg.Done()
return return
} }
} }

View File

@ -67,7 +67,6 @@ func (ad *activeDial) dial(ctx context.Context) (*Conn, error) {
func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) { func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
ds.mutex.Lock() ds.mutex.Lock()
defer ds.mutex.Unlock()
actd, ok := ds.dials[p] actd, ok := ds.dials[p]
if !ok { if !ok {
@ -84,6 +83,7 @@ func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
} }
// increase ref count before dropping mutex // increase ref count before dropping mutex
actd.refCnt++ actd.refCnt++
ds.mutex.Unlock()
return actd, nil return actd, nil
} }
@ -98,7 +98,6 @@ func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
conn, err := ad.dial(ctx) conn, err := ad.dial(ctx)
ds.mutex.Lock() ds.mutex.Lock()
defer ds.mutex.Unlock()
ad.refCnt-- ad.refCnt--
if ad.refCnt == 0 { if ad.refCnt == 0 {
@ -111,5 +110,6 @@ func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
delete(ds.dials, p) delete(ds.dials, p)
} }
ds.mutex.Unlock()
return conn, err return conn, err
} }
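
The "increase ref count before dropping mutex" comment above is the crux of this refactor: the count must change while the lock is still held so a concurrent release cannot delete the entry in between. A small sketch of that pattern with invented names (refCounted is not a type in this package; only sync is assumed):

import "sync"

type refCounted struct {
	mu   sync.Mutex
	refs map[string]int
}

func (r *refCounted) acquire(key string) {
	r.mu.Lock()
	r.refs[key]++ // bump while still holding the lock
	r.mu.Unlock()
}

func (r *refCounted) release(key string) {
	r.mu.Lock()
	r.refs[key]--
	if r.refs[key] <= 0 {
		delete(r.refs, key) // last user gone: drop the entry
	}
	r.mu.Unlock()
}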

View File

@ -109,8 +109,6 @@ func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dia
// The loop exits when w.reqch is closed. // The loop exits when w.reqch is closed.
func (w *dialWorker) loop() { func (w *dialWorker) loop() {
w.wg.Add(1) w.wg.Add(1)
defer w.wg.Done()
defer w.s.limiter.clearAllPeerDials(w.peer)
// dq is used to pace dials to different addresses of the peer // dq is used to pace dials to different addresses of the peer
dq := newDialQueue() dq := newDialQueue()
@ -120,7 +118,6 @@ func (w *dialWorker) loop() {
startTime := w.cl.Now() startTime := w.cl.Now()
// dialTimer is the dialTimer used to trigger dials // dialTimer is the dialTimer used to trigger dials
dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64)) dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
defer dialTimer.Stop()
timerRunning := true timerRunning := true
// scheduleNextDial updates timer for triggering the next dial // scheduleNextDial updates timer for triggering the next dial
@ -164,6 +161,9 @@ loop:
if w.s.metricsTracer != nil { if w.s.metricsTracer != nil {
w.s.metricsTracer.DialCompleted(w.connected, totalDials) w.s.metricsTracer.DialCompleted(w.connected, totalDials)
} }
w.wg.Done()
w.s.limiter.clearAllPeerDials(w.peer)
dialTimer.Stop()
return return
} }
// We have received a new request. If we do not have a suitable connection, // We have received a new request. If we do not have a suitable connection,

View File

@ -124,12 +124,12 @@ func (dl *dialLimiter) freePeerToken(dj *dialJob) {
func (dl *dialLimiter) finishedDial(dj *dialJob) { func (dl *dialLimiter) finishedDial(dj *dialJob) {
dl.lk.Lock() dl.lk.Lock()
defer dl.lk.Unlock()
if dl.shouldConsumeFd(dj.addr) { if dl.shouldConsumeFd(dj.addr) {
dl.freeFDToken() dl.freeFDToken()
} }
dl.freePeerToken(dj) dl.freePeerToken(dj)
dl.lk.Unlock()
} }
func (dl *dialLimiter) shouldConsumeFd(addr ma.Multiaddr) bool { func (dl *dialLimiter) shouldConsumeFd(addr ma.Multiaddr) bool {
@ -182,33 +182,32 @@ func (dl *dialLimiter) addCheckPeerLimit(dj *dialJob) {
// it will put it on the waitlist for the requested token. // it will put it on the waitlist for the requested token.
func (dl *dialLimiter) AddDialJob(dj *dialJob) { func (dl *dialLimiter) AddDialJob(dj *dialJob) {
dl.lk.Lock() dl.lk.Lock()
defer dl.lk.Unlock()
log.Debugf("[limiter] adding a dial job through limiter: %v", dj.addr) log.Debugf("[limiter] adding a dial job through limiter: %v", dj.addr)
dl.addCheckPeerLimit(dj) dl.addCheckPeerLimit(dj)
dl.lk.Unlock()
} }
func (dl *dialLimiter) clearAllPeerDials(p peer.ID) { func (dl *dialLimiter) clearAllPeerDials(p peer.ID) {
dl.lk.Lock() dl.lk.Lock()
defer dl.lk.Unlock()
delete(dl.waitingOnPeerLimit, p) delete(dl.waitingOnPeerLimit, p)
log.Debugf("[limiter] clearing all peer dials: %v", p) log.Debugf("[limiter] clearing all peer dials: %v", p)
// NB: the waitingOnFd list doesn't need to be cleaned out here, we will // NB: the waitingOnFd list doesn't need to be cleaned out here, we will
// remove them as we encounter them because they are 'cancelled' at this // remove them as we encounter them because they are 'cancelled' at this
// point // point
dl.lk.Unlock()
} }
// executeDial calls the dialFunc, and reports the result through the response // executeDial calls the dialFunc, and reports the result through the response
// channel when finished. Once the response is sent it also releases all tokens // channel when finished. Once the response is sent it also releases all tokens
// it held during the dial. // it held during the dial.
func (dl *dialLimiter) executeDial(j *dialJob) { func (dl *dialLimiter) executeDial(j *dialJob) {
defer dl.finishedDial(j)
if j.cancelled() { if j.cancelled() {
dl.finishedDial(j)
return return
} }
dctx, cancel := context.WithTimeout(j.ctx, j.timeout) dctx, cancel := context.WithTimeout(j.ctx, j.timeout)
defer cancel()
con, err := dl.dialFunc(dctx, j.peer, j.addr, j.resp) con, err := dl.dialFunc(dctx, j.peer, j.addr, j.resp)
kind := transport.UpdateKindDialSuccessful kind := transport.UpdateKindDialSuccessful
@ -222,4 +221,6 @@ func (dl *dialLimiter) executeDial(j *dialJob) {
con.Close() con.Close()
} }
} }
dl.finishedDial(j)
cancel()
} }
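
executeDial above now releases its limiter token and the dial context explicitly on every exit path rather than through defer. A hedged sketch of that shape (dialWithToken, dial and release are illustrative, not this package's API; only context and time are assumed):

import (
	"context"
	"time"
)

func dialWithToken(ctx context.Context, timeout time.Duration,
	dial func(context.Context) error, release func()) error {
	if ctx.Err() != nil {
		release() // cancelled before starting: still hand the token back
		return ctx.Err()
	}
	dctx, cancel := context.WithTimeout(ctx, timeout)
	err := dial(dctx)
	release() // token is freed once the result is known
	cancel()  // and the timeout context is released on the same path
	return err
}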

View File

@ -328,10 +328,10 @@ func (s *Swarm) close() {
if closer, ok := t.(io.Closer); ok { if closer, ok := t.(io.Closer); ok {
wg.Add(1) wg.Add(1)
go func(c io.Closer) { go func(c io.Closer) {
defer wg.Done()
if err := closer.Close(); err != nil { if err := closer.Close(); err != nil {
log.Errorf("error when closing down transport %T: %s", c, err) log.Errorf("error when closing down transport %T: %s", c, err)
} }
wg.Done()
}(closer) }(closer)
} }
} }
@ -526,14 +526,12 @@ func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error)
// apply the DialPeer timeout // apply the DialPeer timeout
ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx)) ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
defer cancel()
// Wait for notification. // Wait for notification.
select { select {
case <-ctx.Done(): case <-ctx.Done():
// Remove ourselves from the notification list // Remove ourselves from the notification list
s.directConnNotifs.Lock() s.directConnNotifs.Lock()
defer s.directConnNotifs.Unlock()
s.directConnNotifs.m[p] = slices.DeleteFunc( s.directConnNotifs.m[p] = slices.DeleteFunc(
s.directConnNotifs.m[p], s.directConnNotifs.m[p],
@ -542,17 +540,22 @@ func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error)
if len(s.directConnNotifs.m[p]) == 0 { if len(s.directConnNotifs.m[p]) == 0 {
delete(s.directConnNotifs.m, p) delete(s.directConnNotifs.m, p)
} }
s.directConnNotifs.Unlock()
cancel()
return nil, ctx.Err() return nil, ctx.Err()
case <-ch: case <-ch:
// We do not need to remove ourselves from the list here as the notifier // We do not need to remove ourselves from the list here as the notifier
// clears the map entry // clears the map entry
c := s.bestConnToPeer(p) c := s.bestConnToPeer(p)
if c == nil { if c == nil {
cancel()
return nil, network.ErrNoConn return nil, network.ErrNoConn
} }
if c.Stat().Limited { if c.Stat().Limited {
cancel()
return nil, network.ErrLimitedConn return nil, network.ErrLimitedConn
} }
cancel()
return c, nil return c, nil
} }
} }
@ -562,12 +565,12 @@ func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn {
// TODO: Consider sorting the connection list best to worst. Currently, // TODO: Consider sorting the connection list best to worst. Currently,
// it's sorted oldest to newest. // it's sorted oldest to newest.
s.conns.RLock() s.conns.RLock()
defer s.conns.RUnlock()
conns := s.conns.m[p] conns := s.conns.m[p]
output := make([]network.Conn, len(conns)) output := make([]network.Conn, len(conns))
for i, c := range conns { for i, c := range conns {
output[i] = c output[i] = c
} }
s.conns.RUnlock()
return output return output
} }
@ -610,7 +613,6 @@ func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
// For now, prefers direct connections over Relayed connections. // For now, prefers direct connections over Relayed connections.
// For tie-breaking, select the newest non-closed connection with the most streams. // For tie-breaking, select the newest non-closed connection with the most streams.
s.conns.RLock() s.conns.RLock()
defer s.conns.RUnlock()
var best *Conn var best *Conn
for _, c := range s.conns.m[p] { for _, c := range s.conns.m[p] {
@ -622,6 +624,7 @@ func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
best = c best = c
} }
} }
s.conns.RUnlock()
return best return best
} }
@ -648,9 +651,9 @@ func isDirectConn(c *Conn) bool {
// network.Connected`. // network.Connected`.
func (s *Swarm) Connectedness(p peer.ID) network.Connectedness { func (s *Swarm) Connectedness(p peer.ID) network.Connectedness {
s.conns.RLock() s.conns.RLock()
defer s.conns.RUnlock() connectedness := s.connectednessUnlocked(p)
s.conns.RUnlock()
return s.connectednessUnlocked(p) return connectedness
} }
// connectednessUnlocked returns the connectedness of a peer. // connectednessUnlocked returns the connectedness of a peer.
@ -676,7 +679,6 @@ func (s *Swarm) connectednessUnlocked(p peer.ID) network.Connectedness {
// Conns returns a slice of all connections. // Conns returns a slice of all connections.
func (s *Swarm) Conns() []network.Conn { func (s *Swarm) Conns() []network.Conn {
s.conns.RLock() s.conns.RLock()
defer s.conns.RUnlock()
conns := make([]network.Conn, 0, len(s.conns.m)) conns := make([]network.Conn, 0, len(s.conns.m))
for _, cs := range s.conns.m { for _, cs := range s.conns.m {
@ -684,6 +686,7 @@ func (s *Swarm) Conns() []network.Conn {
conns = append(conns, c) conns = append(conns, c)
} }
} }
s.conns.RUnlock()
return conns return conns
} }
@ -720,12 +723,13 @@ func (s *Swarm) ClosePeer(p peer.ID) error {
// Peers returns a copy of the set of peers swarm is connected to. // Peers returns a copy of the set of peers swarm is connected to.
func (s *Swarm) Peers() []peer.ID { func (s *Swarm) Peers() []peer.ID {
s.conns.RLock() s.conns.RLock()
defer s.conns.RUnlock()
peers := make([]peer.ID, 0, len(s.conns.m)) peers := make([]peer.ID, 0, len(s.conns.m))
for p := range s.conns.m { for p := range s.conns.m {
peers = append(peers, p) peers = append(peers, p)
} }
s.conns.RUnlock()
return peers return peers
} }
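
Connectedness, Conns and Peers above all follow the same shape: take the read lock, copy what is needed, unlock explicitly, then return the copy. A minimal sketch with an invented registry type (only sync is assumed):

import "sync"

type registry struct {
	mu sync.RWMutex
	m  map[string][]int
}

func (r *registry) Values(key string) []int {
	r.mu.RLock()
	vals := make([]int, len(r.m[key]))
	copy(vals, r.m[key]) // snapshot while the read lock is held
	r.mu.RUnlock()
	return vals // callers get a copy, never the locked map's slice
}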

View File

@ -11,8 +11,9 @@ import (
// ListenAddresses returns a list of addresses at which this swarm listens. // ListenAddresses returns a list of addresses at which this swarm listens.
func (s *Swarm) ListenAddresses() []ma.Multiaddr { func (s *Swarm) ListenAddresses() []ma.Multiaddr {
s.listeners.RLock() s.listeners.RLock()
defer s.listeners.RUnlock() mas := s.listenAddressesNoLock()
return s.listenAddressesNoLock() s.listeners.RUnlock()
return mas
} }
func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr { func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr {

View File

@ -88,13 +88,13 @@ func (c *Conn) doClose() {
go func() { go func() {
// prevents us from issuing close notifications before finishing the open notifications // prevents us from issuing close notifications before finishing the open notifications
c.notifyLk.Lock() c.notifyLk.Lock()
defer c.notifyLk.Unlock()
// Only notify for disconnection if we notified for connection // Only notify for disconnection if we notified for connection
c.swarm.notifyAll(func(f network.Notifiee) { c.swarm.notifyAll(func(f network.Notifiee) {
f.Disconnected(c.swarm, c) f.Disconnected(c.swarm, c)
}) })
c.swarm.refs.Done() c.swarm.refs.Done()
c.notifyLk.Unlock()
}() }()
} }
@ -112,11 +112,11 @@ func (c *Conn) removeStream(s *Stream) {
// swarm ref count. // swarm ref count.
func (c *Conn) start() { func (c *Conn) start() {
go func() { go func() {
defer c.swarm.refs.Done()
defer c.Close()
for { for {
ts, err := c.conn.AcceptStream() ts, err := c.conn.AcceptStream()
if err != nil { if err != nil {
c.swarm.refs.Done()
c.Close()
return return
} }
scope, err := c.swarm.ResourceManager().OpenStream(c.RemotePeer(), network.DirInbound) scope, err := c.swarm.ResourceManager().OpenStream(c.RemotePeer(), network.DirInbound)
@ -192,8 +192,9 @@ func (c *Conn) ConnState() network.ConnectionState {
// Stat returns metadata pertaining to this connection // Stat returns metadata pertaining to this connection
func (c *Conn) Stat() network.ConnStats { func (c *Conn) Stat() network.ConnStats {
c.streams.Lock() c.streams.Lock()
defer c.streams.Unlock() stats := c.stat
return c.stat c.streams.Unlock()
return stats
} }
// NewStream returns a new Stream from this connection // NewStream returns a new Stream from this connection
@ -260,11 +261,11 @@ func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope ne
// GetStreams returns the streams associated with this connection. // GetStreams returns the streams associated with this connection.
func (c *Conn) GetStreams() []network.Stream { func (c *Conn) GetStreams() []network.Stream {
c.streams.Lock() c.streams.Lock()
defer c.streams.Unlock()
streams := make([]network.Stream, 0, len(c.streams.m)) streams := make([]network.Stream, 0, len(c.streams.m))
for s := range c.streams.m { for s := range c.streams.m {
streams = append(streams, s) streams = append(streams, s)
} }
c.streams.Unlock()
return streams return streams
} }

View File

@ -122,10 +122,10 @@ func (db *DialBackoff) init(ctx context.Context) {
func (db *DialBackoff) background(ctx context.Context) { func (db *DialBackoff) background(ctx context.Context) {
ticker := time.NewTicker(BackoffMax) ticker := time.NewTicker(BackoffMax)
defer ticker.Stop()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
ticker.Stop()
return return
case <-ticker.C: case <-ticker.C:
db.cleanup() db.cleanup()
@ -137,9 +137,9 @@ func (db *DialBackoff) background(ctx context.Context) {
// peer p at address addr // peer p at address addr
func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) { func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) {
db.lock.RLock() db.lock.RLock()
defer db.lock.RUnlock()
ap, found := db.entries[p][string(addr.Bytes())] ap, found := db.entries[p][string(addr.Bytes())]
db.lock.RUnlock()
return found && time.Now().Before(ap.until) return found && time.Now().Before(ap.until)
} }
@ -163,7 +163,6 @@ var BackoffMax = time.Minute * 5
func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) { func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
saddr := string(addr.Bytes()) saddr := string(addr.Bytes())
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock()
bp, ok := db.entries[p] bp, ok := db.entries[p]
if !ok { if !ok {
bp = make(map[string]*backoffAddr, 1) bp = make(map[string]*backoffAddr, 1)
@ -175,6 +174,7 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
tries: 1, tries: 1,
until: time.Now().Add(BackoffBase), until: time.Now().Add(BackoffBase),
} }
db.lock.Unlock()
return return
} }
@ -184,19 +184,19 @@ func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
} }
ba.until = time.Now().Add(backoffTime) ba.until = time.Now().Add(backoffTime)
ba.tries++ ba.tries++
db.lock.Unlock()
} }
// Clear removes a backoff record. Clients should call this after a // Clear removes a backoff record. Clients should call this after a
// successful Dial. // successful Dial.
func (db *DialBackoff) Clear(p peer.ID) { func (db *DialBackoff) Clear(p peer.ID) {
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock()
delete(db.entries, p) delete(db.entries, p)
db.lock.Unlock()
} }
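
Per the comment above, Clear is meant to be called after a successful dial. A hypothetical caller putting Backoff, AddBackoff and Clear together (dialWithBackoff and dialAddr are illustrative helpers, not part of this package; peer.ID, ma.Multiaddr and *Conn come from the surrounding package's imports):

func dialWithBackoff(ctx context.Context, db *DialBackoff, p peer.ID, addr ma.Multiaddr,
	dialAddr func(context.Context, peer.ID, ma.Multiaddr) (*Conn, error)) (*Conn, error) {
	if db.Backoff(p, addr) {
		return nil, errors.New("dial backed off") // placeholder error, not this package's own
	}
	conn, err := dialAddr(ctx, p, addr)
	if err != nil {
		db.AddBackoff(p, addr) // remember the failure so the next attempt waits longer
		return nil, err
	}
	db.Clear(p) // successful dial: drop the backoff record, as documented above
	return conn, nil
}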
func (db *DialBackoff) cleanup() { func (db *DialBackoff) cleanup() {
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock()
now := time.Now() now := time.Now()
for p, e := range db.entries { for p, e := range db.entries {
good := false good := false
@ -214,6 +214,7 @@ func (db *DialBackoff) cleanup() {
delete(db.entries, p) delete(db.entries, p)
} }
} }
db.lock.Unlock()
} }
// DialPeer connects to a peer. Use network.WithForceDirectDial to force a // DialPeer connects to a peer. Use network.WithForceDirectDial to force a
@ -260,7 +261,6 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
// apply the DialPeer timeout // apply the DialPeer timeout
ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx)) ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
defer cancel()
conn, err = s.dsync.Dial(ctx, p) conn, err = s.dsync.Dial(ctx, p)
if err == nil { if err == nil {
@ -269,8 +269,10 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
if conn.RemotePeer() != p { if conn.RemotePeer() != p {
conn.Close() conn.Close()
log.Errorw("Handshake failed to properly authenticate peer", "authenticated", conn.RemotePeer(), "expected", p) log.Errorw("Handshake failed to properly authenticate peer", "authenticated", conn.RemotePeer(), "expected", p)
cancel()
return nil, fmt.Errorf("unexpected peer") return nil, fmt.Errorf("unexpected peer")
} }
cancel()
return conn, nil return conn, nil
} }
@ -278,14 +280,17 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
if ctx.Err() != nil { if ctx.Err() != nil {
// Context error trumps any dial errors as it was likely the ultimate cause. // Context error trumps any dial errors as it was likely the ultimate cause.
cancel()
return nil, ctx.Err() return nil, ctx.Err()
} }
if s.ctx.Err() != nil { if s.ctx.Err() != nil {
// Ok, so the swarm is shutting down. // Ok, so the swarm is shutting down.
cancel()
return nil, ErrSwarmClosed return nil, ErrSwarmClosed
} }
cancel()
return nil, err return nil, err
} }

View File

@ -105,7 +105,7 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
}) })
go func() { go func() {
defer func() { cleanup := func() {
s.listeners.Lock() s.listeners.Lock()
_, ok := s.listeners.m[list] _, ok := s.listeners.m[list]
if ok { if ok {
@ -124,13 +124,14 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
n.ListenClose(s, maddr) n.ListenClose(s, maddr)
}) })
s.refs.Done() s.refs.Done()
}() }
for { for {
c, err := list.Accept() c, err := list.Accept()
if err != nil { if err != nil {
if !errors.Is(err, transport.ErrListenerClosed) { if !errors.Is(err, transport.ErrListenerClosed) {
log.Errorf("swarm listener for %s accept error: %s", a, err) log.Errorf("swarm listener for %s accept error: %s", a, err)
} }
cleanup()
return return
} }
canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound") canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound")
@ -141,17 +142,19 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
log.Debugf("swarm listener accepted connection: %s <-> %s", c.LocalMultiaddr(), c.RemoteMultiaddr()) log.Debugf("swarm listener accepted connection: %s <-> %s", c.LocalMultiaddr(), c.RemoteMultiaddr())
s.refs.Add(1) s.refs.Add(1)
go func() { go func() {
defer s.refs.Done()
_, err := s.addConn(c, network.DirInbound) _, err := s.addConn(c, network.DirInbound)
switch err { switch err {
case nil: case nil:
case ErrSwarmClosed: case ErrSwarmClosed:
// ignore. // ignore.
s.refs.Done()
return return
default: default:
log.Warnw("adding connection failed", "to", a, "error", err) log.Warnw("adding connection failed", "to", a, "error", err)
s.refs.Done()
return return
} }
s.refs.Done()
}() }()
} }
}() }()

View File

@ -183,7 +183,6 @@ func appendConnectionState(tags []string, cs network.ConnectionState) []string {
func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) { func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, metricshelper.GetDirection(dir)) *tags = append(*tags, metricshelper.GetDirection(dir))
*tags = appendConnectionState(*tags, cs) *tags = appendConnectionState(*tags, cs)
@ -195,11 +194,11 @@ func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey,
*tags = append(*tags, metricshelper.GetDirection(dir)) *tags = append(*tags, metricshelper.GetDirection(dir))
*tags = append(*tags, p.Type().String()) *tags = append(*tags, p.Type().String())
keyTypes.WithLabelValues(*tags...).Inc() keyTypes.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) { func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, metricshelper.GetDirection(dir)) *tags = append(*tags, metricshelper.GetDirection(dir))
*tags = appendConnectionState(*tags, cs) *tags = appendConnectionState(*tags, cs)
@ -207,16 +206,17 @@ func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Du
*tags = append(*tags, ipv) *tags = append(*tags, ipv)
connsClosed.WithLabelValues(*tags...).Inc() connsClosed.WithLabelValues(*tags...).Inc()
connDuration.WithLabelValues(*tags...).Observe(duration.Seconds()) connDuration.WithLabelValues(*tags...).Observe(duration.Seconds())
metricshelper.PutStringSlice(tags)
} }
func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) { func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = appendConnectionState(*tags, cs) *tags = appendConnectionState(*tags, cs)
ipv, _ := metricshelper.GetIPVersion(laddr) ipv, _ := metricshelper.GetIPVersion(laddr)
*tags = append(*tags, ipv) *tags = append(*tags, ipv)
connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds()) connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
metricshelper.PutStringSlice(tags)
} }
func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause error) { func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause error) {
@ -246,17 +246,16 @@ func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause er
} }
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, transport, e) *tags = append(*tags, transport, e)
ipv, _ := metricshelper.GetIPVersion(addr) ipv, _ := metricshelper.GetIPVersion(addr)
*tags = append(*tags, ipv) *tags = append(*tags, ipv)
dialError.WithLabelValues(*tags...).Inc() dialError.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func (m *metricsTracer) DialCompleted(success bool, totalDials int) { func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if success { if success {
*tags = append(*tags, "success") *tags = append(*tags, "success")
} else { } else {
@ -272,6 +271,7 @@ func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
} }
*tags = append(*tags, numDials) *tags = append(*tags, numDials)
dialsPerPeer.WithLabelValues(*tags...).Inc() dialsPerPeer.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func (m *metricsTracer) DialRankingDelay(d time.Duration) { func (m *metricsTracer) DialRankingDelay(d time.Duration) {
@ -281,11 +281,11 @@ func (m *metricsTracer) DialRankingDelay(d time.Duration) {
func (m *metricsTracer) UpdatedBlackHoleFilterState(name string, state blackHoleState, func (m *metricsTracer) UpdatedBlackHoleFilterState(name string, state blackHoleState,
nextProbeAfter int, successFraction float64) { nextProbeAfter int, successFraction float64) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, name) *tags = append(*tags, name)
blackHoleFilterState.WithLabelValues(*tags...).Set(float64(state)) blackHoleFilterState.WithLabelValues(*tags...).Set(float64(state))
blackHoleFilterSuccessFraction.WithLabelValues(*tags...).Set(successFraction) blackHoleFilterSuccessFraction.WithLabelValues(*tags...).Set(successFraction)
blackHoleFilterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter)) blackHoleFilterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter))
metricshelper.PutStringSlice(tags)
} }

View File

@ -93,8 +93,8 @@ func (s *Stream) Reset() error {
func (s *Stream) closeAndRemoveStream() { func (s *Stream) closeAndRemoveStream() {
s.closeMx.Lock() s.closeMx.Lock()
defer s.closeMx.Unlock()
if s.isClosed { if s.isClosed {
s.closeMx.Unlock()
return return
} }
s.isClosed = true s.isClosed = true
@ -104,6 +104,7 @@ func (s *Stream) closeAndRemoveStream() {
if s.acceptStreamGoroutineCompleted { if s.acceptStreamGoroutineCompleted {
s.conn.removeStream(s) s.conn.removeStream(s)
} }
s.closeMx.Unlock()
} }
// CloseWrite closes the stream for writing, flushing all data and sending an EOF. // CloseWrite closes the stream for writing, flushing all data and sending an EOF.
@ -121,14 +122,15 @@ func (s *Stream) CloseRead() error {
func (s *Stream) completeAcceptStreamGoroutine() { func (s *Stream) completeAcceptStreamGoroutine() {
s.closeMx.Lock() s.closeMx.Lock()
defer s.closeMx.Unlock()
if s.acceptStreamGoroutineCompleted { if s.acceptStreamGoroutineCompleted {
s.closeMx.Unlock()
return return
} }
s.acceptStreamGoroutineCompleted = true s.acceptStreamGoroutineCompleted = true
if s.isClosed { if s.isClosed {
s.conn.removeStream(s) s.conn.removeStream(s)
} }
s.closeMx.Unlock()
} }
// Protocol returns the protocol negotiated on this stream (if set). // Protocol returns the protocol negotiated on this stream (if set).

View File

@ -51,8 +51,9 @@ func (t *transportConn) Scope() network.ConnScope {
} }
func (t *transportConn) Close() error { func (t *transportConn) Close() error {
defer t.scope.Done() err := t.MuxedConn.Close()
return t.MuxedConn.Close() t.scope.Done()
return err
} }
func (t *transportConn) ConnState() network.ConnectionState { func (t *transportConn) ConnState() network.ConnectionState {

View File

@ -59,7 +59,7 @@ func (l *listener) Close() error {
// mechanism while still allowing us to negotiate connections in parallel. // mechanism while still allowing us to negotiate connections in parallel.
func (l *listener) handleIncoming() { func (l *listener) handleIncoming() {
var wg sync.WaitGroup var wg sync.WaitGroup
defer func() { cleanup := func() {
// make sure we're closed // make sure we're closed
l.Listener.Close() l.Listener.Close()
if l.err == nil { if l.err == nil {
@ -68,7 +68,7 @@ func (l *listener) handleIncoming() {
wg.Wait() wg.Wait()
close(l.incoming) close(l.incoming)
}() }
var catcher tec.TempErrCatcher var catcher tec.TempErrCatcher
for l.ctx.Err() == nil { for l.ctx.Err() == nil {
@ -80,6 +80,7 @@ func (l *listener) handleIncoming() {
continue continue
} }
l.err = err l.err = err
cleanup()
return return
} }
catcher.Reset() catcher.Reset()
@ -114,10 +115,7 @@ func (l *listener) handleIncoming() {
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done()
ctx, cancel := context.WithTimeout(l.ctx, l.upgrader.acceptTimeout) ctx, cancel := context.WithTimeout(l.ctx, l.upgrader.acceptTimeout)
defer cancel()
conn, err := l.upgrader.Upgrade(ctx, l.transport, maconn, network.DirInbound, "", connScope) conn, err := l.upgrader.Upgrade(ctx, l.transport, maconn, network.DirInbound, "", connScope)
if err != nil { if err != nil {
@ -128,6 +126,8 @@ func (l *listener) handleIncoming() {
maconn.LocalMultiaddr(), maconn.LocalMultiaddr(),
maconn.RemoteMultiaddr()) maconn.RemoteMultiaddr())
connScope.Done() connScope.Done()
wg.Done()
cancel()
return return
} }
@ -139,7 +139,6 @@ func (l *listener) handleIncoming() {
// simply ensures that calls to Wait block while we're // simply ensures that calls to Wait block while we're
// over the threshold. // over the threshold.
l.threshold.Acquire() l.threshold.Acquire()
defer l.threshold.Release()
select { select {
case l.incoming <- conn: case l.incoming <- conn:
@ -154,8 +153,12 @@ func (l *listener) handleIncoming() {
// instead of hanging onto them. // instead of hanging onto them.
conn.Close() conn.Close()
} }
wg.Done()
cancel()
l.threshold.Release()
}() }()
} }
cleanup()
} }
// Accept accepts a connection. // Accept accepts a connection.
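
handleIncoming above converts its deferred teardown into a named cleanup closure invoked on each way out of the accept loop. A hedged sketch of that conversion with invented names (acceptLoop is not this package's function; only context, net and sync are assumed):

import (
	"context"
	"net"
	"sync"
)

func acceptLoop(ctx context.Context, accept func() (net.Conn, error), incoming chan<- net.Conn) {
	var wg sync.WaitGroup
	cleanup := func() {
		wg.Wait()       // let in-flight handlers finish first
		close(incoming) // then tell consumers no more connections are coming
	}
	for ctx.Err() == nil {
		c, err := accept()
		if err != nil {
			cleanup()
			return
		}
		wg.Add(1)
		go func() {
			select {
			case incoming <- c:
			case <-ctx.Done():
				c.Close() // nobody will read it anymore
			}
			wg.Done()
		}()
	}
	cleanup()
}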

View File

@ -122,20 +122,19 @@ func (c *Conn) Stat() network.ConnStats {
// implicitly because the connection manager closed the underlying relay connection. // implicitly because the connection manager closed the underlying relay connection.
func (c *Conn) tagHop() { func (c *Conn) tagHop() {
c.client.mx.Lock() c.client.mx.Lock()
defer c.client.mx.Unlock()
p := c.stream.Conn().RemotePeer() p := c.stream.Conn().RemotePeer()
c.client.hopCount[p]++ c.client.hopCount[p]++
if c.client.hopCount[p] == 1 { if c.client.hopCount[p] == 1 {
c.client.host.ConnManager().TagPeer(p, "relay-hop-stream", HopTagWeight) c.client.host.ConnManager().TagPeer(p, "relay-hop-stream", HopTagWeight)
} }
c.client.mx.Unlock()
} }
// untagHop removes the relay-hop-stream tag if necessary; it is invoked when a relayed connection // untagHop removes the relay-hop-stream tag if necessary; it is invoked when a relayed connection
// is closed. // is closed.
func (c *Conn) untagHop() { func (c *Conn) untagHop() {
c.client.mx.Lock() c.client.mx.Lock()
defer c.client.mx.Unlock()
p := c.stream.Conn().RemotePeer() p := c.stream.Conn().RemotePeer()
c.client.hopCount[p]-- c.client.hopCount[p]--
@ -143,6 +142,7 @@ func (c *Conn) untagHop() {
c.client.host.ConnManager().UntagPeer(p, "relay-hop-stream") c.client.host.ConnManager().UntagPeer(p, "relay-hop-stream")
delete(c.client.hopCount, p) delete(c.client.hopCount, p)
} }
c.client.mx.Unlock()
} }
type capableConnWithStat interface { type capableConnWithStat interface {

View File

@ -130,12 +130,15 @@ func (c *Client) dialPeer(ctx context.Context, relay, dest peer.AddrInfo) (*Conn
} }
dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout) dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout)
defer cancel()
s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop) s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop)
if err != nil { if err != nil {
cancel()
return nil, fmt.Errorf("error opening hop stream to relay: %w", err) return nil, fmt.Errorf("error opening hop stream to relay: %w", err)
} }
return c.connect(s, dest) conn, err := c.connect(s, dest)
cancel()
return conn, err
} }
func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) { func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
@ -143,11 +146,9 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
s.Reset() s.Reset()
return nil, err return nil, err
} }
defer s.Scope().ReleaseMemory(maxMessageSize)
rd := util.NewDelimitedReader(s, maxMessageSize) rd := util.NewDelimitedReader(s, maxMessageSize)
wr := util.NewDelimitedWriter(s) wr := util.NewDelimitedWriter(s)
defer rd.Close()
var msg pbv2.HopMessage var msg pbv2.HopMessage
@ -159,6 +160,8 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
err := wr.WriteMsg(&msg) err := wr.WriteMsg(&msg)
if err != nil { if err != nil {
s.Reset() s.Reset()
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return nil, err return nil, err
} }
@ -167,6 +170,8 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
err = rd.ReadMsg(&msg) err = rd.ReadMsg(&msg)
if err != nil { if err != nil {
s.Reset() s.Reset()
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return nil, err return nil, err
} }
@ -174,12 +179,16 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
if msg.GetType() != pbv2.HopMessage_STATUS { if msg.GetType() != pbv2.HopMessage_STATUS {
s.Reset() s.Reset()
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType()) return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType())
} }
status := msg.GetStatus() status := msg.GetStatus()
if status != pbv2.Status_OK { if status != pbv2.Status_OK {
s.Reset() s.Reset()
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return nil, newRelayError("error opening relay circuit: %s (%d)", pbv2.Status_name[int32(status)], status) return nil, newRelayError("error opening relay circuit: %s (%d)", pbv2.Status_name[int32(status)], status)
} }
@ -193,5 +202,7 @@ func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
stat.Extra[StatLimitData] = limit.GetData() stat.Extra[StatLimitData] = limit.GetData()
} }
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil
} }

View File

@ -19,18 +19,18 @@ func (c *Client) handleStreamV2(s network.Stream) {
s.SetReadDeadline(time.Now().Add(StreamTimeout)) s.SetReadDeadline(time.Now().Add(StreamTimeout))
rd := util.NewDelimitedReader(s, maxMessageSize) rd := util.NewDelimitedReader(s, maxMessageSize)
defer rd.Close()
writeResponse := func(status pbv2.Status) error { writeResponse := func(status pbv2.Status) error {
s.SetWriteDeadline(time.Now().Add(StreamTimeout)) s.SetWriteDeadline(time.Now().Add(StreamTimeout))
defer s.SetWriteDeadline(time.Time{})
wr := util.NewDelimitedWriter(s) wr := util.NewDelimitedWriter(s)
var msg pbv2.StopMessage var msg pbv2.StopMessage
msg.Type = pbv2.StopMessage_STATUS.Enum() msg.Type = pbv2.StopMessage_STATUS.Enum()
msg.Status = status.Enum() msg.Status = status.Enum()
return wr.WriteMsg(&msg) err := wr.WriteMsg(&msg)
s.SetWriteDeadline(time.Time{})
return err
} }
handleError := func(status pbv2.Status) { handleError := func(status pbv2.Status) {
@ -49,6 +49,7 @@ func (c *Client) handleStreamV2(s network.Stream) {
err := rd.ReadMsg(&msg) err := rd.ReadMsg(&msg)
if err != nil { if err != nil {
handleError(pbv2.Status_MALFORMED_MESSAGE) handleError(pbv2.Status_MALFORMED_MESSAGE)
rd.Close()
return return
} }
// reset stream deadline as message has been read // reset stream deadline as message has been read
@ -56,12 +57,14 @@ func (c *Client) handleStreamV2(s network.Stream) {
if msg.GetType() != pbv2.StopMessage_CONNECT { if msg.GetType() != pbv2.StopMessage_CONNECT {
handleError(pbv2.Status_UNEXPECTED_MESSAGE) handleError(pbv2.Status_UNEXPECTED_MESSAGE)
rd.Close()
return return
} }
src, err := util.PeerToPeerInfoV2(msg.GetPeer()) src, err := util.PeerToPeerInfoV2(msg.GetPeer())
if err != nil { if err != nil {
handleError(pbv2.Status_MALFORMED_MESSAGE) handleError(pbv2.Status_MALFORMED_MESSAGE)
rd.Close()
return return
} }
@ -87,4 +90,5 @@ func (c *Client) handleStreamV2(s network.Stream) {
case <-time.After(AcceptTimeout): case <-time.After(AcceptTimeout):
handleError(pbv2.Status_CONNECTION_FAILED) handleError(pbv2.Status_CONNECTION_FAILED)
} }
rd.Close()
} }

View File

@ -69,11 +69,9 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if err != nil { if err != nil {
return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err} return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err}
} }
defer s.Close()
rd := util.NewDelimitedReader(s, maxMessageSize) rd := util.NewDelimitedReader(s, maxMessageSize)
wr := util.NewDelimitedWriter(s) wr := util.NewDelimitedWriter(s)
defer rd.Close()
var msg pbv2.HopMessage var msg pbv2.HopMessage
msg.Type = pbv2.HopMessage_RESERVE.Enum() msg.Type = pbv2.HopMessage_RESERVE.Enum()
@ -82,6 +80,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if err := wr.WriteMsg(&msg); err != nil { if err := wr.WriteMsg(&msg); err != nil {
s.Reset() s.Reset()
s.Close()
rd.Close()
return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err} return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err}
} }
@ -89,10 +89,14 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if err := rd.ReadMsg(&msg); err != nil { if err := rd.ReadMsg(&msg); err != nil {
s.Reset() s.Reset()
s.Close()
rd.Close()
return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message: %w", err: err} return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message: %w", err: err}
} }
if msg.GetType() != pbv2.HopMessage_STATUS { if msg.GetType() != pbv2.HopMessage_STATUS {
s.Close()
rd.Close()
return nil, ReservationError{ return nil, ReservationError{
Status: pbv2.Status_MALFORMED_MESSAGE, Status: pbv2.Status_MALFORMED_MESSAGE,
Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType()), Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType()),
@ -100,17 +104,23 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
} }
if status := msg.GetStatus(); status != pbv2.Status_OK { if status := msg.GetStatus(); status != pbv2.Status_OK {
s.Close()
rd.Close()
return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"} return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"}
} }
rsvp := msg.GetReservation() rsvp := msg.GetReservation()
if rsvp == nil { if rsvp == nil {
s.Close()
rd.Close()
return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"} return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"}
} }
result := &Reservation{} result := &Reservation{}
result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0) result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0)
if result.Expiration.Before(time.Now()) { if result.Expiration.Before(time.Now()) {
s.Close()
rd.Close()
return nil, ReservationError{ return nil, ReservationError{
Status: pbv2.Status_MALFORMED_MESSAGE, Status: pbv2.Status_MALFORMED_MESSAGE,
Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration), Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration),
@ -132,6 +142,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if voucherBytes != nil { if voucherBytes != nil {
_, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain) _, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain)
if err != nil { if err != nil {
s.Close()
rd.Close()
return nil, ReservationError{ return nil, ReservationError{
Status: pbv2.Status_MALFORMED_MESSAGE, Status: pbv2.Status_MALFORMED_MESSAGE,
Reason: fmt.Sprintf("error consuming voucher envelope: %s", err), Reason: fmt.Sprintf("error consuming voucher envelope: %s", err),
@ -141,6 +153,8 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
voucher, ok := rec.(*proto.ReservationVoucher) voucher, ok := rec.(*proto.ReservationVoucher)
if !ok { if !ok {
s.Close()
rd.Close()
return nil, ReservationError{ return nil, ReservationError{
Status: pbv2.Status_MALFORMED_MESSAGE, Status: pbv2.Status_MALFORMED_MESSAGE,
Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec), Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec),
@ -155,5 +169,7 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
result.LimitData = limit.GetData() result.LimitData = limit.GetData()
} }
s.Close()
rd.Close()
return result, nil return result, nil
} }

View File

@ -48,27 +48,30 @@ func newConstraints(rc *Resources) *constraints {
// If adding this reservation violates IP constraints, an error is returned. // If adding this reservation violates IP constraints, an error is returned.
func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error { func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
c.mutex.Lock() c.mutex.Lock()
defer c.mutex.Unlock()
now := time.Now() now := time.Now()
c.cleanup(now) c.cleanup(now)
if len(c.total) >= c.rc.MaxReservations { if len(c.total) >= c.rc.MaxReservations {
c.mutex.Unlock()
return errTooManyReservations return errTooManyReservations
} }
ip, err := manet.ToIP(a) ip, err := manet.ToIP(a)
if err != nil { if err != nil {
c.mutex.Unlock()
return errors.New("no IP address associated with peer") return errors.New("no IP address associated with peer")
} }
peerReservations := c.peers[p] peerReservations := c.peers[p]
if len(peerReservations) >= c.rc.MaxReservationsPerPeer { if len(peerReservations) >= c.rc.MaxReservationsPerPeer {
c.mutex.Unlock()
return errTooManyReservationsForPeer return errTooManyReservationsForPeer
} }
ipReservations := c.ips[ip.String()] ipReservations := c.ips[ip.String()]
if len(ipReservations) >= c.rc.MaxReservationsPerIP { if len(ipReservations) >= c.rc.MaxReservationsPerIP {
c.mutex.Unlock()
return errTooManyReservationsForIP return errTooManyReservationsForIP
} }
@ -79,6 +82,7 @@ func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
if asn != 0 { if asn != 0 {
asnReservations = c.asns[asn] asnReservations = c.asns[asn]
if len(asnReservations) >= c.rc.MaxReservationsPerASN { if len(asnReservations) >= c.rc.MaxReservationsPerASN {
c.mutex.Unlock()
return errTooManyReservationsForASN return errTooManyReservationsForASN
} }
} }
@ -97,6 +101,7 @@ func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {
asnReservations = append(asnReservations, expiry) asnReservations = append(asnReservations, expiry)
c.asns[asn] = asnReservations c.asns[asn] = asnReservations
} }
c.mutex.Unlock()
return nil return nil
} }

View File

@ -163,24 +163,23 @@ func (mt *metricsTracer) RelayStatus(enabled bool) {
func (mt *metricsTracer) ConnectionOpened() { func (mt *metricsTracer) ConnectionOpened() {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, "opened") *tags = append(*tags, "opened")
connectionsTotal.WithLabelValues(*tags...).Add(1) connectionsTotal.WithLabelValues(*tags...).Add(1)
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) ConnectionClosed(d time.Duration) { func (mt *metricsTracer) ConnectionClosed(d time.Duration) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, "closed") *tags = append(*tags, "closed")
connectionsTotal.WithLabelValues(*tags...).Add(1) connectionsTotal.WithLabelValues(*tags...).Add(1)
connectionDurationSeconds.Observe(d.Seconds()) connectionDurationSeconds.Observe(d.Seconds())
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) { func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
respStatus := getResponseStatus(status) respStatus := getResponseStatus(status)
@ -191,11 +190,11 @@ func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
*tags = append(*tags, getRejectionReason(status)) *tags = append(*tags, getRejectionReason(status))
connectionRejectionsTotal.WithLabelValues(*tags...).Add(1) connectionRejectionsTotal.WithLabelValues(*tags...).Add(1)
} }
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) ReservationAllowed(isRenewal bool) { func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if isRenewal { if isRenewal {
*tags = append(*tags, "renewed") *tags = append(*tags, "renewed")
} else { } else {
@ -203,19 +202,19 @@ func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
} }
reservationsTotal.WithLabelValues(*tags...).Add(1) reservationsTotal.WithLabelValues(*tags...).Add(1)
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) ReservationClosed(cnt int) { func (mt *metricsTracer) ReservationClosed(cnt int) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, "closed") *tags = append(*tags, "closed")
reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt)) reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt))
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) { func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
respStatus := getResponseStatus(status) respStatus := getResponseStatus(status)
@ -226,6 +225,7 @@ func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
*tags = append(*tags, getRejectionReason(status)) *tags = append(*tags, getRejectionReason(status))
reservationRejectionsTotal.WithLabelValues(*tags...).Add(1) reservationRejectionsTotal.WithLabelValues(*tags...).Add(1)
} }
metricshelper.PutStringSlice(tags)
} }
func (mt *metricsTracer) BytesTransferred(cnt int) { func (mt *metricsTracer) BytesTransferred(cnt int) {

View File

@ -146,10 +146,8 @@ func (r *Relay) handleStream(s network.Stream) {
s.Reset() s.Reset()
return return
} }
defer s.Scope().ReleaseMemory(maxMessageSize)
rd := util.NewDelimitedReader(s, maxMessageSize) rd := util.NewDelimitedReader(s, maxMessageSize)
defer rd.Close()
s.SetReadDeadline(time.Now().Add(StreamTimeout)) s.SetReadDeadline(time.Now().Add(StreamTimeout))
@ -158,6 +156,8 @@ func (r *Relay) handleStream(s network.Stream) {
err := rd.ReadMsg(&msg) err := rd.ReadMsg(&msg)
if err != nil { if err != nil {
r.handleError(s, pbv2.Status_MALFORMED_MESSAGE) r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return return
} }
// reset stream deadline as message has been read // reset stream deadline as message has been read
@ -176,22 +176,25 @@ func (r *Relay) handleStream(s network.Stream) {
default: default:
r.handleError(s, pbv2.Status_MALFORMED_MESSAGE) r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
} }
s.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
} }
func (r *Relay) handleReserve(s network.Stream) pbv2.Status { func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
defer s.Close()
p := s.Conn().RemotePeer() p := s.Conn().RemotePeer()
a := s.Conn().RemoteMultiaddr() a := s.Conn().RemoteMultiaddr()
if isRelayAddr(a) { if isRelayAddr(a) {
log.Debugf("refusing relay reservation for %s; reservation attempt over relay connection") log.Debugf("refusing relay reservation for %s; reservation attempt over relay connection")
r.handleError(s, pbv2.Status_PERMISSION_DENIED) r.handleError(s, pbv2.Status_PERMISSION_DENIED)
s.Close()
return pbv2.Status_PERMISSION_DENIED return pbv2.Status_PERMISSION_DENIED
} }
if r.acl != nil && !r.acl.AllowReserve(p, a) { if r.acl != nil && !r.acl.AllowReserve(p, a) {
log.Debugf("refusing relay reservation for %s; permission denied", p) log.Debugf("refusing relay reservation for %s; permission denied", p)
r.handleError(s, pbv2.Status_PERMISSION_DENIED) r.handleError(s, pbv2.Status_PERMISSION_DENIED)
s.Close()
return pbv2.Status_PERMISSION_DENIED return pbv2.Status_PERMISSION_DENIED
} }
@ -202,6 +205,7 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
r.mx.Unlock() r.mx.Unlock()
log.Debugf("refusing relay reservation for %s; relay closed", p) log.Debugf("refusing relay reservation for %s; relay closed", p)
r.handleError(s, pbv2.Status_PERMISSION_DENIED) r.handleError(s, pbv2.Status_PERMISSION_DENIED)
s.Close()
return pbv2.Status_PERMISSION_DENIED return pbv2.Status_PERMISSION_DENIED
} }
now := time.Now() now := time.Now()
@ -212,6 +216,7 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
r.mx.Unlock() r.mx.Unlock()
log.Debugf("refusing relay reservation for %s; IP constraint violation: %s", p, err) log.Debugf("refusing relay reservation for %s; IP constraint violation: %s", p, err)
r.handleError(s, pbv2.Status_RESERVATION_REFUSED) r.handleError(s, pbv2.Status_RESERVATION_REFUSED)
s.Close()
return pbv2.Status_RESERVATION_REFUSED return pbv2.Status_RESERVATION_REFUSED
} }
} }
@ -232,8 +237,10 @@ func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
if err := r.writeResponse(s, pbv2.Status_OK, r.makeReservationMsg(p, expire), r.makeLimitMsg(p)); err != nil { if err := r.writeResponse(s, pbv2.Status_OK, r.makeReservationMsg(p, expire), r.makeLimitMsg(p)); err != nil {
log.Debugf("error writing reservation response; retracting reservation for %s", p) log.Debugf("error writing reservation response; retracting reservation for %s", p)
s.Reset() s.Reset()
s.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
s.Close()
return pbv2.Status_OK return pbv2.Status_OK
} }
@ -324,7 +331,6 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
} }
ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout) ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout)
defer cancel()
ctx = network.WithNoDial(ctx, "relay connect") ctx = network.WithNoDial(ctx, "relay connect")
@ -333,6 +339,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
log.Debugf("error opening relay stream to %s: %s", dest.ID, err) log.Debugf("error opening relay stream to %s: %s", dest.ID, err)
cleanup() cleanup()
r.handleError(s, pbv2.Status_CONNECTION_FAILED) r.handleError(s, pbv2.Status_CONNECTION_FAILED)
cancel()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
@ -345,6 +352,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
if err := bs.Scope().SetService(ServiceName); err != nil { if err := bs.Scope().SetService(ServiceName); err != nil {
log.Debugf("error attaching stream to relay service: %s", err) log.Debugf("error attaching stream to relay service: %s", err)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
cancel()
return pbv2.Status_RESOURCE_LIMIT_EXCEEDED return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
} }
@ -352,13 +360,12 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
log.Debugf("error reserving memory for stream: %s", err) log.Debugf("error reserving memory for stream: %s", err)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
cancel()
return pbv2.Status_RESOURCE_LIMIT_EXCEEDED return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
} }
defer bs.Scope().ReleaseMemory(maxMessageSize)
rd := util.NewDelimitedReader(bs, maxMessageSize) rd := util.NewDelimitedReader(bs, maxMessageSize)
wr := util.NewDelimitedWriter(bs) wr := util.NewDelimitedWriter(bs)
defer rd.Close()
var stopmsg pbv2.StopMessage var stopmsg pbv2.StopMessage
stopmsg.Type = pbv2.StopMessage_CONNECT.Enum() stopmsg.Type = pbv2.StopMessage_CONNECT.Enum()
@ -371,6 +378,10 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
if err != nil { if err != nil {
log.Debugf("error writing stop handshake") log.Debugf("error writing stop handshake")
fail(pbv2.Status_CONNECTION_FAILED) fail(pbv2.Status_CONNECTION_FAILED)
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
@ -380,18 +391,30 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
if err != nil { if err != nil {
log.Debugf("error reading stop response: %s", err.Error()) log.Debugf("error reading stop response: %s", err.Error())
fail(pbv2.Status_CONNECTION_FAILED) fail(pbv2.Status_CONNECTION_FAILED)
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS { if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS {
log.Debugf("unexpected stop response; not a status message (%d)", t) log.Debugf("unexpected stop response; not a status message (%d)", t)
fail(pbv2.Status_CONNECTION_FAILED) fail(pbv2.Status_CONNECTION_FAILED)
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
if status := stopmsg.GetStatus(); status != pbv2.Status_OK { if status := stopmsg.GetStatus(); status != pbv2.Status_OK {
log.Debugf("relay stop failure: %d", status) log.Debugf("relay stop failure: %d", status)
fail(pbv2.Status_CONNECTION_FAILED) fail(pbv2.Status_CONNECTION_FAILED)
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
@ -407,6 +430,10 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
bs.Reset() bs.Reset()
s.Reset() s.Reset()
cleanup() cleanup()
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_CONNECTION_FAILED return pbv2.Status_CONNECTION_FAILED
} }
@ -437,6 +464,9 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Statu
go r.relayUnlimited(bs, s, dest.ID, src, done) go r.relayUnlimited(bs, s, dest.ID, src, done)
} }
cancel()
bs.Scope().ReleaseMemory(maxMessageSize)
rd.Close()
return pbv2.Status_OK return pbv2.Status_OK
} }
@ -461,10 +491,7 @@ func (r *Relay) rmConn(p peer.ID) {
} }
func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, limit int64, done func()) { func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, limit int64, done func()) {
defer done()
buf := pool.Get(r.rc.BufferSize) buf := pool.Get(r.rc.BufferSize)
defer pool.Put(buf)
limitedSrc := io.LimitReader(src, limit) limitedSrc := io.LimitReader(src, limit)
@ -484,13 +511,13 @@ func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, li
} }
log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID) log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
done()
pool.Put(buf)
} }
func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, done func()) { func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, done func()) {
defer done()
buf := pool.Get(r.rc.BufferSize) buf := pool.Get(r.rc.BufferSize)
defer pool.Put(buf)
count, err := r.copyWithBuffer(dest, src, buf) count, err := r.copyWithBuffer(dest, src, buf)
if err != nil { if err != nil {
@ -504,6 +531,9 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID,
} }
log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID) log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
done()
pool.Put(buf)
} }
// errInvalidWrite means that a write returned an impossible count. // errInvalidWrite means that a write returned an impossible count.
@ -560,7 +590,6 @@ func (r *Relay) handleError(s network.Stream, status pbv2.Status) {
func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.Reservation, limit *pbv2.Limit) error { func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.Reservation, limit *pbv2.Limit) error {
s.SetWriteDeadline(time.Now().Add(StreamTimeout)) s.SetWriteDeadline(time.Now().Add(StreamTimeout))
defer s.SetWriteDeadline(time.Time{})
wr := util.NewDelimitedWriter(s) wr := util.NewDelimitedWriter(s)
var msg pbv2.HopMessage var msg pbv2.HopMessage
@ -569,7 +598,9 @@ func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.R
msg.Reservation = rsvp msg.Reservation = rsvp
msg.Limit = limit msg.Limit = limit
return wr.WriteMsg(&msg) err := wr.WriteMsg(&msg)
s.SetWriteDeadline(time.Time{})
return err
} }
func (r *Relay) makeReservationMsg(p peer.ID, expire time.Time) *pbv2.Reservation { func (r *Relay) makeReservationMsg(p peer.ID, expire time.Time) *pbv2.Reservation {
@ -629,13 +660,13 @@ func (r *Relay) makeLimitMsg(p peer.ID) *pbv2.Limit {
func (r *Relay) background() { func (r *Relay) background() {
ticker := time.NewTicker(time.Minute) ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
r.gc() r.gc()
case <-r.ctx.Done(): case <-r.ctx.Done():
ticker.Stop()
return return
} }
} }
@ -643,7 +674,6 @@ func (r *Relay) background() {
func (r *Relay) gc() { func (r *Relay) gc() {
r.mx.Lock() r.mx.Lock()
defer r.mx.Unlock()
now := time.Now() now := time.Now()
cnt := 0 cnt := 0
@ -663,6 +693,7 @@ func (r *Relay) gc() {
delete(r.conns, p) delete(r.conns, p)
} }
} }
r.mx.Unlock()
} }
func (r *Relay) disconnected(n network.Network, c network.Conn) { func (r *Relay) disconnected(n network.Network, c network.Conn) {
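The handleConnect hunks above drop defer cancel(), defer bs.Scope().ReleaseMemory(maxMessageSize) and defer rd.Close(), repeating the cleanup on every exit instead. A minimal, stdlib-only sketch of that shape; writeWithTimeout and its arguments are hypothetical, not relay code:

package example

import (
	"context"
	"net"
	"time"
)

// writeWithTimeout repeats, on each of its three return paths, the cleanup
// that a defer would otherwise have guaranteed.
func writeWithTimeout(parent context.Context, conn net.Conn, payload []byte) error {
	ctx, cancel := context.WithTimeout(parent, 30*time.Second)
	deadline, _ := ctx.Deadline()
	if err := conn.SetWriteDeadline(deadline); err != nil {
		cancel()
		return err
	}
	if _, err := conn.Write(payload); err != nil {
		conn.SetWriteDeadline(time.Time{}) // clear the deadline on failure too
		cancel()
		return err
	}
	conn.SetWriteDeadline(time.Time{})
	cancel()
	return nil
}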
View File
@ -67,18 +67,21 @@ func newHolePuncher(h host.Host, ids identify.IDService, tracer *tracer, filter
func (hp *holePuncher) beginDirectConnect(p peer.ID) error { func (hp *holePuncher) beginDirectConnect(p peer.ID) error {
hp.closeMx.RLock() hp.closeMx.RLock()
defer hp.closeMx.RUnlock()
if hp.closed { if hp.closed {
hp.closeMx.RUnlock()
return ErrClosed return ErrClosed
} }
hp.activeMx.Lock() hp.activeMx.Lock()
defer hp.activeMx.Unlock()
if _, ok := hp.active[p]; ok { if _, ok := hp.active[p]; ok {
hp.activeMx.Unlock()
hp.closeMx.RUnlock()
return ErrHolePunchActive return ErrHolePunchActive
} }
hp.active[p] = struct{}{} hp.active[p] = struct{}{}
hp.activeMx.Unlock()
hp.closeMx.RUnlock()
return nil return nil
} }
@ -90,13 +93,11 @@ func (hp *holePuncher) DirectConnect(p peer.ID) error {
return err return err
} }
defer func() { err := hp.directConnect(p)
hp.activeMx.Lock() hp.activeMx.Lock()
delete(hp.active, p) delete(hp.active, p)
hp.activeMx.Unlock() hp.activeMx.Unlock()
}() return err
return hp.directConnect(p)
} }
func (hp *holePuncher) directConnect(rp peer.ID) error { func (hp *holePuncher) directConnect(rp peer.ID) error {
@ -181,14 +182,15 @@ func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, []ma.Multi
if err != nil { if err != nil {
return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err) return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
} }
defer str.Close()
addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str) addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str)
if err != nil { if err != nil {
log.Debugf("%s", err) log.Debugf("%s", err)
str.Reset() str.Reset()
str.Close()
return addr, obsAddr, rtt, err return addr, obsAddr, rtt, err
} }
str.Close()
return addr, obsAddr, rtt, err return addr, obsAddr, rtt, err
} }
@ -200,7 +202,6 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil { if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err) return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
} }
defer str.Scope().ReleaseMemory(maxMsgSize)
w := pbio.NewDelimitedWriter(str) w := pbio.NewDelimitedWriter(str)
rd := pbio.NewDelimitedReader(str, maxMsgSize) rd := pbio.NewDelimitedReader(str, maxMsgSize)
@ -213,6 +214,7 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs) obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs)
} }
if len(obsAddrs) == 0 { if len(obsAddrs) == 0 {
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address") return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address")
} }
@ -222,16 +224,19 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
ObsAddrs: addrsToBytes(obsAddrs), ObsAddrs: addrsToBytes(obsAddrs),
}); err != nil { }); err != nil {
str.Reset() str.Reset()
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, err return nil, nil, 0, err
} }
// wait for a CONNECT message from the remote peer // wait for a CONNECT message from the remote peer
var msg pb.HolePunch var msg pb.HolePunch
if err := rd.ReadMsg(&msg); err != nil { if err := rd.ReadMsg(&msg); err != nil {
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err) return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
} }
rtt := time.Since(start) rtt := time.Since(start)
if t := msg.GetType(); t != pb.HolePunch_CONNECT { if t := msg.GetType(); t != pb.HolePunch_CONNECT {
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, fmt.Errorf("expect CONNECT message, got %s", t) return nil, nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
} }
@ -241,12 +246,15 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
} }
if len(addrs) == 0 { if len(addrs) == 0 {
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT") return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT")
} }
if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil { if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil {
str.Scope().ReleaseMemory(maxMsgSize)
return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err) return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
} }
str.Scope().ReleaseMemory(maxMsgSize)
return addrs, obsAddrs, rtt, nil return addrs, obsAddrs, rtt, nil
} }
@ -269,17 +277,17 @@ func (nn *netNotifiee) Connected(_ network.Network, conn network.Conn) {
if conn.Stat().Direction == network.DirInbound && isRelayAddress(conn.RemoteMultiaddr()) { if conn.Stat().Direction == network.DirInbound && isRelayAddress(conn.RemoteMultiaddr()) {
hs.refCount.Add(1) hs.refCount.Add(1)
go func() { go func() {
defer hs.refCount.Done()
select { select {
// waiting for Identify here will allow us to access the peer's public and observed addresses // waiting for Identify here will allow us to access the peer's public and observed addresses
// that we can dial to for a hole punch. // that we can dial to for a hole punch.
case <-hs.ids.IdentifyWait(conn): case <-hs.ids.IdentifyWait(conn):
case <-hs.ctx.Done(): case <-hs.ctx.Done():
hs.refCount.Done()
return return
} }
_ = hs.DirectConnect(conn.RemotePeer()) _ = hs.DirectConnect(conn.RemotePeer())
hs.refCount.Done()
}() }()
} }
} }
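beginDirectConnect above now releases closeMx and activeMx by hand on each early return, in reverse acquisition order. The same locking shape in isolation, with hypothetical names rather than the holepunch types:

package example

import (
	"errors"
	"sync"
)

type puncher struct {
	closeMu  sync.RWMutex
	closed   bool
	activeMu sync.Mutex
	active   map[string]struct{} // assumed to be initialised by a constructor
}

// begin unlocks in reverse acquisition order on every return, which is what
// the removed defer statements previously guaranteed.
func (p *puncher) begin(id string) error {
	p.closeMu.RLock()
	if p.closed {
		p.closeMu.RUnlock()
		return errors.New("closed")
	}
	p.activeMu.Lock()
	if _, ok := p.active[id]; ok {
		p.activeMu.Unlock()
		p.closeMu.RUnlock()
		return errors.New("punch already active")
	}
	p.active[id] = struct{}{}
	p.activeMu.Unlock()
	p.closeMu.RUnlock()
	return nil
}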
View File
@ -108,7 +108,6 @@ func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int, func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) { remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, side, getNumAttemptString(numAttempts)) *tags = append(*tags, side, getNumAttemptString(numAttempts))
var dipv, dtransport string var dipv, dtransport string
@ -165,6 +164,7 @@ func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
*tags = append(*tags, outcome) *tags = append(*tags, outcome)
hpOutcomesTotal.WithLabelValues(*tags...).Inc() hpOutcomesTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func getNumAttemptString(numAttempt int) string { func getNumAttemptString(numAttempt int) string {
@ -177,11 +177,11 @@ func getNumAttemptString(numAttempt int) string {
func (mt *metricsTracer) DirectDialFinished(success bool) { func (mt *metricsTracer) DirectDialFinished(success bool) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if success { if success {
*tags = append(*tags, "success") *tags = append(*tags, "success")
} else { } else {
*tags = append(*tags, "failed") *tags = append(*tags, "failed")
} }
directDialsTotal.WithLabelValues(*tags...).Inc() directDialsTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
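The hole punch metrics tracer swaps defer metricshelper.PutStringSlice(tags) for an explicit Put once the labels have been handed to prometheus. Roughly the same idea with a plain sync.Pool; recordOutcome and sink are stand-ins, not the tracer API:

package example

import "sync"

var tagPool = sync.Pool{New: func() any { return new([]string) }}

// recordOutcome returns the pooled slice on its single exit path instead of
// deferring the Put; sink stands in for WithLabelValues(...).Inc().
func recordOutcome(side, outcome string, sink func(labels ...string)) {
	tags := tagPool.Get().(*[]string)
	*tags = (*tags)[:0] // reuse the backing array across calls
	*tags = append(*tags, side, outcome)
	sink(*tags...)
	tagPool.Put(tags)
}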
View File
@ -93,8 +93,6 @@ func NewService(h host.Host, ids identify.IDService, opts ...Option) (*Service,
} }
func (s *Service) watchForPublicAddr() { func (s *Service) watchForPublicAddr() {
defer s.refCount.Done()
log.Debug("waiting until we have at least one public address", "peer", s.host.ID()) log.Debug("waiting until we have at least one public address", "peer", s.host.ID())
// TODO: We should have an event here that fires when identify discovers a new // TODO: We should have an event here that fires when identify discovers a new
@ -104,7 +102,6 @@ func (s *Service) watchForPublicAddr() {
duration := 250 * time.Millisecond duration := 250 * time.Millisecond
const maxDuration = 5 * time.Second const maxDuration = 5 * time.Second
t := time.NewTimer(duration) t := time.NewTimer(duration)
defer t.Stop()
for { for {
if containsPublicAddr(s.ids.OwnObservedAddrs()) { if containsPublicAddr(s.ids.OwnObservedAddrs()) {
log.Debug("Host now has a public address. Starting holepunch protocol.") log.Debug("Host now has a public address. Starting holepunch protocol.")
@ -114,6 +111,8 @@ func (s *Service) watchForPublicAddr() {
select { select {
case <-s.ctx.Done(): case <-s.ctx.Done():
s.refCount.Done()
t.Stop()
return return
case <-t.C: case <-t.C:
duration *= 2 duration *= 2
@ -128,15 +127,22 @@ func (s *Service) watchForPublicAddr() {
sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("holepunch")) sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("holepunch"))
if err != nil { if err != nil {
log.Debugf("failed to subscripe to Reachability event: %s", err) log.Debugf("failed to subscripe to Reachability event: %s", err)
s.refCount.Done()
t.Stop()
return return
} }
defer sub.Close()
for { for {
select { select {
case <-s.ctx.Done(): case <-s.ctx.Done():
s.refCount.Done()
t.Stop()
sub.Close()
return return
case e, ok := <-sub.Out(): case e, ok := <-sub.Out():
if !ok { if !ok {
s.refCount.Done()
t.Stop()
sub.Close()
return return
} }
if e.(event.EvtLocalReachabilityChanged).Reachability != network.ReachabilityPrivate { if e.(event.EvtLocalReachabilityChanged).Reachability != network.ReachabilityPrivate {
@ -146,6 +152,10 @@ func (s *Service) watchForPublicAddr() {
s.holePuncher = newHolePuncher(s.host, s.ids, s.tracer, s.filter) s.holePuncher = newHolePuncher(s.host, s.ids, s.tracer, s.filter)
s.holePuncherMx.Unlock() s.holePuncherMx.Unlock()
close(s.hasPublicAddrsChan) close(s.hasPublicAddrsChan)
s.refCount.Done()
t.Stop()
sub.Close()
return return
} }
} }
@ -185,7 +195,6 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
log.Debugf("error reserving memory for stream: %s", err) log.Debugf("error reserving memory for stream: %s", err)
return 0, nil, nil, err return 0, nil, nil, err
} }
defer str.Scope().ReleaseMemory(maxMsgSize)
wr := pbio.NewDelimitedWriter(str) wr := pbio.NewDelimitedWriter(str)
rd := pbio.NewDelimitedReader(str, maxMsgSize) rd := pbio.NewDelimitedReader(str, maxMsgSize)
@ -196,9 +205,11 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
str.SetDeadline(time.Now().Add(StreamTimeout)) str.SetDeadline(time.Now().Add(StreamTimeout))
if err := rd.ReadMsg(msg); err != nil { if err := rd.ReadMsg(msg); err != nil {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err) return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
} }
if t := msg.GetType(); t != pb.HolePunch_CONNECT { if t := msg.GetType(); t != pb.HolePunch_CONNECT {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t) return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
} }
@ -209,6 +220,7 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
log.Debugw("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial) log.Debugw("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial)
if len(obsDial) == 0 { if len(obsDial) == 0 {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address") return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address")
} }
@ -218,17 +230,21 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remo
msg.ObsAddrs = addrsToBytes(ownAddrs) msg.ObsAddrs = addrsToBytes(ownAddrs)
tstart := time.Now() tstart := time.Now()
if err := wr.WriteMsg(msg); err != nil { if err := wr.WriteMsg(msg); err != nil {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initiator: %w", err) return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initiator: %w", err)
} }
// Read SYNC message // Read SYNC message
msg.Reset() msg.Reset()
if err := rd.ReadMsg(msg); err != nil { if err := rd.ReadMsg(msg); err != nil {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err) return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
} }
if t := msg.GetType(); t != pb.HolePunch_SYNC { if t := msg.GetType(); t != pb.HolePunch_SYNC {
str.Scope().ReleaseMemory(maxMsgSize)
return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t) return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
} }
str.Scope().ReleaseMemory(maxMsgSize)
return time.Since(tstart), obsDial, ownAddrs, nil return time.Since(tstart), obsDial, ownAddrs, nil
} }
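watchForPublicAddr now repeats s.refCount.Done(), t.Stop() and sub.Close() at four separate exits. If that duplication ever becomes hard to keep in sync, one defer-free alternative is a small cleanup closure invoked before each return; this is only a sketch of that option under assumed names, not what the commit does:

package example

import (
	"context"
	"sync"
	"time"
)

// watch assumes the caller did wg.Add(1) before launching it.
func watch(ctx context.Context, wg *sync.WaitGroup, events <-chan struct{}) {
	t := time.NewTimer(250 * time.Millisecond)
	cleanup := func() { // invoked explicitly on every exit; no defer involved
		t.Stop()
		wg.Done()
	}
	for {
		select {
		case <-ctx.Done():
			cleanup()
			return
		case _, ok := <-events:
			if !ok {
				cleanup()
				return
			}
			// handle the event, then keep waiting
		case <-t.C:
			t.Reset(250 * time.Millisecond)
		}
	}
}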
View File
@ -259,9 +259,7 @@ func (t *tracer) HolePunchAttempt(p peer.ID) {
// gc cleans up the peers map. This is only run when tracer is initialised with a non nil // gc cleans up the peers map. This is only run when tracer is initialised with a non nil
// EventTracer // EventTracer
func (t *tracer) gc() { func (t *tracer) gc() {
defer t.refCount.Done()
timer := time.NewTicker(tracerGCInterval) timer := time.NewTicker(tracerGCInterval)
defer timer.Stop()
for { for {
select { select {
@ -275,6 +273,8 @@ func (t *tracer) gc() {
} }
t.mutex.Unlock() t.mutex.Unlock()
case <-t.ctx.Done(): case <-t.ctx.Done():
t.refCount.Done()
timer.Stop()
return return
} }
} }
View File
@ -68,12 +68,13 @@ func holePunchConnect(ctx context.Context, host host.Host, pi peer.AddrInfo, isC
holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching") holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching")
forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching") forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching")
dialCtx, cancel := context.WithTimeout(forceDirectConnCtx, dialTimeout) dialCtx, cancel := context.WithTimeout(forceDirectConnCtx, dialTimeout)
defer cancel()
if err := host.Connect(dialCtx, pi); err != nil { if err := host.Connect(dialCtx, pi); err != nil {
log.Debugw("hole punch attempt with peer failed", "peer ID", pi.ID, "error", err) log.Debugw("hole punch attempt with peer failed", "peer ID", pi.ID, "error", err)
cancel()
return err return err
} }
log.Debugw("hole punch successful", "peer", pi.ID) log.Debugw("hole punch successful", "peer", pi.ID)
cancel()
return nil return nil
} }
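holePunchConnect calls cancel() on both the failure and the success path. That matters because a context created with context.WithTimeout holds on to its timer and related resources until either the deadline fires or cancel runs, so skipping it on the happy path would accumulate timers under heavy punching. The rule of thumb in miniature; connectOnce and dial are hypothetical:

package example

import (
	"context"
	"time"
)

func connectOnce(parent context.Context, dial func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	if err := dial(ctx); err != nil {
		cancel() // release the timeout's resources on failure ...
		return err
	}
	cancel() // ... and on success, since no defer does it for us anymore
	return nil
}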
View File
@ -258,8 +258,6 @@ func (ids *idService) Start() {
} }
func (ids *idService) loop(ctx context.Context) { func (ids *idService) loop(ctx context.Context) {
defer ids.refCount.Done()
sub, err := ids.Host.EventBus().Subscribe( sub, err := ids.Host.EventBus().Subscribe(
[]any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}}, []any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
eventbus.BufSize(256), eventbus.BufSize(256),
@ -267,9 +265,9 @@ func (ids *idService) loop(ctx context.Context) {
) )
if err != nil { if err != nil {
log.Errorf("failed to subscribe to events on the bus, err=%s", err) log.Errorf("failed to subscribe to events on the bus, err=%s", err)
ids.refCount.Done()
return return
} }
defer sub.Close()
// Send pushes from a separate Go routine. // Send pushes from a separate Go routine.
// That way, we can end up with // That way, we can end up with
@ -278,11 +276,10 @@ func (ids *idService) loop(ctx context.Context) {
triggerPush := make(chan struct{}, 1) triggerPush := make(chan struct{}, 1)
ids.refCount.Add(1) ids.refCount.Add(1)
go func() { go func() {
defer ids.refCount.Done()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
ids.refCount.Done()
return return
case <-triggerPush: case <-triggerPush:
ids.sendPushes(ctx) ids.sendPushes(ctx)
@ -294,6 +291,8 @@ func (ids *idService) loop(ctx context.Context) {
select { select {
case e, ok := <-sub.Out(): case e, ok := <-sub.Out():
if !ok { if !ok {
sub.Close()
ids.refCount.Done()
return return
} }
if updated := ids.updateSnapshot(); !updated { if updated := ids.updateSnapshot(); !updated {
@ -307,6 +306,8 @@ func (ids *idService) loop(ctx context.Context) {
default: // we already have one more push queued, no need to queue another one default: // we already have one more push queued, no need to queue another one
} }
case <-ctx.Done(): case <-ctx.Done():
sub.Close()
ids.refCount.Done()
return return
} }
} }
@ -346,19 +347,22 @@ func (ids *idService) sendPushes(ctx context.Context) {
sem <- struct{}{} sem <- struct{}{}
wg.Add(1) wg.Add(1)
go func(c network.Conn) { go func(c network.Conn) {
defer wg.Done()
defer func() { <-sem }()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush) str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush)
if err != nil { // connection might have been closed recently if err != nil { // connection might have been closed recently
cancel()
func() { <-sem }()
wg.Done()
return return
} }
// TODO: find out if the peer supports push if we didn't have any information about push support // TODO: find out if the peer supports push if we didn't have any information about push support
if err := ids.sendIdentifyResp(str, true); err != nil { if err := ids.sendIdentifyResp(str, true); err != nil {
log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err) log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err)
return
} }
cancel()
func() { <-sem }()
wg.Done()
}(c) }(c)
} }
wg.Wait() wg.Wait()
@ -402,7 +406,6 @@ func (ids *idService) IdentifyConn(c network.Conn) {
// If successful, the peer store will contain the peer's addresses and supported protocols. // If successful, the peer store will contain the peer's addresses and supported protocols.
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} { func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
ids.connsMu.Lock() ids.connsMu.Lock()
defer ids.connsMu.Unlock()
e, found := ids.conns[c] e, found := ids.conns[c]
if !found { if !found {
@ -412,6 +415,7 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
log.Debugw("connection not found in identify service", "peer", c.RemotePeer()) log.Debugw("connection not found in identify service", "peer", c.RemotePeer())
ch := make(chan struct{}) ch := make(chan struct{})
close(ch) close(ch)
ids.connsMu.Unlock()
return ch return ch
} else { } else {
ids.addConnWithLock(c) ids.addConnWithLock(c)
@ -419,6 +423,7 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
} }
if e.IdentifyWaitChan != nil { if e.IdentifyWaitChan != nil {
ids.connsMu.Unlock()
return e.IdentifyWaitChan return e.IdentifyWaitChan
} }
// First call to IdentifyWait for this connection. Create the channel. // First call to IdentifyWait for this connection. Create the channel.
@ -429,23 +434,23 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
// already, but that doesn't really matter. We'll fail to open a // already, but that doesn't really matter. We'll fail to open a
// stream then forget the connection. // stream then forget the connection.
go func() { go func() {
defer close(e.IdentifyWaitChan)
if err := ids.identifyConn(c); err != nil { if err := ids.identifyConn(c); err != nil {
log.Warnf("failed to identify %s: %s", c.RemotePeer(), err) log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err}) ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
return
} }
close(e.IdentifyWaitChan)
}() }()
ids.connsMu.Unlock()
return e.IdentifyWaitChan return e.IdentifyWaitChan
} }
func (ids *idService) identifyConn(c network.Conn) error { func (ids *idService) identifyConn(c network.Conn) error {
ctx, cancel := context.WithTimeout(context.Background(), Timeout) ctx, cancel := context.WithTimeout(context.Background(), Timeout)
defer cancel()
s, err := c.NewStream(network.WithAllowLimitedConn(ctx, "identify")) s, err := c.NewStream(network.WithAllowLimitedConn(ctx, "identify"))
if err != nil { if err != nil {
log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err) log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
cancel()
return err return err
} }
s.SetDeadline(time.Now().Add(Timeout)) s.SetDeadline(time.Now().Add(Timeout))
@ -459,10 +464,13 @@ func (ids *idService) identifyConn(c network.Conn) error {
if err := msmux.SelectProtoOrFail(ID, s); err != nil { if err := msmux.SelectProtoOrFail(ID, s); err != nil {
log.Infow("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err) log.Infow("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
s.Reset() s.Reset()
cancel()
return err return err
} }
return ids.handleIdentifyResponse(s, false) err = ids.handleIdentifyResponse(s, false)
cancel()
return err
} }
// handlePush handles incoming identify push streams // handlePush handles incoming identify push streams
@ -480,7 +488,6 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
s.Reset() s.Reset()
return fmt.Errorf("failed to attaching stream to identify service: %w", err) return fmt.Errorf("failed to attaching stream to identify service: %w", err)
} }
defer s.Close()
ids.currentSnapshot.Lock() ids.currentSnapshot.Lock()
snapshot := ids.currentSnapshot.snapshot snapshot := ids.currentSnapshot.snapshot
@ -493,6 +500,7 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr()) log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil { if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil {
s.Close()
return err return err
} }
@ -501,17 +509,21 @@ func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
} }
ids.connsMu.Lock() ids.connsMu.Lock()
defer ids.connsMu.Unlock()
e, ok := ids.conns[s.Conn()] e, ok := ids.conns[s.Conn()]
// The connection might already have been closed. // The connection might already have been closed.
// We *should* receive the Connected notification from the swarm before we're able to accept the peer's // We *should* receive the Connected notification from the swarm before we're able to accept the peer's
// Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here. // Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
// The only consequence would be that we send a spurious Push to that peer later. // The only consequence would be that we send a spurious Push to that peer later.
if !ok { if !ok {
ids.connsMu.Unlock()
s.Close()
return nil return nil
} }
e.Sequence = snapshot.seq e.Sequence = snapshot.seq
ids.conns[s.Conn()] = e ids.conns[s.Conn()] = e
ids.connsMu.Unlock()
s.Close()
return nil return nil
} }
@ -527,7 +539,6 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
s.Reset() s.Reset()
return err return err
} }
defer s.Scope().ReleaseMemory(signedIDSize)
c := s.Conn() c := s.Conn()
@ -537,11 +548,10 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
if err := readAllIDMessages(r, mes); err != nil { if err := readAllIDMessages(r, mes); err != nil {
log.Warn("error reading identify message: ", err) log.Warn("error reading identify message: ", err)
s.Reset() s.Reset()
s.Scope().ReleaseMemory(signedIDSize)
return err return err
} }
defer s.Close()
log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr()) log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
ids.consumeMessage(mes, c, isPush) ids.consumeMessage(mes, c, isPush)
@ -551,9 +561,12 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
} }
ids.connsMu.Lock() ids.connsMu.Lock()
defer ids.connsMu.Unlock()
e, ok := ids.conns[c] e, ok := ids.conns[c]
if !ok { // might already have disconnected if !ok { // might already have disconnected
ids.connsMu.Unlock()
s.Close()
s.Scope().ReleaseMemory(signedIDSize)
return nil return nil
} }
sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush) sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
@ -568,6 +581,10 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
} }
ids.conns[c] = e ids.conns[c] = e
ids.connsMu.Unlock()
s.Close()
s.Scope().ReleaseMemory(signedIDSize)
return nil return nil
} }
@ -612,9 +629,9 @@ func (ids *idService) updateSnapshot() (updated bool) {
} }
ids.currentSnapshot.Lock() ids.currentSnapshot.Lock()
defer ids.currentSnapshot.Unlock()
if ids.currentSnapshot.snapshot.Equal(&snapshot) { if ids.currentSnapshot.snapshot.Equal(&snapshot) {
ids.currentSnapshot.Unlock()
return false return false
} }
@ -622,6 +639,7 @@ func (ids *idService) updateSnapshot() (updated bool) {
ids.currentSnapshot.snapshot = snapshot ids.currentSnapshot.snapshot = snapshot
log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs) log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
ids.currentSnapshot.Unlock()
return true return true
} }
@ -1036,12 +1054,12 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
// Last disconnect. // Last disconnect.
// Undo the setting of addresses to peer.ConnectedAddrTTL we did // Undo the setting of addresses to peer.ConnectedAddrTTL we did
ids.addrMu.Lock() ids.addrMu.Lock()
defer ids.addrMu.Unlock()
// This check MUST happen after acquiring the Lock as identify on a different connection // This check MUST happen after acquiring the Lock as identify on a different connection
// might be trying to add addresses. // might be trying to add addresses.
switch ids.Host.Network().Connectedness(c.RemotePeer()) { switch ids.Host.Network().Connectedness(c.RemotePeer()) {
case network.Connected, network.Limited: case network.Connected, network.Limited:
ids.addrMu.Unlock()
return return
} }
// peerstore returns the elements in a random order as it uses a map to store the addresses // peerstore returns the elements in a random order as it uses a map to store the addresses
@ -1059,6 +1077,7 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.TempAddrTTL) ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.TempAddrTTL)
ids.Host.Peerstore().AddAddrs(c.RemotePeer(), addrs[:n], peerstore.RecentlyConnectedAddrTTL) ids.Host.Peerstore().AddAddrs(c.RemotePeer(), addrs[:n], peerstore.RecentlyConnectedAddrTTL)
ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.TempAddrTTL, 0) ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.TempAddrTTL, 0)
ids.addrMu.Unlock()
} }
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
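The IdentifyWait change removes defer close(e.IdentifyWaitChan), so the worker goroutine now closes the channel on its failure branch as well as on success; the early return in the error branch went away for exactly that reason, otherwise waiters would block forever. A small illustration of that obligation; identifyOnce is not the idService API:

package example

import "log"

// identifyOnce returns a channel that is closed when the background work
// finishes. With no defer, close(done) must appear on every exit of the
// goroutine, including the error branch.
func identifyOnce(work func() error) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		if err := work(); err != nil {
			log.Printf("identify failed: %v", err)
			close(done) // do not return before closing
			return
		}
		close(done)
	}()
	return done
}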
View File
@ -133,7 +133,6 @@ func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
func (t *metricsTracer) TriggeredPushes(ev any) { func (t *metricsTracer) TriggeredPushes(ev any) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
typ := "unknown" typ := "unknown"
switch ev.(type) { switch ev.(type) {
@ -144,19 +143,19 @@ func (t *metricsTracer) TriggeredPushes(ev any) {
} }
*tags = append(*tags, typ) *tags = append(*tags, typ)
pushesTriggered.WithLabelValues(*tags...).Inc() pushesTriggered.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func (t *metricsTracer) IncrementPushSupport(s identifyPushSupport) { func (t *metricsTracer) IncrementPushSupport(s identifyPushSupport) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, getPushSupport(s)) *tags = append(*tags, getPushSupport(s))
connPushSupportTotal.WithLabelValues(*tags...).Inc() connPushSupportTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) { func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if isPush { if isPush {
*tags = append(*tags, metricshelper.GetDirection(network.DirOutbound)) *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
@ -168,11 +167,11 @@ func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int
protocolsCount.Set(float64(numProtocols)) protocolsCount.Set(float64(numProtocols))
addrsCount.Set(float64(numAddrs)) addrsCount.Set(float64(numAddrs))
metricshelper.PutStringSlice(tags)
} }
func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) { func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if isPush { if isPush {
*tags = append(*tags, metricshelper.GetDirection(network.DirInbound)) *tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
@ -184,14 +183,15 @@ func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs
numProtocolsReceived.Observe(float64(numProtocols)) numProtocolsReceived.Observe(float64(numProtocols))
numAddrsReceived.Observe(float64(numAddrs)) numAddrsReceived.Observe(float64(numAddrs))
metricshelper.PutStringSlice(tags)
} }
func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) { func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) {
tags := metricshelper.GetStringSlice() tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, getPushSupport(support)) *tags = append(*tags, getPushSupport(support))
connPushSupportTotal.WithLabelValues(*tags...).Inc() connPushSupportTotal.WithLabelValues(*tags...).Inc()
metricshelper.PutStringSlice(tags)
} }
func getPushSupport(s identifyPushSupport) string { func getPushSupport(s identifyPushSupport) string {
View File
@ -53,7 +53,6 @@ func newNATEmitter(h host.Host, o *ObservedAddrManager, eventInterval time.Durat
} }
func (n *natEmitter) worker() { func (n *natEmitter) worker() {
defer n.wg.Done()
subCh := n.reachabilitySub.Out() subCh := n.reachabilitySub.Out()
ticker := time.NewTicker(n.eventInterval) ticker := time.NewTicker(n.eventInterval)
pendingUpdate := false pendingUpdate := false
@ -86,6 +85,7 @@ func (n *natEmitter) worker() {
enoughTimeSinceLastUpdate = false enoughTimeSinceLastUpdate = false
} }
case <-n.ctx.Done(): case <-n.ctx.Done():
n.wg.Done()
return return
} }
} }
View File
@ -116,10 +116,10 @@ func (s *observerSet) cacheMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
} }
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock()
// Check if some other go routine added this while we were waiting // Check if some other go routine added this while we were waiting
res, ok = s.cachedMultiaddrs[addrStr] res, ok = s.cachedMultiaddrs[addrStr]
if ok { if ok {
s.mu.Unlock()
return res return res
} }
if s.cachedMultiaddrs == nil { if s.cachedMultiaddrs == nil {
@ -133,7 +133,9 @@ func (s *observerSet) cacheMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
} }
} }
s.cachedMultiaddrs[addrStr] = ma.Join(s.ObservedTWAddr, addr) s.cachedMultiaddrs[addrStr] = ma.Join(s.ObservedTWAddr, addr)
return s.cachedMultiaddrs[addrStr] mas := s.cachedMultiaddrs[addrStr]
s.mu.Unlock()
return mas
} }
type observation struct { type observation struct {
@ -202,9 +204,9 @@ func (o *ObservedAddrManager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr)
return nil return nil
} }
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock()
tw, err := thinWaistForm(o.normalize(addr)) tw, err := thinWaistForm(o.normalize(addr))
if err != nil { if err != nil {
o.mu.RUnlock()
return nil return nil
} }
@ -213,13 +215,13 @@ func (o *ObservedAddrManager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr)
for _, s := range observerSets { for _, s := range observerSets {
res = append(res, s.cacheMultiaddr(tw.Rest)) res = append(res, s.cacheMultiaddr(tw.Rest))
} }
o.mu.RUnlock()
return res return res
} }
// Addrs return all activated observed addresses // Addrs return all activated observed addresses
func (o *ObservedAddrManager) Addrs() []ma.Multiaddr { func (o *ObservedAddrManager) Addrs() []ma.Multiaddr {
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock()
m := make(map[string][]*observerSet) m := make(map[string][]*observerSet)
for localTWStr := range o.externalAddrs { for localTWStr := range o.externalAddrs {
@ -231,6 +233,7 @@ func (o *ObservedAddrManager) Addrs() []ma.Multiaddr {
addrs = append(addrs, s.cacheMultiaddr(t.Rest)) addrs = append(addrs, s.cacheMultiaddr(t.Rest))
} }
} }
o.mu.RUnlock()
return addrs return addrs
} }
@ -282,13 +285,12 @@ func (o *ObservedAddrManager) Record(conn connMultiaddrs, observed ma.Multiaddr)
} }
func (o *ObservedAddrManager) worker() { func (o *ObservedAddrManager) worker() {
defer o.wg.Done()
for { for {
select { select {
case obs := <-o.wch: case obs := <-o.wch:
o.maybeRecordObservation(obs.conn, obs.observed) o.maybeRecordObservation(obs.conn, obs.observed)
case <-o.ctx.Done(): case <-o.ctx.Done():
o.wg.Done()
return return
} }
} }
@ -372,12 +374,12 @@ func (o *ObservedAddrManager) maybeRecordObservation(conn connMultiaddrs, observ
log.Debugw("added own observed listen addr", "observed", observed) log.Debugw("added own observed listen addr", "observed", observed)
o.mu.Lock() o.mu.Lock()
defer o.mu.Unlock()
o.recordObservationUnlocked(conn, localTW, observedTW) o.recordObservationUnlocked(conn, localTW, observedTW)
select { select {
case o.addrRecordedNotif <- struct{}{}: case o.addrRecordedNotif <- struct{}{}:
default: default:
} }
o.mu.Unlock()
} }
func (o *ObservedAddrManager) recordObservationUnlocked(conn connMultiaddrs, localTW, observedTW thinWaist) { func (o *ObservedAddrManager) recordObservationUnlocked(conn connMultiaddrs, localTW, observedTW thinWaist) {
@ -453,16 +455,17 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
return return
} }
o.mu.Lock() o.mu.Lock()
defer o.mu.Unlock()
// normalize before obtaining the thinWaist so that we are always dealing // normalize before obtaining the thinWaist so that we are always dealing
// with the normalized form of the address // with the normalized form of the address
localTW, err := thinWaistForm(o.normalize(conn.LocalMultiaddr())) localTW, err := thinWaistForm(o.normalize(conn.LocalMultiaddr()))
if err != nil { if err != nil {
o.mu.Unlock()
return return
} }
t, ok := o.localAddrs[string(localTW.Addr.Bytes())] t, ok := o.localAddrs[string(localTW.Addr.Bytes())]
if !ok { if !ok {
o.mu.Unlock()
return return
} }
t.Count-- t.Count--
@ -472,11 +475,13 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
observedTWAddr, ok := o.connObservedTWAddrs[conn] observedTWAddr, ok := o.connObservedTWAddrs[conn]
if !ok { if !ok {
o.mu.Unlock()
return return
} }
delete(o.connObservedTWAddrs, conn) delete(o.connObservedTWAddrs, conn)
observer, err := getObserver(conn.RemoteMultiaddr()) observer, err := getObserver(conn.RemoteMultiaddr())
if err != nil { if err != nil {
o.mu.Unlock()
return return
} }
@ -485,11 +490,11 @@ func (o *ObservedAddrManager) removeConn(conn connMultiaddrs) {
case o.addrRecordedNotif <- struct{}{}: case o.addrRecordedNotif <- struct{}{}:
default: default:
} }
o.mu.Unlock()
} }
func (o *ObservedAddrManager) getNATType() (tcpNATType, udpNATType network.NATDeviceType) { func (o *ObservedAddrManager) getNATType() (tcpNATType, udpNATType network.NATDeviceType) {
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock()
var tcpCounts, udpCounts []int var tcpCounts, udpCounts []int
var tcpTotal, udpTotal int var tcpTotal, udpTotal int
@ -539,6 +544,7 @@ func (o *ObservedAddrManager) getNATType() (tcpNATType, udpNATType network.NATDe
udpNATType = network.NATDeviceTypeSymmetric udpNATType = network.NATDeviceTypeSymmetric
} }
} }
o.mu.RUnlock()
return return
} }
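cacheMultiaddr now binds the cached value to a local (mas) before unlocking and returning it, since indexing the map after the Unlock would race with writers. The same rule in a minimal cache; the type and names are hypothetical:

package example

import "sync"

type cache struct {
	mu sync.Mutex
	m  map[string]string
}

func (c *cache) getOrCompute(key string, compute func(string) string) string {
	c.mu.Lock()
	if v, ok := c.m[key]; ok {
		c.mu.Unlock()
		return v // v was read while the lock was held
	}
	if c.m == nil {
		c.m = make(map[string]string) // lazy init, as the observer set does
	}
	v := compute(key)
	c.m[key] = v
	c.mu.Unlock()
	return v // bound before Unlock; never index the map after unlocking
}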
View File
@ -50,15 +50,11 @@ func (p *PingService) PingHandler(s network.Stream) {
s.Reset() s.Reset()
return return
} }
defer s.Scope().ReleaseMemory(PingSize)
buf := pool.Get(PingSize) buf := pool.Get(PingSize)
defer pool.Put(buf)
errCh := make(chan error, 1) errCh := make(chan error, 1)
defer close(errCh)
timer := time.NewTimer(pingTimeout) timer := time.NewTimer(pingTimeout)
defer timer.Stop()
go func() { go func() {
select { select {
@ -78,12 +74,22 @@ func (p *PingService) PingHandler(s network.Stream) {
_, err := io.ReadFull(s, buf) _, err := io.ReadFull(s, buf)
if err != nil { if err != nil {
errCh <- err errCh <- err
s.Scope().ReleaseMemory(PingSize)
pool.Put(buf)
close(errCh)
timer.Stop()
return return
} }
_, err = s.Write(buf) _, err = s.Write(buf)
if err != nil { if err != nil {
errCh <- err errCh <- err
s.Scope().ReleaseMemory(PingSize)
pool.Put(buf)
close(errCh)
timer.Stop()
return return
} }
@ -134,15 +140,14 @@ func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {
out := make(chan Result) out := make(chan Result)
go func() { go func() {
defer close(out)
defer cancel()
for ctx.Err() == nil { for ctx.Err() == nil {
var res Result var res Result
res.RTT, res.Error = ping(s, ra) res.RTT, res.Error = ping(s, ra)
// canceled, ignore everything. // canceled, ignore everything.
if ctx.Err() != nil { if ctx.Err() != nil {
close(out)
cancel()
return return
} }
@ -154,9 +159,13 @@ func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {
select { select {
case out <- res: case out <- res:
case <-ctx.Done(): case <-ctx.Done():
close(out)
cancel()
return return
} }
} }
close(out)
cancel()
}() }()
context.AfterFunc(ctx, func() { context.AfterFunc(ctx, func() {
// forces the ping to abort. // forces the ping to abort.
@ -172,30 +181,40 @@ func ping(s network.Stream, randReader io.Reader) (time.Duration, error) {
s.Reset() s.Reset()
return 0, err return 0, err
} }
defer s.Scope().ReleaseMemory(2 * PingSize)
buf := pool.Get(PingSize) buf := pool.Get(PingSize)
defer pool.Put(buf)
if _, err := io.ReadFull(randReader, buf); err != nil { if _, err := io.ReadFull(randReader, buf); err != nil {
s.Scope().ReleaseMemory(2 * PingSize)
pool.Put(buf)
return 0, err return 0, err
} }
before := time.Now() before := time.Now()
if _, err := s.Write(buf); err != nil { if _, err := s.Write(buf); err != nil {
s.Scope().ReleaseMemory(2 * PingSize)
pool.Put(buf)
return 0, err return 0, err
} }
rbuf := pool.Get(PingSize) rbuf := pool.Get(PingSize)
defer pool.Put(rbuf)
if _, err := io.ReadFull(s, rbuf); err != nil { if _, err := io.ReadFull(s, rbuf); err != nil {
s.Scope().ReleaseMemory(2 * PingSize)
pool.Put(buf)
pool.Put(rbuf)
return 0, err return 0, err
} }
if !bytes.Equal(buf, rbuf) { if !bytes.Equal(buf, rbuf) {
s.Scope().ReleaseMemory(2 * PingSize)
pool.Put(buf)
pool.Put(rbuf)
return 0, errors.New("ping packet was incorrect") return 0, errors.New("ping packet was incorrect")
} }
s.Scope().ReleaseMemory(2 * PingSize)
pool.Put(buf)
pool.Put(rbuf)
return time.Since(before), nil return time.Since(before), nil
} }
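The ping changes return every pooled buffer with an explicit pool.Put on each exit path. A compact version of that discipline using sync.Pool in place of the buffer pool; echo and its ReadWriter are stand-ins:

package example

import (
	"io"
	"sync"
)

const pingSize = 32

var bufPool = sync.Pool{New: func() any { b := make([]byte, pingSize); return &b }}

// echo reads a ping and writes it back, putting the buffer back into the
// pool on every path; a buffer must never be Put while still referenced.
func echo(rw io.ReadWriter) error {
	bp := bufPool.Get().(*[]byte)
	buf := *bp
	if _, err := io.ReadFull(rw, buf); err != nil {
		bufPool.Put(bp)
		return err
	}
	if _, err := rw.Write(buf); err != nil {
		bufPool.Put(bp)
		return err
	}
	bufPool.Put(bp)
	return nil
}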
View File
@ -194,15 +194,15 @@ func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer tr.DecreaseCount()
ctx, cancel := context.WithTimeout(ctx, HolePunchTimeout) ctx, cancel := context.WithTimeout(ctx, HolePunchTimeout)
defer cancel()
key := holePunchKey{addr: addr.String(), peer: p} key := holePunchKey{addr: addr.String(), peer: p}
t.holePunchingMx.Lock() t.holePunchingMx.Lock()
if _, ok := t.holePunching[key]; ok { if _, ok := t.holePunching[key]; ok {
t.holePunchingMx.Unlock() t.holePunchingMx.Unlock()
tr.DecreaseCount()
cancel()
return nil, fmt.Errorf("already punching hole for %s", addr) return nil, fmt.Errorf("already punching hole for %s", addr)
} }
connCh := make(chan tpt.CapableConn, 1) connCh := make(chan tpt.CapableConn, 1)
@ -210,11 +210,6 @@ func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID
t.holePunchingMx.Unlock() t.holePunchingMx.Unlock()
var timer *time.Timer var timer *time.Timer
defer func() {
if timer != nil {
timer.Stop()
}
}()
payload := make([]byte, 64) payload := make([]byte, 64)
var punchErr error var punchErr error
@ -247,6 +242,11 @@ loop:
t.holePunchingMx.Lock() t.holePunchingMx.Lock()
delete(t.holePunching, key) delete(t.holePunching, key)
t.holePunchingMx.Unlock() t.holePunchingMx.Unlock()
tr.DecreaseCount()
cancel()
if timer != nil {
timer.Stop()
}
return c, nil return c, nil
case <-timer.C: case <-timer.C:
case <-ctx.Done(): case <-ctx.Done():
@ -256,14 +256,24 @@ loop:
} }
// we only arrive here if punchErr != nil // we only arrive here if punchErr != nil
t.holePunchingMx.Lock() t.holePunchingMx.Lock()
defer func() {
delete(t.holePunching, key)
t.holePunchingMx.Unlock()
}()
select { select {
case c := <-t.holePunching[key].connCh: case c := <-t.holePunching[key].connCh:
tr.DecreaseCount()
cancel()
if timer != nil {
timer.Stop()
}
delete(t.holePunching, key)
t.holePunchingMx.Unlock()
return c, nil return c, nil
default: default:
tr.DecreaseCount()
cancel()
if timer != nil {
timer.Stop()
}
delete(t.holePunching, key)
t.holePunchingMx.Unlock()
return nil, punchErr return nil, punchErr
} }
} }
@ -294,7 +304,6 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
} }
t.listenersMu.Lock() t.listenersMu.Lock()
defer t.listenersMu.Unlock()
listeners := t.listeners[udpAddr.String()] listeners := t.listeners[udpAddr.String()]
var underlyingListener *listener var underlyingListener *listener
var acceptRunner *acceptLoopRunner var acceptRunner *acceptLoopRunner
@ -304,16 +313,19 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
acceptRunner = listeners[0].acceptRunnner acceptRunner = listeners[0].acceptRunnner
// Make sure our underlying listener is listening on the specified QUIC version // Make sure our underlying listener is listening on the specified QUIC version
if _, ok := underlyingListener.localMultiaddrs[version]; !ok { if _, ok := underlyingListener.localMultiaddrs[version]; !ok {
t.listenersMu.Unlock()
return nil, fmt.Errorf("can't listen on quic version %v, underlying listener doesn't support it", version) return nil, fmt.Errorf("can't listen on quic version %v, underlying listener doesn't support it", version)
} }
} else { } else {
ln, err := t.connManager.ListenQUIC(addr, &tlsConf, t.allowWindowIncrease) ln, err := t.connManager.ListenQUIC(addr, &tlsConf, t.allowWindowIncrease)
if err != nil { if err != nil {
t.listenersMu.Unlock()
return nil, err return nil, err
} }
l, err := newListener(ln, t, t.localPeer, t.privKey, t.rcmgr) l, err := newListener(ln, t, t.localPeer, t.privKey, t.rcmgr)
if err != nil { if err != nil {
_ = ln.Close() _ = ln.Close()
t.listenersMu.Unlock()
return nil, err return nil, err
} }
underlyingListener = &l underlyingListener = &l
@ -335,7 +347,7 @@ func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
listeners = append(listeners, l) listeners = append(listeners, l)
t.listeners[udpAddr.String()] = listeners t.listeners[udpAddr.String()] = listeners
t.listenersMu.Unlock()
return l, nil return l, nil
} }
@ -373,7 +385,6 @@ func (t *transport) Close() error {
func (t *transport) CloseVirtualListener(l *virtualListener) error { func (t *transport) CloseVirtualListener(l *virtualListener) error {
t.listenersMu.Lock() t.listenersMu.Lock()
defer t.listenersMu.Unlock()
var err error var err error
listeners := t.listeners[l.udpAddr] listeners := t.listeners[l.udpAddr]
@ -381,6 +392,7 @@ func (t *transport) CloseVirtualListener(l *virtualListener) error {
// This is the last virtual listener here, so we can close the underlying listener // This is the last virtual listener here, so we can close the underlying listener
err = l.listener.Close() err = l.listener.Close()
delete(t.listeners, l.udpAddr) delete(t.listeners, l.udpAddr)
t.listenersMu.Unlock()
return err return err
} }
@ -394,6 +406,6 @@ func (t *transport) CloseVirtualListener(l *virtualListener) error {
} }
} }
t.listenersMu.Unlock()
return nil return nil
} }
View File
@ -89,17 +89,18 @@ func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWi
} }
c.quicListenersMu.Lock() c.quicListenersMu.Lock()
defer c.quicListenersMu.Unlock()
key := laddr.String() key := laddr.String()
entry, ok := c.quicListeners[key] entry, ok := c.quicListeners[key]
if !ok { if !ok {
tr, err := c.transportForListen(netw, laddr) tr, err := c.transportForListen(netw, laddr)
if err != nil { if err != nil {
c.quicListenersMu.Unlock()
return nil, err return nil, err
} }
ln, err := newQuicListener(tr, c.serverConfig) ln, err := newQuicListener(tr, c.serverConfig)
if err != nil { if err != nil {
c.quicListenersMu.Unlock()
return nil, err return nil, err
} }
key = tr.LocalAddr().String() key = tr.LocalAddr().String()
@ -110,16 +111,17 @@ func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWi
if entry.refCount <= 0 { if entry.refCount <= 0 {
entry.ln.Close() entry.ln.Close()
} }
c.quicListenersMu.Unlock()
return nil, err return nil, err
} }
entry.refCount++ entry.refCount++
c.quicListeners[key] = entry c.quicListeners[key] = entry
c.quicListenersMu.Unlock()
return l, nil return l, nil
} }
func (c *ConnManager) onListenerClosed(key string) { func (c *ConnManager) onListenerClosed(key string) {
c.quicListenersMu.Lock() c.quicListenersMu.Lock()
defer c.quicListenersMu.Unlock()
entry := c.quicListeners[key] entry := c.quicListeners[key]
entry.refCount = entry.refCount - 1 entry.refCount = entry.refCount - 1
@ -129,6 +131,7 @@ func (c *ConnManager) onListenerClosed(key string) {
} else { } else {
c.quicListeners[key] = entry c.quicListeners[key] = entry
} }
c.quicListenersMu.Unlock()
} }
func (c *ConnManager) transportForListen(network string, laddr *net.UDPAddr) (refCountedQuicTransport, error) { func (c *ConnManager) transportForListen(network string, laddr *net.UDPAddr) (refCountedQuicTransport, error) {
View File
@ -55,16 +55,18 @@ func newQuicListener(tr refCountedQuicTransport, quicConfig *quic.Config) (*quic
SessionTicketsDisabled: true, // This is set for the config for client, but we set it here as well: https://github.com/quic-go/quic-go/issues/4029 SessionTicketsDisabled: true, // This is set for the config for client, but we set it here as well: https://github.com/quic-go/quic-go/issues/4029
GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) { GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) {
cl.protocolsMu.Lock() cl.protocolsMu.Lock()
defer cl.protocolsMu.Unlock()
for _, proto := range info.SupportedProtos { for _, proto := range info.SupportedProtos {
if entry, ok := cl.protocols[proto]; ok { if entry, ok := cl.protocols[proto]; ok {
conf := entry.tlsConf conf := entry.tlsConf
if conf.GetConfigForClient != nil { if conf.GetConfigForClient != nil {
cl.protocolsMu.Unlock()
return conf.GetConfigForClient(info) return conf.GetConfigForClient(info)
} }
cl.protocolsMu.Unlock()
return conf, nil return conf, nil
} }
} }
cl.protocolsMu.Unlock()
return nil, fmt.Errorf("no supported protocol found. offered: %+v", info.SupportedProtos) return nil, fmt.Errorf("no supported protocol found. offered: %+v", info.SupportedProtos)
}, },
} }
@ -81,25 +83,27 @@ func newQuicListener(tr refCountedQuicTransport, quicConfig *quic.Config) (*quic
func (l *quicListener) allowWindowIncrease(conn quic.Connection, delta uint64) bool { func (l *quicListener) allowWindowIncrease(conn quic.Connection, delta uint64) bool {
l.protocolsMu.Lock() l.protocolsMu.Lock()
defer l.protocolsMu.Unlock()
conf, ok := l.protocols[conn.ConnectionState().TLS.NegotiatedProtocol] conf, ok := l.protocols[conn.ConnectionState().TLS.NegotiatedProtocol]
if !ok { if !ok {
l.protocolsMu.Unlock()
return false return false
} }
l.protocolsMu.Unlock()
return conf.allowWindowIncrease(conn, delta) return conf.allowWindowIncrease(conn, delta)
} }
func (l *quicListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool, onRemove func()) (Listener, error) { func (l *quicListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool, onRemove func()) (Listener, error) {
l.protocolsMu.Lock() l.protocolsMu.Lock()
defer l.protocolsMu.Unlock()
if len(tlsConf.NextProtos) == 0 { if len(tlsConf.NextProtos) == 0 {
l.protocolsMu.Unlock()
return nil, errors.New("no ALPN found in tls.Config") return nil, errors.New("no ALPN found in tls.Config")
} }
for _, proto := range tlsConf.NextProtos { for _, proto := range tlsConf.NextProtos {
if _, ok := l.protocols[proto]; ok { if _, ok := l.protocols[proto]; ok {
l.protocolsMu.Unlock()
return nil, fmt.Errorf("already listening for protocol %s", proto) return nil, fmt.Errorf("already listening for protocol %s", proto)
} }
} }
@ -119,18 +123,21 @@ func (l *quicListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn qu
allowWindowIncrease: allowWindowIncrease, allowWindowIncrease: allowWindowIncrease,
} }
} }
l.protocolsMu.Unlock()
return ln, nil return ln, nil
} }
func (l *quicListener) Run() error { func (l *quicListener) Run() error {
defer close(l.running)
defer l.transport.DecreaseCount()
for { for {
conn, err := l.l.Accept(context.Background()) conn, err := l.l.Accept(context.Background())
if err != nil { if err != nil {
if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") { if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") {
close(l.running)
l.transport.DecreaseCount()
return transport.ErrListenerClosed return transport.ErrListenerClosed
} }
close(l.running)
l.transport.DecreaseCount()
return err return err
} }
proto := conn.ConnectionState().TLS.NegotiatedProtocol proto := conn.ConnectionState().TLS.NegotiatedProtocol
@ -139,6 +146,8 @@ func (l *quicListener) Run() error {
ln, ok := l.protocols[proto] ln, ok := l.protocols[proto]
if !ok { if !ok {
l.protocolsMu.Unlock() l.protocolsMu.Unlock()
close(l.running)
l.transport.DecreaseCount()
return fmt.Errorf("negotiated unknown protocol: %s", proto) return fmt.Errorf("negotiated unknown protocol: %s", proto)
} }
ln.ln.add(conn) ln.ln.add(conn)
View File
@ -82,8 +82,8 @@ func NewUDPMux(socket net.PacketConn) *UDPMux {
func (mux *UDPMux) Start() { func (mux *UDPMux) Start() {
mux.wg.Add(1) mux.wg.Add(1)
go func() { go func() {
defer mux.wg.Done()
mux.readLoop() mux.readLoop()
mux.wg.Done()
}() }()
} }
@ -264,7 +264,6 @@ func (mux *UDPMux) RemoveConnByUfrag(ufrag string) {
} }
mux.mx.Lock() mux.mx.Lock()
defer mux.mx.Unlock()
for _, isIPv6 := range [...]bool{true, false} { for _, isIPv6 := range [...]bool{true, false} {
key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6} key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}
@ -276,17 +275,18 @@ func (mux *UDPMux) RemoveConnByUfrag(ufrag string) {
delete(mux.ufragAddrMap, key) delete(mux.ufragAddrMap, key)
} }
} }
mux.mx.Unlock()
} }
func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, _ *UDPMux, addr net.Addr) (created bool, _ *muxedConnection) { func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, _ *UDPMux, addr net.Addr) (created bool, _ *muxedConnection) {
key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6} key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}
mux.mx.Lock() mux.mx.Lock()
defer mux.mx.Unlock()
if conn, ok := mux.ufragMap[key]; ok { if conn, ok := mux.ufragMap[key]; ok {
mux.addrMap[addr.String()] = conn mux.addrMap[addr.String()] = conn
mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr) mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
mux.mx.Unlock()
return false, conn return false, conn
} }
@ -294,5 +294,6 @@ func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, _ *UDPMux, addr ne
mux.ufragMap[key] = conn mux.ufragMap[key] = conn
mux.addrMap[addr.String()] = conn mux.addrMap[addr.String()] = conn
mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr) mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
mux.mx.Unlock()
return true, conn return true, conn
} }
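For context, the get-or-create idiom above keeps the lookup and the insert under one mutex hold, so two concurrent callers for the same ufrag cannot both create a connection. It reduces to roughly the following sketch; the types and names here are hypothetical, not the mux's real ones:

package main

import "sync"

type muxedConn struct{ key string }

type connMux struct {
	mu    sync.Mutex
	conns map[string]*muxedConn
}

func (m *connMux) getOrCreate(key string) (created bool, _ *muxedConn) {
	m.mu.Lock()
	if existing, ok := m.conns[key]; ok {
		// Fast path: this key is already registered.
		m.mu.Unlock()
		return false, existing
	}
	conn := &muxedConn{key: key}
	m.conns[key] = conn
	m.mu.Unlock()
	return true, conn
}

func main() {
	m := &connMux{conns: map[string]*muxedConn{}}
	m.getOrCreate("ufrag-a:ipv4")
}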


@ -40,10 +40,10 @@ func NewConn(raw *ws.Conn, secure bool) *Conn {
func (c *Conn) Read(b []byte) (int, error) { func (c *Conn) Read(b []byte) (int, error) {
c.readLock.Lock() c.readLock.Lock()
defer c.readLock.Unlock()
if c.reader == nil { if c.reader == nil {
if err := c.prepNextReader(); err != nil { if err := c.prepNextReader(); err != nil {
c.readLock.Unlock()
return 0, err return 0, err
} }
} }
@ -55,15 +55,18 @@ func (c *Conn) Read(b []byte) (int, error) {
c.reader = nil c.reader = nil
if n > 0 { if n > 0 {
c.readLock.Unlock()
return n, nil return n, nil
} }
if err := c.prepNextReader(); err != nil { if err := c.prepNextReader(); err != nil {
c.readLock.Unlock()
return 0, err return 0, err
} }
// explicitly looping // explicitly looping
default: default:
c.readLock.Unlock()
return n, err return n, err
} }
} }
@ -90,12 +93,13 @@ func (c *Conn) prepNextReader() error {
func (c *Conn) Write(b []byte) (n int, err error) { func (c *Conn) Write(b []byte) (n int, err error) {
c.writeLock.Lock() c.writeLock.Lock()
defer c.writeLock.Unlock()
if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil { if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil {
c.writeLock.Unlock()
return 0, err return 0, err
} }
c.writeLock.Unlock()
return len(b), nil return len(b), nil
} }
@ -148,9 +152,9 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
// deadline. // deadline.
c.writeLock.Lock() c.writeLock.Lock()
defer c.writeLock.Unlock() err := c.Conn.SetWriteDeadline(t)
c.writeLock.Unlock()
return c.Conn.SetWriteDeadline(t) return err
} }
type capableConn struct { type capableConn struct {


@ -93,12 +93,12 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
} }
func (l *listener) serve() { func (l *listener) serve() {
defer close(l.closed)
if !l.isWss { if !l.isWss {
l.server.Serve(l.nl) l.server.Serve(l.nl)
} else { } else {
l.server.ServeTLS(l.nl, "", "") l.server.ServeTLS(l.nl, "", "")
} }
close(l.closed)
} }
func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {


@ -5,6 +5,7 @@ import (
"context" "context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"math/bits"
"sync" "sync"
"time" "time"
@ -17,8 +18,12 @@ import (
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/libp2p/go-libp2p/p2p/discovery/routing"
"github.com/libp2p/go-libp2p/p2p/discovery/util" "github.com/libp2p/go-libp2p/p2p/discovery/util"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
"github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/libp2p/go-libp2p/p2p/net/connmgr"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap" "go.uber.org/zap"
blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub" blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb" "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
@ -111,6 +116,23 @@ func NewBlossomSub(
} }
} }
defaultBootstrapPeers := append([]string{}, p2pConfig.BootstrapPeers...)
if p2pConfig.Network == 0 {
defaultBootstrapPeers = config.BootstrapPeers
}
bootstrappers := []peer.AddrInfo{}
for _, peerAddr := range defaultBootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
}
bootstrappers = append(bootstrappers, *peerinfo)
}
var privKey crypto.PrivKey var privKey crypto.PrivKey
if p2pConfig.PeerPrivKey != "" { if p2pConfig.PeerPrivKey != "" {
peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey) peerPrivKey, err := hex.DecodeString(p2pConfig.PeerPrivKey)
@ -136,7 +158,17 @@ func NewBlossomSub(
if err != nil { if err != nil {
panic(err) panic(err)
} }
rm, err := resourceManager(
p2pConfig.HighWatermarkConnections,
bootstrappers,
)
if err != nil {
panic(err)
}
opts = append(opts, libp2p.ConnectionManager(cm)) opts = append(opts, libp2p.ConnectionManager(cm))
opts = append(opts, libp2p.ResourceManager(rm))
} }
bs := &BlossomSub{ bs := &BlossomSub{
@ -156,7 +188,14 @@ func NewBlossomSub(
logger.Info("established peer id", zap.String("peer_id", h.ID().String())) logger.Info("established peer id", zap.String("peer_id", h.ID().String()))
kademliaDHT := initDHT(ctx, p2pConfig, logger, h, isBootstrapPeer) kademliaDHT := initDHT(
ctx,
p2pConfig,
logger,
h,
isBootstrapPeer,
bootstrappers,
)
routingDiscovery := routing.NewRoutingDiscovery(kademliaDHT) routingDiscovery := routing.NewRoutingDiscovery(kademliaDHT)
util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network)) util.Advertise(ctx, routingDiscovery, getNetworkNamespace(p2pConfig.Network))
@ -223,6 +262,96 @@ func NewBlossomSub(
return bs return bs
} }
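The bootstrap peer list is now parsed once in NewBlossomSub (earlier hunk) and handed both to the resource manager below, for allowlisting, and to initDHT (last hunks of this file), instead of being rebuilt inside initDHT. A hedged, standalone illustration of that parsing step; the multiaddr below is a placeholder, not a real bootstrap entry, and would need a valid /p2p/<peer-id> suffix to parse:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	addr := "/dns4/bootstrap.example.com/tcp/8336/p2p/<peer-id>"
	info, err := peer.AddrInfoFromString(addr)
	if err != nil {
		// The placeholder above takes this branch; the code in this commit
		// panics instead, treating a malformed bootstrap entry as fatal.
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(info.ID, info.Addrs)
}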
// adjusted from Lotus' reference implementation, addressing
// https://github.com/libp2p/go-libp2p/issues/1640
func resourceManager(highWatermark uint, bootstrappers []peer.AddrInfo) (
network.ResourceManager,
error,
) {
defaultLimits := rcmgr.DefaultLimits
libp2p.SetDefaultServiceLimits(&defaultLimits)
defaultLimits.SystemBaseLimit.Memory = 1 << 28
defaultLimits.SystemLimitIncrease.Memory = 1 << 28
defaultLimitConfig := defaultLimits.AutoScale()
changes := rcmgr.PartialLimitConfig{}
if defaultLimitConfig.ToPartialLimitConfig().System.Memory > 2<<30 {
changes.System.Memory = 2 << 30
}
maxconns := uint(highWatermark)
if rcmgr.LimitVal(3*maxconns) > defaultLimitConfig.
ToPartialLimitConfig().System.ConnsInbound {
changes.System.ConnsInbound = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
changes.System.ConnsOutbound = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
changes.System.Conns = rcmgr.LimitVal(1 << bits.Len(6*maxconns))
changes.System.StreamsInbound = rcmgr.LimitVal(1 << bits.Len(36*maxconns))
changes.System.StreamsOutbound = rcmgr.LimitVal(1 << bits.Len(216*maxconns))
changes.System.Streams = rcmgr.LimitVal(1 << bits.Len(216*maxconns))
if rcmgr.LimitVal(3*maxconns) > defaultLimitConfig.
ToPartialLimitConfig().System.FD {
changes.System.FD = rcmgr.LimitVal(1 << bits.Len(3*maxconns))
}
changes.ServiceDefault.StreamsInbound = rcmgr.LimitVal(
1 << bits.Len(12*maxconns),
)
changes.ServiceDefault.StreamsOutbound = rcmgr.LimitVal(
1 << bits.Len(48*maxconns),
)
changes.ServiceDefault.Streams = rcmgr.LimitVal(1 << bits.Len(48*maxconns))
changes.ProtocolDefault.StreamsInbound = rcmgr.LimitVal(
1 << bits.Len(12*maxconns),
)
changes.ProtocolDefault.StreamsOutbound = rcmgr.LimitVal(
1 << bits.Len(48*maxconns),
)
changes.ProtocolDefault.Streams = rcmgr.LimitVal(1 << bits.Len(48*maxconns))
}
changedLimitConfig := changes.Build(defaultLimitConfig)
limiter := rcmgr.NewFixedLimiter(changedLimitConfig)
str, err := rcmgr.NewStatsTraceReporter()
if err != nil {
return nil, errors.Wrap(err, "resource manager")
}
rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)
// Metrics
opts := append(
[]rcmgr.Option{},
rcmgr.WithTraceReporter(str),
)
resolver := madns.DefaultResolver
var bootstrapperMaddrs []ma.Multiaddr
for _, pi := range bootstrappers {
for _, addr := range pi.Addrs {
resolved, err := resolver.Resolve(context.Background(), addr)
if err != nil {
continue
}
bootstrapperMaddrs = append(bootstrapperMaddrs, resolved...)
}
}
opts = append(opts, rcmgr.WithAllowlistedMultiaddrs(bootstrapperMaddrs))
mgr, err := rcmgr.NewResourceManager(limiter, opts...)
if err != nil {
return nil, errors.Wrap(err, "resource manager")
}
return mgr, nil
}
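The limit scaling above rounds each derived value with 1 << bits.Len(x), i.e. up to the smallest power of two strictly greater than x. A quick worked example with a hypothetical high watermark of 160 connections (not a value taken from this diff):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	hi := uint(160) // hypothetical HighWatermarkConnections value

	fmt.Println(1 << bits.Len(3*hi))   // ConnsInbound/Outbound:  480 -> 512
	fmt.Println(1 << bits.Len(6*hi))   // Conns:                  960 -> 1024
	fmt.Println(1 << bits.Len(36*hi))  // StreamsInbound:        5760 -> 8192
	fmt.Println(1 << bits.Len(216*hi)) // Streams/Outbound:     34560 -> 65536
}

These scaled limits only replace the autoscaled defaults when three times the watermark exceeds the default inbound connection limit, and the resolved bootstrapper multiaddrs are allowlisted so connections to those peers are accounted against the resource manager's separate allowlist scope rather than the system limits.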
func (b *BlossomSub) PublishToBitmask(bitmask []byte, data []byte) error { func (b *BlossomSub) PublishToBitmask(bitmask []byte, data []byte) error {
return b.ps.Publish(b.ctx, bitmask, data) return b.ps.Publish(b.ctx, bitmask, data)
} }
@ -303,26 +432,11 @@ func initDHT(
logger *zap.Logger, logger *zap.Logger,
h host.Host, h host.Host,
isBootstrapPeer bool, isBootstrapPeer bool,
bootstrappers []peer.AddrInfo,
) *dht.IpfsDHT { ) *dht.IpfsDHT {
logger.Info("establishing dht") logger.Info("establishing dht")
var kademliaDHT *dht.IpfsDHT var kademliaDHT *dht.IpfsDHT
var err error var err error
defaultBootstrapPeers := append([]string{}, p2pConfig.BootstrapPeers...)
if p2pConfig.Network == 0 {
defaultBootstrapPeers = config.BootstrapPeers
}
bootstrappers := []peer.AddrInfo{}
for _, peerAddr := range defaultBootstrapPeers {
peerinfo, err := peer.AddrInfoFromString(peerAddr)
if err != nil {
panic(err)
}
bootstrappers = append(bootstrappers, *peerinfo)
}
if isBootstrapPeer { if isBootstrapPeer {
kademliaDHT, err = dht.New( kademliaDHT, err = dht.New(